| prompt | completion | api |
|---|---|---|
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Assess the appropriate number of clusters within a k-means framework, using
mean runoff and the shape parameter as the cluster variables. Performs the
k-means cluster analysis, manually classifies the ungauged basins within
clusters, and then optimizes k_e and tau_c based on the cluster memberships.
Written by <NAME> for
"Low variability runoff inhibits coupling of climate, tectonics, and
topography in the Greater Caucasus"
If you use this code or derivatives, please cite the original paper.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import gamma
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
from sklearn.preprocessing import StandardScaler
from scipy.optimize import minimize_scalar
from astropy.utils import NumpyRNGContext
import stochastic_threshold as stim
def survive(Q):
Qstar=Q/np.mean(Q)
Qstar_sort=np.sort(Qstar)
Qn=len(Qstar)
Qrank=np.arange(1,Qn+1,1)
Q_freq_excd=(Qn+1-Qrank)/Qn
return Qstar_sort,Q_freq_excd
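# Illustrative example: for a record of Qn daily discharges, survive() pairs runoff normalized
# by its mean (sorted ascending) with the empirical exceedance frequency (Qn + 1 - rank)/Qn,
# e.g. with Qn = 4 the sorted values get exceedance frequencies [1.0, 0.75, 0.5, 0.25].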
def bin_Q(Qs,Qf):
bins=np.linspace(10**-4,1,1500)
ix=np.digitize(Qf,bins)
d=np.concatenate((ix.reshape((len(ix),1)),Qs.reshape((len(ix),1)),Qf.reshape((len(ix),1))),axis=1)
df=pd.DataFrame(d,columns=['ix','Qs','Qf'])
m=df.groupby('ix').mean()
q1=df.groupby('ix').quantile(0.25)
q3=df.groupby('ix').quantile(0.75)
Qsm=m['Qs'].to_numpy()
Qfm=m['Qf'].to_numpy()
QsQ1=q1['Qs'].to_numpy()
QsQ3=q3['Qs'].to_numpy()
QfQ1=q1['Qf'].to_numpy()
QfQ3=q3['Qf'].to_numpy()
Qsm=np.flip(Qsm)
Qfm=np.flip(Qfm)
QsQ1=np.flip(QsQ1)
QsQ3=np.flip(QsQ3)
QfQ1=np.flip(QfQ1)
QfQ3=np.flip(QfQ3)
return Qsm,Qfm,QsQ1,QsQ3,QfQ1,QfQ3
def min_k_e_optim(ks,ksu,e,eu,R,c,s,ns,seed):
# Define random samples of ks and E
with NumpyRNGContext(seed):
ks_dist=np.random.normal(ks,ksu,ns)
e_dist=np.random.normal(e,eu,ns)
# Generate container
k_e_ind=np.zeros((ns))
# Define internal minimization function
def min_k_e_internal(X,ksi,ei,R,c,s):
cl=stim.set_constants(R,X,dist_type='weibull')
[ep,_]=stim.stim_one(ksi,c,cl,sc=s)
return np.abs(ep-ei)**2
# Begin minimization loop
for i in range(ns):
args=(ks_dist[i],e_dist[i],R,c,s)
res=minimize_scalar(min_k_e_internal,args=args,bounds=[1e-20,1e-6],
method='bounded',
options={'maxiter':500000,'xatol':1e-20})
k_e_ind[i]=res.x
return np.mean(k_e_ind),np.median(k_e_ind),np.std(k_e_ind),np.percentile(k_e_ind,25),np.percentile(k_e_ind,75)
def min_tau_c_optim(k_e,ks,ksu,e,eu,R,c,s,ns,seed):
# Define random samples of ks and E
with NumpyRNGContext(seed):
ks_dist=np.random.normal(ks,ksu,ns)
e_dist=np.random.normal(e,eu,ns)
# Generate container
tau_c_ind=np.zeros((ns))
# Define internal minimization function
def min_tau_c_internal(X,k_e,ksi,ei,R,c,s):
cl=stim.set_constants(R,k_e,dist_type='weibull',tau_c=X)
[ep,_]=stim.stim_one(ksi,c,cl,sc=s)
return np.abs(ep-ei)**2
# Begin minimization loop
for i in range(ns):
args=(k_e,ks_dist[i],e_dist[i],R,c,s)
res=minimize_scalar(min_tau_c_internal,args=args,bounds=[10,100],
method='bounded',
options={'maxiter':500000,'xatol':1e-20})
tau_c_ind[i]=res.x
return np.mean(tau_c_ind),np.median(tau_c_ind),np.std(tau_c_ind),np.percentile(tau_c_ind,25),np.percentile(tau_c_ind,75)
def weibull_tail_fit(x,y,thresh):
n=len(x)
ix=np.nonzero(y<thresh)[0][:1][0]
xtrim=x[ix:n]
ytrim=y[ix:n]
N=len(xtrim)
xts=np.log(xtrim)
yts=np.log(-np.log(ytrim))
[lin,r,rnk,sng,V]=np.polyfit(xts,yts,1,full=True)
c=lin[0]
s=np.exp(-1*lin[1]/c)
mn=s*gamma(1+(1/c))
# Convert sum of squared residuals to mean of sum of squared residuals
res=r/N
return c,s,mn,N,res
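# Note on the fit above: for a Weibull distribution the exceedance probability is
# P(Q* > q) = exp(-(q/s)**c), so ln(-ln(P)) = c*ln(q) - c*ln(s) is linear in ln(q).
# The fitted slope is therefore the shape parameter c, the intercept gives
# s = exp(-intercept/c), and the implied mean runoff is s*gamma(1 + 1/c).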
def weibull_mt(Qs,Qf,mnR,mean_weight,tail_weight):
thresh_array=np.linspace(0.01,0.6,50)
nt=len(thresh_array)
ct=np.zeros((nt))
st=np.zeros((nt))
mnt=np.zeros((nt))
Nt=np.zeros((nt))
res=np.zeros((nt))
for i in range(nt):
try:
[ct[i],st[i],mnt[i],Nt[i],res[i]]=weibull_tail_fit(Qs,Qf,thresh_array[i])
except:
# This except block catches thresholds above which zeros are included
# in the tail fit, which are undefined in ln-ln space
ct[i]=np.nan
st[i]=np.nan
mnt[i]=np.nan
Nt[i]=np.nan
res[i]=np.nan
# Find local minimum
impR=mnR*mnt
difR=np.abs(impR-mnR)
runoff_min=(difR/np.nanmax(difR))*mean_weight
tail_min=(res/np.nanmax(res))*tail_weight
lm=tail_min+runoff_min
ix=np.nanargmin(lm)
# Store the minimum values
ct_best=ct[ix]
st_best=st[ix]
mnt_best=mnt[ix]
thresh_best=thresh_array[ix]
return ct_best,st_best,mnt_best,thresh_best
# Load in data from GRDC basins
df=pd.read_csv('result_tables/GRDC_Distribution_Fits.csv')
mR=df['mean_R_obs'].to_numpy()
gdf=pd.read_csv('data_tables/grdc_summary_values.csv')
cb=df['c_best'].to_numpy()
sb=df['s_best'].to_numpy()
cw=df['c_whole'].to_numpy()
sw=df['s_whole'].to_numpy()
mSN=gdf['ssnstd'].to_numpy()
maxZ=gdf['maxz'].to_numpy()/1000
minZ=gdf['minz'].to_numpy()/1000
mnZ=gdf['mnz'].to_numpy()/1000
rZ=maxZ-minZ
ID=gdf['ID'].to_numpy()
# Load in Data from Erosion Rate basins
edf=pd.read_csv('data_tables/gc_ero_master_table.csv')
ecenters=pd.read_csv('data_tables/grdc_outlines/Ebsns.csv')
erun=pd.read_csv('result_tables/estimate_runoff_power.csv')
ksn=edf['mean_ksn'].to_numpy()
e=edf['St_E_rate_m_Myr'].to_numpy()
eu=edf['St_Ext_Unc'].to_numpy()
ksnu=edf['se_ksn'].to_numpy()
emaxZ=edf['max_el'].to_numpy()/1000
eminZ=edf['outlet_elevation'].to_numpy()/1000
erZ=emaxZ-eminZ
emSN=edf['mean_SNOWstd'].to_numpy()
emR=erun['mean_runoff'].to_numpy()
ex=ecenters['lon'].to_numpy()
ey=ecenters['lat'].to_numpy()
# Determine Len
N=len(mR)
### Elbow Plot To Determine Optimal Clusters ####
Xb=np.concatenate((cb.reshape(len(cb),1),mR.reshape(len(mR),1)),axis=1)
# Scale data
scalerb=StandardScaler().fit(Xb)
XSb=scalerb.transform(Xb)
# Set random seed for reproducibility
seed=5
num_iterations=500
inertiasb=[]
distortionsb=[]
K_rng=range(1,15)
for i in K_rng:
kmb=KMeans(n_clusters=i,max_iter=5000,random_state=seed).fit(XSb)
inertiasb.append(kmb.inertia_)
distortionsb.append(sum(np.min(cdist(XSb,kmb.cluster_centers_,'euclidean'),axis=1))
/ XSb.shape[0])
plt.figure(num=1,figsize=(7,4))
ax1=plt.subplot(2,1,1)
ax1.plot(K_rng,inertiasb,'bx-')
plt.xlabel('Number of Clusters')
plt.ylabel('Inertia')
ax2=plt.subplot(2,1,2)
ax2.plot(K_rng,distortionsb,'bx-')
plt.xlabel('Number of Clusters')
plt.ylabel('Distortion')
### Optimal Cluster Number Based on Elbow ###
num_clustb=4
kmb=KMeans(n_clusters=num_clustb,max_iter=5000,random_state=seed).fit(XSb)
### Start Plotting
color_list=['maroon','dodgerblue','darkorange','darkolivegreen','crimson','blue']
### Manually Classify
eidx=np.ones(emR.shape)*4
eidx[np.logical_and(emR>3.5,emaxZ<2.75)]=0
eidx[np.logical_and(emR>3.5,emaxZ>=2.75)]=3
eidx[np.logical_and(emR<3.5,emaxZ<3.1)]=1
eidx[np.logical_and(emR<3.5,emaxZ>=3.1)]=2
## Determine population values
# Empty Arrays
clust_num=np.zeros((num_clustb))
cb_pop=np.zeros((num_clustb))
sb_pop=np.zeros((num_clustb))
mR_pop=np.zeros((num_clustb))
cmb=np.zeros((num_clustb))
smb=np.zeros((num_clustb))
for i in range(num_clustb):
idx=kmb.labels_==i
clust_num[i]=i
cb_pop[i]=np.mean(cb[idx])
sb_pop[i]=np.mean(sb[idx])
mR_pop[i]=np.mean(mR[idx])
ListQs=[]
ListQf=[]
for j in range(len(mR[idx])):
df=pd.read_csv('data_tables/grdc_discharge_time_series/GRDC_'+str(ID[idx][j])+'.csv')
Q=df['Q'].to_numpy()
[Qs,Qf]=survive(Q)
ListQs.append(Qs)
ListQf.append(Qf)
Qsaccum=np.concatenate(ListQs,axis=0)
Qfaccum=np.concatenate(ListQf,axis=0)
[Qsm,Qfm,QsQ1,QsQ3,QfQ1,QfQ3]=bin_Q(Qsaccum,Qfaccum)
[cmb[i],smb[i],_,_]=weibull_mt(Qsm,Qfm,mR_pop[i],1.5,1)
### Optimize k_e and tau_c
k_e_optim=np.zeros((len(e),5))
tau_c_optim=np.zeros((len(e),5))
for i in range(len(e)):
[k_e_optim[i,0],k_e_optim[i,1],
k_e_optim[i,2],k_e_optim[i,3],
k_e_optim[i,4]]=min_k_e_optim(ksn[i],ksnu[i],e[i],eu[i],emR[i],
cmb[eidx[i].astype(int)],smb[eidx[i].astype(int)],
num_iterations,5)
k_e_o=np.zeros((num_clustb))
for i in range(num_clustb):
k_e_o[i]=np.median(k_e_optim[eidx==i,1]) # Use median of individual estimates
k_e_fix=np.median(k_e_optim)
for i in range(len(e)):
[tau_c_optim[i,0],tau_c_optim[i,1],
tau_c_optim[i,2],tau_c_optim[i,3],
tau_c_optim[i,4]]=min_tau_c_optim(k_e_fix,ksn[i],ksnu[i],e[i],eu[i],emR[i],
cmb[eidx[i].astype(int)],smb[eidx[i].astype(int)],
num_iterations,5)
tau_c_o=np.zeros((num_clustb))
for i in range(num_clustb):
tau_c_o[i]=np.median(tau_c_optim[eidx==i,1]) # Use median of individual estimates
tau_c_fix=np.median(tau_c_optim)
### Output
cluster_labels=kmb.labels_
data=np.concatenate((ID.reshape((len(ID),1)),cluster_labels.reshape(len(ID),1)),axis=1)
clustdf=pd.DataFrame(data,columns=['grdc_id','cluster'])
"""
Procedures needed for ATE estimation.
Created on Thu Dec 8 15:48:57 2020.
@author: MLechner
# -*- coding: utf-8 -*-
"""
import copy
import pandas as pd
import numpy as np
from mcf import general_purpose as gp
from mcf import general_purpose_estimation as gp_est
def local_centering_new_sample(lc_csvfile, nonlc_csvfile, v_dict,
var_x_type_dict, c_dict):
"""
Generate centered variables and add to file.
Parameters
----------
lc_csvfile : String. csv-file to estimate RF.
nonlc_csvfile : String. csv-file to be used for centering.
v_dict : Dict. Variable names.
var_x_type_dict : Dictionary with variables and type.
c_dict : Dict. Controls.
Returns
-------
new_csv_file : String. csv-file to which centered variables are added.
old_y_name : List of strings. Names of variables to be centered.
new_y_name : List of strings. Names of centered variables.
"""
if c_dict['with_output'] and c_dict['verbose']:
print('\nLocal centering with Random Forests estimated in',
'independent sample')
# 1) Create dummies of ordinal variables
# for each element in y_name do
# 2) estimate RF on l_cent_sample
# 3) predict y for nonlc_sample
# 4) subtract predicted from observed for nonlc_sample
# 5) Create new variable, update centered_y_name
# 6) add new data to nonlc_sample and write to file under same name
max_workers = copy.copy(c_dict['no_parallel'])
lc_data_df = pd.read_csv(lc_csvfile)
nonlc_data_df = pd.read_csv(nonlc_csvfile)
x_names = var_x_type_dict.keys()
lc_x_df = lc_data_df[x_names]
lc_y_df = lc_data_df[v_dict['y_name']]
nonlc_x_df = nonlc_data_df[x_names]
nonlc_y_df = nonlc_data_df[v_dict['y_name']]
if c_dict['with_output']:
print()
print('Independent sample used for local centering.')
print('Number of observations used only for computing E(y|x): ',
len(lc_y_df.index))
names_unordered = []
for x_name in x_names:
if var_x_type_dict[x_name] > 0:
names_unordered.append(x_name)
if names_unordered: # List is not empty
lc_x_dummies = pd.get_dummies(lc_x_df, columns=names_unordered)
nonlc_x_dummies = pd.get_dummies(nonlc_x_df, columns=names_unordered)
x_names_in_both = np.intersect1d(lc_x_dummies.columns,
nonlc_x_dummies.columns)
lc_x_dummies = lc_x_dummies[x_names_in_both]
nonlc_x_dummies = nonlc_x_dummies[x_names_in_both]
lc_x_df = pd.concat([lc_x_df, lc_x_dummies], axis=1)
nonlc_x_df = pd.concat([nonlc_x_df, nonlc_x_dummies], axis=1)
x_train = lc_x_df.to_numpy()
x_pred = nonlc_x_df.to_numpy()
y_m_yx = np.empty(np.shape(nonlc_y_df))
centered_y_name = []
for indx, y_name in enumerate(v_dict['y_name']):
y_train = lc_y_df[y_name].to_numpy()
y_nonlc = nonlc_y_df[y_name].to_numpy()
y_pred, _, _, _, _, _, _ = gp_est.RandomForest_scikit(
x_train, y_train, x_pred, y_name=y_name, boot=c_dict['boot'],
n_min=c_dict['grid_n_min'], no_features=c_dict['m_grid'],
workers=max_workers, pred_p_flag=True,
pred_t_flag=False, pred_oob_flag=False, with_output=True)
y_m_yx[:, indx] = y_nonlc - y_pred # centered outcomes
centered_y_name.append(y_name + 'LC')
y_m_yx_df = pd.DataFrame(data=y_m_yx, columns=centered_y_name)
nonlc_data_df = pd.concat([nonlc_data_df, y_m_yx_df], axis=1)
gp.delete_file_if_exists(nonlc_csvfile)
nonlc_data_df.to_csv(nonlc_csvfile, index=False)
if c_dict['with_output']:
all_y_name = v_dict['y_name'][:]
for name in centered_y_name:
all_y_name.append(name)
gp.print_descriptive_stats_file(
nonlc_csvfile, all_y_name, c_dict['print_to_file'])
return nonlc_csvfile, v_dict['y_name'], centered_y_name
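# Minimal, self-contained sketch of the local-centering idea implemented above (illustrative
# only: it uses scikit-learn's RandomForestRegressor on synthetic data rather than the
# package-internal RandomForest_scikit, and all names below are placeholders):
def _local_centering_sketch():
    from sklearn.ensemble import RandomForestRegressor
    rng = np.random.default_rng(0)
    x_lc = rng.normal(size=(500, 3))        # independent sample used only to estimate E(y|x)
    y_lc = x_lc[:, 0] + rng.normal(size=500)
    x_main = rng.normal(size=(200, 3))      # sample whose outcomes are centered
    y_main = x_main[:, 0] + rng.normal(size=200)
    forest = RandomForestRegressor(n_estimators=200, random_state=0).fit(x_lc, y_lc)
    return y_main - forest.predict(x_main)  # centered outcome: y - E(y|x)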
def local_centering_cv(datafiles, v_dict, var_x_type_dict, c_dict):
"""
Compute local centering for cross-validation.
Parameters
----------
datafiles : Tuple of Strings. Names of datafiles.
v_dict : Dict. Variable names.
var_x_type_dict : Dictionary with variables and type.
c_dict : Dict. Controls.
Returns
-------
old_y_name : List of strings. Names of variables to be centered.
new_y_name : List of strings. Names of centered variables.
"""
max_workers = copy.copy(c_dict['no_parallel'])
if c_dict['with_output']:
print()
print('Cross-validation used for local centering.',
' {:2} folds used.'. format(c_dict['l_centering_cv_k']))
seed = 9324561
rng = np.random.default_rng(seed)
add_yx_names = True
centered_y_name = []
names_unordered = []
for file_name in datafiles:
data_df = pd.read_csv(file_name)
from .utils import str_sum
import sqlite3
import pandas as pd
from collections import OrderedDict
from tqdm import tqdm
import os
import gc
import numpy as np
# making biom, otus, tsvs into npz file. sparse matrix storage needed.
# keep all important information from raw data (paths, leaves of ontology and phylogeny tree).
class Transformer(object):
def __init__(self, phylogeny, db_file):
self.db_tool = NCBITaxa(db_file=db_file)
self.phylogeny = phylogeny
def _extract_layers(self, count_matrix, included_ranks=None, verbose=10):
"""
Step 1: Dealing with entries not in db
Step 2: Track lineage for each taxonomy and join with count matrix
Step 3: Group count matrix by each rank and count the abundance, then left join with phylogeny dataframe on rank
Step 4: Fit in a Numpy 3-d array, then swap axes [rank, taxonomy, sampleid] into [sampleid, taxonomy, rank]
:param count_matrix:
:return:
"""
if included_ranks == None:
included_ranks = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus']
# Filter, taxonomies with sk in db are kept.
taxas = pd.Series(count_matrix.index.to_list(), index=count_matrix.index)
#with open(self.get_conf_savepath('taxas.txt'), 'w') as f:
#f.write('\n'.join(taxas))
sks = taxas.apply(lambda x: x.split(';')[0].split('__')[1])
sk_indb = self.db_tool.entries_in_db(sks)
if verbose > 0:
print('There will be {}/{} entries dropped because they are not '
'in the NCBI taxonomy database'.format((~sk_indb).sum(), sk_indb.shape[0]))
print(sks[~sk_indb])
# Align index of the boolean mask so it can be used for boolean indexing
sk_indb.index = count_matrix.index
#print(count_matrix.describe(percentiles=[]))
cm_keep = count_matrix[sk_indb].astype(np.float32)
#cm_keep = (cm_keep / cm_keep.sum()).astype(np.float32)
del count_matrix, taxas, sks, sk_indb
gc.collect()
# Extract layers for entries data
if verbose > 0:
print('Extracting lineages for taxonomic entries, this may take a few minutes')
multi_entries = pd.Series(cm_keep.index.to_list(), index=cm_keep.index.to_list())
lineages = self._track_lineages(multi_entries=multi_entries)
# Post-process lineages
add_prefix = lambda x: ('sk' if x.name == 'superkingdom' else x.name[0:1]) + '__' + x
str_cumsum = lambda x: pd.Series([';'.join(x[0:i]) for i in range(1, x.shape[0] + 1)], index=x.index)
lineages = lineages.apply(add_prefix, axis=0).apply(str_cumsum, axis=1)
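# Illustrative example of the two lambdas above: a lineage row
# ('Bacteria', 'Firmicutes', 'Bacilli', ...) is first prefixed per rank to
# ('sk__Bacteria', 'p__Firmicutes', 'c__Bacilli', ...) and then cumulatively joined into
# ('sk__Bacteria', 'sk__Bacteria;p__Firmicutes', 'sk__Bacteria;p__Firmicutes;c__Bacilli', ...),
# so every rank carries its full prefix path.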
del multi_entries
gc.collect()
# Fill samples in phylogeny dataframe
if verbose > 0:
print('Filling samples in phylogeny matrix')
sampleids = cm_keep.columns.tolist()
fill_in_phylogeny = lambda x: pd.merge(left=self.phylogeny[[x]].copy(),
right=cm_with_lngs.groupby(by=x, as_index=False).sum(), on=[x], how='left',
suffixes=('_x','_y')).set_index(x)[sampleids].fillna(0)
# Setting genus as index
cm_with_lngs = cm_keep.join(lineages)
cm_with_lngs = cm_with_lngs.groupby(by=included_ranks, sort=False, as_index=False).sum()
del cm_keep
gc.collect()
'''if self.phylogeny is not None:
if verbose > 0:
print('Generating matrix for each rank')
# join by index
matrix_by_rank = OrderedDict( zip(included_ranks, map(fill_in_phylogeny, tqdm(self.phylogeny.columns)) ))
# key -> ranks, index -> taxonomies, column name -> sample ids
print(matrix_by_rank['genus'].describe(percentiles=[]))
return matrix_by_rank
else:
if verbose > 0:
print('No default phylogeny tree provided, '
'use all lineages data involved automatically.')'''
#self._updata_phylo(lineages)
matrix_genus = fill_in_phylogeny('genus')
print(matrix_genus.describe(percentiles=[]))
#print(matrix_genus.sum())
return matrix_genus
def _track_lineages(self, multi_entries):
"""
Already filtered: at least one entry of each taxonomy string (in entries) is in the db.
Overview: entries name -> entries id -> entries lineage ids -> entries lineage names
step 1: get_ids_from_names
step 2: get_lineages_ids (get_ranks)
step 3: get_names_from_ids
:param multi_entries:
:return:
"""
entries = self._fathest_entry_in_db(multi_entries) # series
#print(entries[21])
taxids = self.db_tool.get_ids_from_names(entries.tolist())
lineages_ids = self.db_tool.get_lineage(taxids) # dataframe, fillna?
lineages_ids.index = entries.index
#names = lineages_ids.apply(self.db_tool.get_names_from_ids, axis=1) # prefix ??????????????
id2name = lambda id: self.db_tool.get_names_from_ids([id])[0] if id != 0 else ''
names = lineages_ids.fillna(0).applymap(int).applymap(id2name)
#names.to_csv(self.get_savepath('lineage_names.tsv', type='tmp'), sep='\t')
# lineages_ids has many many nan values, this need to be fixed.
# considering using element-wise applymap
return names
def _fathest_entry_in_db(self, multi_entries):
"""
already filtered, all entries are contained in db.
:param multi_entries:
:return:
"""
entries_se = multi_entries.str.split(';').apply(lambda x: {i.split('__')[0]: i.split('__')[1] for i in x})
# get tidy data
entries_df = pd.DataFrame(entries_se.tolist(), index=entries_se.index).fillna('')
#entries_df = entries_df.applymap(lambda x: x.replace('_', ' '))
isin_db = entries_df.apply(self.db_tool.entries_in_db, axis=1)
# isin_db.to_csv('isin_db.csv')
isfarthest_indb = pd.DataFrame(isin_db.apply(lambda x: x.index == x[x].index[-1], axis=1).values.tolist(),
index=entries_df.index,
columns=entries_df.columns)
farthest_entries = pd.Series(entries_df[isfarthest_indb].fillna('').\
apply(str_sum, axis=1).values.tolist(), index=entries_df.index)
return farthest_entries
def _updata_phylo(self, lineage_names):
lineage_names = lineage_names[['superkingdom','phylum','class','order','family','genus']]
print('Updating phylo: Just keeping Superkingdom to Genus for phylogeny: {}.'.format(lineage_names.shape))
lineage_names = lineage_names.drop_duplicates(subset=['genus'], ignore_index=True)
print('Updating phylo: After dropping duplicates: {}.'.format(lineage_names.shape))
self.phylogeny = lineage_names
class NCBITaxa(object):
def __init__(self, db_file=None, in_memory=True):
if in_memory:
print('Initializing in-memory taxonomy database for ultra-fast querying.')
else:
print('Initializing on-disk taxonomy database, consider using in-memory mode to speed up.')
if db_file == None:
self.db_file = '/root/.etetoolkit/taxa.sqlite'
else:
self.db_file = db_file
self.db = self._get_db(in_memory=in_memory)
def _get_db(self, in_memory=True):
print('db file:', self.db_file)
source = sqlite3.connect(self.db_file)
if in_memory:
dest = sqlite3.connect(':memory:')
source.backup(dest)
return dest
else:
return source
def entries_in_db(self, entries):
joined_names = ','.join(map(lambda x: '"{}"'.format(x), entries))
command1 = 'select spname, taxid FROM species WHERE spname IN ({})'.format(joined_names)
name1 = {name for name, _ in self.db.execute(command1).fetchall()}
missing = set(entries) - name1
if missing:
joined_missing = ','.join(map(lambda x: '"{}"'.format(x), missing))
command2 = 'select spname, taxid from synonym where spname IN ({})'.format(joined_missing)
name2 = {name for name, _ in self.db.execute(command2).fetchall()}
else:
name2 = {}
name = name1.union(name2)
if name == set([]):
in_db = pd.Series(False, index=entries.index)
'''This script contains functions for evaluating models and calculating and visualizing metrics'''
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold, cross_validate, cross_val_score, RandomizedSearchCV
from sklearn.metrics import precision_score, recall_score, accuracy_score, roc_auc_score, roc_curve, precision_recall_curve, f1_score, fbeta_score, confusion_matrix, classification_report, make_scorer, auc, log_loss
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier, plot_importance
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN
from imblearn.under_sampling import RandomUnderSampler
from collections import Counter, OrderedDict
from scipy.stats import randint
import time
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
def eval(model_name, model, X, y):
'''This is a function to compare preliminary models.
Takes in model and its name from a dictionary containing instantiated models and their names as
values and keys, respectively, and entire dataframe, partitions data, oversamples minority class
in training data set, and evaluates metrics'''
# Partition data
X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size = 0.2, random_state=33, stratify=y)
X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size = 0.2, random_state=14, stratify=y_tv)
# Oversample minority class in training data
oversample = RandomOverSampler(random_state=0, sampling_strategy='minority')
X_train_os, y_train_os = oversample.fit_resample(X_train, y_train)
# Train model
model.fit(X_train_os, y_train_os)
# Make predictions
y_pred = model.predict(X_val)
preds = model.predict_proba(X_val)
# Print scores
print(model_name, ':')
print('Accuracy score: ', accuracy_score(y_val, y_pred))
print('Precision score: ', precision_score(y_val, y_pred))
print('Recall score: ', recall_score(y_val, y_pred))
print('F1 score: ', f1_score(y_val, y_pred))
print('F-beta score: ', fbeta_score(y_val, y_pred, beta=2))
print('ROC-AUC score: ', roc_auc_score(y_val, preds[:,1]), '\n')
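# Example usage (illustrative only; the model dictionary and X, y are placeholders):
# models = {'Logistic Regression': LogisticRegression(max_iter=1000),
#           'Random Forest': RandomForestClassifier(),
#           'XGBoost': XGBClassifier()}
# for model_name, clf in models.items():
#     eval(model_name, clf, X, y)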
def model_scores(model, X, y):
'''
Takes in an instantiated model and training data, partitions the training data
into training and validation sets, trains the model on training data, and returns
evaluation metrics
'''
# Partition data for cross-validation
X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size=0.2, random_state=5, stratify=y)
X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size=0.2, random_state=17, stratify=y_tv)
# Train model
model.fit(X_train, y_train)
# Make prediction
y_pred = model.predict(X_val)
preds = model.predict_proba(X_val)
# Print scores
print('Accuracy score: ', accuracy_score(y_val, y_pred))
print('Precision score: ', precision_score(y_val, y_pred))
print('Recall score: ', recall_score(y_val, y_pred))
print('F1 score: ', f1_score(y_val, y_pred))
print('Fbeta score (beta=2): ', fbeta_score(y_val, y_pred, beta=2))
print('ROC AUC score: ', roc_auc_score(y_val, preds[:,1]), '\n')
def model_scores_os(model, X, y):
'''
Takes in an instantiated model and training data, partitions the training data
into training and validation sets, oversamples the training data, trains the model
on the oversampled training data, and returns evaluation metrics
'''
# Partition data for cross-validation
X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size=0.2, random_state=5, stratify=y)
X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size=0.2, random_state=17, stratify=y_tv)
# Oversample since classes are imbalanced
oversampler = RandomOverSampler(sampling_strategy='minority', random_state=0)
X_train, y_train = oversampler.fit_resample(X_train, y_train)
# Train model
model.fit(X_train, y_train)
# Make prediction
y_pred = model.predict(X_val)
preds = model.predict_proba(X_val)
# Print scores
print('Accuracy score: ', accuracy_score(y_val, y_pred))
print('Precision score: ', precision_score(y_val, y_pred))
print('Recall score: ', recall_score(y_val, y_pred))
print('F1 score: ', f1_score(y_val, y_pred))
print('Fbeta score (beta=2): ', fbeta_score(y_val, y_pred, beta=2))
print('ROC AUC score: ', roc_auc_score(y_val, preds[:,1]), '\n')
# Plot confusion matrix
def plot_cm(y_test, y_pred):
'''
Takes in target variable test set and set of predictions from a model
and returns confusion matrix
'''
# Set up confusion matrix
confusion = confusion_matrix(y_test, y_pred)
# Plot confusion matrix
plt.figure(dpi=100)
sns.heatmap(confusion, cmap=plt.cm.Blues, annot=True, square=True,
xticklabels=['No Death', 'Death'],
yticklabels=['No Death', 'Death'])
plt.xlabel('Predicted death')
plt.ylabel('Actual death')
plt.title('Confusion Matrix')
plt.show()
# Plot precision-recall curve
def plot_pr_curve(y_test, preds):
'''
Takes in target variable test set and set of predictions from a model
and plots precision-recall curve
'''
# Set up precsion-recall curve
precision, recall, thresholds = precision_recall_curve(y_test, preds[:,1])
# Plot P-R curve
plt.figure(dpi=80, figsize=(5,5))
plt.plot(thresholds, precision[1:], label='precision')
plt.plot(thresholds, recall[1:], label='recall')
plt.legend(loc='lower left')
plt.xlabel('Threshold')
plt.title('Precision and Recall Curves')
plt.show()
# Plot ROC curve and return AUC score
def roc_auc_curve(y_test, preds):
'''
Takes in target variable test set and set of predictions from a model,
plots ROC curve, and prints ROC AUC score
'''
# Set up ROC curve
fpr, tpr, thresholds = roc_curve(y_test, preds[:,1])
# Plot ROC curve
plt.figure(figsize=(5,5))
plt.plot(fpr, tpr,lw=2)
plt.plot([0,1],[0,1],c='violet',ls='--')
plt.xlim([-0.05,1.05])
plt.ylim([-0.05,1.05])
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.show()
# Print ROC AUC score
print("ROC AUC score = ", roc_auc_score(y_test, preds[:,1]))
# Cross-validation with stratified KFold (only for models without oversampling)
def cv(model, X_tv, y_tv):
'''
Takes in instantiated model and non-test data set, performs cross validation using
5-fold stratified splits, and returns dataframe of train and test evaluation metrics
'''
# Define scoring metrics
scoring = {'accuracy': 'accuracy', 'precision': 'precision', 'recall': 'recall', 'f1': 'f1',
'fbeta': make_scorer(fbeta_score, beta=2), 'auc': 'roc_auc'}
# Cross-validation using stratified KFolds
kf = StratifiedKFold(n_splits=5, shuffle=False)
# Store results of cross-validation function dictionary
cv_dict = cross_validate(model, X_tv, y_tv, scoring=scoring,
cv=kf, n_jobs=-1, return_train_score=True)
# Prepare dictionary of metrics for converting into dataframe
cv_dict_2 = {
'test_accuracy': np.mean(cv_dict['test_accuracy']),
'train_accuracy': np.mean(cv_dict['train_accuracy']),
'test_precision': np.mean(cv_dict['test_precision']),
'train_precision': np.mean(cv_dict['train_precision']),
'test_recall': np.mean(cv_dict['test_recall']),
'train_recall': np.mean(cv_dict['train_recall']),
'test_f1': np.mean(cv_dict['test_f1']),
'train_f1': np.mean(cv_dict['train_f1']),
'test_fbeta': np.mean(cv_dict['test_fbeta']),
'train_fbeta': np.mean(cv_dict['train_fbeta']),
'test_auc': np.mean(cv_dict['test_auc']),
'train_auc': np.mean(cv_dict['train_auc'])
}
# Convert to dataframe
cv_df = pd.DataFrame.from_dict(cv_dict_2, orient='index', columns=['mean_score'])
return cv_df
# Adjust threshold
def threshold(model, X_test, y_test, t):
'''
Takes in model, val/test features and labels, and a designated threshold value and returns
dataframe of evaluation metrics based on that threshold
'''
preds = np.where(model.predict_proba(X_test)[:,1] > t, 1, 0)
new_df = pd.DataFrame(data=[accuracy_score(y_test, preds), recall_score(y_test, preds),
precision_score(y_test, preds), f1_score(y_test, preds), roc_auc_score(y_test, preds)],
index=["accuracy", "recall", "precision", "f1", "roc_auc"])
return new_df
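# Example (illustrative; model and data names are placeholders): metrics when a positive
# class is only predicted above a 0.3 probability:
# threshold(xgb_model, X_val, y_val, 0.3)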
# Look at coefficients and intercept of model
def model_coef(model, X, y):
# Partition data for cross-validation
X_tv, X_test, y_tv, y_test = train_test_split(X, y, test_size=0.2, random_state=5, stratify=y)
X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size=0.2, random_state=17, stratify=y_tv)
# Oversample since classes are imbalanced
oversampler = RandomOverSampler(sampling_strategy='minority', random_state=0)
X_train, y_train = oversampler.fit_resample(X_train, y_train)
# Train model
model.fit(X_train, y_train)
# Get coefficients and intercept and format into dataframe
coef = pd.DataFrame(model.coef_, columns=X_train.columns)
coef = coef.append(pd.Series(model.intercept_), ignore_index=True)
return coef.T
def coef_int(model, feat):
'''
Takes in model and list containing names of features/columns and returns dataframe of
coefficients and intercept for model
'''
coef = pd.DataFrame(model.coef_, columns=feat)
coef = coef.append(pd.Series(model.intercept_), ignore_index=True)
return coef
# Compare sampling methods for a model
def compare_sampling(name, model, X_tv, y_tv):
'''
Takes in model and its name (value and key in dictionary of models to compare, respectively)
and non-test data, splits data into training and validation sets, and trains model on:
1) non-resampled training data while adjusting built-in class weight metric of model,
2) training data minority class oversampled using RandomOverSampler,
3) training data minority class oversampled using SMOTE,
4) training data minority class oversampled using ADASYN,
5) training data majority class undersampled using RandomUnderSampler,
and compares evaluation metrics of these iterations on both training and validation data sets
'''
# Partition data
X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size = 0.2, random_state=14)
# Model with balancing class weight via model parameter
if name == 'Random Forest':
model_balanced = RandomForestClassifier(class_weight='balanced')
elif name == 'XGBoost':
model_balanced = XGBClassifier(scale_pos_weight=14)
# Train model
model_balanced.fit(X_train, y_train)
# Make predictions
y_pred_train_balanced = model_balanced.predict(X_train)
y_pred_val_balanced = model_balanced.predict(X_val)
# Store evaluation metrics for train and test sets for balanced model in dictionaries
balanced_train = {
'precision': precision_score(y_train, y_pred_train_balanced),
'recall': recall_score(y_train, y_pred_train_balanced),
'f1': f1_score(y_train, y_pred_train_balanced),
'fbeta': fbeta_score(y_train, y_pred_train_balanced, beta=2)
}
balanced_val = {
'precision': precision_score(y_val, y_pred_val_balanced),
'recall': recall_score(y_val, y_pred_val_balanced),
'f1': f1_score(y_val, y_pred_val_balanced),
'fbeta': fbeta_score(y_val, y_pred_val_balanced, beta=2)
}
# Convert dictionaries to dataframe
balanced_scores_df = pd.DataFrame({'train': pd.Series(balanced_train), 'val': pd.Series(balanced_val)})
print('Balanced:')
print(balanced_scores_df, '\n')
# Models with different sampling methods
samplers = {
'Random oversampler': RandomOverSampler(random_state=0, sampling_strategy='minority'),
'SMOTE': SMOTE(random_state=0, sampling_strategy='minority'),
'ADASYN': ADASYN(random_state=0, sampling_strategy='minority'),
'Random undersampler': RandomUnderSampler(random_state=0, sampling_strategy='majority')
}
# For each sampling method: resample training data, evalute model for each sampling method
for name, sampler in samplers.items():
X_train_rs, y_train_rs = sampler.fit_resample(X_train, y_train)
model.fit(X_train_rs, y_train_rs)
y_pred_train, preds_train = model.predict(X_train), model.predict_proba(X_train)
y_pred_val, preds_val = model.predict(X_val), model.predict_proba(X_val)
train = {
'precision': precision_score(y_train, y_pred_train),
'recall': recall_score(y_train, y_pred_train),
'f1': f1_score(y_train, y_pred_train),
'fbeta': fbeta_score(y_train, y_pred_train, beta=2)
}
val = {
'precision': precision_score(y_val, y_pred_val),
'recall': recall_score(y_val, y_pred_val),
'f1': f1_score(y_val, y_pred_val),
'fbeta': fbeta_score(y_val, y_pred_val, beta=2)
}
scores_df = pd.DataFrame({'train': pd.Series(train), 'val': pd.Series(val)})
print(name, ':')
print(scores_df, '\n')
# Compare random oversampling and random undersampling methods
def compare_sampling2(model_name, model, X_tv, y_tv):
'''
Takes in model and its name (value and key in dictionary of models to compare, respectively)
and non-test data, splits data into training and validation sets, and trains model on:
1) non-resampled training data while adjusting built-in class weight metric of model,
2) training data minority class oversampled using RandomOverSampler,
3) training data majority class undersampled using RandomUnderSampler,
and compares evaluation metrics of these iterations on both training and validation data sets
'''
# Partition data
X_train, X_val, y_train, y_val = train_test_split(X_tv, y_tv, test_size = 0.2, random_state=14)
# Instantiate & train model with balancing class weight via model parameter
if model_name == 'Random Forest':
model_balanced = RandomForestClassifier(class_weight='balanced')
elif model_name == 'XGBoost':
model_balanced = XGBClassifier(scale_pos_weight=9)
model_balanced.fit(X_train, y_train)
# Predictions
y_pred_train_balanced = model_balanced.predict(X_train)
y_pred_val_balanced = model_balanced.predict(X_val)
# Dictionaries for scores for class-balanced iteration
balanced_train = {
'precision': precision_score(y_train, y_pred_train_balanced),
'recall': recall_score(y_train, y_pred_train_balanced),
'f1': f1_score(y_train, y_pred_train_balanced),
'fbeta': fbeta_score(y_train, y_pred_train_balanced, beta=2)
}
balanced_val = {
'precision': precision_score(y_val, y_pred_val_balanced),
'recall': recall_score(y_val, y_pred_val_balanced),
'f1': f1_score(y_val, y_pred_val_balanced),
'fbeta': fbeta_score(y_val, y_pred_val_balanced, beta=2)
}
# Convert dictionary to dataframe
balanced_scores_df = pd.DataFrame({'train': pd.Series(balanced_train), 'val': pd.Series(balanced_val)})
import warnings
warnings.filterwarnings("ignore")
import logging
import os
from os.path import join
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.models import load_model, save_model, model_from_json
from keras.utils import multi_gpu_model
from utils import utils
import model_skeleton.featuristic as featuristic
import model_skeleton.malfusion as malfusion
import model_skeleton.echelon as echelon
from keras import optimizers
from trend import activation_trend_identification as ati
import config.settings as cnst
from .train_args import DefaultTrainArguments
from plots.plots import plot_partition_epoch_history
from predict import predict
from predict.predict_args import Predict as pObj, DefaultPredictArguments, QStats
import numpy as np
from sklearn.utils import class_weight
import pandas as pd
from plots.plots import display_probability_chart
from analyzers.collect_exe_files import get_partition_data, partition_pkl_files_by_count, partition_pkl_files_by_size
import gc
from shutil import copyfile
def train(args):
""" Function for training Tier-1 model with whole byte sequence data
Args:
args: An object containing all the required parameters for training
Returns:
history: Returns history object from keras training process
"""
train_steps = len(args.t1_x_train) // args.t1_batch_size
args.t1_train_steps = train_steps - 1 if len(args.t1_x_train) % args.t1_batch_size == 0 else train_steps + 1
if args.t1_x_val is not None:
val_steps = len(args.t1_x_val) // args.t1_batch_size
args.t1_val_steps = val_steps - 1 if len(args.t1_x_val) % args.t1_batch_size == 0 else val_steps + 1
args.t1_ear = EarlyStopping(monitor='acc', patience=3)
args.t1_mcp = ModelCheckpoint(join(args.save_path, args.t1_model_name),
monitor="acc", save_best_only=args.save_best, save_weights_only=False)
data_gen = utils.direct_data_generator(args.t1_x_train, args.t1_y_train)
history = args.t1_model_base.fit(
data_gen,
class_weight=args.t1_class_weights,
steps_per_epoch=args.t1_train_steps,
epochs=args.t1_epochs,
verbose=args.t1_verbose,
callbacks=[args.t1_ear, args.t1_mcp]
# , validation_data=utils.data_generator(args.t1_x_val, args.t1_y_val, args.t1_max_len, args.t1_batch_size,
# args.t1_shuffle) , validation_steps=val_steps
)
# plot_history(history, cnst.TIER1)
return history
def train_by_blocks(args):
""" Function for training Tier-2 model with top activation blocks data
Args:
args: An object containing all the required parameters for training
Returns:
history: Returns history object from keras training process
"""
train_steps = len(args.t2_x_train) // args.t2_batch_size
args.t2_train_steps = train_steps - 1 if len(args.t2_x_train) % args.t2_batch_size == 0 else train_steps + 1
if args.t2_x_val is not None:
val_steps = len(args.t2_x_val) // args.t2_batch_size
args.t2_val_steps = val_steps - 1 if len(args.t2_x_val) % args.t2_batch_size == 0 else val_steps + 1
args.t2_ear = EarlyStopping(monitor='acc', patience=3)
args.t2_mcp = ModelCheckpoint(join(args.save_path, args.t2_model_name),
monitor="acc", save_best_only=args.save_best, save_weights_only=False)
data_gen = utils.data_generator(args.train_partition, args.t2_x_train, args.t2_y_train, args.t2_max_len, args.t2_batch_size, args.t2_shuffle)
history = args.t2_model_base.fit(
data_gen,
class_weight=args.t2_class_weights,
steps_per_epoch=args.t2_train_steps,
epochs=args.t2_epochs,
verbose=args.t2_verbose,
callbacks=[args.t2_ear, args.t2_mcp]
# , validation_data=utils.data_generator_by_section(args.q_sections, args.t2_x_val, args.t2_y_val
# , args.t2_max_len, args.t2_batch_size, args.t2_shuffle)
# , validation_steps=args.val_steps
)
# plot_history(history, cnst.TIER2)
return history
def train_by_section(args):
''' Obsolete: For block-based implementation'''
train_steps = len(args.t2_x_train)//args.t2_batch_size
args.t2_train_steps = train_steps - 1 if len(args.t2_x_train) % args.t2_batch_size == 0 else train_steps + 1
if args.t2_x_val is not None:
val_steps = len(args.t2_x_val) // args.t2_batch_size
args.t2_val_steps = val_steps - 1 if len(args.t2_x_val) % args.t2_batch_size == 0 else val_steps + 1
args.t2_ear = EarlyStopping(monitor='acc', patience=3)
args.t2_mcp = ModelCheckpoint(join(args.save_path, args.t2_model_name),
monitor="acc", save_best_only=args.save_best, save_weights_only=False)
# Check MAX_LEN modification is needed - based on proportion of section vs whole file size
# args.max_len = cnst.MAX_FILE_SIZE_LIMIT + (cnst.CONV_WINDOW_SIZE * len(args.q_sections))
data_gen = utils.direct_data_generator_by_section(args.q_sections, args.t2_x_train, args.t2_y_train)
history = args.t2_model_base.fit(
data_gen,
class_weight=args.t2_class_weights,
steps_per_epoch=len(args.t2_x_train)//args.t2_batch_size + 1,
epochs=args.t2_epochs,
verbose=args.t2_verbose,
callbacks=[args.t2_ear, args.t2_mcp]
# , validation_data=utils.data_generator_by_section(args.q_sections, args.t2_x_val, args.t2_y_val
# , args.t2_max_len, args.t2_batch_size, args.t2_shuffle)
# , validation_steps=args.val_steps
)
# plot_history(history, cnst.TIER2)
return history
def change_model(model, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE)):
""" Function to transfer weights of pre-trained Malconv to the block based model with reduced input shape.
Args:
model: An object with required parameters/hyper-parameters for loading, configuring and compiling
new_input_shape: a value <= Tier-1 model's input shape. Typically, ( Num of Conv. Filters * Size of Conv. Stride )
Returns:
new_model: new model with reduced input shape and weights updated
"""
model._layers[0].batch_input_shape = new_input_shape
new_model = model_from_json(model.to_json())
for layer in new_model.layers:
try:
layer.set_weights(model.get_layer(name=layer.name).get_weights())
logging.info("Loaded and weights set for layer {}".format(layer.name))
except Exception as e:
logging.exception("Could not transfer weights for layer {}".format(layer.name))
return new_model
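# Example (illustrative; the file name is a placeholder): reuse a full-length pretrained
# model's weights on the shorter block-level input used by Tier-2:
# base = load_model('pretrained_tier1.h5', compile=False)
# block_model = change_model(base, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE))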
def change_hydra(model, ech_model, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE)):
""" Function to transfer weights of pre-trained Malconv to the block based model with reduced input shape.
Args:
model: An object with required parameters/hyper-parameters for loading, configuring and compiling
new_input_shape: a value <= Tier-1 model's input shape. Typically, ( Num of Conv. Filters * Size of Conv. Stride )
Returns:
new_model: new model with reduced input shape and weights updated
"""
model._layers[0].batch_input_shape = new_input_shape
new_model = ech_model # model_from_json(model.to_json())
print("Updating Layer weights")
for layer in new_model.layers:
try:
layer.set_weights(model.get_layer(name=layer.name).get_weights())
logging.info("Loaded and weights set for layer {}".format(layer.name))
except Exception as e:
logging.exception("Could not transfer weights for layer {}".format(layer.name))
return new_model
def get_model1(args):
""" Function to prepare model required for Tier-1's training/prediction.
Args:
args: An object with required parameters/hyper-parameters for loading, configuring and compiling
Returns:
model1: Returns a Tier-1 model
"""
model1 = None
optimizer = optimizers.Adam(lr=0.001) # , beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
if args.resume:
if cnst.USE_PRETRAINED_FOR_TIER1:
logging.info("[ CAUTION ] : Resuming with pretrained model for TIER1 - " + args.pretrained_t1_model_name)
model1 = load_model(args.model_path + args.pretrained_t1_model_name, compile=False)
print("\n\n\nChanging model input ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n")
logging.info(str(model1.summary()))
model1 = change_model(model1, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE))
logging.info(str(model1.summary()))
model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
if cnst.NUM_GPU > 1:
multi_gpu_model1 = multi_gpu_model(model1, gpus=cnst.NUM_GPU)
# multi_gpu_model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return multi_gpu_model1
else:
logging.info("[ CAUTION ] : Resuming with old model")
model1 = load_model(args.model_path + args.t1_model_name, compile=False)
model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
if cnst.NUM_GPU > 1:
multi_gpu_model1 = multi_gpu_model(model1, gpus=cnst.NUM_GPU)
# multi_gpu_model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
return multi_gpu_model1
else:
logging.info("[CAUTION]: Proceeding training with custom model skeleton")
if args.byte:
premodel = load_model(args.model_path + args.pretrained_t1_model_name, compile=False)
echmodel = echelon.model(args.t1_max_len, args.t1_win_size)
model1 = change_hydra(premodel, echmodel)
elif args.featuristic:
model1 = featuristic.model(args.total_features)
elif args.fusion:
model1 = malfusion.model(args.max_len, args.win_size)
# optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# param_dict = {'lr': [0.00001, 0.0001, 0.001, 0.1]}
# model_gs = GridSearchCV(model, param_dict, cv=10)
# model1.summary()
return model1
def get_model2(args):
'''Obsolete: For block-based implementation'''
model2 = None
optimizer = optimizers.Adam(lr=0.001)
if args.resume:
if cnst.USE_PRETRAINED_FOR_TIER2:
logging.info("[ CAUTION ] : Resuming with pretrained model for TIER2 - " + args.pretrained_t2_model_name)
model2 = load_model(args.model_path + args.pretrained_t2_model_name, compile=False)
print("\n\n\nChanging model input ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n")
logging.info(str(model2.summary()))
model2 = change_model(model2, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE))
logging.info(str(model2.summary()))
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
if cnst.NUM_GPU > 1:
model2 = multi_gpu_model(model2, gpus=cnst.NUM_GPU)
else:
logging.info("[ CAUTION ] : Resuming with old model")
model2 = load_model(args.model_path + args.t2_model_name, compile=False)
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
if cnst.NUM_GPU > 1:
model2 = multi_gpu_model(model2, gpus=cnst.NUM_GPU)
else:
# logging.info("*************************** CREATING new model *****************************")
if args.byte:
model2 = echelon.model(args.t2_max_len, args.t2_win_size)
elif args.featuristic:
model2 = featuristic.model(len(args.selected_features))
elif args.fusion:
model2 = malfusion.model(args.max_len, args.win_size)
# optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
optimizer = optimizers.Adam(lr=0.001) # , beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# model2.summary()
return model2
def get_block_model2(args):
""" Function to prepare model required for Tier-2's training/prediction - For top activation block implementation.
Model's input shape is set to a reduced value specified in TIER2_NEW_INPUT_SHAPE parameter in settings.
Args:
args: An object with required parameters/hyper-parameters for loading, configuring and compiling
Returns:
model2: Returns a Tier-2 model
"""
model2 = None
optimizer = optimizers.Adam(lr=0.001)
if args.resume:
if cnst.USE_PRETRAINED_FOR_TIER2:
logging.info("[ CAUTION ] : Resuming with pretrained model for TIER2 - " + args.pretrained_t2_model_name)
model2 = load_model(args.model_path + args.pretrained_t2_model_name, compile=False)
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
if cnst.NUM_GPU > 1:
model2 = multi_gpu_model(model2, gpus=cnst.NUM_GPU)
else:
logging.info("[ CAUTION ] : Resuming with old model")
model2 = load_model(args.model_path + args.t1_model_name, compile=False)
logging.info(str(model2.summary()))
model2 = change_model(model2, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE))
logging.info(str(model2.summary()))
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
if cnst.NUM_GPU > 1:
model2 = multi_gpu_model(model2, gpus=cnst.NUM_GPU)
else:
# logging.info("*************************** CREATING new model *****************************")
if args.byte:
model2 = echelon.model(args.t2_max_len, args.t2_win_size)
elif args.featuristic:
model2 = featuristic.model(len(args.selected_features))
elif args.fusion:
model2 = malfusion.model(args.max_len, args.win_size)
# optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
optimizer = optimizers.Adam(lr=0.001) # , beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# model2.summary()
return model2
def train_tier1(args):
# logging.info("************************ TIER 1 TRAINING - STARTED ****************************
# Samples:", len(args.t1_x_train))
if args.tier1:
if args.byte:
return train(args)
# logging.info("************************ TIER 1 TRAINING - ENDED ****************************")
def train_tier2(args):
# logging.info("************************ TIER 2 TRAINING - STARTED ****************************")
if args.tier2:
if args.byte:
return train_by_section(args)
# print("************************ TIER 2 TRAINING - ENDED ****************************")
def evaluate_tier1(args):
""" Function to evaluate the Tier-1 model being trained at the end of each Epoch. (Not after completing each partition !)
Args:
args: An object with evaluation parameters and data.
Returns:
history: Returns a history object with Tier-1 evaluation loss and accuracy
"""
eval_steps = len(args.t1_x_val) // args.t1_batch_size
args.t1_val_steps = eval_steps - 1 if len(args.t1_x_val) % args.t1_batch_size == 0 else eval_steps + 1
history = args.t1_model_base.evaluate_generator(
# utils.train_data_generator(args.val_partition, args.t1_x_val, args.t1_y_val, args.t1_max_len, args.t1_batch_size, args.t1_shuffle),
utils.direct_data_generator(args.t1_x_val, args.t1_y_val),
steps=args.t1_val_steps,
verbose=args.t1_verbose
)
# plot_history(history, cnst.TIER1)
return history
def evaluate_tier2(args):
""" Function to evaluate the Tier-2 model being trained at the end of each Epoch. (Not after completing each partition !)
Args:
args: An object with evaluation parameters and data.
Returns:
history: Returns a history object with Tier-2 evaluation loss and accuracy
"""
eval_steps = len(args.t2_x_val) // args.t2_batch_size
args.t2_val_steps = eval_steps - 1 if len(args.t2_x_val) % args.t2_batch_size == 0 else eval_steps + 1
history = args.t2_model_base.evaluate_generator(
# utils.train_data_generator_by_section(args.spartition, args.q_sections, args.t2_x_val, args.t2_y_val, args.t2_max_len, args.t2_batch_size, args.t2_shuffle),
utils.direct_data_generator_by_section(args.q_sections, args.t2_x_val, args.t2_y_val),
steps=args.t2_val_steps,
verbose=args.t2_verbose
)
# plot_history(history, cnst.TIER2)
return history
def evaluate_tier2_block(args):
""" Function to evaluate the Tier-2 block-based model being trained at the end of each Epoch. (Not after completing each partition !)
Args:
args: An object with evaluation parameters and data.
Returns:
history: Returns a history object with block-model evaluation loss and accuracy
"""
eval_steps = len(args.t2_x_val) // args.t2_batch_size
args.t2_val_steps = eval_steps - 1 if len(args.t2_x_val) % args.t2_batch_size == 0 else eval_steps + 1
history = args.t2_model_base.evaluate_generator(
utils.data_generator(args.val_partition, args.t2_x_val, args.t2_y_val, args.t2_max_len, args.t2_batch_size, args.t2_shuffle),
steps=args.t2_val_steps,
verbose=args.t2_verbose
)
# plot_history(history, cnst.TIER2)
return history
def init(model_idx, train_partitions, val_partitions, fold_index):
""" Module for Training and Validation
# ##################################################################################################################
# OBJECTIVES:
# 1) Train Tier-1 and select its decision threshold for classification using Training data
# 2) Perform ATI over training data and select influential (Qualified) sections to be used by Tier-2
# 3) Train Tier-2 on selected PE sections' top activation blocks
# 4) Save trained models for Tier-1 and Tier-2
# ##################################################################################################################
Args:
model_idx: Default 0 for byte sequence models. Do not change.
train_partitions: list of partition indexes to be used for Training
val_partitions: list of partition indexes to be used for evaluation and validation
fold_index: current fold of cross-validation
Returns:
None (Resultant data are stored in CSV for further use)
"""
t_args = DefaultTrainArguments()
if cnst.EXECUTION_TYPE[model_idx] == cnst.BYTE: t_args.byte = True
elif cnst.EXECUTION_TYPE[model_idx] == cnst.FEATURISTIC: t_args.featuristic = True
elif cnst.EXECUTION_TYPE[model_idx] == cnst.FUSION: t_args.fusion = True
t_args.t1_model_name = cnst.TIER1_MODELS[model_idx] + "_" + str(fold_index) + ".h5"
t_args.t2_model_name = cnst.TIER2_MODELS[model_idx] + "_" + str(fold_index) + ".h5"
t_args.t1_best_model_name = cnst.TIER1_MODELS[model_idx] + "_" + str(fold_index) + "_best.h5"
t_args.t2_best_model_name = cnst.TIER2_MODELS[model_idx] + "_" + str(fold_index) + "_best.h5"
# logging.info("################################## TRAINING TIER-1 ###########################################")
# partition_tracker_df = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "partition_tracker_"+str(fold_index)+".csv")
if not cnst.SKIP_TIER1_TRAINING:
logging.info("************************ TIER 1 TRAINING - STARTED ****************************")
t_args.t1_model_base = get_model1(t_args)
best_val_loss = float('inf')
best_val_acc = 0
epochs_since_best = 0
mean_trn_loss = []
mean_trn_acc = []
mean_val_loss = []
mean_val_acc = []
cwy = []
for tp_idx in train_partitions:
cwdf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p" + str(tp_idx) + ".csv", header=None)
cwy = np.concatenate([cwy, cwdf.iloc[:, 1].values])
t_args.t1_class_weights = class_weight.compute_class_weight('balanced', np.unique(cwy), cwy)
for epoch in range(cnst.EPOCHS): # External Partition Purpose
logging.info("[ PARTITION LEVEL TIER-1 EPOCH : %s ]", epoch+1)
cur_trn_loss = []
cur_trn_acc = []
for tp_idx in train_partitions:
logging.info("Training on partition: %s", tp_idx)
tr_datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p" + str(tp_idx) + ".csv", header=None)
t_args.t1_x_train, t_args.t1_x_val, t_args.t1_y_train, t_args.t1_y_val = tr_datadf.iloc[:, 0].values, None, tr_datadf.iloc[:, 1].values, None
# t_args.t1_class_weights = class_weight.compute_class_weight('balanced',
# np.unique(t_args.t1_y_train), t_args.t1_y_train) # Class Imbalance Tackling - Setting class weights
t_args.train_partition = get_partition_data(None, None, tp_idx, "t1")
t_history = train_tier1(t_args)
cur_trn_loss.append(t_history.history['loss'][0])
cur_trn_acc.append(t_history.history['accuracy'][0])
del t_args.train_partition
gc.collect()
cnst.USE_PRETRAINED_FOR_TIER1 = False
cur_val_loss = []
cur_val_acc = []
# Evaluating after each epoch for early stopping over validation loss
logging.info("Evaluating on validation data . . .")
for vp_idx in val_partitions:
val_datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p" + str(vp_idx) + ".csv", header=None)
t_args.t1_x_train, t_args.t1_x_val, t_args.t1_y_train, t_args.t1_y_val = None, val_datadf.iloc[:, 0].values, None, val_datadf.iloc[:, 1].values
t_args.val_partition = get_partition_data(None, None, vp_idx, "t1")
v_history = evaluate_tier1(t_args)
cur_val_loss.append(v_history[0])
cur_val_acc.append(v_history[1])
del t_args.val_partition
gc.collect()
mean_trn_loss.append(np.mean(cur_trn_loss))
mean_trn_acc.append(np.mean(cur_trn_acc))
mean_val_loss.append(np.mean(cur_val_loss))
mean_val_acc.append(np.mean(cur_val_acc))
if mean_val_loss[epoch] < best_val_loss:
best_val_loss = mean_val_loss[epoch]
try:
copyfile(join(t_args.save_path, t_args.t1_model_name), join(t_args.save_path, t_args.t1_best_model_name))
except Exception as e:
logging.exception("Saving EPOCH level best model failed for Tier1")
epochs_since_best = 0
logging.info("Current Epoch Loss: %s\tCurrent Epoch Acc: %s\tUpdating best loss: %s", str(mean_val_loss[epoch]).ljust(25), str(mean_val_acc[epoch]).ljust(25), best_val_loss)
else:
logging.info("Current Epoch Loss: %s\tCurrent Epoch Acc: %s", mean_val_loss[epoch], mean_val_acc[epoch])
epochs_since_best += 1
logging.info('{} epochs passed since best val loss of {}'.format(epochs_since_best, best_val_loss))
if cnst.EARLY_STOPPING_PATIENCE_TIER1 <= epochs_since_best:
logging.info('Triggering early stopping as no improvement found since last {} epochs! Best Loss: {}'.format(epochs_since_best, best_val_loss))
try:
copyfile(join(t_args.save_path, t_args.t1_best_model_name), join(t_args.save_path, t_args.t1_model_name))
except Exception as e:
logging.exception("Retrieving EPOCH level best model failed for Tier1")
break
if epoch + 1 == cnst.EPOCHS:
try:
copyfile(join(t_args.save_path, t_args.t1_best_model_name), join(t_args.save_path, t_args.t1_model_name))
except Exception as e:
logging.exception("Retrieving EPOCH level best model failed for Tier1.")
del t_args.t1_model_base
gc.collect()
plot_partition_epoch_history(mean_trn_acc, mean_val_acc, mean_trn_loss, mean_val_loss, "Tier1_F" + str(fold_index+1))
logging.info("************************ TIER 1 TRAINING - ENDED ****************************")
else:
cnst.USE_PRETRAINED_FOR_TIER1 = False # Use model trained through Echelon
logging.info("SKIPPED: Tier-1 Training process")
if cnst.ONLY_TIER1_TRAINING:
return
# TIER-1 PREDICTION OVER TRAINING DATA [Select THD1]
min_boosting_bound = None
max_thd1 = None
b1val_partition_count = 0
if not cnst.SKIP_TIER1_VALIDATION:
logging.info("*** Prediction over Validation data in TIER-1 to select THD1 and Boosting Bound")
pd.DataFrame().to_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + "data" + cnst.ESC + "b1_val_" + str(fold_index) + "_pkl.csv", header=None, index=None)
for vp_idx in val_partitions:
val_datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p"+str(vp_idx)+".csv", header=None)
predict_t1_val_data = pObj(cnst.TIER1, cnst.TIER1_TARGET_FPR, val_datadf.iloc[:, 0].values, val_datadf.iloc[:, 1].values)
predict_t1_val_data.partition = get_partition_data(None, None, vp_idx, "t1")
predict_t1_val_data = predict.predict_tier1(model_idx, predict_t1_val_data, fold_index)
predict_t1_val_data = predict.select_thd_get_metrics_bfn_mfp(cnst.TIER1, predict_t1_val_data)
min_boosting_bound = predict_t1_val_data.boosting_upper_bound if min_boosting_bound is None or predict_t1_val_data.boosting_upper_bound < min_boosting_bound else min_boosting_bound
max_thd1 = predict_t1_val_data.thd if max_thd1 is None or predict_t1_val_data.thd > max_thd1 else max_thd1
del predict_t1_val_data.partition # Release Memory
gc.collect()
val_b1datadf = pd.concat([pd.DataFrame(predict_t1_val_data.xB1), pd.DataFrame(predict_t1_val_data.yB1)], axis=1)
val_b1datadf.to_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + "data" + cnst.ESC + "b1_val_"+str(fold_index)+"_pkl.csv", header=None, index=None, mode='a')
val_b1datadf = pd.read_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + "data" + cnst.ESC + "b1_val_"+str(fold_index)+"_pkl.csv", header=None)
b1val_partition_count = partition_pkl_files_by_count("b1_val", fold_index, val_b1datadf.iloc[:, 0], val_b1datadf.iloc[:, 1]) if cnst.PARTITION_BY_COUNT else partition_pkl_files_by_size("b1_val", fold_index, val_b1datadf.iloc[:, 0], val_b1datadf.iloc[:, 1])
pd.DataFrame([{"b1_train": None, "b1_val": b1val_partition_count, "b1_test": None}]).to_csv(os.path.join(cnst.DATA_SOURCE_PATH, "b1_partition_tracker_" + str(fold_index) + ".csv"), index=False)
pd.DataFrame([{"thd1": max_thd1, "thd2": None, "boosting_bound": min_boosting_bound}]).to_csv(os.path.join(cnst.PROJECT_BASE_PATH + cnst.ESC + "out" + cnst.ESC + "result" + cnst.ESC, "training_outcomes_" + str(fold_index) + ".csv"), index=False)
else:
logging.info("SKIPPED: Prediction over Validation data in TIER-1 to select THD1 and Boosting Bound")
tier1_val_outcomes = pd.read_csv(os.path.join(cnst.PROJECT_BASE_PATH + cnst.ESC + "out" + cnst.ESC + "result" + cnst.ESC, "training_outcomes_" + str(fold_index) + ".csv"))
max_val_thd1 = tier1_val_outcomes["thd1"][0]
min_val_boosting_bound = tier1_val_outcomes["boosting_bound"][0]
if not cnst.SKIP_TIER1_TRAINING_PRED:
logging.info("*** Prediction over Training data in TIER-1 to generate B1 data for TIER-2 Training")
        pd.DataFrame()
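# Hedged sketch (not from the original code base): the epoch-level early-stopping bookkeeping
# used above, reduced to its core. The function name, the loss values and the patience of 3
# are illustrative only; in the real loop the "new best" branch also copies the epoch-level
# best model file, and the stop branch restores it before breaking.
def early_stopping_demo(epoch_losses, patience=3):
    best_val_loss = float("inf")
    epochs_since_best = 0
    for epoch, loss in enumerate(epoch_losses):
        if loss < best_val_loss:
            best_val_loss = loss          # new best: checkpoint the model here
            epochs_since_best = 0
        else:
            epochs_since_best += 1        # no improvement this epoch
        if epochs_since_best >= patience:
            return epoch, best_val_loss   # trigger early stopping, restore best checkpoint
    return len(epoch_losses) - 1, best_val_loss

# early_stopping_demo([0.9, 0.7, 0.72, 0.71, 0.73]) -> (4, 0.7): stops after 3 epochs without improvement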
import shutil
from typing import Dict, Optional
from functools import reduce
from pathlib import Path
from operator import mul
import tensorflow as tf
import pandas as pd
import numpy as np
import deprecation
from termcolor import colored
from tensorflow.contrib.training import checkpoints_iterator
from common.utils import get_logger
from common.utils import wait
from PIL import Image
import const
def get_variables_to_train(trainable_scopes, logger):
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if trainable_scopes is None or trainable_scopes == "":
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in trainable_scopes.split(",")]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
for var in variables_to_train:
logger.info("vars to train > {}".format(var.name))
return variables_to_train
def show_models(logger):
trainable_variables = set(tf.contrib.framework.get_variables(collection=tf.GraphKeys.TRAINABLE_VARIABLES))
all_variables = tf.contrib.framework.get_variables()
trainable_vars = tf.trainable_variables()
total_params = 0
logger.info(colored(f">> Start of shoing all variables", "cyan", attrs=["bold"]))
for v in all_variables:
is_trainable = v in trainable_variables
count_params = reduce(mul, v.get_shape().as_list(), 1)
total_params += count_params
color = "cyan" if is_trainable else "green"
logger.info(colored((
f">> {v.name} {v.dtype} : {v.get_shape().as_list()}, {count_params} ... {total_params} "
f"(is_trainable: {is_trainable})"
), color))
logger.info(colored(
f">> End of showing all variables // Number of variables: {len(all_variables)}, "
f"Number of trainable variables : {len(trainable_vars)}, "
f"Total prod + sum of shape: {total_params}",
"cyan", attrs=["bold"]))
return total_params
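# Hedged illustration of the parameter counting above (the shape is made up):
#   reduce(mul, [3, 3, 64, 128], 1) == 3 * 3 * 64 * 128 == 73728
# i.e. count_params is simply the product of a variable's shape, and total_params accumulates
# that product over every variable returned by tf.contrib.framework.get_variables().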
def ckpt_iterator(checkpoint_dir, min_interval_secs=0, timeout=None, timeout_fn=None, logger=None):
for ckpt_path in checkpoints_iterator(checkpoint_dir, min_interval_secs, timeout, timeout_fn):
if Path(ckpt_path).suffix == const.CKPT_EXTENSION:
if logger is not None:
logger.info(f"[skip] {ckpt_path}")
else:
yield ckpt_path
class BestKeeper(object):
def __init__(
self,
metric_with_modes,
dataset_name,
directory,
logger=None,
epsilon=0.00005,
score_file="scores.tsv",
        metric_best: Optional[Dict] = None,
):
"""Keep best model's checkpoint by each datasets & metrics
Args:
metric_with_modes: Dict, metric_name: mode
if mode is 'min', then it means that minimum value is best, for example loss(MSE, MAE)
if mode is 'max', then it means that maximum value is best, for example Accuracy, Precision, Recall
            dataset_name: str, dataset name on which the metric will be calculated
directory: directory path for saving best model
epsilon: float, threshold for measuring the new optimum, to only focus on significant changes.
Because sometimes early-stopping gives better generalization results
"""
if logger is not None:
self.log = logger
else:
self.log = get_logger("BestKeeper")
self.score_file = score_file
        self.metric_best = metric_best if metric_best is not None else {}
self.log.info(colored(f"Initialize BestKeeper: Monitor {dataset_name} & Save to {directory}",
"yellow", attrs=["underline"]))
self.log.info(f"{metric_with_modes}")
self.x_better_than_y = {}
self.directory = Path(directory)
self.output_temp_dir = self.directory / f"{dataset_name}_best_keeper_temp"
for metric_name, mode in metric_with_modes.items():
if mode == "min":
self.metric_best[metric_name] = self.load_metric_from_scores_tsv(
directory / dataset_name / metric_name / score_file,
metric_name,
np.inf,
)
self.x_better_than_y[metric_name] = lambda x, y: np.less(x, y - epsilon)
elif mode == "max":
self.metric_best[metric_name] = self.load_metric_from_scores_tsv(
directory / dataset_name / metric_name / score_file,
metric_name,
-np.inf,
)
self.x_better_than_y[metric_name] = lambda x, y: np.greater(x, y + epsilon)
else:
raise ValueError(f"Unsupported mode : {mode}")
def load_metric_from_scores_tsv(
self,
full_path: Path,
metric_name: str,
default_value: float,
) -> float:
def parse_scores(s: str):
if len(s) > 0:
return float(s)
else:
return default_value
if full_path.exists():
with open(full_path, "r") as f:
header = f.readline().strip().split("\t")
values = list(map(parse_scores, f.readline().strip().split("\t")))
metric_index = header.index(metric_name)
return values[metric_index]
else:
return default_value
def monitor(self, dataset_name, eval_scores):
metrics_keep = {}
is_keep = False
        for metric_name in self.metric_best:
            score = eval_scores[metric_name]
if self.x_better_than_y[metric_name](score, self.metric_best[metric_name]):
old_score = self.metric_best[metric_name]
self.metric_best[metric_name] = score
metrics_keep[metric_name] = True
is_keep = True
self.log.info(colored("[KeepBest] {} {:.6f} -> {:.6f}, so keep it!".format(
metric_name, old_score, score), "blue", attrs=["underline"]))
else:
metrics_keep[metric_name] = False
return is_keep, metrics_keep
def save_best(self, dataset_name, metrics_keep, ckpt_glob):
for metric_name, is_keep in metrics_keep.items():
if is_keep:
keep_path = self.directory / Path(dataset_name) / Path(metric_name)
self.keep_checkpoint(keep_path, ckpt_glob)
self.keep_converted_files(keep_path)
def save_scores(self, dataset_name, metrics_keep, eval_scores, meta_info=None):
eval_scores_with_meta = eval_scores.copy()
if meta_info is not None:
eval_scores_with_meta.update(meta_info)
for metric_name, is_keep in metrics_keep.items():
if is_keep:
keep_path = self.directory / Path(dataset_name) / Path(metric_name)
if not keep_path.exists():
keep_path.mkdir(parents=True)
                df = pd.DataFrame(pd.Series(eval_scores_with_meta))
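# Hedged usage sketch for the BestKeeper class above (not from the original repo; the module
# name `train_utils`, the metric names and the paths are assumptions made for illustration):
#
# from train_utils import BestKeeper
#
# keeper = BestKeeper({"loss": "min", "accuracy": "max"},
#                     dataset_name="valid", directory="outputs/best")
# eval_scores = {"loss": 0.412, "accuracy": 0.873}
# is_keep, metrics_keep = keeper.monitor("valid", eval_scores)
# if is_keep:
#     # for every improved metric, save_scores records the scores (score_file, "scores.tsv" by
#     # default) under outputs/best/valid/<metric>/ and save_best keeps the matching checkpoint
#     keeper.save_scores("valid", metrics_keep, eval_scores, meta_info={"global_step": 1200})
#     keeper.save_best("valid", metrics_keep, ckpt_glob="checkpoints/model.ckpt-1200*")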
# -*- coding: utf-8 -*-
# This is a test file intended to be used with pytest
# pytest automatically runs all the functions starting with "test_"
# see https://docs.pytest.org for more information
import os
import pytest
import pandas as pd
from nlp.spacy_tokenizer import MultilingualTokenizer
def test_tokenize_df_english():
input_df = pd.DataFrame({"input_text": ["I hope nothing. I fear nothing. I am free. 💩 😂 #OMG"]})
tokenizer = MultilingualTokenizer()
output_df = tokenizer.tokenize_df(df=input_df, text_column="input_text", language="en")
tokenized_document = output_df[tokenizer.tokenized_column][0]
assert len(tokenized_document) == 15
def test_tokenize_df_japanese():
    input_df = pd.DataFrame({"input_text": ["期一会。 異体同心。 そうです。"]})
import json
import os
import re
from shutil import copyfile
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import PIL
from PIL import Image
import math
import torch
import torchvision
import torchvision.transforms as transforms
df_labels = pd.read_csv('stage_2_labels_processed.csv')
df_splits = pd.read_csv('splits_with_migration_index.csv')
image_name = df_labels[['filename']]
image_name_split = df_splits[['filename']]
image_split = df_splits[['split']]
loader_train, loader_val, loader_test = pd.DataFrame({}), pd.DataFrame({}), pd.DataFrame({})
class Preprocessing:
#Assumption 1 - Data Columns For Train & Test Will Be Same
#Assumption 2 - Ordinal & Bit Switches Will Not Be Pushed In Nominal Function
#Assumption 3 - Train Categorical Will Be SuperSet & Test Will Be SubSet, Else Model To Be ReCreated
def LoadData(self, FileName, HeaderMissing="No"):
# Supports excel,csv,tsv,xml,json,orc,parquet,avro
import pandas as pd
FileType = FileName.split(".")
FileType = FileType[len(FileType)-1].lower()
if FileType == 'xls':
if HeaderMissing =="Yes":
return pd.read_excel(FileName, header=None)
else:
return pd.read_excel(FileName)
if FileType == 'xlsx':
if HeaderMissing =="Yes":
return pd.read_excel(FileName, header=None)
else:
return pd.read_excel(FileName)
if FileType == 'csv':
if HeaderMissing =="Yes":
return pd.read_csv(FileName, header=None)
else:
return pd.read_csv(FileName)
if FileType == 'tsv':
if HeaderMissing =="Yes":
return pd.read_csv(FileName, header=None, sep='\t')
else:
                return pd.read_csv(FileName, sep='\t')
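# Hedged usage sketch (not part of the original class): LoadData dispatches purely on the file
# extension, so the same call works for any supported format. The tiny CSV written below exists
# only to make the example self-contained; the file name is a placeholder.
import pandas as pd
pd.DataFrame({"a": [1, 2], "b": [3, 4]}).to_csv("tiny_example.csv", index=False)
pre = Preprocessing()
df_with_header = pre.LoadData("tiny_example.csv")                          # header row is used
df_without_header = pre.LoadData("tiny_example.csv", HeaderMissing="Yes")  # first row treated as data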
"""
This script runs time-series forecasting via ARIMA. It contains all the methods necessary
to simulate a time series forecasting task. Below simulation uses daily minimum temp. and tries
to predict min. temp. for given date.
Steps to follow:
1. Pre-processing raw data. `preprocess_raw_data()`
2. Splitting data into two as train and test. `split_data(2)`
3. Fit model to the train dataset and save model object. `fit_and_save_model()`
4. Make prediction for test datasets. `predict_test_wt_arima()`
5. Measure the accuracy of predictions for the test period. Afterwards, save the results
   locally. `measure_accuracy()`
6. Use forecast function to have a point estimate for a given date.
`forecast_wt_arima_for_date(input_date)`
What is ARIMA?
ARIMA is one of the most common methods used in time series forecasting. It is an acronym for
AutoRegressive Integrated Moving Average. ARIMA is a model that can be fitted to time series data
in order to better understand or predict future points in the series.
Details of the dataset:
This dataset describes the minimum daily temperatures over 10 years (1981-1990)
in the city Melbourne, Australia.
The units are in degrees Celsius and there are 3650 observations.
The source of the data is credited as the Australian Bureau of Meteorology
"""
import logging
import math
import os
import pandas as pd
import pickle
import sqlite3
from pmdarima import auto_arima
from datetime import date
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def run_training():
#TODO
preprocess_raw_data()
split_data(2)
fit_and_save_model()
predict_test_wt_arima()
measure_accuracy()
def run_prediction():
#TODO
forecast_wt_arima_for_date(str(date.today()))
def read_data(df_phase):
"""
This function reads necessary data from local for the steps of the simulation.
:param df_phase: Read data for which step of the simulation.
Options: ['raw_data', 'processed', 'train_model', 'test_model', 'train_predicted',
'test_predicted']
:return: DataFrame read from local.
"""
if df_phase == 'raw_data':
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'raw_data', 'daily_minimum_temp.csv')
df = pd.read_csv(data_path, error_bad_lines=False)
elif df_phase == 'processed':
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'interim', 'processed_df.csv')
df = pd.read_csv(data_path, error_bad_lines=False)
elif df_phase == 'train_model':
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'interim', 'train_df.csv')
df = pd.read_csv(data_path, error_bad_lines=False)
elif df_phase == 'test_model':
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'interim', 'test_df.csv')
df = pd.read_csv(data_path, error_bad_lines=False)
elif df_phase == 'test_predicted':
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'interim', 'predicted_test.csv')
df = pd.read_csv(data_path, error_bad_lines=False)
return df
def preprocess_raw_data():
"""
Reads raw data from local and makes pre-processing necessary to use dataset with ARIMA.
Function assumes that the date column is named as 'Date'. It saves prep-processed dataset
the local.
"""
raw_df = read_data('raw_data')
raw_df['Date'] = list(map(lambda x: pd.to_datetime(x), raw_df['Date']))
raw_df = raw_df.sort_values('Date')
procesed_df = raw_df.rename(index=str,
columns={'Daily minimum temperatures in Melbourne, '
'Australia, 1981-1990': 'y'})
for sub in procesed_df['y']:
if '?' in sub:
procesed_df.loc[procesed_df['y'] == sub, 'y'] = sub.split('?')[1]
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'interim', 'processed_df.csv')
os.makedirs(os.path.dirname(data_path), exist_ok=True)
procesed_df.to_csv(path_or_buf=data_path, index=False, header=True)
def split_data(n_weeks_to_test=2):
"""
Reads preprocessed data from local and splits it to test/train and saves it to
local. test_df.csv and train_df.csv can be found under `data/interim` path.
:param n_weeks_to_test: Number of weeks for the test data. Default is 2.
"""
preprocessed_data = read_data('processed')
n_days_for_test = n_weeks_to_test * 7
test_df = preprocessed_data[-n_days_for_test:]
train_df = preprocessed_data[:-n_days_for_test]
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join('data', 'interim')
os.makedirs(os.path.dirname(data_path), exist_ok=True)
combined_path_test = os.path.join(repo_path, data_path, 'test_df.csv')
combined_path_train = os.path.join(repo_path, data_path, 'train_df.csv')
train_df.to_csv(path_or_buf=combined_path_train, index=False, header=True)
test_df.to_csv(path_or_buf=combined_path_test, index=False, header=True)
def fit_and_save_model():
"""
    Fits an ARIMA model (auto_arima) to the train dataframe. It reads data from local and saves
    the model object to the local. Model can be found under `data/model/arima.pkl`
"""
train_df = read_data('train_model')
train_df['Date'] = list(map(lambda x: pd.to_datetime(x), train_df['Date']))
train_df = train_df.set_index('Date')
model = auto_arima(train_df, start_p=1, start_q=1,
test='adf',
max_p=1, max_q=1, m=12,
start_P=0, seasonal=True,
d=None, D=1, trace=True,
error_action='ignore',
suppress_warnings=True,
stepwise=True)
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
model_path = os.path.join(repo_path, 'data', 'model', 'arima.pkl')
os.makedirs(os.path.dirname(model_path), exist_ok=True)
with open(model_path, "wb") as f:
pickle.dump(model, f)
def predict_test_wt_arima():
"""
Reads test dataframe and model object from local and makes prediction.
Data with predicted values for test dataframe will be saved to local.
"""
test_df = read_data('test_model')
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
model_path = os.path.join(repo_path, 'data', 'model', 'arima.pkl')
with open(model_path, 'rb') as f:
model = pickle.load(f)
fitted, confint = model.predict(n_periods=len(test_df), return_conf_int=True)
predicted_test = pd.merge(
pd.DataFrame(fitted), pd.DataFrame(confint), right_index=True, left_index=True)
predicted_test = predicted_test.rename(index=str,
columns={'0_x': 'yhat',
'0_y': 'yhat_lower',
1: 'yhat_upper'})
data_path = os.path.join(repo_path, 'data', 'interim')
combined_path_test = os.path.join(data_path, 'predicted_test.csv')
predicted_test.to_csv(path_or_buf=combined_path_test, index=False, header=True)
def calculate_mape(y, yhat):
"""
Calculates Mean Average Percentage Error.
:param y: Actual values as series
:param yhat: Predicted values as series
:return: MAPE as percentage
"""
y = y.replace(0, np.nan)
error_daily = y - yhat
abs_daily_error = list(map(abs, error_daily))
relative_abs_daily_error = abs_daily_error / y
mape = (np.nansum(relative_abs_daily_error) / np.sum(~np.isnan(y)))*100
return mape
def calculate_rmse(y, yhat):
"""
Calculates Root Mean Square Error
:param y: Actual values as series
:param yhat: Predicted values as series
:return: RMSE value
"""
error_sqr = (y - yhat)**2
    rmse = math.sqrt(sum(error_sqr) / len(error_sqr))
return rmse
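# Hedged sanity check for the two metrics above (illustrative numbers, not from the dataset):
# with y = [10, 20, 40] and yhat = [12, 18, 44]
#   MAPE = mean(|y - yhat| / y) * 100 = (0.2 + 0.1 + 0.1) / 3 * 100 ~= 13.33
#   RMSE = sqrt(mean((y - yhat)^2)) = sqrt((4 + 4 + 16) / 3) ~= 2.83
#
# calculate_mape(pd.Series([10.0, 20.0, 40.0]), pd.Series([12.0, 18.0, 44.0]))  # ~13.33
# calculate_rmse(pd.Series([10.0, 20.0, 40.0]), pd.Series([12.0, 18.0, 44.0]))  # ~2.83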
def measure_accuracy():
"""
Uses the above defined accuracy metrics and calculates accuracy for both test series in
terms of MAPE and RMSE. Saves those results to local as a csv file.
:return: A dictionary with accuracy metrics for test dataset.
"""
test_df = read_data('test_model')
predicted_test = read_data('test_predicted')
mape_test = calculate_mape(test_df['y'], predicted_test['yhat'])
rmse_test = calculate_rmse(test_df['y'], predicted_test['yhat'])
days_in_test = len(test_df)
accuracy_dict = {'mape_test': [mape_test],
'rmse_test': [rmse_test],
'days_in_test': [days_in_test]}
    acc_df = pd.DataFrame(accuracy_dict)
from typing import List, Tuple
import pandas as pd
from datetime import datetime
from os import makedirs
import json
import networkx as nx
from ..data_structures import Case, Cluster
from ..visualization import save_graphviz
class Metrics:
"""
Controls the computation of metrics during the stream processing
and writes the results into files periodically (check points)
"""
case_columns: "list[str]"
cluster_columns: "list[str]"
additional_attributes: "list[str]"
def __init__(self, file_name: str, additional_attributes: "list[str]" = []):
"""
Creates the paths for the outputs and initializes the metrics attributes
Parameters
--------------------------------------
file_name: str
Process name, used for the path creation
"""
self.case_metrics = []
self.cluster_metrics = []
self.file_name = file_name
self.additional_attributes = additional_attributes
self.path_to_pmg_metrics = f"output/metrics/{file_name}_process_model_graphs"
self.path_to_pmg_vis = f"output/visualization/{file_name}_process_model_graphs"
self.path_to_drifts = "output/visualization/drifts"
self.path_to_case_metrics = "output/metrics/case_metrics"
self.path_to_cluster_metrics = "output/metrics/cluster_metrics"
try:
makedirs(self.path_to_pmg_metrics, exist_ok=True)
makedirs(self.path_to_pmg_vis, exist_ok=True)
makedirs(self.path_to_drifts, exist_ok=True)
makedirs(self.path_to_case_metrics, exist_ok=True)
makedirs(self.path_to_cluster_metrics, exist_ok=True)
self.case_columns, self.cluster_columns = self.generate_column_names()
            pd.DataFrame(columns=self.case_columns)
import json
import pathlib
import pickle
import shutil
from collections import defaultdict
from multiprocessing.pool import Pool
from typing import Dict, List, Optional, Set, Tuple
import click
import pandas as pd
import requests
import zstandard
from tqdm import trange
from tqdm.auto import tqdm
from src.helpers import constants
Pairs = List[Tuple[int, int]]
COLUMNS = ["score", "title", "subreddit", "link", "timestamp"]
COLUMN_TYPES = ["int", "str", "str", "str", "int"]
def _download_url(stem: str) -> str:
return f"https://files.pushshift.io/reddit/submissions/{stem}.zst"
def _zst_path(stem: str) -> pathlib.Path:
return constants.RAW_DIR / f"{stem}.zst"
def _txt_path(stem: str) -> pathlib.Path:
return constants.RAW_DIR / f"{stem}.txt"
def _posts_path(subreddit: str) -> pathlib.Path:
return constants.SUBREDDITS_DIR / f"{subreddit}.posts"
def _pairs_path(subreddit: str) -> pathlib.Path:
return constants.SUBREDDITS_DIR / f"{subreddit}.pairs"
def _maybe_build_dirs():
constants.DATA_DIR.mkdir(parents=True, exist_ok=True)
constants.SUBREDDITS_DIR.mkdir(parents=True, exist_ok=True)
constants.RAW_DIR.mkdir(parents=True, exist_ok=True)
constants.MODELS_DIR.mkdir(parents=True, exist_ok=True)
constants.FIGURES_DIR.mkdir(parents=True, exist_ok=True)
constants.TABLES_DIR.mkdir(parents=True, exist_ok=True)
def _download_zst_file(stem: str) -> None:
print(f"Downloading data for {stem}")
url, zst_path = _download_url(stem), _zst_path(stem)
with requests.get(url, stream=True) as r:
assert r.ok, f"Error downloading file from {url}."
with zst_path.open("wb") as f:
shutil.copyfileobj(r.raw, f)
def _decompress_txt_file(stem: str):
print(f"Decompressing data for {stem}")
zst_path, txt_path = _zst_path(stem), _txt_path(stem)
with zst_path.open("rb") as ifh, txt_path.open("wb") as ofh:
zstandard.ZstdDecompressor(max_window_size=2 ** 31).copy_stream(ifh, ofh)
zst_path.unlink()
def _maybe_download_dataset() -> None:
stems = [stem for stem in constants.DATASET_PATHS if not (_txt_path(stem)).exists()]
if not stems:
return
with Pool(len(stems)) as pool:
pool.map(_download_zst_file, stems)
for stem in stems:
_decompress_txt_file(stem)
def _gather_stem_subreddit_posts(subreddits: Set[str], stem: str) -> Dict[str, list]:
stem_subreddit_posts = defaultdict(list)
for line in _txt_path(stem).open("r"):
row = json.loads(line)
if "subreddit" in row and row["subreddit"] in subreddits:
if row["stickied"]:
continue # artificially inflated
if row["score"] < 2:
continue # Match cats and captions
stem_subreddit_posts[row["subreddit"]].append(
[
row["score"],
row["title"],
row["subreddit"],
row["permalink"],
row["created_utc"],
]
)
return stem_subreddit_posts
def _gather_subreddit_posts(subreddits: List[str]) -> Dict[str, pd.DataFrame]:
"""Preprocess a subreddit, downloading the dataset if necessary."""
subreddit_posts = defaultdict(list)
print(f"Preprocessing subreddits {subreddits}")
with Pool(len(constants.DATASET_PATHS)) as pool:
for stem_subreddit_posts in pool.starmap(
_gather_stem_subreddit_posts,
[(subreddits, stem) for stem in constants.DATASET_PATHS],
):
for subreddit in subreddits:
subreddit_posts[subreddit].extend(stem_subreddit_posts[subreddit])
subreddit_dfs = {}
for subreddit in subreddits:
print(f"Finalizing subreddit {subreddit}")
subreddit_dfs[subreddit] = (
pd.DataFrame(subreddit_posts[subreddit], columns=COLUMNS)
.astype(dict(zip(COLUMNS, COLUMN_TYPES)))
.sort_values(by="timestamp")
)
subreddit_dfs[subreddit].to_csv(_posts_path(subreddit))
return subreddit_dfs
def _gather_single_subreddit_pairs(subreddit: str, posts: pd.DataFrame):
pairs: Pairs = []
matched = set()
N = len(posts)
for i in trange(N, desc=f"Pairing r/{subreddit} posts"):
if i in matched:
continue # skip already paired i's
i_score = posts.iloc[i]["score"]
i_ts = posts.iloc[i]["timestamp"]
for j in range(i + 1, N):
if posts.iloc[j]["timestamp"] - i_ts > constants.PAIRING_INTERVAL_RANGE_S:
break # break if window is too large
if j in matched:
continue # skip already paired j's
j_score = posts.iloc[j]["score"]
if i_score <= 2 * j_score and j_score <= 2 * i_score:
continue # one must be at least double the other
if abs(i_score - j_score) < 20:
continue # difference must be at least 20
pairs.append((i, j))
matched |= {i, j} # add to matched
break
with _pairs_path(subreddit).open("wb") as f:
pickle.dump(pairs, f)
return pairs
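# Hedged walk-through of the pairing rules above (scores are made up): two posts can only be
# paired when they fall within PAIRING_INTERVAL_RANGE_S of each other, one score is more than
# double the other, and the scores differ by at least 20.
#   scores (100, 40): 100 > 2 * 40 and |100 - 40| >= 20            -> paired
#   scores (100, 60): neither score is more than double the other  -> skipped
#   scores (30, 12):  30 > 2 * 12 but |30 - 12| < 20               -> skipped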
def _gather_subreddit_pairs(subs_to_posts: Dict[str, pd.DataFrame]) -> Dict[str, Pairs]:
return {
subreddit: _gather_single_subreddit_pairs(subreddit, posts)
for subreddit, posts in subs_to_posts.items()
}
@click.command(short_help="Preprocess a set of subreddits for future usage.")
@click.argument("subreddits", nargs=-1)
def preprocess(
subreddits: List[str],
) -> Tuple[Dict[str, pd.DataFrame], Dict[str, Pairs]]:
subreddits = sorted(
sub
for sub in (subreddits or constants.DEFAULT_SUBREDDITS)
if not (_posts_path(sub).exists() and _pairs_path(sub).exists())
)
_maybe_build_dirs()
_maybe_download_dataset()
posts = _gather_subreddit_posts(subreddits)
pairs = _gather_subreddit_pairs(posts)
return posts, pairs
def verify(subreddit: str):
"""Verify that the given subreddit has been preprocessed."""
assert (
_posts_path(subreddit).exists() and _pairs_path(subreddit).exists()
), f"{subreddit} has not been preprocessed; run `python -m src.preprocess {subreddit}`"
def load(subreddit: str) -> Tuple[pd.DataFrame, Pairs]:
verify(subreddit)
with _posts_path(subreddit).open("r") as f:
        posts = pd.read_csv(f)
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Dataloader for the Argoverse 2 (AV2) sensor dataset."""
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from functools import cached_property
from pathlib import Path
from typing import Dict, Final, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from rich.progress import track
from av2.datasets.sensor.constants import RingCameras, StereoCameras
from av2.datasets.sensor.utils import convert_path_to_named_record
from av2.geometry.camera.pinhole_camera import PinholeCamera
from av2.structures.cuboid import CuboidList
from av2.structures.sweep import Sweep
from av2.structures.timestamped_image import TimestampedImage
from av2.utils.io import TimestampedCitySE3EgoPoses, read_city_SE3_ego, read_feather, read_img
from av2.utils.metric_time import TimeUnit, to_metric_time
logger = logging.getLogger(__name__)
LIDAR_PATTERN: Final[str] = "*/sensors/lidar/*.feather"
CAMERA_PATTERN: Final[str] = "*/sensors/cameras/*/*.jpg"
Millisecond = TimeUnit.Millisecond
Nanosecond = TimeUnit.Nanosecond
Second = TimeUnit.Second
# for both ring cameras, and stereo cameras.
CAM_FPS: Final[int] = 20
LIDAR_FRAME_RATE_HZ: Final[int] = 10
# constants defined in milliseconds
# below evaluates to 50 ms
CAM_SHUTTER_INTERVAL_MS: Final[float] = to_metric_time(ts=1 / CAM_FPS, src=Second, dst=Millisecond)
# below evaluates to 100 ms
LIDAR_SWEEP_INTERVAL_MS: Final[float] = to_metric_time(ts=1 / LIDAR_FRAME_RATE_HZ, src=Second, dst=Millisecond)
ALLOWED_TIMESTAMP_BUFFER_MS: Final[int] = 2 # allow 2 ms of buffer
LIDAR_SWEEP_INTERVAL_W_BUFFER_MS: Final[float] = LIDAR_SWEEP_INTERVAL_MS + ALLOWED_TIMESTAMP_BUFFER_MS
LIDAR_SWEEP_INTERVAL_W_BUFFER_NS: Final[float] = to_metric_time(
ts=LIDAR_SWEEP_INTERVAL_W_BUFFER_MS, src=Millisecond, dst=Nanosecond
)
@dataclass
class SynchronizedSensorData:
"""Represents information associated with a single sweep.
Enables motion compensation between the sweep and associated images.
Args:
sweep: lidar sweep.
timestamp_city_SE3_ego_dict: mapping from vehicle timestamp to the egovehicle's pose in the city frame.
log_id: unique identifier for the AV2 vehicle log.
sweep_number: index of the sweep in [0, N-1], of all N sweeps in the log.
num_sweeps_in_log: number of sweeps in the log.
annotations: cuboids that have been annotated within the sweep, or None.
synchronized_imagery: mapping from camera name to timestamped imagery, or None.
"""
sweep: Sweep
timestamp_city_SE3_ego_dict: TimestampedCitySE3EgoPoses
log_id: str
sweep_number: int
num_sweeps_in_log: int
annotations: Optional[CuboidList] = None
synchronized_imagery: Optional[Dict[str, TimestampedImage]] = None
@dataclass
class SensorDataloader:
"""
Sensor dataloader for the Argoverse 2 sensor dataset.
NOTE: We build a cache of sensor records and synchronization information to reduce I/O overhead.
Args:
sensor_dataset_dir: Path to the sensor dataset directory.
with_annotations: Flag to return annotations in the __getitem__ method.
with_cams: Flag to load and return synchronized imagery in the __getitem__ method.
with_cache: Flag to enable file directory caching.
matching_criterion: either "nearest" or "forward".
Returns:
AV2 Sensor dataset.
"""
sensor_dataset_dir: Path
with_annotations: bool = True
with_cache: bool = True
cam_names: Tuple[Union[RingCameras, StereoCameras], ...] = tuple(RingCameras) + tuple(StereoCameras)
matching_criterion = "nearest"
sensor_records: pd.DataFrame = field(init=False)
# Initialize synchronized metadata variable.
# This is only populated when self.use_imagery is set.
sync_records: Optional[pd.DataFrame] = None
def __post_init__(self) -> None:
"""Index the dataset for fast sensor data lookup.
Synchronization database and sensor records are separate tables. Sensor records are an enumeration of
the records. The synchronization database is a hierarchichal index (Pandas MultiIndex) that functions
as a lookup table with correspondences between nearest images.
Given reference LiDAR timestamp -> obtain 7 closest ring camera + 2 stereo camera timestamps.
First level: Log id (1000 uuids)
Second level: Sensor name (lidar, ring_front_center, ring_front_left, ..., stereo_front_right).
Third level: Nanosecond timestamp (64-bit integer corresponding to vehicle time when the data was collected).
SENSOR RECORDS:
log_id sensor_name timestamp_ns
0c6e62d7-bdfa-3061-8d3d-03b13aa21f68 lidar 315971436059707000
lidar 315971436159903000
lidar 315971436260099000
lidar 315971436359632000
lidar 315971436459828000
... ...
ff0dbfc5-8a7b-3a6e-8936-e5e812e45408 stereo_front_right 315972918949927214
stereo_front_right 315972918999927217
stereo_front_right 315972919049927212
stereo_front_right 315972919099927219
stereo_front_right 315972919149927218
SYNCHRONIZATION RECORDS:
ring_front_center stereo_front_right
log_id sensor_name timestamp_ns
0c6e62d7-bdfa-3061-8d3d-03b13aa21f68 lidar 315971436059707000 315971436049927217 ... 315971436049927215
315971436159903000 315971436149927219 ... 315971436149927217
315971436260099000 315971436249927221 ... 315971436249927219
315971436359632000 315971436349927219 ... 315971436349927221
315971436459828000 315971436449927218 ... 315971436449927207
... ... ... ...
ff0dbfc5-8a7b-3a6e-8936-e5e812e45408 lidar 315972918660124000 315972918649927220 ... 315972918649927214
315972918759657000 315972918749927214 ... 315972918749927212
315972918859853000 315972918849927218 ... 315972918849927213
315972918960050000 315972918949927220 ... 315972918949927214
315972919060249000 315972919049927214 ... 315972919049927212
"""
# Load log_id, sensor_type, and timestamp_ns information.
self.sensor_records = self._load_sensor_records()
# Populate synchronization database.
if self.cam_names:
sync_records_path = self.sensor_dataset_dir / "._sync_records"
# If caching is enabled AND the path exists, then load from the cache file.
if self.with_cache and sync_records_path.exists():
self.sync_records = read_feather(sync_records_path)
else:
self.sync_records = self._build_sync_records()
# If caching is enabled and we haven't created the cache, then save to disk.
if self.with_cache and not sync_records_path.exists():
self.sync_records.to_feather(str(sync_records_path))
# Finally, create a MultiIndex set the sync records index and sort it.
self.sync_records = self.sync_records.set_index(keys=["log_id", "sensor_name", "timestamp_ns"]).sort_index()
@cached_property
def num_logs(self) -> int:
"""Return the number of unique logs."""
return len(self.sensor_records.index.unique("log_id"))
@cached_property
def num_sweeps(self) -> int:
"""Return the number of unique lidar sweeps."""
return int(self.sensor_counts["lidar"])
@cached_property
def sensor_counts(self) -> pd.Series:
"""Return the number of records for each sensor."""
sensor_counts: pd.Series = self.sensor_records.index.get_level_values("sensor_name").value_counts()
return sensor_counts
@property
def num_sensors(self) -> int:
"""Return the number of sensors present throughout the dataset."""
return len(self.sensor_counts)
def _load_sensor_records(self) -> pd.DataFrame:
"""Load the sensor records from the root directory.
We glob the filesystem for all LiDAR and camera filepaths, and then convert each file path
to a "sensor record".
A sensor record is a 3-tuple consisting of the following:
log_id: uuid corresponding to ~15 seconds of sensor data.
sensor_name: the name of the sensor (e.g., 'lidar').
timestamp_ns: vehicle nanosecond timestamp at which the sensor data was recorded.
Returns:
Sensor record index.
"""
logger.info("Building metadata ...")
# Create the cache file path.
sensor_records_path = self.sensor_dataset_dir / "._sensor_records"
if sensor_records_path.exists():
logger.info("Cache found. Loading from disk ...")
sensor_records = read_feather(sensor_records_path)
else:
lidar_records = self.populate_lidar_records()
# Load camera records if enabled.
if self.cam_names:
logger.info("Loading camera data ...")
cam_records = self.populate_image_records()
# Concatenate lidar and camera records.
sensor_records = pd.concat([lidar_records, cam_records])
else:
sensor_records = lidar_records
# Save the metadata if caching is enable.
if self.with_cache:
sensor_records.reset_index(drop=True).to_feather(str(sensor_records_path))
# Set index as tuples of the form: (log_id, sensor_name, timestamp_ns) and sort the index.
# sorts by log_id, and then by sensor name, and then by timestamp.
sensor_records = sensor_records.set_index(["log_id", "sensor_name", "timestamp_ns"]).sort_index()
# Return all of the sensor records.
return sensor_records
def populate_lidar_records(self) -> pd.DataFrame:
"""Obtain (log_id, sensor_name, timestamp_ns) 3-tuples for all LiDAR sweeps in the dataset.
Returns:
DataFrame of shape (N,3) with `log_id`, `sensor_name`, and `timestamp_ns` columns.
N is the number of sweeps for all logs in the dataset, and the `sensor_name` column
should be populated with `lidar` in every entry.
"""
lidar_paths = sorted(self.sensor_dataset_dir.glob(LIDAR_PATTERN), key=lambda x: int(x.stem))
lidar_record_list = [
convert_path_to_named_record(x) for x in track(lidar_paths, description="Loading lidar records ...")
]
# Concatenate into single dataframe (list-of-dicts to DataFrame).
lidar_records = pd.DataFrame(lidar_record_list)
return lidar_records
def populate_image_records(self) -> pd.DataFrame:
"""Obtain (log_id, sensor_name, timestamp_ns) 3-tuples for all images in the dataset.
Returns:
DataFrame of shape (N,3) with `log_id`, `sensor_name`, and `timestamp_ns` columns.
N is the total number of images for all logs in the dataset, and the `sensor_name` column
should be populated with the name of the camera that captured the corresponding image in
every entry.
"""
# Get sorted list of camera paths.
cam_paths = sorted(self.sensor_dataset_dir.glob(CAMERA_PATTERN), key=lambda x: int(x.stem))
# Load entire set of camera records.
cam_record_list = [
convert_path_to_named_record(x) for x in track(cam_paths, description="Loading camera records ...")
]
# Concatenate into single dataframe (list-of-dicts to DataFrame).
cam_records = pd.DataFrame(cam_record_list)
return cam_records
def __len__(self) -> int:
"""Return the number of lidar sweeps in the dataset.
The lidar sensor operates at 10 Hz. There are roughly 15 seconds of sensor data per log.
Returns:
Number of lidar sweeps.
"""
return self.num_sweeps
def __iter__(self) -> SensorDataloader:
"""Initialize pointer to the current iterate."""
self._ptr = 0
return self
def __next__(self) -> SynchronizedSensorData:
"""Return the next datum in the dataset."""
result = self.__getitem__(self._ptr)
self._ptr += 1
return result
def __getitem__(self, idx: int) -> SynchronizedSensorData:
"""Load the lidar point cloud and optionally the camera imagery and annotations.
Grab the lidar sensor data and optionally the camera sensor data and annotations at the lidar record
corresponding to the specified index.
Args:
idx: Index in [0, self.num_sweeps - 1].
Returns:
Mapping from sensor name to data for the lidar record corresponding to the specified index.
"""
# Grab the lidar record at the specified index.
# Selects data at a particular level of a MultiIndex.
record: Tuple[str, int] = self.sensor_records.xs(key="lidar", level=1).iloc[idx].name
# Grab the identifying record fields.
log_id, timestamp_ns = record
log_lidar_records = self.sensor_records.xs((log_id, "lidar")).index
num_frames = len(log_lidar_records)
idx = np.where(log_lidar_records == timestamp_ns)[0].item()
sensor_dir = self.sensor_dataset_dir / log_id / "sensors"
lidar_feather_path = sensor_dir / "lidar" / f"{str(timestamp_ns)}.feather"
sweep = Sweep.from_feather(lidar_feather_path=lidar_feather_path)
log_dir = self.sensor_dataset_dir / log_id
timestamp_city_SE3_ego_dict = read_city_SE3_ego(log_dir=log_dir)
# Construct output datum.
datum = SynchronizedSensorData(
sweep=sweep,
log_id=log_id,
timestamp_city_SE3_ego_dict=timestamp_city_SE3_ego_dict,
sweep_number=idx,
num_sweeps_in_log=num_frames,
)
# Load annotations if enabled.
if self.with_annotations:
datum.annotations = self._load_annotations(log_id, timestamp_ns)
# Load camera imagery if enabled.
if self.cam_names:
datum.synchronized_imagery = self._load_synchronized_cams(sensor_dir, log_id, timestamp_ns)
# Return datum at the specified index.
return datum
def _build_sync_records(self) -> pd.DataFrame:
"""Build the synchronization records for lidar-camera synchronization.
This function builds a set of records to efficiently associate auxiliary sensors
to a target sensor. We use this function to associate the nanosecond vehicle
timestamps of the lidar sweep to the nearest images from all 9 cameras (7 ring + 2 stereo).
Once this dataframe is built, synchronized data can be queried in O(1) time.
NOTE: This function is NOT intended to be used outside of SensorDataset initialization.
Returns:
(self.num_sweeps, self.num_sensors) DataFrame where each row corresponds to the nanosecond camera
            timestamp that is closest (in absolute value) to the corresponding nanosecond lidar sweep timestamp.
"""
logger.info("Building synchronization database ...")
# Get unique log ids from the entire set of sensor data records.
log_ids: List[str] = self.sensor_records.index.unique(level="log_id").to_list()
# Create list to store synchronized data frames.
sync_list: List[pd.DataFrame] = []
# Iterate over all log ids.
for log_id in track(log_ids, description="Building sync records ..."):
# Select records associated with the current log id.
log_sensor_records = self.sensor_records.xs(key=log_id, level=0, drop_level=False)
# Get unique sensor names for a particular log.
# If the entire dataset is available, each log should have 7 ring cameras
# and 2 stereo cameras. The uniqueness check is required in case a subset of
# the data is being used by the end-user.
sensor_names: List[str] = log_sensor_records.index.unique(level="sensor_name").tolist()
# Remove lidar since we're using it as the reference sensor.
sensor_names.remove("lidar")
# Get lidar records for the selected log.
target_records = log_sensor_records.xs(key="lidar", level=1, drop_level=False).reset_index()
for sensor_name in sensor_names:
# Obtain tuples, convert tuples back to DataFrame, then rename `timestamp_ns' col, to sensor name,
# and finally remove the `log_id` column, to leave only a single column of timestamps.
src_records: pd.DataFrame = (
log_sensor_records.xs(sensor_name, level=1)
.reset_index()
.rename({"timestamp_ns": sensor_name}, axis=1)
.drop(["log_id"], axis=1)
)
# Match on the closest nanosecond timestamp.
# we do not pad the values, as NaN entries are meaningful.
target_records = pd.merge_asof(
target_records,
src_records,
left_on="timestamp_ns",
right_on=sensor_name,
direction=self.matching_criterion,
tolerance=int(LIDAR_SWEEP_INTERVAL_W_BUFFER_NS),
)
sync_list.append(target_records)
return pd.concat(sync_list).reset_index(drop=True)
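    # Hedged toy illustration of the matching performed above (not AV2 data): pd.merge_asof pairs
    # each lidar timestamp with the closest source-sensor timestamp within the tolerance and
    # leaves NaN where nothing falls inside the window.
    #
    # lidar = pd.DataFrame({"timestamp_ns": [100, 200, 300]})
    # cam = pd.DataFrame({"ring_front_center": [95, 198, 410]})
    # pd.merge_asof(lidar, cam, left_on="timestamp_ns", right_on="ring_front_center",
    #               direction="nearest", tolerance=60)
    # #    timestamp_ns  ring_front_center
    # # 0           100               95.0
    # # 1           200              198.0
    # # 2           300                NaN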
def get_closest_img_fpath(self, log_id: str, cam_name: str, lidar_timestamp_ns: int) -> Optional[Path]:
"""Find the filepath to the image from a particular a camera, w/ closest timestamp to a lidar sweep timestamp.
Args:
log_id: unique ID of vehicle log.
cam_name: name of camera.
lidar_timestamp_ns: integer timestamp of LiDAR sweep capture, in nanoseconds
Returns:
img_fpath, string representing path to image, or else None.
Raises:
RuntimeError: if the synchronization database (sync_records) has not been created.
"""
if self.sync_records is None:
raise RuntimeError("Requested synchronized data, but the synchronization database has not been created.")
if lidar_timestamp_ns not in self.sync_records.loc[(log_id, "lidar")].index:
# this timestamp does not correspond to any LiDAR sweep.
return None
# Create synchronization key.
key = (log_id, "lidar", lidar_timestamp_ns)
# Grab the synchronization record.
timestamp_ns = self.sync_records.loc[key, cam_name]
        if pd.isna(timestamp_ns):
            return None
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
from nose.tools import (assert_equal,
assert_almost_equal,
raises,
ok_,
eq_)
from rsmtool.preprocessor import (FeaturePreprocessor,
FeatureSubsetProcessor,
FeatureSpecsProcessor)
class TestFeaturePreprocessor:
def setUp(self):
self.fpp = FeaturePreprocessor()
def test_select_candidates_with_N_or_more_items(self):
data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
df_included_expected = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2,
'sc1': [2, 3, 1, 5, 6]})
df_excluded_expected = pd.DataFrame({'candidate': ['c'],
'sc1': [1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2)
assert_frame_equal(df_included, df_included_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_select_candidates_with_N_or_more_items_all_included(self):
data = pd.DataFrame({'candidate': ['a'] * 2 + ['b'] * 2 + ['c'] * 2,
'sc1': [2, 3, 1, 5, 6, 1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2)
assert_frame_equal(df_included, data)
assert_equal(len(df_excluded), 0)
def test_select_candidates_with_N_or_more_items_all_excluded(self):
data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 4)
assert_frame_equal(df_excluded, data)
assert_equal(len(df_included), 0)
def test_select_candidates_with_N_or_more_items_custom_name(self):
data = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
df_included_expected = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2,
'sc1': [2, 3, 1, 5, 6]})
df_excluded_expected = pd.DataFrame({'ID': ['c'],
'sc1': [1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2, 'ID')
assert_frame_equal(df_included, df_included_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_rename_no_columns(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length',
'raw', 'candidate', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2',
'length', 'raw', 'candidate')
assert_array_equal(df.columns,
['spkitemid', 'sc1', 'sc2', 'length', 'raw',
'candidate', 'feature1', 'feature2'])
def test_rename_no_columns_some_values_none(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2'])
def test_rename_no_used_columns_but_unused_columns_with_default_names(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2',
'##length##', 'feature1', 'feature2'])
def test_rename_used_columns(self):
df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'SR', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', 'SR', None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length',
'raw', 'feature1', 'feature2'])
def test_rename_used_columns_and_unused_columns_with_default_names(self):
df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'raw', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length',
'##raw##', 'feature1', 'feature2'])
def test_rename_used_columns_with_swapped_names(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'raw', 'words', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'sc2', 'sc1', 'words', None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc2', 'sc1', '##raw##',
'length', 'feature1', 'feature2'])
def test_rename_used_columns_but_not_features(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'length', 'feature2'])
df = self.fpp.rename_default_columns(df, ['length'], 'id', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length', 'feature2'])
def test_rename_candidate_column(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length',
'apptNo', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [],
'spkitemid', 'sc1', 'sc2', None, None, 'apptNo')
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', '##length##',
'candidate', 'feature1', 'feature2'])
def test_rename_candidate_named_sc2(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'question', 'l1', 'score'])
df_renamed = self.fpp.rename_default_columns(df, [],
'id', 'sc1', None, None, 'score', 'sc2')
assert_array_equal(df_renamed.columns, ['spkitemid', 'sc1',
'candidate', 'question', 'l1', 'raw'])
@raises(KeyError)
def test_check_subgroups_missing_columns(self):
df = pd.DataFrame(columns=['a', 'b', 'c'])
subgroups = ['a', 'd']
FeaturePreprocessor.check_subgroups(df, subgroups)
def test_check_subgroups_nothing_to_replace(self):
df = pd.DataFrame({'a': ['1', '2'],
'b': ['32', '34'],
'd': ['abc', 'def']})
subgroups = ['a', 'd']
df_out = FeaturePreprocessor.check_subgroups(df, subgroups)
assert_frame_equal(df_out, df)
def test_check_subgroups_replace_empty(self):
df = pd.DataFrame({'a': ['1', ''],
'b': [' ', '34'],
'd': ['ab c', ' ']})
subgroups = ['a', 'd']
df_expected = pd.DataFrame({'a': ['1', 'No info'],
'b': [' ', '34'],
'd': ['ab c', 'No info']})
df_out = FeaturePreprocessor.check_subgroups(df, subgroups)
assert_frame_equal(df_out, df_expected)
def test_filter_on_column(self):
bad_df = pd.DataFrame({'spkitemlab': np.arange(1, 9, dtype='int64'),
'sc1': ['00', 'TD', '02', '03'] * 2})
df_filtered_with_zeros = pd.DataFrame({'spkitemlab': [1, 3, 4, 5, 7, 8],
'sc1': [0.0, 2.0, 3.0] * 2})
df_filtered = pd.DataFrame({'spkitemlab': [3, 4, 7, 8], 'sc1': [2.0, 3.0] * 2})
(output_df_with_zeros,
output_excluded_df_with_zeros) = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=False)
output_df, output_excluded_df = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=True)
assert_frame_equal(output_df_with_zeros, df_filtered_with_zeros)
assert_frame_equal(output_df, df_filtered)
def test_filter_on_column_all_non_numeric(self):
bad_df = pd.DataFrame({'sc1': ['A', 'I', 'TD', 'TD'] * 2,
'spkitemlab': range(1, 9)})
expected_df_excluded = bad_df.copy()
expected_df_excluded.drop('sc1', axis=1, inplace=True)
df_filtered, df_excluded = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=True)
ok_(df_filtered.empty)
ok_("sc1" not in df_filtered.columns)
assert_frame_equal(df_excluded, expected_df_excluded, check_dtype=False)
def test_filter_on_column_std_epsilon_zero(self):
# Test that the function exclude columns where std is returned as
# very low value rather than 0
data = {'id': np.arange(1, 21, dtype='int64'),
'feature_ok': np.arange(1, 21),
'feature_zero_sd': [1.5601] * 20}
bad_df = pd.DataFrame(data=data)
output_df, output_excluded_df = self.fpp.filter_on_column(bad_df,
'feature_zero_sd',
'id',
exclude_zeros=False,
exclude_zero_sd=True)
good_df = bad_df[['id', 'feature_ok']].copy()
assert_frame_equal(output_df, good_df)
ok_(output_excluded_df.empty)
def test_filter_on_column_with_inf(self):
# Test that the function exclude columns where feature value is 'inf'
data = pd.DataFrame({'feature_1': [1.5601, 0, 2.33, 11.32],
'feature_ok': np.arange(1, 5)})
data['feature_with_inf'] = 1 / data['feature_1']
data['id'] = np.arange(1, 5, dtype='int64')
bad_df = data[np.isinf(data['feature_with_inf'])].copy()
good_df = data[~np.isinf(data['feature_with_inf'])].copy()
bad_df.reset_index(drop=True, inplace=True)
good_df.reset_index(drop=True, inplace=True)
output_df, output_excluded_df = self.fpp.filter_on_column(data, 'feature_with_inf',
'id',
exclude_zeros=False,
exclude_zero_sd=True)
assert_frame_equal(output_df, good_df)
assert_frame_equal(output_excluded_df, bad_df)
def test_filter_on_flag_column_empty_flag_dictionary(self):
# no flags specified, keep the data frame as is
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 0, 0, 0],
'flag2': [1, 2, 2, 1]})
flag_dict = {}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.5, 1.1, 2.2, 3.6]})
flag_dict = {'flag1': [0.5, 1.1, 2.2, 3.6, 4.5]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['a', 'b', 'c', 'd']})
flag_dict = {'flag1': ['a', 'b', 'c', 'd', 'e']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.0, 1.0, 2.0, 3.0]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.0, 4.5]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['4', '1', '2', '3.5']})
flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.5, 4.0]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [4.0, 1.0, 2.0, 3.5]})
flag_dict = {'flag1': ['1', '2', '3.5', '4', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['0.0', '1.0', '2.0', '3.0']})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': ['0.0', '1.0', '2.0', '3.0', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.0', 2, 3.5]})
flag_dict = {'flag1': ['0.0', '1.0', '2.0', '3.5', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.0', 2, 3.0]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.5', 2, 3.5]})
flag_dict = {'flag1': [0.0, 1.5, 2.0, 3.5, 4.0]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.0, 1.0, 2.0, 3.5]})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['0.0', '1.0', '2.0', '3.5']})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [1, 2, 3.5, 'TD']})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_mixed_type_column_mixed_type_dict_filter_preserve_type(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS']})
flag_dict = {'flag1': [1.5, 2, 'TD']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD']})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS']})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_int_flag_column_int_dict(self):
df = pd.DataFrame({'spkitemid': [1, 2, 3, 4, 5, 6],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 2, 2, 3, 4, None]}, dtype=object)
flag_dict = {'flag1': [2, 4]}
df_new_expected = pd.DataFrame({'spkitemid': [2, 3, 5],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2, 2, 4]}, dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': [1, 4, 6],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3, None]}, dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_float_flag_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1.2, 2.1, 2.1, 3.3, 4.2, None]})
flag_dict = {'flag1': [2.1, 4.2]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2.1, 2.1, 4.2]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1.2, 3.3, None]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_str_flag_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': ['a', 'b', 'b', 'c', 'd', None]})
flag_dict = {'flag1': ['b', 'd']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': ['b', 'b', 'd']})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': ['a', 'c', None]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2.0, 'TD', 2.0, None]},
dtype=object)
flag_dict = {'flag1': [1.5, 2.0]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2.0, 2.0]},
dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 'TD', None]},
dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1.5, 2, 2, 'TD', 4, None]},
dtype=object)
flag_dict = {'flag1': [2, 4]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2, 2, 4]},
dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1.5, 'TD', None]},
dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', None]},
dtype=object)
flag_dict = {'flag1': [1.5, 2, 'TD']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD']}, dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, None]}, dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_two_flags_same_responses(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS'],
'flag2': [1, 0, 0, 1, 0, 1]})
flag_dict = {'flag1': [1.5, 2, 'TD'], 'flag2': [0]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD'],
'flag2': [0, 0, 0]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS'],
'flag2': [1, 1, 1]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_two_flags_different_responses(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS'],
'flag2': [2, 0, 0, 1, 0, 1]})
flag_dict = {'flag1': [1.5, 2, 'TD', 'NS'], 'flag2': [0, 2]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD'],
'flag2': [0, 0, 0]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS'],
'flag2': [2, 1, 1]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
@raises(KeyError)
def test_filter_on_flag_column_missing_columns(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['1', '1', '1', '1'],
'flag2': ['1', '2', '2', '1']})
flag_dict = {'flag3': ['0'], 'flag2': ['1', '2']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
@raises(ValueError)
def test_filter_on_flag_column_nothing_left(self):
bad_df = pd.DataFrame({'spkitemid': ['a1', 'b1', 'c1', 'd1'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [1, 0, 20, 14],
'flag2': [1, 1.0, 'TD', '03']})
flag_dict = {'flag1': [1, 0, 14], 'flag2': ['TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(bad_df, flag_dict)
def test_remove_outliers(self):
# we want to test that even if we pass in a list of
# integers, we still get the right clamped output
data = [1, 1, 2, 2, 1, 1] * 10 + [10]
ceiling = np.mean(data) + 4 * np.std(data)
clamped_data = FeaturePreprocessor.remove_outliers(data)
assert_almost_equal(clamped_data[-1], ceiling)
def test_generate_feature_names_subset(self):
reserved_column_names = ['reserved_col1', 'reserved_col2']
expected = ['col_1']
df = pd.DataFrame({'reserved_col1': ['X', 'Y', 'Z'],
'reserved_col2': ['Q', 'R', 'S'],
'col_1': [1, 2, 3],
'col_2': ['A', 'B', 'C']})
subset = 'A'
feature_subset = pd.DataFrame({'Feature': ['col_1', 'col_2', 'col_3'],
'A': [1, 0, 0],
'B': [1, 1, 1]})
feat_names = self.fpp.generate_feature_names(df,
reserved_column_names,
feature_subset,
subset)
eq_(feat_names, expected)
def test_generate_feature_names_none(self):
reserved_column_names = ['reserved_col1', 'reserved_col2']
expected = ['col_1', 'col_2']
df = pd.DataFrame({'reserved_col1': ['X', 'Y', 'Z'],
'reserved_col2': ['Q', 'R', 'S'],
'col_1': [1, 2, 3],
'col_2': ['A', 'B', 'C']})
feat_names = self.fpp.generate_feature_names(df,
reserved_column_names,
feature_subset_specs=None,
feature_subset=None)
eq_(feat_names, expected)
def test_model_name_builtin_model(self):
model_name = 'LinearRegression'
model_type = self.fpp.check_model_name(model_name)
eq_(model_type, 'BUILTIN')
def test_model_name_skll_model(self):
model_name = 'AdaBoostRegressor'
model_type = self.fpp.check_model_name(model_name)
eq_(model_type, 'SKLL')
@raises(ValueError)
def test_model_name_wrong_name(self):
model_name = 'random_model'
self.fpp.check_model_name(model_name)
def test_trim(self):
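# Note (inferred from the expected values below, not stated in the original):
# trim() appears to clamp values into [low - tolerance, high + tolerance];
# with the default tolerance of 0.4998, the out-of-range 8.5 becomes 8.4998.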
values = np.array([1.4, 8.5, 7.4])
expected = np.array([1.4, 8.4998, 7.4])
actual = FeaturePreprocessor.trim(values, 1, 8)
assert_array_equal(actual, expected)
def test_trim_with_list(self):
values = [1.4, 8.5, 7.4]
expected = np.array([1.4, 8.4998, 7.4])
actual = FeaturePreprocessor.trim(values, 1, 8)
assert_array_equal(actual, expected)
def test_trim_with_custom_tolerance(self):
values = [0.6, 8.4, 7.4]
expected = np.array([0.75, 8.25, 7.4])
actual = FeaturePreprocessor.trim(values, 1, 8, 0.25)
assert_array_equal(actual, expected)
def test_preprocess_feature_fail(self):
np.random.seed(10)
values = np.random.random(size=1000)
values = np.append(values, np.array([10000000]))
mean = values.mean()
std = values.std()
expected = values.copy()
expected[-1] = mean + 4 * std
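# the single extreme value is expected to be clamped at mean + 4 * std,
# mirroring the behaviour exercised in test_remove_outliers above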
actual = self.fpp.preprocess_feature(values,
'A',
'raw',
mean,
std)
assert_array_equal(actual, expected)
def test_preprocess_feature_with_outlier(self):
np.random.seed(10)
values = np.random.random(size=1000)
values = np.append(values, np.array([10000000]))
mean = values.mean()
std = values.std()
expected = values.copy()
expected[-1] = mean + 4 * std
actual = self.fpp.preprocess_feature(values,
'A',
'raw',
mean,
std,
exclude_zero_sd=True)
assert_array_equal(actual, expected)
def test_preprocess_features(self):
train = pd.DataFrame({'A': [1, 2, 4, 3]})
test = pd.DataFrame({'A': [4, 3, 2, 1]})
train_expected = (train['A'] - train['A'].mean()) / train['A'].std()
train_expected = pd.DataFrame(train_expected)
test_expected = (test['A'] - test['A'].mean()) / test['A'].std()
test_expected = pd.DataFrame(test_expected)
info_expected = pd.DataFrame({'feature': ['A'],
'sign': [1],
'train_mean': [train.A.mean()],
'train_sd': [train.A.std()],
'train_transformed_mean': [train.A.mean()],
'train_transformed_sd': [test.A.std()],
'transform': ['raw']})
specs = pd.DataFrame({'feature': ['A'],
'transform': ['raw'],
'sign': [1]})
(train_processed,
test_processed,
info_processed) = self.fpp.preprocess_features(train, test, specs)
assert_frame_equal(train_processed.sort_index(axis=1),
train_expected.sort_index(axis=1))
assert_frame_equal(test_processed.sort_index(axis=1),
test_expected.sort_index(axis=1))
assert_frame_equal(info_processed.sort_index(axis=1),
info_expected.sort_index(axis=1))
def test_filter_data_features(self):
data = {'ID': [1, 2, 3, 4],
'LENGTH': [10, 12, 11, 12],
'h1': [1, 2, 3, 1],
'candidate': ['A', 'B', 'C', 'A'],
'h2': [1, 2, 3, 1],
'feature1': [1, 3, 4, 1],
'feature2': [1, 3, 2, 2]}
df_filtered_features_expected = pd.DataFrame({'spkitemid': [1, 2, 3, 4],
'sc1': [1.0, 2.0, 3.0, 1.0],
'feature1': [1.0, 3.0, 4.0, 1.0],
'feature2': [1.0, 3.0, 2.0, 2.0]})
df_filtered_features_expected = df_filtered_features_expected[['spkitemid',
'sc1',
'feature1',
'feature2']]
data = pd.DataFrame(data)
(df_filtered_features,
_,
_,
_,
_,
_,
_,
_,
_,
_) = self.fpp.filter_data(data,
'h1',
'ID',
'LENGTH',
'h2',
'candidate',
['feature1', 'feature2'],
['LENGTH', 'ID', 'candidate', 'h1'],
0,
6,
{},
[])
assert_frame_equal(df_filtered_features,
df_filtered_features_expected)
def test_filter_data_correct_features_and_length_in_other_columns(self):
data = {'ID': [1, 2, 3, 4],
'LENGTH': [10, 10, 10, 10],
'h1': [1, 2, 3, 1],
'candidate': ['A', 'B', 'C', 'A'],
'h2': [1, 2, 3, 1],
'feature1': [1, 3, 4, 1],
'feature2': [1, 3, 2, 2]}
data = pd.DataFrame(data)
(_,
_,
df_filtered_other_columns,
_,
_,
_,
_,
_,
_,
feature_names) = self.fpp.filter_data(data,
'h1',
'ID',
'LENGTH',
'h2',
'candidate',
['feature1', 'feature2'],
['LENGTH', 'ID', 'candidate', 'h1'],
0,
6,
{},
[])
eq_(feature_names, ['feature1', 'feature2'])
assert '##LENGTH##' in df_filtered_other_columns.columns
def test_filter_data_length_in_other_columns(self):
data = {'ID': [1, 2, 3, 4],
'LENGTH': [10, 10, 10, 10],
'h1': [1, 2, 3, 1],
'candidate': ['A', 'B', 'C', 'A'],
'h2': [1, 2, 3, 1],
'feature1': [1, 3, 4, 1],
'feature2': [1, 3, 2, 2]}
data = pd.DataFrame(data)
(_,
_,
df_filtered_other_columns,
_,
_,
_,
_,
_,
_,
feature_names) = self.fpp.filter_data(data,
'h1',
'ID',
'LENGTH',
'h2',
'candidate',
['feature1', 'feature2'],
['LENGTH', 'ID', 'candidate', 'h1'],
0,
6,
{},
[])
eq_(feature_names, ['feature1', 'feature2'])
assert '##LENGTH##' in df_filtered_other_columns.columns
@raises(ValueError)
def test_filter_data_min_candidates_raises_value_error(self):
data = {'ID': [1, 2, 3, 4],
'LENGTH': [10, 10, 10, 10],
'h1': [1, 2, 3, 1],
'candidate': ['A', 'B', 'C', 'A'],
'h2': [1, 2, 3, 1],
'feature1': [1, 3, 4, 1],
'feature2': [1, 3, 2, 2]}
data = pd.DataFrame(data)
self.fpp.filter_data(data,
'h1',
'ID',
'LENGTH',
'h2',
'candidate',
['feature1', 'feature2'],
['LENGTH', 'ID', 'candidate', 'h1'],
0,
6,
{},
[],
min_candidate_items=5)
def test_filter_data_with_min_candidates(self):
data = {'ID': [1, 2, 3, 4],
'LENGTH': [10, 10, 10, 10],
'h1': [1, 2, 3, 1],
'candidate': ['A', 'B', 'C', 'A'],
'h2': [1, 2, 3, 1],
'feature1': [1, 3, 4, 1],
'feature2': [1, 3, 2, 2]}
data = pd.DataFrame(data)
(df_filtered_features,
_,
_,
_,
_,
df_filtered_human_scores,
_,
_,
_,
_) = self.fpp.filter_data(data,
'h1',
'ID',
'LENGTH',
'h2',
'candidate',
['feature1', 'feature2'],
['LENGTH', 'ID', 'candidate', 'h1'],
0,
6,
{},
[],
min_candidate_items=2)
eq_(df_filtered_features.shape[0], 2)
assert all(col in df_filtered_human_scores.columns
for col in ['sc1', 'sc2'])
def test_filter_data_id_candidate_equal(self):
data = {'LENGTH': [10, 12, 18, 21],
'h1': [1, 2, 3, 1],
'candidate': ['A', 'B', 'C', 'D'],
'h2': [1, 2, 3, 1],
'feature1': [1, 3, 4, 1],
'feature2': [1, 3, 2, 2]}
data = pd.DataFrame(data)
(_,
df_filtered_metadata,
_,
_,
_,
_,
_,
_,
_,
_) = self.fpp.filter_data(data,
'h1',
'candidate',
'LENGTH',
'h2',
'candidate',
['feature1', 'feature2'],
['LENGTH', 'ID', 'candidate', 'h1'],
0,
6,
{},
[])
expected = pd.DataFrame({'spkitemid': ['A', 'B', 'C', 'D'],
'candidate': ['A', 'B', 'C', 'D']})
expected = expected[['spkitemid', 'candidate']]
assert_frame_equal(df_filtered_metadata, expected)
class TestFeatureSpecsProcessor:
def test_generate_default_specs(self):
fnames = ['Grammar', 'Vocabulary', 'Pronunciation']
df_specs = FeatureSpecsProcessor.generate_default_specs(fnames)
assert_equal(len(df_specs), 3)
assert_equal(df_specs['feature'][0], 'Grammar')
assert_equal(df_specs['transform'][1], 'raw')
assert_equal(df_specs['sign'][2], 1.0)
def test_generate_specs_from_data_with_negative_sign(self):
feature_subset_specs = pd.DataFrame({'Feature': ['Grammar',
'Vocabulary',
'Fluency',
'Content_coverage',
'Discourse'],
'Sign_SYS1': ['-', '+', '+', '+', '-']})
np.random.seed(10)
data = {'Grammar': np.random.randn(10),
'Fluency': np.random.randn(10),
'Discourse': np.random.randn(10),
'r1': np.random.choice(4, 10),
'spkitemlab': ['a-5'] * 10}
df = pd.DataFrame(data)
df_specs = FeatureSpecsProcessor.generate_specs(df,
['Grammar',
'Fluency',
'Discourse'],
'r1',
feature_subset_specs,
'SYS1')
assert_equal(len(df_specs), 3)
assert_array_equal(df_specs['feature'], ['Grammar', 'Fluency', 'Discourse'])
assert_array_equal(df_specs['sign'], [-1.0, 1.0, -1.0])
def test_generate_specs_from_data_with_default_sign(self):
feature_subset_specs = pd.DataFrame({'Feature': ['Grammar',
'Vocabulary',
'Fluency',
'Content_coverage',
'Discourse'],
'Sign_SYS1': ['-', '+', '+', '+', '-']})
np.random.seed(10)
data = {'Grammar': np.random.randn(10),
'Fluency': np.random.randn(10),
'Discourse': np.random.randn(10),
'r1': np.random.choice(4, 10),
'spkitemlab': ['a-5'] * 10}
df = pd.DataFrame(data)
df_specs = FeatureSpecsProcessor.generate_specs(df,
['Grammar',
'Fluency',
'Discourse'],
'r1',
feature_subset_specs,
feature_sign=None)
assert_equal(len(df_specs), 3)
assert_array_equal(df_specs['feature'], ['Grammar', 'Fluency', 'Discourse'])
assert_array_equal(df_specs['sign'], [1.0, 1.0, 1.0])
def test_generate_specs_from_data_with_transformation(self):
feature_subset_specs = pd.DataFrame({'Feature': ['Grammar',
'Vocabulary',
'Fluency',
'Content_coverage',
'Discourse'],
'Sign_SYS1': ['-', '+', '+', '+', '-']})
np.random.seed(10)
r1 = np.random.choice(range(1, 5), 10)
data = {'Grammar': np.random.randn(10),
'Vocabulary': r1**2,
'Discourse': np.random.randn(10),
'r1': r1,
'spkitemlab': ['a-5'] * 10}
df = pd.DataFrame(data)
df_specs = FeatureSpecsProcessor.generate_specs(df,
['Grammar',
'Vocabulary',
'Discourse'],
'r1',
feature_subset_specs,
'SYS1')
assert_array_equal(df_specs['feature'], ['Grammar', 'Vocabulary', 'Discourse'])
assert_equal(df_specs['transform'][1], 'sqrt')
def test_generate_specs_from_data_when_transformation_changes_sign(self):
feature_subset_specs = pd.DataFrame({'Feature': ['Grammar',
'Vocabulary',
'Fluency',
'Content_coverage',
'Discourse'],
'Sign_SYS1': ['-', '+', '+', '+', '-']})
np.random.seed(10)
r1 = np.random.choice(range(1, 5), 10)
data = {'Grammar': np.random.randn(10),
'Vocabulary': 1 / r1,
'Discourse': np.random.randn(10),
'r1': r1,
'spkitemlab': ['a-5'] * 10}
df = pd.DataFrame(data)
df_specs = FeatureSpecsProcessor.generate_specs(df,
['Grammar',
'Vocabulary',
'Discourse'],
'r1',
feature_subset_specs,
'SYS1')
assert_equal(df_specs['feature'][1], 'Vocabulary')
assert_equal(df_specs['transform'][1], 'addOneInv')
assert_equal(df_specs['sign'][1], -1)
def test_generate_specs_from_data_no_subset_specs(self):
np.random.seed(10)
data = {'Grammar': np.random.randn(10),
'Fluency': np.random.randn(10),
'Discourse': np.random.randn(10),
'r1': np.random.choice(4, 10),
'spkitemlab': ['a-5'] * 10}
df = pd.DataFrame(data)
df_specs = FeatureSpecsProcessor.generate_specs(df,
['Grammar',
'Fluency',
'Discourse'],
'r1')
assert_equal(len(df_specs), 3)
assert_array_equal(df_specs['feature'], ['Grammar', 'Fluency', 'Discourse'])
assert_array_equal(df_specs['sign'], [1.0, 1.0, 1.0])
def test_validate_feature_specs(self):
df_feature_specs = pd.DataFrame({'feature': ['f1', 'f2', 'f3'],
'sign': [1.0, 1.0, -1.0],
'transform': ['raw', 'inv', 'sqrt']})
df_new_feature_specs = FeatureSpecsProcessor.validate_feature_specs(df_feature_specs)
assert_frame_equal(df_feature_specs, df_new_feature_specs)
def test_validate_feature_specs_with_Feature_as_column(self):
df_feature_specs = pd.DataFrame({'Feature': ['f1', 'f2', 'f3'],
'sign': [1.0, 1.0, -1.0],
'transform': ['raw', 'inv', 'sqrt']})
df_expected_feature_specs = pd.DataFrame({'feature': ['f1', 'f2', 'f3'],
'sign': [1.0, 1.0, -1.0],
'transform': ['raw', 'inv', 'sqrt']})
df_new_feature_specs = FeatureSpecsProcessor.validate_feature_specs(df_feature_specs)
assert_frame_equal(df_new_feature_specs, df_expected_feature_specs)
def test_validate_feature_specs_sign_to_float(self):
df_feature_specs = pd.DataFrame({'feature': ['f1', 'f2', 'f3'],
'sign': ['1', '1', '-1'],
'transform': ['raw', 'inv', 'sqrt']})
df_expected_feature_specs = pd.DataFrame({'feature': ['f1', 'f2', 'f3'],
'sign': [1.0, 1.0, -1.0],
'transform': ['raw', 'inv', 'sqrt']})
df_new_feature_specs = FeatureSpecsProcessor.validate_feature_specs(df_feature_specs)
assert_frame_equal(df_new_feature_specs, df_expected_feature_specs)
def test_validate_feature_specs_add_default_values(self):
df_feature_specs = pd.DataFrame({'feature': ['f1', 'f2', 'f3']})
df_expected_feature_specs = pd.DataFrame({'feature': ['f1', 'f2', 'f3'],
'sign': [1, 1, 1],
'transform': ['raw', 'raw', 'raw']})
df_new_feature_specs = FeatureSpecsProcessor.validate_feature_specs(df_feature_specs)
assert_frame_equal(df_new_feature_specs, df_expected_feature_specs)  # api: pandas.testing.assert_frame_equal
# -*- coding: UTF-8 -*-
import pandas as pd
print("*"*10,"Series and DataFrame simple example","*"*10)
obj = pd.Series([40,12,-3,25])
print(obj)
print(obj.index)
print(obj.values)
print(obj>15)
print(obj[obj>15])
print(obj.mean())
obj_dict = obj.to_dict()  # avoid shadowing the built-in dict
print(obj_dict.values())
dic = {'one': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']), 'two': pd.Series([3., 4., 5., 6.], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(dic)
print(df)
df.to_excel('dataframe.xls')
a = pd.read_excel('dataframe.xls')
print(a)
data = pd.read_csv('closeprice.csv', dtype={'seq': str}, encoding='gbk')  # api: pandas.read_csv
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import os.path
from os import path
import pandas as pd
import pickle
import numpy as np
import options
def load_debt_price_data(debt_price_source: options.DebtPriceSource):
"""
Load the debt price data, either from an external data source, or generated by the debt price regression model.
If the DebtPriceSource is set to EXTERNAL, check if there are existing Google spreadsheet credentials,
otherwise load the debt price data from a local CSV.
Tabular data format example:
'''csv
timestamp, seconds_passed, price_move
2020-09-07 10:05:33, 0, 1
2020-09-07 11:31:58, 5185, 0.9993389888
'''
    If the DebtPriceSource is set to DEBT_MARKET_MODEL, load the scikit-learn regression model trained on the historical MakerDAO Dai dataset,
    and return the debt price data as a Pandas dataframe.
"""
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
from autosklearn.regression import AutoSklearnRegressor
from autosklearn.metrics import mean_squared_error as auto_mean_squared_error
test_dfs = []
if debt_price_source == options.DebtPriceSource.EXTERNAL.value:
if path.exists('./credentials/spreadsheet-credentials.json'):
scope = [
'https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive'
]
credentials = ServiceAccountCredentials.from_json_keyfile_name('credentials/spreadsheet-credentials.json', scope) # Your json file here
gc = gspread.authorize(credentials)
test_spreadsheet = gc.open('debt-price-test-data')
print(gc.list_spreadsheet_files())
worksheet_list = test_spreadsheet.worksheets()
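# each worksheet's first row is used as the column header; the remaining
# rows become the dataframe body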
test_dfs = [pd.DataFrame(ws.get_all_values()[1:], columns=ws.get_all_values()[0]).copy() for ws in worksheet_list]
else:
debt_price_dataframe = pd.read_csv('./tests/data/default_debt_price_source.csv')
test_dfs = [debt_price_dataframe]
elif debt_price_source == options.DebtPriceSource.DEBT_MARKET_MODEL.value:
# Load the historical debt market dataset
debt_market_df = pd.read_csv('models/market_model/data/debt_market_df.csv', index_col='date', parse_dates=True)  # api: pandas.read_csv
# --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#Sample_Size
sample_size=2000
#Z_Critical Score
z_critical = stats.norm.ppf(q = 0.95)
#Code starts here
data = pd.read_csv(path)
data_sample = data.sample(n=sample_size,random_state=0)
sample_mean = data_sample['installment'].mean()
sample_std = data_sample['installment'].std()
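# margin of error = z * s / sqrt(n), where n is the sample size drawn above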
margin_of_error = (z_critical * sample_std) / math.sqrt(sample_size)
confidence_interval = (sample_mean - margin_of_error, sample_mean + margin_of_error)
true_mean = data['installment'].mean()
print('sample mean is ', sample_mean)
print('confidence interval are ', confidence_interval)
print('true mean is ',true_mean)
# --------------
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#Different sample sizes to take
sample_size=np.array([20,50,100])
#Code starts here
fig, axes = plt.subplots(3, 1)
for i in range(len(sample_size)):
    # draw 1000 samples of each size and record the mean of each sample
    m = []
    for j in range(1000):
        m.append(data['installment'].sample(n=sample_size[i]).mean())
    mean_series = pd.Series(m)
    axes[i].plot(mean_series)
# --------------
#Importing header files
from statsmodels.stats.weightstats import ztest
import pandas as pd
#Code starts here
data = pd.read_csv(path)  # api: pandas.read_csv
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
from bt.core import FixedIncomeStrategy, HedgeSecurity, FixedIncomeSecurity
from bt.core import CouponPayingSecurity, CouponPayingHedgeSecurity
from bt.core import is_zero
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree1():
# Create a regular strategy
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c1
assert p['c1'] != c2
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
# Create a new parent strategy with a child sub-strategy
m = Node('m', children=[p, c1])
p = m['p']
mc1 = m['c1']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 2
assert 'p' in m.children
assert 'c1' in m.children
assert mc1 != c1
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
assert m == p.root
assert m == c1.root
assert m == c2.root
# Add a new node into the strategy
c0 = Node('c0', parent=p)
c0 = p['c0']
assert 'c0' in p.children
assert p == c0.parent
assert m == c0.root
assert len(p.children) == 3
# Add a new sub-strategy into the parent strategy
p2 = Node( 'p2', children = [c0, c1], parent=m )
p2 = m['p2']
c0 = p2['c0']
c1 = p2['c1']
assert 'p2' in m.children
assert p2.parent == m
assert len(p2.children) == 2
assert 'c0' in p2.children
assert 'c1' in p2.children
assert c0 != p['c0']
assert c1 != p['c1']
assert p2 == c0.parent
assert p2 == c1.parent
assert m == p2.root
assert m == c0.root
assert m == c1.root
def test_node_tree2():
# Just like test_node_tree1, but using the dictionary constructor
c = Node('template')
p = Node('p', children={'c1':c, 'c2':c, 'c3':'', 'c4':''})
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c
assert p['c1'] != c
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert c1.name == 'c1'
assert c2.name == 'c2'
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
def test_node_tree3():
c1 = Node('c1')
c2 = Node('c1') # Same name!
raised = False
try:
p = Node('p', children=[c1, c2, 'c3', 'c4'])
except ValueError:
raised = True
assert raised
raised = False
try:
p = Node('p', children=['c1', 'c1'])
except ValueError:
raised = True
assert raised
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
raised = False
try:
Node('c1', parent = p )
except ValueError:
raised = True
assert raised
# This does not raise, as it's just providing an implementation of 'c3',
# which had been declared earlier
c3 = Node('c3', parent = p )
assert 'c3' in p.children
def test_integer_positions():
c1 = Node('c1')
c2 = Node('c2')
c1.integer_positions = False
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
p.use_integer_positions(False)
assert not p.integer_positions
assert not c1.integer_positions
assert not c2.integer_positions
c3 = Node('c3', parent=p)
c3 = p['c3']
assert not c3.integer_positions
p2 = Node( 'p2', children = [p] )
p = p2['p']
c1 = p['c1']
c2 = p['c2']
assert p2.integer_positions
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
s.update(dts[0])
assert s.flows[ dts[0] ] == 1000
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert c2.price == 95
i = 2
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.loc[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.loc[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.loc[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert s.price == 100
s.adjust(1000)
assert s.price == 100
assert s.value == 1000
assert s._value == 1000
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.position == 5
assert c1.value == 525
assert c1.weight == 525.0 / 1025
assert s.capital == 1000 - 500
assert s.value == 1025
assert np.allclose(s.price, 102.5)
def test_strategybase_universe():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
def test_strategybase_allocate():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
def test_strategybase_lazy():
# A mix of test_strategybase_universe and test_strategybase_allocate
# to make sure that assets with lazy_add work correctly.
c1 = SecurityBase('c1', multiplier=2, lazy_add=True, )
c2 = FixedIncomeSecurity('c2', lazy_add=True)
s = StrategyBase('s', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
s.adjust(1000)
s.allocate(100, 'c1')
s.allocate(100, 'c2')
c1 = s['c1']
c2 = s['c2']
assert c1.multiplier == 2
assert isinstance( c2, FixedIncomeSecurity)
def test_strategybase_close():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
s.close('c1')
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_flatten():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
s.allocate(100, 'c2')
c2 = s['c2']
assert c1.position == 1
assert c1.value == 100
assert c2.position == 1
assert c2.value == 100
assert s.value == 1000
s.flatten()
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_multiple_calls():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
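# with these prices the cheapest asset is c2 at t0, c1 at t1, and c2 from t2
# onwards, so the min-price algo below rotates holdings accordingly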
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
c2 = s['c2']
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1 == s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still get a fresh price
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_preset_secs():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('s', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 2
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still get a fresh price
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_no_post_update():
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 999
assert s.capital == 49
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 999
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1049
assert s.capital == 49
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1049.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1047
assert s.capital == 2
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1047
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1102
assert s.capital == 2
assert c1.value == 1100
assert c1.weight == 1100.0 / 1102
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still get a fresh price
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1096
assert s.capital == 51
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1096
assert c2.price == 95
def test_strategybase_prices():
dts = pd.date_range('2010-01-01', periods=21)
rawd = [13.555, 13.75, 14.16, 13.915, 13.655,
13.765, 14.02, 13.465, 13.32, 14.65,
14.59, 14.175, 13.865, 13.865, 13.89,
13.85, 13.565, 13.47, 13.225, 13.385,
12.89]
data = pd.DataFrame(index=dts, data=rawd, columns=['a'])
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
s.setup(data)
# buy 100 shares on day 1 - hold until end
# just enough to buy 100 shares + 1$ commission
s.adjust(1356.50)
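# 100 shares * 13.555 + 1 (commission) = 1356.50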
s.update(dts[0])
# allocate all capital to child a
# a should be dynamically created and should have
# 100 shares allocated. s.capital should be 0
s.allocate(s.value, 'a')
assert s.capital == 0
assert s.value == 1355.50
assert len(s.children) == 1
aae(s.price, 99.92628, 5)
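# the drop from 100 reflects the $1 commission: 100 * 1355.50 / 1356.50 ≈ 99.92628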
a = s['a']
assert a.position == 100
assert a.value == 1355.50
assert a.weight == 1
assert a.price == 13.555
assert len(a.prices) == 1
# update through all dates and make sure price is ok
s.update(dts[1])
aae(s.price, 101.3638, 4)
s.update(dts[2])
aae(s.price, 104.3863, 4)
s.update(dts[3])
aae(s.price, 102.5802, 4)
# finish updates and make sure ok at end
for i in range(4, 21):
s.update(dts[i])
assert len(s.prices) == 21
aae(s.prices[-1], 95.02396, 5)
aae(s.prices[-2], 98.67306, 5)
def test_fail_if_root_value_negative():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
s.adjust(-100)
# trigger update
s.update(dts[0])
assert s.bankrupt
# make sure only triggered if root negative
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(-100)
s.update(dts[0])
# now make it trigger
c1.adjust(-1000)
# trigger update
s.update(dts[0])
assert s.bankrupt
def test_fail_if_0_base_in_return_calc():
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
# must setup tree because if not negative root error pops up first
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(100)
s.update(dts[0])
c1.adjust(-100)
s.update(dts[1])
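# the child's value is now zero, so the next return calculation has a 0 base -
# hence the ZeroDivisionError expected below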
try:
c1.adjust(-100)
s.update(dts[1])
assert False
except ZeroDivisionError as e:
if 'Could not update' not in str(e):
assert False
def test_strategybase_tree_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1', update=True)
assert s.root.stale == True
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
# Check that rebalance with update=False
# does not mark the node as stale
s.rebalance(0.6, 'c1', update=False)
assert s.root.stale == False
def test_strategybase_tree_decimal_position_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.use_integer_positions(False)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000.2)
s.rebalance(0.42, 'c1')
s.rebalance(0.58, 'c2')
aae(c1.value, 420.084)
aae(c2.value, 580.116)
aae(c1.value + c2.value, 1000.2)
def test_rebalance_child_not_in_tree():
s = StrategyBase('p')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
# rebalance to 0 w/ child that is not present - should ignore
s.rebalance(0, 'c2')
assert s.value == 1000
assert s.capital == 1000
assert len(s.children) == 0
def test_strategybase_tree_rebalance_to_0():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
# now rebalance c1
s.rebalance(0, 'c1')
assert c1.position == 0
assert c1.value == 0
assert s.capital == 1000
assert s.value == 1000
assert c1.weight == 0
assert c2.weight == 0
def test_strategybase_tree_rebalance_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now rebalance child s1 - since its children are 0, no waterfall alloc
m.rebalance(0.5, 's1')
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
s1.rebalance(0.4, 'c1')
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
# now rebalance child s1 again and make sure c1 also gets proportional
# increase
m.rebalance(0.8, 's1')
assert s1.value == 800
aae(m.capital, 200, 1)
assert m.value == 1000
assert s1.weight == 800 / 1000
assert s2.weight == 0
assert c1.value == 300.0
assert c1.weight == 300.0 / 800
assert c1.position == 3
# now rebalance child s1 to 0 - should close out s1 and c1 as well
m.rebalance(0, 's1')
assert s1.value == 0
assert m.capital == 1000
assert m.value == 1000
assert s1.weight == 0
assert s2.weight == 0
assert c1.weight == 0
def test_strategybase_tree_rebalance_base():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# check that 2 rebalances of equal weight lead to two different allocs
# since value changes after first call
s.rebalance(0.5, 'c1')
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2')
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
# close out everything
s.flatten()
# adjust to get back to 1000
s.adjust(4)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance but set fixed base
base = s.value
s.rebalance(0.5, 'c1', base=base)
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2', base=base)
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
def test_algo_stack():
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# no run_always for now
del a1.run_always
del a2.run_always
del a3.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert not a3.called
# now test that run_always marked are run
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# a3 will have run_always
del a1.run_always
del a2.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert a3.called
def test_set_commissions():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.set_commissions(lambda x, y: 1.0)
s.setup(data)
s.update(dts[0])
s.adjust(1000)
s.allocate(500, 'c1')
assert s.capital == 599
s.set_commissions(lambda x, y: 0.0)
s.allocate(-400, 'c1')
assert s.capital == 999
def test_strategy_tree_proper_return_calcs():
s1 = StrategyBase('s1')
s2 = StrategyBase('s2')
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    data['c1'][dts[1]] = 105
    data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert m.price == 100
assert s1.value == 0
assert s2.value == 0
# now allocate directly to child
s1.allocate(500)
assert m.capital == 500
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.weight == 0
# allocate to child2 via parent method
m.allocate(500, 's2')
assert m.capital == 0
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.value == 500
assert s2.weight == 500.0 / 1000
assert s2.price == 100
# now allocate and incur commission fee
s1.allocate(500, 'c1')
assert m.capital == 0
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.value == 500
assert s2.weight == 500.0 / 1000.0
assert s2.price == 100
def test_strategy_tree_proper_universes():
def do_nothing(x):
return True
child1 = Strategy('c1', [do_nothing], ['b', 'c'])
parent = Strategy('m', [do_nothing], [child1, 'a'])
child1 = parent['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(
{'a': pd.Series(data=1, index=dts, name='a'),
'b': pd.Series(data=2, index=dts, name='b'),
'c': pd.Series(data=3, index=dts, name='c')})
parent.setup(data, test_data1 = 'test1')
assert len(parent.children) == 1
assert 'c1' in parent.children
assert len(parent._universe.columns) == 2
assert 'c1' in parent._universe.columns
assert 'a' in parent._universe.columns
assert len(child1._universe.columns) == 2
assert 'b' in child1._universe.columns
assert 'c' in child1._universe.columns
assert parent._has_strat_children
assert len(parent._strat_children) == 1
assert parent.get_data( 'test_data1' ) == 'test1'
# New child strategy with parent (and using dictionary notation}
child2 = Strategy('c2', [do_nothing], {'a' : SecurityBase(''), 'b' : ''}, parent=parent)
# Setup the child from the parent, but pass in some additional data
child2.setup_from_parent(test_data2 = 'test2')
assert 'a' in child2._universe.columns
assert 'b' in child2._universe.columns
assert 'c2' in parent._universe.columns
# Make sure child has data from the parent and the additional data
assert child2.get_data('test_data1') == 'test1'
assert child2.get_data('test_data2') == 'test2'
assert len(parent._strat_children) == 2
def test_strategy_tree_paper():
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['a'], data=100.)
data['a'].loc[dts[1]] = 101
data['a'].loc[dts[2]] = 102
s = Strategy('s',
[bt.algos.SelectWhere(data > 100),
bt.algos.WeighEqually(),
bt.algos.Rebalance()])
m = Strategy('m', [], [s])
s = m['s']
m.setup(data)
m.update(dts[0])
m.run()
assert m.price == 100
assert s.price == 100
assert s._paper_trade
assert s._paper.price == 100
s.update(dts[1])
m.run()
assert m.price == 100
assert m.value == 0
assert s.value == 0
assert s.price == 100
s.update(dts[2])
m.run()
assert m.price == 100
assert m.value == 0
assert s.value == 0
assert np.allclose(s.price, 100. * (102 / 101.))
def test_dynamic_strategy():
def do_nothing(x):
return True
# Start with an empty parent
parent = Strategy('p', [do_nothing], [])
dts = pd.date_range('2010-01-01', periods=4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[2]] = 105.
data['c2'][dts[2]] = 95.
parent.setup( data )
# NOTE: Price of the sub-strategy won't be correct in this example because
# we are not using the algo stack to impact weights, and so the paper
# trading strategy does not see the same actions as we are doing.
i = 0
parent.adjust( 1e6 )
parent.update( dts[i] )
assert parent.price == 100.
assert parent.value == 1e6
i = 1
parent.update( dts[i] )
# On this step, we decide to put a trade on c1 vs c2 and track it as a strategy
trade = Strategy('c1_vs_c2', [], children = ['c1', 'c2'], parent = parent )
trade.setup_from_parent()
trade.update( parent.now )
assert trade.price == 100.
assert trade.value == 0
# Allocate capital to the trade
parent.allocate( 1e5, trade.name )
assert trade.value == 1e5
assert trade.price == 100.
# Go long 'c1' and short 'c2'
trade.rebalance( 1., 'c1')
trade.rebalance( -1., 'c2')
assert parent.universe[ trade.name ][ dts[i] ] == 100.
assert parent.positions['c1'][ dts[i] ] == 1e3
assert parent.positions['c2'][ dts[i] ] == -1e3
i = 2
parent.update( dts[i] )
assert trade.value == 1e5 + 10 * 1e3
assert parent.value == 1e6 + 10 * 1e3
# On this step, we close the trade, and allocate capital back to the parent
trade.flatten()
trade.update( trade.now ) # Need to update after flattening (for now)
parent.allocate( -trade.capital, trade.name )
assert trade.value == 0
assert trade.capital == 0
assert parent.value == 1e6 + 10 * 1e3
assert parent.capital == parent.value
assert parent.positions['c1'][ dts[i] ] == 0.
assert parent.positions['c2'][ dts[i] ] == 0.
i = 3
parent.update( dts[i] )
# Just make sure we can update one step beyond closing
# Note that "trade" is still a child of parent, and it also has children,
# so it will keep getting updated (and paper trading will still happen).
assert trade.value == 0
assert trade.capital == 0
assert trade.values[ dts[i] ] == 0.
def test_dynamic_strategy2():
# Start with an empty parent
parent = Strategy('p', [], [])
dts = pd.date_range('2010-01-01', periods=4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[2]] = 105.
data['c2'][dts[2]] = 95.
data['c1'][dts[3]] = 101.
data['c2'][dts[3]] = 99.
parent.setup( data )
i = 0
parent.adjust( 1e6 )
parent.update( dts[i] )
assert parent.price == 100.
assert parent.value == 1e6
i = 1
parent.update( dts[i] )
# On this step, we decide to put a trade on c1 vs c2 and track it as a strategy
def trade_c1_vs_c2( strategy ):
if strategy.now == dts[1]:
strategy.rebalance( 1., 'c1')
strategy.rebalance( -1., 'c2')
trade = Strategy('c1_vs_c2', [trade_c1_vs_c2], children = ['c1', 'c2'], parent = parent )
trade.setup_from_parent()
trade.update( parent.now )
assert trade.price == 100.
assert trade.value == 0
# Allocate capital to the trade
parent.allocate( 1e5, trade.name )
assert trade.value == 1e5
assert trade.price == 100.
# Run the strategy for the timestep
parent.run()
assert parent.universe[ trade.name ][ dts[i] ] == 100.
assert np.isnan( parent.universe[ trade.name ][ dts[0] ] )
assert parent.positions['c1'][ dts[i] ] == 1e3
assert parent.positions['c2'][ dts[i] ] == -1e3
i = 2
parent.update( dts[i] )
trade = parent[ trade.name ]
assert trade.value == 1e5 + 10 * 1e3
assert parent.value == 1e6 + 10 * 1e3
aae( trade.price, 110.)
# Next we close the trade by flattening positions
trade.flatten()
trade.update( trade.now ) # Need to update after flattening (for now)
aae( trade.price, 110.)
# Finally we allocate capital back to the parent to be re-deployed
parent.allocate( -trade.capital, trade.name )
assert trade.value == 0
assert trade.capital == 0
aae( trade.price, 110.) # Price stays the same even after capital de-allocated
assert parent.value == 1e6 + 10 * 1e3
assert parent.capital == parent.value
assert parent.positions['c1'][ dts[i] ] == 0.
assert parent.positions['c2'][ dts[i] ] == 0.
i = 3
parent.update( dts[i] )
# Just make sure we can update one step beyond closing
assert parent.value == 1e6 + 10 * 1e3
# Note that "trade" is still a child of parent, and it also has children,
# so it will keep getting updated (and paper trading will still happen).
assert trade.value == 0
assert trade.capital == 0
assert trade.values[ dts[i] ] == 0.
# Paper trading price, as asset prices have moved, paper trading price
# keeps updating. Note that if the flattening of the position was part
# of the definition of trade_c1_vs_c2, then the paper trading price
# would be fixed after flattening, as it would apply to both real and paper.
aae( trade.price, 102.)
aae( parent.universe[ trade.name ][ dts[i] ], 102. )
def test_outlays():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
# allocate 1000 to strategy
s.adjust(1000)
# now let's see what happens when we allocate 500 to each child
c1.allocate(500)
c2.allocate(500)
    # calling outlays should automatically update the strategy, since it is stale
assert c1.outlays[dts[0]] == (4 * 105)
assert c2.outlays[dts[0]] == (5 * 95)
assert c1.data['outlay'][dts[0]] == (4 * 105)
assert c2.data['outlay'][dts[0]] == (5 * 95)
i = 1
s.update(dts[i], data.loc[dts[i]])
c1.allocate(-400)
c2.allocate(100)
# out update
assert c1.outlays[dts[1]] == (-4 * 100)
assert c2.outlays[dts[1]] == 100
assert c1.data['outlay'][dts[1]] == (-4 * 100)
assert c2.data['outlay'][dts[1]] == 100
def test_child_weight_above_1():
# check for child weights not exceeding 1
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(np.random.randn(3, 2) + 100,
index=dts, columns=['c1', 'c2'])
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1e6)
s.allocate(1e6, 'c1')
c1 = s['c1']
assert c1.weight <= 1
def test_fixed_commissions():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
# fixed $1 commission per transaction
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
# allocate 1000 to strategy
s.adjust(1000)
# now let's see what happens when we allocate 500 to each child
c1.allocate(500)
c2.allocate(500)
# out update
s.update(dts[i])
assert c1.value == 400
assert c2.value == 400
assert s.capital == 198
# de-alloc 100 from c1. This should force c1 to sell 2 units to raise at
# least 100 (because of commissions)
c1.allocate(-100)
s.update(dts[i])
assert c1.value == 200
assert s.capital == 198 + 199
    # allocate 100 to c2. This should leave things unchanged, since c2 cannot
# buy one unit since the commission will cause total outlay to exceed
# allocation
c2.allocate(100)
s.update(dts[i])
assert c2.value == 400
assert s.capital == 198 + 199
# ok try again w/ 101 allocation. This time, it should work
c2.allocate(101)
s.update(dts[i])
assert c2.value == 500
assert s.capital == 198 + 199 - 101
# ok now let's close the whole position. Since we are closing, we expect
# the allocation to go through, even though the outlay > amount
c2.allocate(-500)
s.update(dts[i])
assert c2.value == 0
assert s.capital == 198 + 199 - 101 + 499
# now we are going to go short c2
# we want to 'raise' 100 dollars. Since we need at a minimum 100, but we
# also have commissions, we will actually short 2 units in order to raise
# at least 100
c2.allocate(-100)
s.update(dts[i])
assert c2.value == -200
assert s.capital == 198 + 199 - 101 + 499 + 199
def test_degenerate_shorting():
# can have situation where you short infinitely if commission/share > share
# price
c1 = SecurityBase('c1')
s = StrategyBase('p', [c1])
# $1/share commission
s.set_commissions(lambda q, p: abs(q) * 1)
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=3)
# c1 trades at 0.01
data = pd.DataFrame(index=dts, columns=['c1'], data=0.01)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
try:
c1.allocate(-10)
assert False
except Exception as e:
assert 'full_outlay should always be approaching amount' in str(e)
def test_securitybase_allocate():
c1 = SecurityBase('c1')
s = StrategyBase('p', [c1])
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100.)
# set the price
data['c1'][dts[0]] = 91.40246706608193
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
# allocate 100000 to strategy
original_capital = 100000.
s.adjust(original_capital)
# not integer positions
c1.integer_positions = False
# set the full_outlay and amount
full_outlay = 1999.693706988672
amount = 1999.6937069886717
c1.allocate(amount)
# the results that we want to be true
assert np.isclose(full_outlay ,amount,rtol=0.)
# check that the quantity wasn't decreased and the full_outlay == amount
# we can get the full_outlay that was calculated by
# original capital - current capital
assert np.isclose(full_outlay, original_capital - s._capital, rtol=0.)
def test_securitybase_allocate_commisions():
date_span = pd.date_range(start='10/1/2017', end='10/11/2017', freq='B')
numper = len(date_span.values)
comms = 0.01
data = [[10, 15, 20, 25, 30, 35, 40, 45],
[10, 10, 10, 10, 20, 20, 20, 20],
[20, 20, 20, 30, 30, 30, 40, 40],
[20, 10, 20, 10, 20, 10, 20, 10]]
data = [[row[i] for row in data] for i in range(len(data[0]))] # Transpose
price = pd.DataFrame(data=data, index=date_span)
price.columns = ['a', 'b', 'c', 'd']
# price = price[['a', 'b']]
sig1 = pd.DataFrame(price['a'] >= price['b'] + 10, columns=['a'])
sig2 = pd.DataFrame(price['a'] < price['b'] + 10, columns=['b'])
signal = sig1.join(sig2)
signal1 = price.diff(1) > 0
signal2 = price.diff(1) < 0
tw = price.copy()
    tw.loc[:, :] = 0  # Initialize: set everything to 0
tw[signal1] = -1.0
tw[signal2] = 1.0
s1 = bt.Strategy('long_short', [bt.algos.WeighTarget(tw),
bt.algos.RunDaily(),
bt.algos.Rebalance()])
    # now we create the Backtest, with commissions=(lambda q, p: abs(p * q) * comms)
    t = bt.Backtest(s1, price, initial_capital=1000000, commissions=(lambda q, p: abs(p * q) * comms), progress_bar=False)
    # and let's run it!
res = bt.run(t)
########################
def test_strategybase_tree_transact():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.transact(1)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.transact(5)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
# now transact the parent since weights are nonzero
s.transact(2)
assert c1.position == 6
assert c1.value == 600
assert s.capital == 1000 - 600
assert s.value == 1000
assert c1.weight == 600.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_transact_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.transact(1)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now transact in c1
s.transact(5, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_transact_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.transact(1)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now transact directly in child. No weights, so nothing happens
s1.transact(1)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now transact directly in child of child
s1.allocate(500)
c1.transact(2)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
# now transact directly in child again
s1.transact(5)
assert s1.value == 500
assert s1.capital == 500 - 400
assert c1.value == 400
assert c1.weight == 400.0 / 500
assert c1.position == 4
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_precision():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
c3 = SecurityBase('c3')
s = StrategyBase('p', [c1, c2, c3])
s.use_integer_positions(False)
c1 = s['c1']
c2 = s['c2']
c3 = s['c3']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=1.)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1.0)
s.rebalance(0.1, 'c1')
s.rebalance(0.1, 'c2')
s.rebalance(0.1, 'c3')
s.adjust(-0.7)
aae( s.capital, 0. )
aae( s.value, 0.3 )
aae( s.price, 100. )
assert s.capital != 0 # Due to numerical precision
assert s.value != 0.3 # Created non-zero value out of numerical precision errors
assert s.price != 100.
# Make sure we can still update and calculate return
i=1
s.update(dts[i])
aae( s.price, 100. )
aae( s.value, 0.3 )
assert s.price != 100.
assert s.value != 0.3
def test_securitybase_transact():
c1 = SecurityBase('c1')
s = StrategyBase('p', [c1])
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100.)
# set the price
price = 91.40246706608193
data['c1'][dts[0]] = 91.40246706608193
s.setup(data)
i = 0
s.update(dts[i])
# allocate 100000 to strategy
original_capital = 100000.
s.adjust(original_capital)
# not integer positions
c1.integer_positions = False
# set the full_outlay and amount
q = 1000.
amount = q * price
c1.transact(q)
assert np.isclose( c1.value, amount, rtol=0.)
assert np.isclose( c1.weight, amount/original_capital, rtol=0.)
assert c1.position == q
assert np.isclose( c1.outlays[0], amount, rtol=0.)
assert np.isclose( s.capital, (original_capital - amount) )
assert s.weight == 1
assert s.value == original_capital
assert np.isclose( s.outlays[c1.name][0], amount, rtol=0.)
# Call again on the same step (and again) to make sure all updates are working
c1.transact(q)
c1.transact(q)
assert c1.position == 3*q
assert np.isclose( c1.outlays[0], 3*amount, rtol=0.)
assert np.isclose( c1.value, 3*amount, rtol=0.)
assert np.isclose( s.capital, (original_capital - 3*amount) )
assert s.weight == 1
assert s.value == original_capital
assert np.isclose( s.outlays[c1.name][0], 3*amount, rtol=0.)
def test_security_setup_positions():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
assert c1.position == 0
assert len(c1.positions) == 1
assert c1.positions[0] == 0
assert c2.position == 0
assert len(c2.positions) == 1
assert c2.positions[0] == 0
def test_couponpayingsecurity_setup():
c1 = CouponPayingSecurity('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
coupons = pd.DataFrame(index=dts, columns=['c1'], data=0.1)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
assert 'coupon' in c1.data
assert c1.coupon == 0.0
assert len(c1.coupons) == 1
assert c1.coupons[0] == 0.0
assert 'holding_cost' in c1.data
assert c1.holding_cost == 0.0
assert len(c1.holding_costs) == 1
assert c1.holding_costs[0] == 0.0
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_couponpayingsecurity_setup_costs():
c1 = CouponPayingSecurity('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
coupons = pd.DataFrame(index=dts, columns=['c1'], data=0.)
cost_long = pd.DataFrame(index=dts, columns=['c1'], data=0.01)
cost_short = pd.DataFrame(index=dts, columns=['c1'], data=0.05)
s.setup(data, coupons=coupons, cost_long=cost_long, cost_short=cost_short)
i = 0
s.update(dts[i])
assert 'coupon' in c1.data
assert c1.coupon == 0.0
assert len(c1.coupons) == 1
assert c1.coupons[0] == 0.0
assert 'holding_cost' in c1.data
assert c1.holding_cost == 0.0
assert len(c1.holding_costs) == 1
assert c1.holding_costs[0] == 0.0
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_couponpayingsecurity_carry():
c1 = CouponPayingSecurity('c1')
s = StrategyBase('p', [c1])
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=1.)
coupons = pd.DataFrame(index=dts, columns=['c1'], data=0.)
coupons['c1'][dts[0]] = 0.1
cost_long = pd.DataFrame(index=dts, columns=['c1'], data=0.)
cost_long['c1'][dts[0]] = 0.01
cost_short = pd.DataFrame(index=dts, columns=['c1'], data=0.05)
s.setup(data, coupons=coupons, cost_long=cost_long, cost_short=cost_short)
i = 0
s.update(dts[i])
# allocate 1000 to strategy
original_capital = 1000.
s.adjust(original_capital)
# set the full_outlay and amount
q = 1000.
c1.transact(q)
assert c1.coupon == 100.
assert len(c1.coupons) == 1
assert c1.coupons[0] == 100.
assert c1.holding_cost == 10.
assert len(c1.holding_costs) == 1
assert c1.holding_costs[0] == 10.
assert s.capital == 0.
assert s.cash[0] == 0.
# On this step, the coupon/costs will be accounted for from the last holding
i = 1
s.update(dts[i])
assert c1.coupon == 0.
assert len(c1.coupons) == 2
assert c1.coupons[1] == 0.
assert c1.holding_cost == 0.
assert len(c1.holding_costs) == 2
assert c1.holding_costs[1] == 0.
assert s.capital == 100. - 10.
assert s.cash[0] == 0.
assert s.cash[1] == 100. - 10.
# Go short q
c1.transact( -2*q )
# Note cost is positive even though we are short.
assert c1.holding_cost == 50.
assert len(c1.holding_costs) == 2
assert c1.holding_costs[1] == 50.
def test_couponpayingsecurity_transact():
c1 = CouponPayingSecurity('c1')
s = StrategyBase('p', [c1])
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100.)
# set the price
price = 91.40246706608193
data['c1'][dts[0]] = 91.40246706608193
data['c1'][dts[1]] = 91.40246706608193
coupon = 0.1
coupons = pd.DataFrame(index=dts, columns=['c1'], data=0.)
coupons['c1'][dts[0]] = coupon
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
# allocate 100000 to strategy
original_capital = 100000.
s.adjust(original_capital)
# set the full_outlay and amount
q = 1000.
amount = q * price
c1.transact(q)
# The coupon is nonzero, but will only be counted in "value" the next day
assert c1.coupon == coupon * q
assert len(c1.coupons) == 1
assert c1.coupons[0] == coupon * q
assert np.isclose( c1.value, amount, rtol=0.)
assert np.isclose( c1.weight, amount/original_capital, rtol=0.)
assert c1.position == q
assert s.capital == (original_capital - amount)
assert s.cash[0] == (original_capital - amount)
assert s.weight == 1
assert s.value == original_capital
assert c1._capital == coupon * q
# On this step, the coupon will be paid
i = 1
s.update(dts[i])
new_capital = original_capital + coupon * q
assert c1.coupon == 0
assert len(c1.coupons) == 2
assert c1.coupons[0] == coupon * q
assert c1.coupons[1] == 0
assert np.isclose( c1.value, amount, rtol=0.)
assert np.isclose( c1.weight, amount/new_capital, rtol=0.)
assert c1.position == q
assert s.capital == (new_capital - amount)
assert s.weight == 1
assert s.value == new_capital
assert s.cash[0] == (original_capital - amount)
assert s.cash[1] == (new_capital - amount)
assert c1._capital == 0
# Close the position
c1.transact(-q)
assert c1.coupon == 0
assert len(c1.coupons) == 2
assert c1.coupons[0] == coupon * q
assert c1.coupons[1] == 0
assert np.isclose( c1.value, 0., rtol=0.)
assert np.isclose( c1.weight, 0./new_capital, rtol=0.)
assert c1.position == 0
assert s.capital == new_capital
assert s.weight == 1
assert s.value == new_capital
assert s.cash[0] == (original_capital - amount)
assert s.cash[1] == new_capital
assert c1._capital == 0
def test_bidoffer():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
bidoffer = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=1.)
bidoffer['c1'][dts[0]] = 2
bidoffer['c2'][dts[0]] = 1.5
s.setup(data, bidoffer=bidoffer)
s.adjust(100000)
i = 0
s.update(dts[i])
assert c1.bidoffer == 2
assert len(c1.bidoffers) == 1
assert c1.bidoffers[0] == 2
assert c2.bidoffer == 1.5
assert len(c2.bidoffers) == 1
assert c2.bidoffers[0] == 1.5
# Check the outlays are adjusted for bid/offer
s.set_commissions( lambda q,p : 0.1 )
total, outlay, fee, bidoffer = c1.outlay( 100 )
assert bidoffer == 100 * 1
assert fee == 0.1
assert outlay == 100 * (105 + 1)
assert total == outlay + fee
total, outlay, fee, bidoffer = c1.outlay( -100 )
assert bidoffer == 100 * 1
assert fee == 0.1
assert outlay == -100 * (105 - 1)
assert total == outlay + fee
total, outlay, fee, bidoffer = c2.outlay( 100 )
assert bidoffer == 100 * 0.75
assert fee == 0.1
assert outlay == 100 * (95 + 0.75)
assert total == outlay + fee
total, outlay, fee, bidoffer = c2.outlay( -100 )
assert bidoffer == 100 * 0.75
assert fee == 0.1
assert outlay == -100 * (95 - 0.75)
assert total == outlay + fee
# Do some transactions, and check that bidoffer_paid is updated
c1.transact(100)
assert c1.bidoffer_paid == 100 * 1
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
c1.transact(100)
assert c1.bidoffer_paid == 200 * 1
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
c2.transact(-100)
assert c2.bidoffer_paid == 100 * 0.75
assert c2.bidoffers_paid[i] == c2.bidoffer_paid
assert s.bidoffer_paid == 100 * 0.75 + 200 * 1
assert s.bidoffers_paid[i] == s.bidoffer_paid
assert s.fees.iloc[i] == 3 * 0.1
i = 1
s.update(dts[i])
assert c1.bidoffer_paid == 0.
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
assert c2.bidoffer_paid == 0.
assert c2.bidoffers_paid[i] == c2.bidoffer_paid
assert s.bidoffer_paid == 0.
assert s.bidoffers_paid[i] == s.bidoffer_paid
assert s.fees[i] == 0.
def test_outlay_custom():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
s.setup(data)
s.adjust(100000)
i = 0
s.update(dts[i])
# Check the outlays are adjusted for custom prices
s.set_commissions( lambda q,p : 0.1*p )
total, outlay, fee, bidoffer = c1.outlay( 100, 106 )
assert bidoffer == 100 * 1
assert fee == 0.1 * 106
assert outlay == 100 * (106)
assert total == outlay + fee
total, outlay, fee, bidoffer = c1.outlay( -100, 106 )
assert bidoffer == -100 * 1
assert fee == 0.1 * 106
assert outlay == -100 * 106
assert total == outlay + fee
def test_bidoffer_custom():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
# Note: In order to access bidoffer_paid,
# need to pass bidoffer kwarg during setup
s.setup(data, bidoffer = {})
s.adjust(100000)
i = 0
s.update(dts[i])
c1.transact(100, price=106)
assert c1.bidoffer_paid == 100 * 1
assert s.bidoffer_paid == c1.bidoffer_paid
assert s.capital == 100000 - 100*106
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
assert s.bidoffers_paid[i] == s.bidoffer_paid
c1.transact(100, price=106)
assert c1.bidoffer_paid == 200 * 1
assert s.bidoffer_paid == c1.bidoffer_paid
assert s.capital == 100000 - 100*106 - 100*106
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
assert s.bidoffers_paid[i] == s.bidoffer_paid
c1.transact(-100, price=107)
assert c1.bidoffer_paid == 0
assert s.bidoffer_paid == c1.bidoffer_paid
assert s.capital == 100000 - 100*106 - 100*106 + 100*107
assert c1.bidoffers_paid[i] == c1.bidoffer_paid
assert s.bidoffers_paid[i] == s.bidoffer_paid
def test_security_notional_value():
c1 = SecurityBase('c1')
c2 = CouponPayingSecurity('c2')
c3 = HedgeSecurity('c3')
c4 = CouponPayingHedgeSecurity('c4')
c5 = FixedIncomeSecurity('c5')
s = StrategyBase('p', children = [c1, c2, c3, c4, c5])
c1 = s['c1']; c2 = s['c2']; c3 = s['c3']; c4 = s['c4']; c5 = s['c5']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
c1.transact(1000)
c2.transact(1000)
c3.transact(1000)
c4.transact(1000)
c5.transact(1000)
for c in [ c1, c2, c3, c4, c5 ]:
assert c.position == 1000
assert c.price == 100
assert c1.notional_value == 1000*100.
assert c2.notional_value == 1000
assert c3.notional_value == 0
assert c4.notional_value == 0
assert c5.notional_value == 1000
for c in [ c1, c2, c3, c4, c5 ]:
assert len( c.notional_values ) == 1
assert c.notional_values[ dts[i] ] == c.notional_value
assert s.notional_value == 2000 + 1000*100 # Strategy notional value always positive
i = 1
s.update(dts[i])
c1.transact(-3000)
c2.transact(-3000)
c3.transact(-3000)
c4.transact(-3000)
c5.transact(-3000)
for c in [ c1, c2, c3, c4, c5 ]:
assert c.position == -2000
assert c.price == 100
assert c1.notional_value == -2000*100.
assert c2.notional_value == -2000
assert c3.notional_value == 0
assert c4.notional_value == 0
assert c5.notional_value == -2000
for c in [ c1, c2, c3, c4, c5 ]:
assert len( c.notional_values ) == 2
assert c.notional_values[ dts[i] ] == c.notional_value
assert s.notional_value == 2000*100 + 4000 # Strategy notional value always positive
# FixedIncomeStrategy Tests
def test_fi_strategy_flag():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', children = [s1, s2])
assert s.fixed_income == False
s = FixedIncomeStrategy('p', [s1, s2])
assert s.fixed_income == True
def test_fi_strategy_no_bankruptcy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = FixedIncomeStrategy('p', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.transact( 10, 'c2')
assert s.value == 0.
assert s.capital == -10*100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert s.value == -5*10
assert s.capital == -10*100
assert s.bankrupt == False
def test_fi_strategy_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = FixedIncomeStrategy('p', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
# Basic setup works with no adjustment
assert s.value == 0
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
assert c1.notional_value == 0
assert c2.notional_value == 0
# Positive or negative capital adjustments are fine
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
s.adjust(-2000)
assert s.capital == -1000
assert s.value == -1000
def test_fi_strategy_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = FixedIncomeStrategy('p', children = [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = -5 # Test negative prices
data['c2'][dts[2]] = 0 # Test zero price
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert c2.price == -5
i = 2
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 0
def test_fi_strategy_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = FixedIncomeStrategy('p', children = [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.notional_value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.notional_value == 500 # Capital does not count towards notl
assert c1.weight == 1.
assert c2.weight == 0
def test_fi_strategy_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = FixedIncomeStrategy('p', children = [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 1.0
assert c2.weight == 0
def test_fi_strategy_close():
c1 = SecurityBase('c1')
c2 = CouponPayingSecurity('c2')
c3 = HedgeSecurity('c3')
c4 = CouponPayingHedgeSecurity('c4')
s = FixedIncomeStrategy('p', children = [c1, c2, c3, c4])
c1 = s['c1']; c2 = s['c2']; c3 = s['c3']; c4 = s['c4']
dts = pd.date_range('2010-01-01', periods=3)
# Price
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
for c in [ c1, c2, c3, c4 ]:
s.transact(10, c.name)
assert c.position == 10
assert c.value == 1000
assert s.capital == -1000
assert s.value == 0
s.close( c.name )
assert c.position == 0
assert c.value == 0
assert s.capital == 0
assert s.value == 0
s.transact(-10, c.name)
assert c.position == -10
assert c.value == -1000
assert s.capital == 1000
assert s.value == 0
s.close( c.name )
assert c.position == 0
assert c.value == 0
assert s.capital == 0
assert s.value == 0
def test_fi_strategy_close_zero_price():
c1 = SecurityBase('c1')
c2 = CouponPayingSecurity('c2')
c3 = HedgeSecurity('c3')
c4 = CouponPayingHedgeSecurity('c4')
s = FixedIncomeStrategy('p', children = [c1, c2, c3, c4])
c1 = s['c1']; c2 = s['c2']; c3 = s['c3']; c4 = s['c4']
dts = pd.date_range('2010-01-01', periods=3)
# Zero prices are OK in fixed income space (i.e. swaps)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4'], data=0.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
for c in [ c1, c2, c3, c4 ]:
s.transact(10, c.name)
assert c.position == 10
assert c.value == 0
s.close( c.name )
assert c.position == 0
assert c.value == 0
s.transact(-10, c.name)
assert c.position == -10
assert c.value == 0
s.close( c.name )
assert c.position == 0
assert c.value == 0
def test_fi_strategy_flatten():
c1 = SecurityBase('c1')
c2 = CouponPayingSecurity('c2')
c3 = HedgeSecurity('c3')
c4 = CouponPayingHedgeSecurity('c4')
s = FixedIncomeStrategy('p', children = [c1, c2, c3, c4])
c1 = s['c1']; c2 = s['c2']; c3 = s['c3']; c4 = s['c4']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
for c in [ c1, c2, c3, c4 ]:
s.transact(10, c.name)
for c in [ c1, c2, c3, c4 ]:
assert c.position == 10
assert c.value == 1000
s.flatten()
for c in [ c1, c2, c3, c4 ]:
assert c.position == 0
assert c.value == 0
def test_fi_strategy_prices():
c1 = CouponPayingSecurity('c1')
s = FixedIncomeStrategy('s', children = [c1] )
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=4)
rawd = [2, -3, 0, 1]
data = pd.DataFrame(index=dts, data=rawd, columns=['c1'])
coupons = pd.DataFrame(index=dts, columns=['c1'], data=[1,2,3,4])
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
s.transact( 10, 'c1')
assert c1.coupon == 10*1
assert s.capital == -10*2
assert s.value == 0
assert len(s.children) == 1
assert s.price == 100
assert s.notional_value == 10
last_coupon = c1.coupon
last_value = s.value
last_notional_value = s.notional_value
last_price = 100.
i=1
s.update(dts[i])
cpn = last_coupon
assert c1.coupon == 10*2
assert s.capital == -10*2 + cpn
assert s.value == -5*10 + cpn # MTM + coupon
assert s.notional_value == 10
assert s.price == last_price + 100 * (s.value-last_value)/last_notional_value
last_value = s.value
last_notional_value = s.notional_value
last_price = s.price
last_coupon = c1.coupon
i=2
s.update(dts[i])
cpn += last_coupon
assert c1.coupon == 10*3
assert s.capital == -10*2 + cpn
assert s.value == -2*10 + cpn # MTM + coupon
assert s.notional_value == 10
assert s.price == last_price + 100 * (s.value - last_value)/last_notional_value
last_value = s.value
last_notional_value = s.notional_value
last_price = s.price
last_coupon = c1.coupon
i=3
s.update(dts[i])
s.transact( 10, 'c1')
# Coupon still from previous period - not affected by new transaction
cpn += last_coupon
assert c1.coupon == 20*4
assert s.capital == -10*2 -10*1 + cpn
assert s.value == -1*10 + 0 + cpn # MTM + coupon
assert s.notional_value == 20
assert s.price == last_price + 100 * (s.value - last_value)/last_notional_value
def test_fi_fail_if_0_base_in_return_calc():
c1 = HedgeSecurity('c1')
s = FixedIncomeStrategy('s', children = [c1] )
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=4)
rawd = [2, -3, 0, 1]
data = pd.DataFrame(index=dts, data=rawd, columns=['c1'])
s.setup(data)
i=0
s.update(dts[i])
assert s.notional_value == 0
# Hedge security has no notional value, so strategy doesn't either
# and thus shouldn't be making PNL.
i = 1
try:
s.update(dts[i])
except ZeroDivisionError as e:
if 'Could not update' not in str(e):
assert False
def test_fi_strategy_tree_rebalance():
c1 = SecurityBase('c1')
c2 = CouponPayingSecurity('c2')
c3 = HedgeSecurity('c3')
c4 = CouponPayingHedgeSecurity('c4')
s = FixedIncomeStrategy('p', children = [c1, c2, c3, c4])
c1 = s['c1']; c2 = s['c2']; c3 = s['c3']; c4 = s['c4']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4'], data=50.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert s.value == 0
assert s.capital == 0
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1', base = 1000)
assert c1.position == 10
assert c1.value == 500
assert c1.notional_value == 500
assert s.capital == -500
assert s.value == 0
assert s.notional_value == 500
assert c1.weight == 1.0
assert c2.weight == 0
assert c2.notional_value == 0
# Now rebalance to s2, with no base weight.
# It takes base weight from strategy weight (500)
s.rebalance(0.5, 'c2')
assert c1.position == 10
assert c1.notional_value == 500
assert c2.position == 250
assert c2.notional_value == 250
assert s.notional_value == c1.notional_value + c2.notional_value
assert c1.weight == 2./3.
assert c2.weight == 1./3.
assert s.value == 0
i = 1
s.update(dts[i], data.loc[dts[i]])
# Now rebalance to a new, higher base with given target weights (including negative)
s.rebalance(0.5, 'c1', 1000, update=False)
s.rebalance(-0.5, 'c2', 1000)
assert c1.weight == 0.5
assert c2.weight == -0.5
assert c1.position == 10
assert c1.notional_value == 500
assert c2.position == -500
assert c2.notional_value == -500
def test_fi_strategy_tree_rebalance_nested():
c1 = CouponPayingSecurity('c1')
c2 = CouponPayingSecurity('c2')
s1 = FixedIncomeStrategy('s1', children = [c1, c2])
s2 = FixedIncomeStrategy('s2', children = [c1, c2])
s = FixedIncomeStrategy('s', children = [s1, s2])
p = FixedIncomeStrategy('p', children = [c1, c2])
    dts = pd.date_range('2010-01-01', periods=3)
import pandas as pd
import spacy
import os
from glob import glob
import warnings
import sys
import logging
from spacy import displacy
import torch
from scispacy.custom_sentence_segmenter import pysbd_sentencizer
from taxonerd.abbreviation import TaxonomicAbbreviationDetector
from taxonerd.extractor import TextExtractor
class TaxoNERD:
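    """Taxonomic named-entity recognizer built on a spaCy model.

    The pipeline can optionally be extended with sentence segmentation (pySBD),
    taxonomic abbreviation detection, and entity linking against a knowledge
    base (e.g. gbif_backbone), and runs on GPU when requested and available.
    """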
def __init__(
self,
model="en_ner_eco_md",
with_abbrev=False,
with_linking=None,
with_sentence=False,
threshold=0.7,
prefer_gpu=False,
verbose=False,
logger=None,
):
self.logger = logger if logger else logging.getLogger(__name__)
warnings.simplefilter("ignore")
self.verbose = verbose
self.extractor = TextExtractor(logger=self.logger)
if prefer_gpu:
use_cuda = torch.cuda.is_available()
self.logger.info("GPU is available" if use_cuda else "GPU not found")
if use_cuda:
spacy.require_gpu()
self.logger.info("TaxoNERD will use GPU")
self.logger.info("Load model {}".format(model))
self.nlp = spacy.load(model)
self.logger.info(
"Loaded model {}-{}".format(self.nlp.meta["name"], self.nlp.meta["version"])
)
self.with_sentence = with_sentence
if self.with_sentence:
if self.verbose:
logger.info(f"Add pySBDSentencizer to pipeline")
self.nlp.add_pipe("pysbd_sentencizer", before="ner")
self.with_abbrev = with_abbrev
if self.with_abbrev:
if self.verbose:
logger.info(f"Add TaxonomicAbbreviationDetector to pipeline")
self.nlp.add_pipe("taxonomic_abbreviation_detector")
        self.with_linking = with_linking is not None
if self.with_linking:
kb_name = with_linking if with_linking != "" else "gbif_backbone"
if self.verbose:
logger.info(f"Add EntityLinker {kb_name} to pipeline")
self.create_linker(kb_name, threshold)
def create_linker(self, kb_name, threshold):
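        """Append the entity linker component for the given knowledge base and score threshold."""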
from taxonerd.linking.linking_utils import KnowledgeBaseFactory
from taxonerd.linking.candidate_generation import CandidateGenerator
from taxonerd.linking.linking import EntityLinker
self.nlp.add_pipe(
"taxonerd_linker",
config={
"linker_name": kb_name,
"resolve_abbreviations": self.with_abbrev,
"filter_for_definitions": False,
"k": 1,
"threshold": threshold,
},
)
def find_in_corpus(self, input_dir, output_dir=None):
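        """Extract taxa from every .txt file in input_dir; return a dict mapping file basenames
        to the per-file results (DataFrames, or .ann file paths when output_dir is given)."""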
df_map = {}
input_dir = self.extractor(input_dir)
if input_dir:
for filename in glob(os.path.join(input_dir, "*.txt")):
df = self.find_in_file(filename, output_dir)
if df is not None:
df_map[os.path.basename(filename)] = df
return df_map
def find_in_file(self, filename, output_dir=None):
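        """Extract taxa from a single text file; return a DataFrame, or write a .ann file
        and return its path when output_dir is given."""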
if not os.path.exists(filename):
            raise FileNotFoundError("File {} not found".format(filename))
filename = self.extractor(filename)
if filename:
self.logger.info("Extract taxa from file {}".format(filename))
with open(filename, "r") as f:
text = f.read()
df = self.find_in_text(text)
if output_dir:
ann_filename = os.path.join(
output_dir,
".".join(os.path.basename(filename).split(".")[:-1]) + ".ann",
)
df.to_csv(ann_filename, sep="\t", header=False)
return ann_filename
return df
return None
def find_in_text(self, text):
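        """Run the NLP pipeline on raw text and collect the detected taxon entities
        (plus linked abbreviations, if enabled) into a DataFrame."""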
doc = self.nlp(text)
# displacy.serve(doc, style="ent")
entities = []
sentences = None
if len(doc.ents) > 0:
if self.with_sentence:
sentences = {sent: id for id, sent in enumerate(doc.sents)}
entities = [
self.get_entity_dict(ent, text, sentences=sentences)
for ent in doc.ents
if (
"\n" not in text[ent.start_char : ent.end_char].strip("\n")
and (ent.label_ in ["LIVB", "TAXON"])
and (ent._.kb_ents if self.with_linking else True)
and ((ent not in doc._.abbreviations) if self.with_abbrev else True)
)
]
# for ent in doc.ents:
# if ent.label_ not in ["LIVB", "TAXON"]:
# raise ValueError(ent.label_)
if self.with_abbrev and len(doc._.abbreviations) > 0:
ents = {ent["text"]: ent for ent in entities}
abbreviations = [
self.get_entity_dict(
abrv,
text,
kb_ents=ents[abrv._.long_form.text]["entity"]
if self.with_linking
else None,
sentences=sentences,
)
for abrv in doc._.abbreviations
if abrv._.long_form
and abrv.text != abrv._.long_form.text
and abrv._.long_form.text in ents
]
entities += abbreviations
# entities += (
# self.get_abbreviated_tax_entity(
# text, entities, doc._.abbreviations, sentences
# )
# if len(doc._.abbreviations) > 0
# else []
# )
        df = pd.DataFrame(entities)
# Standard library imports
import os
# Third party imports
import numpy as np
import pandas as pd
# Local application imports
from modules.file_handlers import read_filtrated_datafile, get_data, read_units
from modules.regressors import integralRateRegression
from modules.gof import ssRes, MSE, resAEr, resREr, rSquared
from modules.reaction_models import Model
def data2integralFit(DATA_DIR,OUTPUT_DIR,modelNames,low,high):
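    """Run integral rate regression for each model in modelNames on every CSV in DATA_DIR;
    results are written under OUTPUT_DIR/integral_regression."""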
# low : lower limit for conversion fraction
# high : upper limit for conversion fraction
# DATA_DIR : directory containing data
# OUTPUT_DIR : output directory
# make output directory
DIR = os.path.join(OUTPUT_DIR,'integral_regression')
if not os.path.exists(DIR):
os.makedirs(DIR)
# get csvs
Csvs = get_data(DATA_DIR)
# filnames
fnames = os.listdir(DATA_DIR)
for indx, Csv in enumerate(Csvs):
# get dataframe
        df = pd.read_csv(Csv)
"""
Created december 2020 - december 2021
@author: <NAME>, <NAME>, <NAME>
Partly based on the packages 'criticality tool' (Frederique) and 'TRAILS' (Elco)
=======
# -*- coding: utf-8 -*-
"""
import os, sys
sys.path.append(r"P:\osm_flood\network_analysis\igraph\europe_flood_road_disruption\scripts")
import igraph as ig
from preprocess_routes import graph_load
import pandas as pd
import random
from statistics import mean
import time
from pathlib import Path
import pygeos as pyg
from shapely import wkt
import numpy as np
from tqdm import tqdm
import feather
import pickle
import warnings
import json
from utils import load_config
import logging
from Europe_utils import *
# translation between countrycodes (2- and 3-letter and country names)
#Todo: these config loads should be avoided; directly load from europe_utils
config = load_config(file='config.json')
country_codes = config['paths']['data'] / 'country_codes.csv'
warnings.warn("""Still need to fix issue with loading country codes here""")
translate_cntr_codes = pd.read_csv(country_codes, delimiter=';').set_index('code3').to_dict(orient='dict')
# set paths
#input_folder = r"D:\COACCH_paper\data" #TODO: change to config
#output_folder = r"P:\osm_flood\network_analysis\data\main_output\{}"
#output_folder = config['paths']['main_output']
# parameters
AoI_name = 'AoI_RP100y_unique' #Todo: move these settings to config
weighing = 'time' # time or distance #Todo: move these settings to config
#weighing = 'distance'
# import files
def import_graph(the_country, nuts_class='nuts3',config_file='config.json'):
"""
Arguments:
*the_country* (string) : 3-letter code of country name e.g. 'Bel'
        *nuts_class* (string) : 'nuts3' or 'nuts2'
*config_file* (string) : name of the config file directing to the path, default = config.json
Returns:
**
"""
config = load_config(file=config_file)
networks_europe_path = config['paths']['graphs_folder']
edge_file = [os.path.join(networks_europe_path, f) for f in os.listdir(networks_europe_path) if
f == the_country + '-edges.feather'][0]
# read the network files from Elco Koks, the edges files already contain the flood hazard data
    network = pd.read_feather(edge_file)
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coercion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort=sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_gh7864(self):
# GH 7864
# make sure ordering is preserved
df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
df["grade"] = Categorical(df["raw_grade"])
df["grade"].cat.set_categories(["e", "a", "b"])
df1 = df[0:3]
df2 = df[3:]
tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
dfx = pd.concat([df1, df2])
tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
dfa = df1.append(df2)
tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
def test_categorical_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list("abc"), dtype="category")
s2 = Series(list("abd"), dtype="category")
exp = Series(list("abcabd"))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), dtype="category")
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
res = pd.concat([df2, df2])
exp = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame(
{"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
).set_index("B")
result = pd.concat([df2, df2])
expected = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
msg = "categories must match existing categories when appending"
with pytest.raises(TypeError, match=msg):
pd.concat([df2, df3])
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = pd.concat([a, b, c], axis=1)
exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
exp = DataFrame(
{
0: [1, 1, np.nan, np.nan],
1: [np.nan, 2, 2, np.nan],
2: [np.nan, np.nan, 3, 3],
},
columns=[0, 1, 2],
index=exp_idx,
)
tm.assert_frame_equal(result, exp)
def test_concat_order(self):
# GH 17344
dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)]
result = pd.concat(dfs, sort=True).columns
expected = dfs[0].columns
tm.assert_index_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
a = Series(pd.core.arrays.integer_array([1, 2]))
b = Series(to_decimal([1, 2]))
result = pd.concat([a, b], ignore_index=True)
expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
def test_concat_ordered_dict(self):
# GH 21510
expected = pd.concat(
[Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
result = pd.concat(
dict([("First", Series(range(3))), ("Another", Series(range(4)))])
)
tm.assert_series_equal(result, expected)
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
@pytest.mark.parametrize("pdt", [Series, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
dims = pdt(dtype=object).ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
# GH 18178 regression test
df1 = DataFrame({"foo": [1]})
df2 = DataFrame({"foo": []})
expected = DataFrame({"foo": [1.0]})
result = pd.concat([df1, df2])
tm.assert_frame_equal(result, expected)
def test_concat_empty_and_non_empty_series_regression():
# GH 18187 regression test
s1 = Series([1])
s2 = Series([], dtype=object)
expected = s1
result = pd.concat([s1, s2])
tm.assert_series_equal(result, expected)
def test_concat_sorts_columns(sort):
# GH-4588
df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
df2 = DataFrame({"a": [3, 4], "c": [5, 6]})
# for sort=True/None
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]},
columns=["a", "b", "c"],
)
if sort is False:
expected = expected[["b", "a", "c"]]
# default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], ignore_index=True, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_sorts_index(sort):
df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"])
df2 = DataFrame({"b": [1, 2]}, index=["a", "b"])
# For True/None
expected = DataFrame(
{"a": [2, 3, 1], "b": [1, 2, None]}, index=["a", "b", "c"], columns=["a", "b"]
)
if sort is False:
expected = expected.loc[["c", "a", "b"]]
# Warn and sort by default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], axis=1, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_inner_sort(sort):
# https://github.com/pandas-dev/pandas/pull/20613
df1 = DataFrame({"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"])
df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4])
with tm.assert_produces_warning(None):
# unset sort should *not* warn for inner join
# since that never sorted
result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True)
expected = DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"])
if sort is True:
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort():
# GH-4588
df = DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"])
result = pd.concat([df, df], sort=True, ignore_index=True)
expected = DataFrame(
{"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]},
columns=["a", "b", "c"],
)
tm.assert_frame_equal(result, expected)
result = pd.concat([df, df[["c", "b"]]], join="inner", sort=True, ignore_index=True)
expected = expected[["b", "c"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort_does_not_raise():
# GH-4588
# We catch TypeErrors from sorting internally and do not re-raise.
df = DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"])
expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"])
result = pd.concat([df, df], ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))])
def test_concat_series_name_npscalar_tuple(s1name, s2name):
# GH21015
s1 = Series({"a": 1, "b": 2}, name=s1name)
s2 = Series({"c": 5, "d": 6}, name=s2name)
result = pd.concat([s1, s2])
expected = Series({"a": 1, "b": 2, "c": 5, "d": 6})
tm.assert_series_equal(result, expected)
def test_concat_categorical_tz():
# GH-23816
a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
b = Series(["a", "b"], dtype="category")
result = pd.concat([a, b], ignore_index=True)
expected = Series(
[
pd.Timestamp("2017-01-01", tz="US/Pacific"),
pd.Timestamp("2017-01-02", tz="US/Pacific"),
"a",
"b",
]
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_unchanged():
# GH-12007
# test fix for when concat on categorical and float
# coerces dtype categorical -> float
df = DataFrame(Series(["a", "b", "c"], dtype="category", name="A"))
ser = Series([0, 1, 2], index=[0, 1, 3], name="B")
result = pd.concat([df, ser], axis=1)  # api: pandas.concat
import glob
import json
import os
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
from vxbt_calc import vxbt_calc
#from datetime import datetime
capi_data_path = '/path/to/coinapi_csvs'
start_c = pd.to_datetime('2019-05-01 00:00:00')
end_c = pd.to_datetime('2020-05-01 00:00:00')
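# Containers for per-expiry instrument metadata, order-book data, and the
# resulting 5-minute VXBT/GVXBT/AVXBT index output.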
instrument_start_end = dict()
capi_orderbook_data = dict()
capi_indices_df = pd.DataFrame(columns=['timestamp', 'vxbt', 'gvxbt', 'avxbt'])
results = {}
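# read_orderbook_data: load BTC option order-book CSVs for one expiry into a
# nested dict keyed expiry -> strike -> 'C'/'P', resampling best bid/ask to
# 5-minute bars (CoinAPI files are first padded to the instrument's start/end).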
def read_orderbook_data(csv_paths, expiry, coinapi=False, data_dict=dict()):
if expiry not in data_dict:
data_dict[expiry] = dict()
else:
# Already read
return data_dict
near_next_csv = list()
for path in csv_paths:
near_next_csv += glob.glob(path + f'BTC-{expiry}-*-*.csv')
#if len(near_next_csv) == 0:
# raise ValueError(f'{expiry} data unavailable!')
print(f'Reading {expiry} data from disk...')
for file_path in near_next_csv:
instrument = os.path.basename(file_path).split('-')
exp, strike, cp = instrument[1], int(instrument[2]), instrument[3].split('.')[0]
if strike not in data_dict[exp]:
data_dict[exp][strike] = dict()
try:
df = pd.read_csv(file_path).filter(['timestamp', 'best_bid_price', 'best_ask_price'])
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms', errors='coerce')
df = df.set_index('timestamp').drop_duplicates()
if coinapi:
df.loc[instrument_start_end[exp]['start']] = [np.nan, np.nan]
df.loc[instrument_start_end[exp]['end']] = [np.nan, np.nan]
df = df.sort_index()
df_resampled = df.resample('5min').ffill().fillna(method='bfill')#s.dropna()
else:
df_resampled = df.resample('5min').last().dropna()
except Exception as e:
print('ERROR', file_path, e)
data_dict[exp][strike][cp] = df_resampled
return data_dict
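# build_dataframes: at a given 5-minute timestamp, convert the near- and
# next-expiry call/put quotes from BTC to USD via the index price and return
# per-strike DataFrames with best_bid/best_ask columns.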
def build_dataframes(time, near_expiry, next_expiry, data_dict):
near_calls = dict()
near_puts = dict()
next_calls = dict()
next_puts = dict()
index_price = index_df[' Price'].loc[time]
for strike in data_dict[near_expiry]:
try:
near_calls[strike] = data_dict[near_expiry][strike]['C'].loc[time].astype(float) * index_price
except KeyError:
pass
try:
near_puts[strike] = data_dict[near_expiry][strike]['P'].loc[time].astype(float) * index_price
except KeyError:
pass
for strike in data_dict[next_expiry]:
try:
next_calls[strike] = data_dict[next_expiry][strike]['C'].loc[time].astype(float) * index_price
except KeyError:
pass
try:
next_puts[strike] = data_dict[next_expiry][strike]['P'].loc[time].astype(float) * index_price
except KeyError:
pass
near_calls_df = pd.DataFrame.from_dict(near_calls, orient='index').sort_index().replace(0, np.nan).rename(columns={'best_bid_price': 'best_bid', 'best_ask_price': 'best_ask'})
near_puts_df = pd.DataFrame.from_dict(near_puts, orient='index').sort_index().replace(0, np.nan).rename(columns={'best_bid_price': 'best_bid', 'best_ask_price': 'best_ask'})
next_calls_df = pd.DataFrame.from_dict(next_calls, orient='index').sort_index().replace(0, np.nan).rename(columns={'best_bid_price': 'best_bid', 'best_ask_price': 'best_ask'})
next_puts_df = pd.DataFrame.from_dict(next_puts, orient='index').sort_index().replace(0, np.nan).rename(columns={'best_bid_price': 'best_bid', 'best_ask_price': 'best_ask'})
return near_calls_df, near_puts_df, next_calls_df, next_puts_df
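# 5-minute Deribit BTC-USD index series used to convert option quotes to USD.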
index_df = pd.read_csv('/path/to/deribit_btc_usd_index_19-05-01_20-05-31_5min.csv')  # api: pandas.read_csv
'''Expand recurring subscription spend from a Google Sheet into daily cost time series.
Author: <NAME>
'''
import os
import glob
import datetime as dt
import pickle
import numpy as np
import pandas as pd
from google.cloud import bigquery
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import config_google
import config_recurringspend
from gcp_utility import download_table_from_gbq, upload_table_to_gbq
# df = download_table_from_gbq(project_name, dataset_name, table_name)
# upload_table_to_gbq(ndf, dataset_name, table_name)
def return_dataframe_from_sheet(spreadsheet_id, sample_range):
'''Read sample_range from the Google Sheet spreadsheet_id via the Sheets API
and return it as a DataFrame, using the first returned row as the header.
# If modifying these scopes, delete the file token.pickle.
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically with the authorization flow completes for the first
# time.
'''
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=spreadsheet_id,
range=sample_range).execute()
values = result.get('values', [])
df = pd.DataFrame(values)
new_header = df.iloc[0] #grab the first row for the header
df = df[1:] #take the data less the header row
df.columns = new_header #set the header row as the df header
return df
def blank_to_nan(df):
'''Replace blank elements (strings of length zero) with numpy NaN,
returning the cleaned values as a list of columns.
'''
data = []
for i in list(df):
temp = []
for j in df[i]:
if len(j) == 0:
temp.append(np.nan)
else:
temp.append(j)
data.append(temp)
return data
def generate_time_series_v1(df, start_col, cost_col):
'''Generate a long-format daily time series (dates, service, cost): each row
is expanded to one record per day between its start and end date, with a
daily cost of cost_col / 7.
'''
ndf = df
df_list = []
for name in ndf['service_name'].unique():
temp = ndf.loc[ndf['service_name'] == name]
dfs = []
for index, row in temp.iterrows():
df = pd.DataFrame(
list(
pd.date_range(row[start_col].date(),
row['end_date'].date())))
df.columns = ['dates']
df['service'] = name
df['cost'] = float(row[cost_col]) / 7
dfs.append(df)
df = pd.concat(dfs)
df = df.sort_values('dates', ascending=True)
df_list.append(df)
df = pd.concat(df_list)
# print(df.shape)
return df
def generate_time_series_v2(df, start_col, cost_col):
'''Generate a wide-format daily time series with one column per service_name,
assigning cost_col / 7 per day between each row's start and end dates.
'''
min_start = min(df[start_col])
max_end = max(df['end_date'])
base_df = pd.DataFrame(
list(pd.date_range(min_start.date(), max_end.date())))
base_df.columns = ['dates']
for name in df['service_name'].unique():
temp = df.loc[df['service_name'] == name]
dfs = []
for index, row in temp.iterrows():
ndf = pd.DataFrame(
list(
pd.date_range(row[start_col].date(),
row['end_date'].date())))
ndf.columns = ['dates']
ndf[name] = float(row[cost_col]) / 7
dfs.append(ndf)
if len(dfs) > 1:
for i in range(len(dfs) - 1):
temp = dfs[i].set_index('dates').add(
dfs[i + 1].set_index('dates'), fill_value=0).reset_index()
else:
temp = dfs[0]
base_df = pd.merge(base_df,
temp,
left_on='dates',
right_on='dates',
how='left')
return base_df
def generate_time_series_v3(df, start_col, cost_col):
'''Generate a wide-format daily time series with one column per service_name,
assigning the raw cost_col value per day and adding overlapping date ranges
together.
'''
min_start = min(df[start_col])
max_end = max(df['end_date'])
base_df = pd.DataFrame(
list(pd.date_range(min_start.date(), max_end.date())))
base_df.columns = ['dates']
for name in df['service_name'].unique():
temp = df.loc[df['service_name'] == name]
dfs = []
for index, row in temp.iterrows():
ndf = pd.DataFrame(
list(
pd.date_range(row[start_col].date(),
row['end_date'].date())))
ndf.columns = ['dates']
ndf[name] = float(row[cost_col])
dfs.append(ndf)
base_df = pd.merge(base_df,
dfs[0],
left_on='dates',
right_on='dates',
how='left')
base_df.fillna(0, inplace=True)
if len(dfs) > 1:
for i in range(1, len(dfs)):
base_df = base_df.set_index('dates').add(
dfs[i].set_index('dates'), fill_value=0).reset_index()
else:
pass
return base_df
def main():
'''Entry point: pull the recurring-spend sheet and clean it for the time-series builders.'''
spreadsheet_id = config_recurringspend.spreadsheet_id
sample_range = config_recurringspend.sample_range
df = return_dataframe_from_sheet(spreadsheet_id, sample_range)
# clean dataframe and remove irrelevant rows/columns
cols = list(df)
data = blank_to_nan(df)
ndf = pd.DataFrame(data).T
ndf.columns = cols
ndf = ndf[[i for i in list(ndf) if 'relative' not in i]]
ndf.columns = [
i.replace(' ', '_').replace('/', '_').replace('$', 'money').replace(
'[', '').replace(']', '') for i in list(ndf)
]
# key cols
start_col = 'start_date'
cost_col = 'cost_day_money_time'
ndf = ndf.loc[pd.isnull(ndf[cost_col]) == False]
ndf = ndf.loc[pd.isnull(ndf[start_col]) == False]  # api: pandas.isnull
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
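# Tests comparing pandas-on-Spark DataFrame behaviour against plain pandas.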
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert 'labels' or 'columns' parameter is set
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# Non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq( | pd.to_datetime(pdf) | pandas.to_datetime |
# Copyright 2019 TsumiNa. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import json
from itertools import zip_longest
from pathlib import Path
import pandas as pd
from pymatgen.ext.matproj import MPRester
from tqdm import tqdm
def _mp_structure(mp_ids, *, api_key=''):
# split requests into fixed-size groups
# eg: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks"""
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
# the following props will be fetched
mp_props = ['structure', 'material_id']
entries = []
mpid_groups = [g for g in grouper(mp_ids, 1000)]
with MPRester(api_key) as mpr:
for group in tqdm(mpid_groups):
mpid_list = [id for id in filter(None, group)]
chunk = mpr.query({"material_id": {"$in": mpid_list}}, mp_props)
entries.extend(chunk)
# entries = [e.as_dict() for e in entries]
df = | pd.DataFrame(entries, index=[e['material_id'] for e in entries]) | pandas.DataFrame |
from operator import methodcaller
import numpy as np
import pandas as pd
import pytest
from pandas.util import testing as tm
import ibis
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
from ibis.expr.scope import Scope
from ibis.expr.window import get_preceding_value, rows_with_max_lookback
from ibis.udf.vectorized import reduction
from ... import Backend, PandasClient, execute
from ...aggcontext import AggregationContext, window_agg_udf
from ...dispatch import pre_execute
from ...execution.window import get_aggcontext
pytestmark = pytest.mark.pandas
# These custom classes are used in test_custom_window_udf
class CustomInterval:
def __init__(self, value):
self.value = value
# These are necessary because ibis.expr.window
# will compare preceding and following
# with 0 to see if they are valid
def __lt__(self, other):
return self.value < other
def __gt__(self, other):
return self.value > other
class CustomWindow(ibis.expr.window.Window):
""" This is a dummy custom window that return n preceding rows
where n is defined by CustomInterval.value."""
def _replace(self, **kwds):
new_kwds = {
'group_by': kwds.get('group_by', self._group_by),
'order_by': kwds.get('order_by', self._order_by),
'preceding': kwds.get('preceding', self.preceding),
'following': kwds.get('following', self.following),
'max_lookback': kwds.get('max_lookback', self.max_lookback),
'how': kwds.get('how', self.how),
}
return CustomWindow(**new_kwds)
class CustomAggContext(AggregationContext):
def __init__(
self, parent, group_by, order_by, output_type, max_lookback, preceding
):
super().__init__(
parent=parent,
group_by=group_by,
order_by=order_by,
output_type=output_type,
max_lookback=max_lookback,
)
self.preceding = preceding
def agg(self, grouped_data, function, *args, **kwargs):
upper_indices = pd.Series(range(1, len(self.parent) + 2))
window_sizes = (
grouped_data.rolling(self.preceding.value + 1)
.count()
.reset_index(drop=True)
)
lower_indices = upper_indices - window_sizes
mask = upper_indices.notna()
result_index = grouped_data.obj.index
result = window_agg_udf(
grouped_data,
function,
lower_indices,
upper_indices,
mask,
result_index,
self.dtype,
self.max_lookback,
*args,
**kwargs,
)
return result
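# Note on the index arithmetic above: the rolling count caps each window at
# `preceding.value + 1` trailing rows, so `lower_indices`/`upper_indices` describe,
# per output row, the slice of `grouped_data` that `window_agg_udf` aggregates.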
@pytest.fixture(scope='session')
def sort_kind():
return 'mergesort'
default = pytest.mark.parametrize('default', [ibis.NA, ibis.literal('a')])
row_offset = pytest.mark.parametrize(
'row_offset', list(map(ibis.literal, [-1, 1, 0]))
)
range_offset = pytest.mark.parametrize(
'range_offset',
[
ibis.interval(days=1),
2 * ibis.interval(days=1),
-2 * ibis.interval(days=1),
],
)
@pytest.fixture
def row_window():
return ibis.window(following=0, order_by='plain_int64')
@pytest.fixture
def range_window():
return ibis.window(following=0, order_by='plain_datetimes_naive')
@pytest.fixture
def custom_window():
return CustomWindow(
preceding=CustomInterval(1),
following=0,
group_by='dup_ints',
order_by='plain_int64',
)
@default
@row_offset
def test_lead(t, df, row_offset, default, row_window):
expr = t.dup_strings.lead(row_offset, default=default).over(row_window)
result = expr.execute()
expected = df.dup_strings.shift(execute(-row_offset))
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
@default
@row_offset
def test_lag(t, df, row_offset, default, row_window):
expr = t.dup_strings.lag(row_offset, default=default).over(row_window)
result = expr.execute()
expected = df.dup_strings.shift(execute(row_offset))
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
@default
@range_offset
def test_lead_delta(t, df, range_offset, default, range_window):
expr = t.dup_strings.lead(range_offset, default=default).over(range_window)
result = expr.execute()
expected = (
df[['plain_datetimes_naive', 'dup_strings']]
.set_index('plain_datetimes_naive')
.squeeze()
.shift(freq=execute(-range_offset))
.reindex(df.plain_datetimes_naive)
.reset_index(drop=True)
)
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
@default
@range_offset
def test_lag_delta(t, df, range_offset, default, range_window):
expr = t.dup_strings.lag(range_offset, default=default).over(range_window)
result = expr.execute()
expected = (
df[['plain_datetimes_naive', 'dup_strings']]
.set_index('plain_datetimes_naive')
.squeeze()
.shift(freq=execute(range_offset))
.reindex(df.plain_datetimes_naive)
.reset_index(drop=True)
)
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
def test_first(t, df):
expr = t.dup_strings.first()
result = expr.execute()
assert result == df.dup_strings.iloc[0]
def test_last(t, df):
expr = t.dup_strings.last()
result = expr.execute()
assert result == df.dup_strings.iloc[-1]
def test_group_by_mutate_analytic(t, df):
gb = t.groupby(t.dup_strings)
expr = gb.mutate(
first_value=t.plain_int64.first(),
last_value=t.plain_strings.last(),
avg_broadcast=t.plain_float64 - t.plain_float64.mean(),
delta=(t.plain_int64 - t.plain_int64.lag())
/ (t.plain_float64 - t.plain_float64.lag()),
)
result = expr.execute()
gb = df.groupby('dup_strings')
expected = df.assign(
last_value=gb.plain_strings.transform('last'),
first_value=gb.plain_int64.transform('first'),
avg_broadcast=df.plain_float64 - gb.plain_float64.transform('mean'),
delta=(
(df.plain_int64 - gb.plain_int64.shift(1))
/ (df.plain_float64 - gb.plain_float64.shift(1))
),
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_players(players, players_df):
lagged = players.mutate(pct=lambda t: t.G - t.G.lag())
expected = players_df.assign(
pct=players_df.G - players_df.groupby('playerID').G.shift(1)
)
cols = expected.columns.tolist()
result = lagged.execute()[cols].sort_values(cols).reset_index(drop=True)
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# FPL Model -- shortened
import pandas as pd
import requests, json
# requests is a python library that provides useful methods for api requests and webscraping
r = requests.get('https://fantasy.premierleague.com/drf/bootstrap-static')
data = r.json()['elements']
pd.set_option('display.max_columns', 60)
pd.options.display.max_rows = 999
df = | pd.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 12:13:22 2020
@author: <NAME>
ReScale the data
"""
# Import library
import pandas as pd
import numpy as np
import itertools
from sklearn.preprocessing import MinMaxScaler
from sklearn.mixture import GaussianMixture
def rescale (adata, gate=None, return_gates=False, failed_markers=None, method='all'):
'''
Parameters:
data: AnnData object
gate: DataFrame with first column as markers and second column as the gate values in log1p scale
failed_markers: list. list of markers that are not expressed at all in any cell. pass in as ['CD20', 'CD3D']
Returns:
Ann Data object with rescaled data
Example:
adata = rescale (adata, gate=manual_gate, failed_markers=['CD20', 'CD21'])
'''
def rescale_independent (adata, gate=None, return_gates=False, failed_markers=None):
print('Scaling Image '+ str(adata.obs['ImageId'].unique()))
# Copy of the raw data if it exisits
if adata.raw is not None:
adata.X = adata.raw.X
data = pd.DataFrame(adata.X, columns = adata.var.index, index= adata.obs.index)
# Merging the manual gates and non-working markers together, if any
if gate is not None:
m_markers = list(gate.iloc[:,0])
manual_gate_markers = gate
if failed_markers != None:
manual_gate_markers = pd.DataFrame(data[failed_markers].quantile(0.9999999))
manual_gate_markers['markers'] = failed_markers
# move column to front
cols = manual_gate_markers.columns.tolist()
cols.insert(0, cols.pop(cols.index('markers')))
manual_gate_markers = manual_gate_markers.reindex(columns= cols)
manual_gate_markers.columns = ['marker', 'gate']
m_markers = failed_markers
if gate is not None and failed_markers != None:
m_markers = list(gate.iloc[:,0]) + list(manual_gate_markers.iloc[:,0])
gate.columns = ['marker', 'gate']
manual_gate_markers = pd.concat([gate, manual_gate_markers])
if gate is None and failed_markers == None:
m_markers = []
# Find markers to send to gmm modelling
if gate is not None or failed_markers is not None:
gmm_markers = list(np.setdiff1d(data.columns, m_markers))
else:
gmm_markers = list(data.columns)
# If manual gate is not provided scale the data
if len(gmm_markers) != 0:
gmm_data = data[gmm_markers]
# Clip off the 99th percentile
def clipping (x):
clip = x.clip(lower =np.percentile(x,1), upper=np.percentile(x,99)).tolist()
return clip
# Run the function
gmm_data = gmm_data.apply(clipping)
# Scaling the data
sum_data = gmm_data.sum(axis=1) # Calculate total count for each cell
n_count = gmm_data.div(sum_data, axis=0) # Divide genes by total count for every cell
med = np.median(list(itertools.chain(*gmm_data.values.tolist()))) # Calculate median count of the entire dataset
n_count = n_count*med # Multiply by scaling factor (median count of entire dataset)
n_log = np.log1p(n_count) # Log transform data
scaler = MinMaxScaler(feature_range=(0, 1))
s = scaler.fit_transform(n_log)
normalised_data = pd.DataFrame(s, columns = gmm_data.columns, index= gmm_data.index)
# Gaussian fit to identify the gate for each marker and scale based on the gate
# Empty data frame to hold the results
all_gmm_data = pd.DataFrame()
def gmm_gating (data, marker, return_gates):
# Print
print('Finding the optimal gate for ' + str(marker))
# Identify the marker to fit the model
m = data[marker].values
# Perform GMM
data_gm = m.reshape(-1, 1)
#gmm = GaussianMixture(n_components=2, means_init=[[0],[1]],covariance_type='tied')
gmm = GaussianMixture(n_components=2)
gmm.fit(data_gm)
gate = np.mean(gmm.means_)
# Find the closest value to the gate
absolute_val_array = np.abs(m - gate)
smallest_difference_index = absolute_val_array.argmin()
closest_element = m[smallest_difference_index]
# rescale the data based on the identified gate
marker_study = pd.DataFrame(m, index= data.index)
marker_study = marker_study.sort_values(0)
# Find the index of the gate
gate_index = marker_study.index[marker_study[0] == closest_element][0]
# Split into high and low groups
high = marker_study.loc[gate_index:,:]
low = marker_study.loc[:gate_index,:]
# Prepare for scaling the high and low dataframes
scaler_high = MinMaxScaler(feature_range=(0.5, 1))
scaler_low = MinMaxScaler(feature_range=(0, 0.5))
# Scale it
h = pd.DataFrame(scaler_high.fit_transform(high), index = high.index)
l = pd.DataFrame(scaler_low.fit_transform(low), index = low.index)
# Merge the high and low and resort it
scaled_data = pd.concat([l,h])
scaled_data = scaled_data.loc[~scaled_data.index.duplicated(keep='first')]
scaled_data = scaled_data.reindex(data.index)
#return scaled_data
if return_gates == True:
return gate
else:
return scaled_data
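# In short: for every marker a two-component GaussianMixture is fit, the gate is taken as
# the mean of the two component means, and expression is rescaled to [0, 0.5] below the
# gate and [0.5, 1] above it (or the raw gate value is returned when return_gates=True).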
# Apply the function
r_gmm_gating = lambda x: gmm_gating(data=normalised_data, marker=x,return_gates=return_gates) # Create lambda function
all_gmm_data = list(map(r_gmm_gating, gmm_markers)) # Apply function
all_gmm_data = pd.concat(all_gmm_data, axis=1, sort=False)
all_gmm_data.columns = gmm_markers
else:
all_gmm_data = pd.DataFrame()
# Empty data frame to hold the results
all_manual_data = pd.DataFrame()
if len(m_markers) != 0:
m_data = np.log1p(data[m_markers])
# Clip the data
def clipping (x):
clip = x.clip(lower =np.percentile(x,1), upper=np.percentile(x,99)).tolist()
return clip
# Run the function
m_data = m_data.apply(clipping)
def manual_gating (data,marker,gate):
# Print
print('Scaling ' + str(marker))
# Work on processing manual gates
m = gate[gate.iloc[:,0] == marker].iloc[:,1] # gate of the marker passed in
# Find the closest value to the gate
absolute_val_array = np.abs(data[marker].values - float(m))
smallest_difference_index = absolute_val_array.argmin()
closest_element = data[marker].values[smallest_difference_index]
# rescale the data based on the identified gate
marker_study = data[marker]
marker_study = marker_study.sort_values(0)
# Find the index of the gate
gate_index = marker_study.index[marker_study == closest_element][0]
# Split into high and low groups
high = marker_study[gate_index:]
low = marker_study[:gate_index]
# Prepare for scaling the high and low dataframes
scaler_high = MinMaxScaler(feature_range=(0.5, 1))
scaler_low = MinMaxScaler(feature_range=(0, 0.5))
# Scale it
h = pd.DataFrame(scaler_high.fit_transform(high.values.reshape(-1, 1)), index = high.index)
l = pd.DataFrame(scaler_low.fit_transform(low.values.reshape(-1, 1)), index = low.index)
# Merge the high and low and resort it
scaled_data = pd.concat([l,h])
scaled_data = scaled_data.loc[~scaled_data.index.duplicated(keep='first')]
scaled_data = scaled_data.reindex(data.index)
# Return
return scaled_data
# Apply the function
r_manual_gating = lambda x: manual_gating(data=m_data, marker=x, gate=manual_gate_markers) # Create lambda function
all_manual_data = list(map(r_manual_gating, m_markers)) # Apply function
all_manual_data = | pd.concat(all_manual_data, axis=1, sort=False) | pandas.concat |
import logging
import math
import re
from collections import Counter
import numpy as np
import pandas as pd
from certa.utils import diff
def get_original_prediction(r1, r2, predict_fn):
r1r2 = get_row(r1, r2)
return predict_fn(r1r2)[['nomatch_score', 'match_score']].values[0]
def get_row(r1, r2, lprefix='ltable_', rprefix='rtable_'):
r1_df = pd.DataFrame(data=[r1.values], columns=r1.index)
r2_df = pd.DataFrame(data=[r2.values], columns=r2.index)
r1_df.columns = list(map(lambda col: lprefix + col, r1_df.columns))
r2_df.columns = list(map(lambda col: rprefix + col, r2_df.columns))
r1r2 = pd.concat([r1_df, r2_df], axis=1)
return r1r2
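# Hedged usage sketch for the two helpers above (record contents are made up for illustration;
# `predict_fn` is any wrapper returning 'nomatch_score'/'match_score' columns, as assumed in this module):
# >>> r1 = pd.Series({'id': 0, 'name': 'iphone 7', 'price': 599})
# >>> r2 = pd.Series({'id': 3, 'name': 'apple iphone 7', 'price': 579})
# >>> get_row(r1, r2).columns.tolist()
# ['ltable_id', 'ltable_name', 'ltable_price', 'rtable_id', 'rtable_name', 'rtable_price']
# >>> get_original_prediction(r1, r2, predict_fn)  # -> array([nomatch_score, match_score])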
def support_predictions(r1: pd.Series, r2: pd.Series, lsource: pd.DataFrame,
rsource: pd.DataFrame, predict_fn, lprefix, rprefix, num_triangles: int = 100,
class_to_explain: int = None, max_predict: int = -1,
use_w: bool = True, use_q: bool = True):
'''
generate a pd.DataFrame of support predictions to be used to generate open triangles.
:param r1: the "left" record
:param r2: the "right" record
:param lsource: the "left" data source
:param rsource: the "right" data source
:param predict_fn: the ER model prediction function
:param lprefix: the prefix of attributes from the "left" table
:param rprefix: the prefix of attributes from the "right" table
:param num_triangles: number of open triangles to be used to generate the explanation
:param class_to_explain: the class to be explained
:param max_predict: the maximum number of predictions to be performed by the ER model to generate the requested
number of open triangles
:param use_w: whether to use left open triangles
:param use_q: whether to use right open triangles
:return: a pd.DataFrame of record pairs with one record from the original prediction and one record yielding an
opposite prediction by the ER model
'''
r1r2 = get_row(r1, r2)
original_prediction = predict_fn(r1r2)[['nomatch_score', 'match_score']].values[0]
r1r2['id'] = "0@" + str(r1r2[lprefix + 'id'].values[0]) + "#" + "1@" + str(r1r2[rprefix + 'id'].values[0])
copies, copies_left, copies_right = expand_copies(lprefix, lsource, r1, r2, rprefix, rsource)
find_positives, support = get_support(class_to_explain, pd.concat([lsource, copies_left]), max_predict,
original_prediction, predict_fn, r1, r2, pd.concat([rsource, copies_right]),
use_w, use_q, lprefix, rprefix, num_triangles)
if len(support) > 0:
if len(support) > num_triangles:
support = support.sample(n=num_triangles)
else:
logging.warning(f'could find {str(len(support))} triangles of the {str(num_triangles)} requested')
support['label'] = list(map(lambda predictions: int(round(predictions)),
support.match_score.values))
support = support.drop(['match_score', 'nomatch_score'], axis=1)
if class_to_explain == None:
r1r2['label'] = np.argmax(original_prediction)
else:
r1r2['label'] = class_to_explain
support_pairs = pd.concat([r1r2, support], ignore_index=True)
return support_pairs, copies_left, copies_right
else:
logging.warning('no triangles found')
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
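# Hedged usage sketch (names and sizes are illustrative, not taken from a specific dataset):
# >>> support_pairs, copies_l, copies_r = support_predictions(
# ...     r1, r2, lsource, rsource, predict_fn,
# ...     lprefix='ltable_', rprefix='rtable_', num_triangles=100)
# The first row of `support_pairs` is the original pair with its label; the remaining rows
# pair one original record with a record that flips the prediction, and all three returned
# frames are empty when no open triangles could be found.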
def find_candidates_predict(record, source, find_positives, predict_fn, num_candidates, lj=True,
max=-1, lprefix='ltable_', rprefix='rtable_'):
if lj:
records = pd.DataFrame()
records = records.append([record] * len(source), ignore_index=True)
copy = source.copy()
records.columns = list(map(lambda col: lprefix + col, records.columns))
copy.columns = list(map(lambda col: rprefix + col, copy.columns))
records.index = copy.index
samples = pd.concat([records, copy], axis=1)
else:
copy = source.copy()
records = pd.DataFrame()
records = records.append([record] * len(source), ignore_index=True)
records.index = copy.index
copy.columns = list(map(lambda col: lprefix + col, copy.columns))
records.columns = list(map(lambda col: rprefix + col, records.columns))
samples = pd.concat([copy, records], axis=1)
if max > 0:
samples = samples.sample(frac=1)[:max]
record2text = " ".join([str(val) for k, val in record.to_dict().items() if k not in ['id']])
samples['score'] = samples.T.apply(lambda row: cs(record2text, " ".join(row.astype(str))))
samples = samples.sort_values(by='score', ascending=not find_positives)
samples = samples.drop(['score'], axis=1)
result = pd.DataFrame()
batch = num_candidates * 4
splits = min(10, int(len(samples) / batch))
i = 0
while len(result) < num_candidates and i < splits:
batch_samples = samples[batch * i:batch * (i + 1)]
predicted = predict_fn(batch_samples)
if find_positives:
out = predicted[predicted["match_score"] > 0.5]
else:
out = predicted[predicted["match_score"] < 0.5]
if len(out) > 0:
result = pd.concat([result, out], axis=0)
logging.info(f'{i}:{len(out)},{len(result)}')
i += 1
return result
def generate_subsequences(lsource, rsource, max=-1):
new_records_left_df = pd.DataFrame()
for i in np.arange(len(lsource[:max])):
r = lsource.iloc[i]
nr_df = pd.DataFrame(generate_modified(r, start_id=len(new_records_left_df) + len(lsource)))
if len(nr_df) > 0:
nr_df.columns = lsource.columns
new_records_left_df = pd.concat([new_records_left_df, nr_df])
new_records_right_df = pd.DataFrame()
for i in np.arange(len(rsource[:max])):
r = rsource.iloc[i]
nr_df = pd.DataFrame(generate_modified(r, start_id=len(new_records_right_df) + len(rsource)))
if len(nr_df) > 0:
nr_df.columns = rsource.columns
new_records_right_df = pd.concat([new_records_right_df, nr_df])
return new_records_left_df, new_records_right_df
def get_support(class_to_explain, lsource, max_predict, original_prediction, predict_fn, r1, r2,
rsource, use_w, use_q, lprefix, rprefix, num_triangles):
candidates4r1 = | pd.DataFrame() | pandas.DataFrame |
import fitbit
import gather_keys_oauth2 as Oauth2
import pandas as pd
import datetime
import os.path as osp
import os
# ************ UBC MIST Account *********
# CLIENT_ID ='22DF24'
# CLIENT_SECRET = '7848281e9151008de32698f7dd304c68'
# ************ Hooman's Account *********
CLIENT_ID ='22D68G'
CLIENT_SECRET = '32e28a7e72842298fd5d97ce123104ca'
"""for obtaining Access-token and Refresh-token"""
server = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)
server.browser_authorize()
ACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])
REFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])
"""Authorization"""
auth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
yesterday = str((datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y%m%d")) # To avoid updating dates everyday
yesterday2 = str((datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y-%m-%d"))
today = str(datetime.datetime.now().strftime("%Y%m%d"))
# ****************************************************************
# ************* get heart rate data / for Yesterday ***************
# ****************************************************************
heart_rate_data_csv_address = 'Data/Heart/Heart_Rate_Data/'+ yesterday +'.csv'
if not osp.exists(osp.dirname(heart_rate_data_csv_address)):
os.makedirs(osp.dirname(heart_rate_data_csv_address))
if not osp.isfile(heart_rate_data_csv_address):
fit_statsHR = auth2_client.intraday_time_series('activities/heart', base_date=yesterday2, detail_level='1sec') #collects data
# Put it in a readable format using pandas
time_list = []
val_list = []
for i in fit_statsHR['activities-heart-intraday']['dataset']:
val_list.append(i['value'])
time_list.append(i['time'])
heartdf = | pd.DataFrame({'Heart Rate':val_list,'Time':time_list}) | pandas.DataFrame |
import csv
import json
from . import prf, glm, plotting
import lmfit
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import nibabel as nb
import nideconv as nd
import numpy as np
import operator
import os
import pandas as pd
from PIL import ImageColor
from prfpy import stimulus
import random
from scipy import io, stats
import seaborn as sns
from shapely import geometry
import subprocess
import warnings
import yaml
opj = os.path.join
pd.options.mode.chained_assignment = None # disable warning thrown by string2float
warnings.filterwarnings("ignore")
def copy_hdr(source_img,dest_img):
"""copy_hdr
Similar functionality to fslcpgeom, but more rigorous: uses nibabel to copy the ENTIRE header, including the affine, quaternion rotations, and dimensions.
Parameters
----------
source_img: str, nibabel.Nifti1Image
source image from which to derive the header information
dest_img: str, nibabel.Nifti1Image
destination image to which to copy the header from <source image> to
Returns
----------
nibabel.Nifti1Image
`source_img` with updated header information
Example
----------
>>> new_img = copy_hdr(img1,img2)
"""
if isinstance(source_img, nb.Nifti1Image):
src_img = source_img
elif isinstance(source_img, str):
src_img = nb.load(source_img)
if isinstance(dest_img, nb.Nifti1Image):
targ_img = dest_img
elif isinstance(dest_img, str):
targ_img = nb.load(dest_img)
new = nb.Nifti1Image(targ_img.get_fdata(), affine=src_img.affine, header=src_img.header)
return new
class color:
# """color
# Add some color to the terminal.
# Example
# ----------
# >>> print("set orientation to " + utils.color.BOLD + utils.color.RED + "SOME TEXT THAT'LL BE IN TED" + utils.color.END)
# """
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def str2operator(ops):
if ops == "and" or ops == "&" or ops == "&&":
return operator.and_
elif ops == "or" or ops == "|" or ops == "||":
return operator.or_
elif ops == "is not" or ops == "!=":
return operator.ne
elif ops == "is" or ops == "==" or ops == "=":
return operator.eq
else:
raise NotImplementedError()
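# Minimal sketch of how str2operator is meant to be used (inputs are illustrative):
# >>> str2operator("==")(3, 3)  # operator.eq -> True
# >>> str2operator("!=")(3, 4)  # operator.ne -> True
# Any string outside the handled and/or/is/is-not variants raises NotImplementedError.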
def find_nearest(array, value, return_nr=1):
"""find_nearest
Find the index and value of the element in an array closest to a given value. With `return_nr=1` the single closest element is returned; for a larger integer or `return_nr="all"`, the indices of (up to `return_nr`) exact matches in the array are returned.
Parameters
----------
array: numpy.ndarray
array to search in
value: float
value to search for in `array`
return_nr: int, str, optional
number of elements to return after searching for elements in `array` that are close to `value`. Can either be an integer or a string *all*
Returns
----------
int
integer representing the index of the element in `array` closest to `value`.
list
if `return_nr` > 1, a list of indices will be returned
numpy.ndarray
value in `array` at the index closest to `value`
"""
array = np.asarray(array)
if return_nr == 1:
idx = (np.abs(array-value)).argmin()
return idx, array[idx]
else:
try:
idx = (np.abs(array-value))
if return_nr == "all":
idc_list = np.sort(np.where(idx == 0.0)[0])
else:
idc_list = np.sort(np.where(idx == 0.0)[0])[:return_nr]
return idc_list, array[idc_list]
except Exception:
print("Could not perform this operation")
def replace_string(fn, str1, str2, fn_sep='_'):
"""replace_string
Replace a string with another string given a filename
Parameters
----------
fn: str
filename in which we need to replace something
str1: str
string-to-be-replaced
str2: str
string-to-replace-str1-with
fn_sep: str
what type of element can we use to split the filename into chunks that we can replace
Returns
----------
str
filename with replaced substring
"""
split_name = fn.split(os.sep)[-1].split(fn_sep)
idx = [(i, split_name.index(str1))
for i, split_name in enumerate(split_name) if str1 in split_name][0][0]
split_name[idx] = split_name[idx].replace(split_name[idx], str2)
new_filename = fn_sep.join(split_name)
new_filename = opj(os.path.dirname(fn), new_filename)
return new_filename
def convert2unit(v, method="np"):
"""convert vector to unit vector"""
if method.lower() == "np":
v_hat = v / np.linalg.norm(v)
return v_hat
elif method.lower() == "mesh":
# https://sites.google.com/site/dlampetest/python/calculating-normals-of-a-triangle-mesh-using-numpy
lens = np.sqrt( v[:,0]**2 + v[:,1]**2 + v[:,2]**2 )
v[:,0] /= lens
v[:,1] /= lens
v[:,2] /= lens
return v
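# Example (sketch):
# >>> convert2unit(np.array([3.0, 4.0]))
# array([0.6, 0.8])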
def string2float(string_array):
"""string2float
This function converts an array in string representation to a regular float array. This can happen, for instance, when you've stored a numpy array in a pandas dataframe (such is the case with the 'normal' vector). It splits the string on whitespace, filters out empty elements, converts the remaining elements to floats, and returns these in an array.
Parameters
----------
string_array: str
string to be converted to a valid numpy array with float values
Returns
----------
numpy.ndarray
array containing elements in float rather than in string representation
Example
----------
>>> string2float('[ -7.42 -92.97 -15.28]')
array([ -7.42, -92.97, -15.28])
"""
if type(string_array) == str:
new = string_array[1:-1].split(' ')[0:]
new = list(filter(None, new))
new = [float(i) for i in new]
new = np.array(new)
return new
else:
# array is already in non-string format
return string_array
def get_module_nr(key_word):
"""get_module_nr
Fetches the module number from the master script given an input string. It sends a command using sed and grep to the bash command line. Won't work on windows! See `call_bashhelper` for more information (that version is actually more accurate as it allows additions to the `master` usage, while that's hardcoded in this one..)
Parameters
----------
key_word: str
search string of the module you're interested in. Should at least partially match, otherwise the function will not find anything. For instance, if we want to know which module the creation of the sinus mask is, we can do:
Example
----------
>>> get_module_nr('sinus')
'12'
"""
cmd = "sed -n \'50,85p\' {master} | grep -A0 \"{key}\" | grep -Eo \"[0-9]{{1,2}}\" | head -n 1".format(
master=opj(os.environ['DIR_SCRIPTS'], 'shell', 'master'), key=key_word)
# print(cmd)
mod = subprocess.getoutput(cmd)
return mod
def bids_fullfile(bids_image):
"""get full path to a BIDS-image filename"""
fullfile = opj(bids_image.dirname, bids_image.filename)
return fullfile
def decode(obj):
"""decode an object"""
if isinstance(obj, bytes):
obj = obj.decode()
return obj
def reverse_sign(x):
"""reverse_sign
Inverts the sign given set of values. Can be either one value or an array of values that need to be inverted
Parameters
----------
x: int,float,list,numpy.ndarray
input that needs inverting, either one value or a list
Returns
----------
the inverse of whatever the input `x` was
Example
----------
>>> # input is integer
>>> x = 5
>>> reverse_sign(x)
-5
>>> # input is array
>>> x = np.array([2, -2340, 2345,123342, 123])
>>> In [6]: reverse_sign(x)
array([-2.00000e+00, 2.34000e+03, -2.34500e+03, -1.23342e+05,-1.23000e+02])
>>> # input is float
>>> x = 5.0
>>> reverse_sign(x)
-5.0
"""
inverted = ()
if isinstance(x, int) or isinstance(x, float) or isinstance(x, np.float32):
if x > 0:
inverted = -x
else:
inverted = abs(x)
elif isinstance(x, np.ndarray):
for i in x:
if float(i) > 0:
val = -float(i)
else:
val = abs(float(i))
inverted = np.append(inverted, val)
return inverted
def remove_files(path, string, ext=False):
"""remove_files
Remove files from a given path that contain a string as extension (`ext=True`), or at the
start of the filename (`ext=False`)
Parameters
----------
path: str
path to the directory from which we need to remove files
string: str
tag for files we need to remove
ext: str, optional
only remove files containing `string` that end with `ext`
"""
files_in_directory = os.listdir(path)
if ext:
filtered_files = [file for file in files_in_directory if file.endswith(string)]
else:
filtered_files = [file for file in files_in_directory if file.startswith(string)]
for file in filtered_files:
path_to_file = os.path.join(path, file)
os.remove(path_to_file)
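# Example (sketch; hypothetical path):
# >>> remove_files("/some/dir", ".json", ext=True)   # delete all files ending in '.json'
# >>> remove_files("/some/dir", "tmp_")              # delete all files starting with 'tmp_'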
def match_lists_on(ref_list, search_list, matcher="run"):
"""match_lists_on
Match two list based on a BIDS-specifier such as 'sub', 'run', etc. Can be any key that is extracted using :func:`linescanning.utils.split_bids_components`.
Parameters
----------
ref_list: list
List to use as reference
search_list: list
List to search for items in `ref_list`
matcher: str, optional
BIDS-identifier, by default "run"
Returns
----------
list
new `search_list` filtered for items in `ref_list`
Example
----------
>>> # Let's say I have functional files for 3 runs
>>> func_file
>>> ['sub-003_ses-3_task-SR_run-3_bold.mat',
>>> 'sub-003_ses-3_task-SR_run-4_bold.mat',
>>> 'sub-003_ses-3_task-SR_run-6_bold.mat']
>>> # and anatomical slices for 5 runs
>>> anat_slices
>>> ['sub-003_ses-3_acq-1slice_run-2_T1w.nii.gz',
>>> 'sub-003_ses-3_acq-1slice_run-3_T1w.nii.gz',
>>> 'sub-003_ses-3_acq-1slice_run-4_T1w.nii.gz',
>>> 'sub-003_ses-3_acq-1slice_run-5_T1w.nii.gz',
>>> 'sub-003_ses-3_acq-1slice_run-6_T1w.nii.gz']
>>> # I can then use `match_list_on` to find the anatomical slices corresponding to the functional files
>>> from linescanning import utils
>>> utils.match_lists_on(func_file, anat_slices, matcher='run')
>>> ['sub-003_ses-3_acq-1slice_run-3_T1w.nii.gz',
>>> 'sub-003_ses-3_acq-1slice_run-4_T1w.nii.gz',
>>> 'sub-003_ses-3_acq-1slice_run-6_T1w.nii.gz']
"""
new_list = []
for ii in ref_list:
comps = split_bids_components(ii)
ff = get_file_from_substring(f"{matcher}-{comps[matcher]}", search_list, return_msg="None")
if ff != None:
if ff == search_list:
raise ValueError(f"Output list is equal to input list with identifier '{matcher}'. Please use unique identifier")
new_list.append(ff)
return new_list
def get_file_from_substring(filt, path, return_msg='error', exclude=None):
"""get_file_from_substring
This function returns the file given a path and a substring. Avoids annoying stuff with glob. Now also allows multiple filters to be applied to the list of files in the directory. The idea here is to construct a binary matrix of shape (files_in_directory, nr_of_filters), and test for each filter if it exists in the filename. If all filters are present in a file, then the entire row should be 1. This is what we'll be looking for. If multiple files are found in this manner, a list of paths is returned. If only 1 file was found, the string representing the filepath will be returned.
Parameters
----------
filt: str, list
tag for files we need to select. Now also support a list of multiple filters.
path: str
path to the directory from which we need to remove files
return_msg: str, optional
whether to raise an error (*return_msg='error') or return None (*return_msg=None*). Default = 'error'.
exclude: str, optional:
Specify string to exclude from options. This criteria will be ensued after finding files that conform to `filt` as final filter.
Returns
----------
str
path to the files containing `string`. If no files could be found, `None` is returned
list
list of paths if multiple files were found
Raises
----------
FileNotFoundError
If no files matching the specified filters could be found
Example
----------
>>> get_file_from_substring("R2", "/path/to/prf")
'/path/to/prf/r2.npy'
>>> get_file_from_substring(['gauss', 'best_vertices'], "path/to/pycortex/sub-xxx")
'/path/to/pycortex/sub-xxx/sub-xxx_model-gauss_desc-best_vertices.csv'
>>> get_file_from_substring(['best_vertices'], "path/to/pycortex/sub-xxx")
['/path/to/pycortex/sub-xxx/sub-xxx_model-gauss_desc-best_vertices.csv',
'/path/to/pycortex/sub-xxx/sub-xxx_model-norm_desc-best_vertices.csv']
"""
input_is_list = False
if isinstance(filt, str):
filt = [filt]
if isinstance(filt, list):
# list and sort all files in the directory
if isinstance(path, str):
files_in_directory = sorted(os.listdir(path))
elif isinstance(path, list):
input_is_list = True
files_in_directory = path.copy()
else:
raise ValueError("Unknown input type; should be string to path or list of files")
# the idea is to create a binary matrix for the files in 'path', loop through the filters, and find the row where all values are 1
filt_array = np.zeros((len(files_in_directory), len(filt)))
for ix,f in enumerate(files_in_directory):
for filt_ix,filt_opt in enumerate(filt):
filt_array[ix,filt_ix] = filt_opt in f
# now we have a binary <number of files x number of filters> array. If all filters were available in a file, the entire row should be 1,
# so we're going to look for those rows
full_match = np.ones(len(filt))
full_match_idc = np.where(np.all(filt_array==full_match,axis=1))[0]
if len(full_match_idc) == 1:
fname = files_in_directory[full_match_idc[0]]
if input_is_list:
return fname
else:
f = opj(path, fname)
if exclude != None:
if exclude not in f:
return opj(path, fname)
else:
if return_msg == "error":
raise FileNotFoundError(f"Could not find file with filters: {filt} and exclusion of [{exclude}] in '{path}'")
else:
return None
else:
return opj(path, fname)
elif len(full_match_idc) > 1:
match_list = []
for match in full_match_idc:
fname = files_in_directory[match]
if input_is_list:
match_list.append(fname)
else:
match_list.append(opj(path, fname))
if exclude != None:
return [f for f in match_list if exclude not in f]
else:
return match_list
# return match_list
else:
if return_msg == "error":
raise FileNotFoundError(f"Could not find file with filters: {filt} in {path}")
else:
return None
def get_bids_file(layout, filter=None):
"""get_bids_file
This search function is more tailored for BIDSified data, and requires a list of BIDS-filenames as per output for `l = BIDSLayout(dir, validate=False)` & `fn = l.get(session='1', datatype='anat')` for instance. From this list the script will look the list of given filters.
Parameters
----------
layout: :abbr:`BIDS (Brain Imaging Data Structure)` layout object
BIDS-layout object obtained with `BIDSLayout`
filter: str, optional
filter for particular strings
Returns
----------
str
filenames meeting the specifications (i.e., existing in `layout` and containing strings specified in `filters`)
Example
----------
>>> layout = BIDSLayout(somedir).get(session='1', datatype='anat')
>>> fn = get_bids_file(layout, filter=['str1', 'str2', 'str3'])
"""
import warnings
warnings.filterwarnings("ignore")
l = []
for i in layout:
if all(f in i for f in filter) == True:
l.append(i)
if len(l) == 1:
return l[0]
else:
return l
def get_matrixfromants(mat, invert=False):
"""get_matrixfromants
This function greps the rotation and translation matrices from the matrix-file created by `antsRegistration`. It basically does the same as one of the ANTs functions, but still..
Parameters
----------
mat: str
string pointing to a *.mat*-file containing the transformation.
invert: bool
Boolean for inverting the matrix (`invert=True`) or not (`invert=False`)
Return
----------
numpy.ndarray
(4,4) array representing the transformation matrix
"""
try:
genaff = io.loadmat(mat)
key = list(genaff.keys())[0]
matrix = np.hstack((genaff[key][0:9].reshape(
3, 3), genaff[key][9:].reshape(3, 1)))
matrix = np.vstack([matrix, [0, 0, 0, 1]])
except:
# assuming I just got a matrix
matrix = np.loadtxt(mat)
if invert == True:
matrix = np.linalg.inv(matrix)
return matrix
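# Example (sketch; hypothetical filename):
# >>> fwd = get_matrixfromants("genaff.mat")               # forward affine
# >>> inv = get_matrixfromants("genaff.mat", invert=True)  # inverted affine
# >>> fwd.shape
# (4, 4)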
def make_chicken_csv(coord, input="ras", output_file=None, vol=0.343):
"""make_chicken_csv
This function creates a .csv-file like the chicken.csv example from ANTs to warp a coordinate using a transformation file. ANTs assumes the input coordinate to be LPS, but this function
can deal with RAS-coordinates too. (see https://github.com/stnava/chicken for the reason of this function's name)
Parameters
----------
coord: np.ndarray
numpy array containing the three coordinates in x,y,z direction
input: str
specify whether your coordinates uses RAS or LPS convention (default is RAS, and will be converted to LPS to create the file)
output_file: str
path-like string pointing to an output file (.csv!)
vol: float
volume of voxels (pixdim_x*pixdim_y*pixdim_z). If you're using the standard 0.7 MP2RAGE, the default vol will be ok
Returns
----------
str
path pointing to the `csv`-file containing the coordinate
Example
----------
>>> make_chicken_csv(np.array([-16.239,-67.23,-2.81]), output_file="sub-001_space-fs_desc-lpi.csv")
"sub-001_space-fs_desc-lpi.csv"
"""
if len(coord) > 3:
coord = coord[:3]
if input.lower() == "ras":
# ras2lps
LPS = np.array([[-1,0,0],
[0,-1,0],
[0,0,1]])
coord = LPS @ coord
# rows = ["x,y,z,t,label,mass,volume,count", f"{coord[0]},{coord[1]},{coord[2]},0,1,1,{vol},1"]
with open(output_file, "w") as target:
writer = csv.writer(target, delimiter=",")
writer.writerow(["x","y","z","t","label","mass","volume","count"])
writer.writerow([coord[0],coord[1],coord[2],0,1,1,vol,1])
return output_file
def read_chicken_csv(chicken_file, return_type="lps"):
"""read_chicken_csv
Function to get at least the coordinates from a csv file used with antsApplyTransformsToPoints. (see https://github.com/stnava/chicken for the reason of this function's name)
Parameters
----------
chicken_file: str
path-like string pointing to an input file (.csv!)
return_type: str
specify the coordinate system that the output should be in
Returns
----------
numpy.ndarray
(3,) array containing the coordinate in `chicken_file`
Example
----------
>>> read_chicken_csv("sub-001_space-fs_desc-lpi.csv")
array([-16.239,-67.23,-2.81])
"""
contents = pd.read_csv(chicken_file)
coord = np.squeeze(contents.iloc[:,0:3].values)
if return_type.lower() == "lps":
return coord
elif return_type.lower() == "ras":
# ras2lps
LPS = np.array([[-1,0,0],
[0,-1,0],
[0,0,1]])
return LPS@coord
def fix_slicetiming(json_dir, TR=1.5, slc=60):
"""fix_slicetiming
Function to fix the slicetiming in json file. Assumes there already is a key called SliceTiming in the json files. You'll only need to specify the directory the json-files are in, the TR, (default = 1.5), and the number of slices (default = 60)
Parameters
----------
json_dir: str
path to folder containing json files
TR: float
repetition time
slc: int
number of slices
Returns
----------
str
updated json-file
Example
----------
>>> fix_slicetiming('path/to/folder/with/json', TR=1.5, slc=60)
"""
op = os.listdir(json_dir)
for ii in op:
if ii.endswith('.json'):
with open(opj(json_dir,ii)) as in_file:
data = json.load(in_file)
data['SliceTiming'] = list(np.tile(np.linspace(0, TR, int(slc/3), endpoint=False), 3))
with open(opj(json_dir,ii), 'w') as out_file:
json.dump(data, out_file, indent=4)
class VertexInfo:
""" VertexInfo
This object reads a .csv file containing relevant information about the angles, vertex position, and normal vector.
Parameters
----------
infofile: str
path to the information file containing `best_vertices` in the filename
subject: str
subject ID as used in `SUBJECTS_DIR`
Returns
----------
attr
sets attributes in the class
"""
def __init__(self, infofile=None, subject=None, hemi="lh"):
self.infofile = infofile
self.data = pd.read_csv(self.infofile, index_col=0)
# try to set the index to hemi. It will throw an error if you want to set the index while there already is an index.
# E.g., initially we will set the index to 'hemi'. If we then later on read in that file again, the index is already
# set
try:
self.data = self.data.set_index('hemi')
except:
pass
if hemi == "lh" or hemi.lower() == "l" or hemi.lower() == "left":
self.hemi = "L"
elif hemi == "rh" or hemi.lower() == "r" or hemi.lower() == "right":
self.hemi = "R"
else:
self.hemi = "both"
if self.hemi == "both":
# check if arrays are in string format
for hemi in ["L", "R"]:
self.data['normal'][hemi] = string2float(self.data['normal'][hemi])
self.data['position'][hemi] = string2float(self.data['position'][hemi])
else:
self.data['normal'][self.hemi] = string2float(self.data['normal'][self.hemi])
self.data['position'][self.hemi] = string2float(self.data['position'][self.hemi])
self.subject = subject
def get(self, keyword, hemi='both'):
"""return values from dataframe given keyword. Can be any column name or 'prf' for pRF-parameters"""
keywords = np.array(self.data.columns)
if keyword == "prf":
if hemi == "both":
return {"lh": [self.data[ii]['L'] for ii in ['x', 'y', 'size', 'beta', 'baseline', 'r2']],
"rh": [self.data[ii]['R'] for ii in ['x', 'y', 'size', 'beta', 'baseline', 'r2']]}
elif hemi.lower() == "right" or hemi.lower() == "r" or hemi.lower() == "rh":
return [self.data[ii]['R'] for ii in ['x', 'y', 'size', 'beta', 'baseline', 'r2']]
elif hemi.lower() == "left" or hemi.lower() == "l" or hemi.lower() == "lh":
return [self.data[ii]['L'] for ii in ['x', 'y', 'size', 'beta', 'baseline', 'r2']]
else:
if keyword not in keywords:
raise ValueError(f"{keyword} does not exist in {keywords}")
if hemi == "both":
return {"lh": self.data[keyword]['L'],
"rh": self.data[keyword]['R']}
elif hemi.lower() == "right" or hemi.lower() == "r" or hemi.lower() == "rh":
return self.data[keyword]['R']
elif hemi.lower() == "left" or hemi.lower() == "l" or hemi.lower() == "lh":
return self.data[keyword]['L']
def make_binary_cm(color):
"""make_binary_cm
This function creates a custom binary colormap using matplotlib based on the RGB code specified. Especially useful if you want to overlay in imshow, for instance. These RGB values will be converted to range between 0-1 so make sure you're specifying the actual RGB-values. I like `https://htmlcolorcodes.com` to look up RGB-values of my desired color. The snippet of code used here comes from https://kbkb-wx-python.blogspot.com/2015/12/python-transparent-colormap.html
Parameters
----------
<color>: tuple, str
either hex-code with (!!) '#' or a tuple consisting of:
* <R> int | red-channel (0-255)
* <G> int | green-channel (0-255)
* <B> int | blue-channel (0-255)
Returns
----------
matplotlib.colors.LinearSegmentedColormap object
colormap to be used with `plt.imshow`
Example
----------
>>> cm = make_binary_cm((232,255,0))
>>> cm
<matplotlib.colors.LinearSegmentedColormap at 0x7f35f7154a30>
>>> cm = make_binary_cm("#D01B47")
>>> cm
>>> <matplotlib.colors.LinearSegmentedColormap at 0x7f35f7154a30>
"""
if isinstance(color, tuple):
(R,G,B) = color
elif isinstance(color, str):
color = ImageColor.getcolor(color, "RGB")
(R,G,B) = color
if R > 1:
R = R/255
if G > 1:
G = G/255
if B > 1:
B = B/255
colors = [(R,G,B,c) for c in np.linspace(0,1,100)]
cmap = mcolors.LinearSegmentedColormap.from_list('mycmap', colors, N=5)
return cmap
def percent_change(ts, ax):
"""convert timeseries to percent signal change via the nilearn method"""
return (ts / np.expand_dims(np.mean(ts, ax), ax) - 1) * 100
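# Example (sketch): for a (time x voxels) array `ts`, percent_change(ts, 0) expresses each
# voxel's timecourse as percent signal change relative to its mean over time.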
def select_from_df(df, expression="run = 1", index=True, indices=None):
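"""select_from_df
Select a subset of a dataframe. With `expression="ribbon"`, columns are selected positionally via `indices` (a (start, stop) tuple, or a list/array of column indices). Otherwise `expression` is one 'column operator value' string (e.g. "run = 1"), or a list of two such strings joined by a logical operator (e.g. ["run = 1", "and", "event_type = stim"]); the operators are parsed with :func:`str2operator`. If `index=True` and the input dataframe had a named index, that index is restored on the returned dataframe.
"""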
if expression == "ribbon":
if isinstance(indices, tuple):
return df.iloc[:,indices[0]:indices[1]]
elif isinstance(indices, list):
return df.iloc[:,indices]
elif isinstance(indices, np.ndarray):
return df.iloc[:,list(indices)]
else:
raise TypeError(f"Unknown type '{type(indices)}' for indices; must be a tuple of 2 values representing a range, or a list/array of indices to select")
else:
# fetch existing indices
idc = list(df.index.names)
if idc[0] != None:
reindex = True
else:
reindex = False
# sometimes throws an error if you're trying to reindex a non-indexed dataframe
try:
df = df.reset_index()
except:
pass
sub_df = df.copy()
if isinstance(expression, str):
expression = [expression]
if isinstance(expression, tuple) or isinstance(expression, list):
expressions = expression[::2]
operators = expression[1::2]
if len(expressions) == 1:
col1,operator1,val1 = expressions[0].split()
ops1 = str2operator(operator1)
if len(val1) == 1:
val1 = int(val1)
sub_df = sub_df.loc[ops1(sub_df[col1], val1)]
if len(expressions) == 2:
col1,operator1,val1 = expressions[0].split()
col2,operator2,val2 = expressions[1].split()
main_ops = str2operator(operators[0])
ops1 = str2operator(operator1)
ops2 = str2operator(operator2)
# check if we should interpret values individually as integers
if len(val1) == 1:
val1 = int(val1)
if len(val2) == 1:
val2 = int(val2)
sub_df = sub_df.loc[main_ops(ops1(sub_df[col1], val1), ops2(sub_df[col2], val2))]
# first check if we should do indexing
if index != None:
# then check if we actually have something to index
if reindex:
if idc[0] != None:
sub_df = sub_df.set_index(idc)
return sub_df
def split_bids_components(fname):
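"""split_bids_components
Split a BIDS-style filename on underscores and return a dictionary with the values of the BIDS-identifiers ('sub', 'ses', 'task', 'acq', 'rec', 'desc', 'run') that are present in the filename. 'run' is cast to an integer; if none of the identifiers are found, a message is printed and nothing is returned.
"""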
comp_list = fname.split('_')
comps = {}
ids = ['ses', 'task', 'acq', 'rec', 'sub', 'desc', 'run']
for el in comp_list:
for i in ids:
if i in el:
comp = el.split('-')[-1]
if i == "run":
comp = int(comp)
comps[i] = comp
if len(comps) != 0:
return comps
else:
print(f"Could not find any element of {ids} in {fname}")
def filter_for_nans(array):
"""filter out NaNs from an array"""
if np.isnan(array).any():
return np.nan_to_num(array)
else:
return array
def find_max_val(array):
"""find the index of maximum value given an array"""
return np.where(array == np.amax(array))[0]
def read_fs_reg(dat_file):
"""read_fs_reg
Read a `.dat`-formatted registration file from FreeSurfer
Parameters
----------
dat_file: str
path pointing to the registration file
Returns
----------
nump.ndarray
(4,4) numpy array containing the transformation
"""
with open(dat_file) as f:
d = f.readlines()[4:-1]
return np.array([[float(s) for s in dd.split() if s] for dd in d])
def random_timeseries(intercept, volatility, nr):
"""random_timeseries
Create a random timecourse by multiplying an intercept with a random Gaussian distribution.
Parameters
----------
intercept: float
starting point of timecourse
volatility: float
this factor is multiplied with the Gaussian distribution before multiplied with the intercept
nr: int
length of timecourse
Returns
----------
numpy.ndarray
array of length `nr`
Example
----------
>>> from linescanning import utils
>>> ts = utils.random_timeseries(1.2, 0.5, 100)
Notes
----------
Source: https://stackoverflow.com/questions/67977231/how-to-generate-random-time-series-data-with-noise-in-python-3
"""
time_series = [intercept, ]
for _ in range(nr):
time_series.append(time_series[-1] + intercept * random.gauss(0,1) * volatility)
return np.array(time_series[:-1])
def squeeze_generic(a, axes_to_keep):
"""squeeze_generic
Numpy squeeze implementation keeping <axes_to_keep> dimensions.
Parameters
----------
a: numpy.ndarray
array to be squeezed
axes_to_keep: tuple, range
tuple of axes to keep from original input
Returns
----------
numpy.ndarray
`axes_to_keep` from `a`
Example
----------
>>> a = np.random.rand(3,5,1)
>>> squeeze_generic(a, axes_to_keep=range(2)).shape
(3, 5)
Notes
----------
From: https://stackoverflow.com/questions/57472104/is-it-possible-to-squeeze-all-but-n-dimensions-using-numpy
"""
out_s = [s for i, s in enumerate(a.shape) if i in axes_to_keep or s != 1]
return a.reshape(out_s)
def find_intersection(xx, curve1, curve2):
"""find_intersection
Find the intersection coordinates given two functions using `Shapely`.
Parameters
----------
xx: numpy.ndarray
array describing the x-axis values
curve1: numpy.ndarray
array describing the first curve
curve2: numpy.ndarray
array describing the second curve
Returns
----------
tuple
x,y coordinates where *curve1* and *curve2* intersect
Raises
----------
ValueError
if no intersection coordinates could be found
Example
----------
See [refer to linescanning.prf.SizeResponse.find_stim_sizes]
"""
first_line = geometry.LineString(np.column_stack((xx, curve1)))
second_line = geometry.LineString(np.column_stack((xx, curve2)))
intersection = first_line.intersection(second_line)
try:
x_coord, y_coord = geometry.LineString(intersection).xy[0]
except:
raise ValueError("Could not find intersection between curves..")
return (x_coord, y_coord)
class CollectSubject:
"""CollectSubject
Simple class to fetch pRF-related settings given a subject. Collects the design matrix, settings, and target vertex information. The `ses`-flag decides from which session the pRF-parameters are to be used. You can either specify an *analysis_yaml* file containing information about a pRF-analysis, or specify *settings='recent'* to fetch the most recent analysis file in the pRF-directory of the subject. The latter is generally fine if you want information about the stimulus.
Parameters
----------
subject: str
subject ID as used throughout the pipeline
derivatives: str, optional
Derivatives directory, by default None.
cx_dir: str, optional
path to subject-specific pycortex directory
prf_dir: str, optional
subject-specific pRF directory, by default None. `derivatives` will be ignored if this flag is used
ses: int, optional
Source session of pRF-parameters to use, by default 1
analysis_yaml: str, optional
String pointing to an existing file, by default None.
hemi: str, optional
Hemisphere to extract target vertex from, by default "lh"
settings: str, optional
Fetch most recent settings file rather than `analysis_yaml`, by default None.
model: str, optional
This flag can be set to read in a specific 'best_vertex' file as the location parameters sometimes differ between a Gaussian and DN-fit.
Example
----------
>>> from linescanning import utils
>>> subject_info = utils.CollectSubject(subject, derivatives=<path_to_derivatives>, settings='recent', hemi="lh")
"""
def __init__(self, subject, derivatives=None, cx_dir=None, prf_dir=None, ses=1, analysis_yaml=None, hemi="lh", settings=None, model="gauss", correct_screen=True, verbose=True):
self.subject = subject
self.derivatives = derivatives
self.cx_dir = cx_dir
self.prf_dir = prf_dir
self.prf_ses = ses
self.hemi = hemi
self.model = model
self.analysis_yaml = analysis_yaml
self.correct_screen = correct_screen
self.verbose = verbose
if self.hemi == "lh" or self.hemi.lower() == "l" or self.hemi.lower() == "left":
self.hemi_tag = "L"
elif self.hemi == "rh" or self.hemi.lower() == "r" or self.hemi.lower() == "right":
self.hemi_tag = "R"
else:
self.hemi_tag = "both"
# set pRF directory
if self.prf_dir == None:
if derivatives != None:
self.prf_dir = opj(self.derivatives, 'prf', self.subject, f'ses-{self.prf_ses}')
# get design matrix, vertex info, and analysis file
if self.prf_dir != None:
self.design_fn = get_file_from_substring("vis_design.mat", self.prf_dir)
self.design_matrix = io.loadmat(self.design_fn)['stim']
self.func_data_lr = np.load(get_file_from_substring("avg_bold_hemi-LR.npy", self.prf_dir))
self.func_data_l = np.load(get_file_from_substring("avg_bold_hemi-L.npy", self.prf_dir))
self.func_data_r = np.load(get_file_from_substring("avg_bold_hemi-R.npy", self.prf_dir))
# load specific analysis file
if self.analysis_yaml != None:
with open(self.analysis_yaml) as file:
self.settings = yaml.safe_load(file)
try:
self.gauss_iter_pars = np.load(get_file_from_substring(["model-gauss", "stage-iter", "params"], self.prf_dir))
except:
pass
# load the most recent analysis file. This is fine for screens/stimulus information
if settings == "recent":
self.analysis_yaml = opj(self.prf_dir, sorted([ii for ii in os.listdir(self.prf_dir) if "desc-settings" in ii])[-1])
with open(self.analysis_yaml) as file:
self.settings = yaml.safe_load(file)
if self.cx_dir != None:
self.vert_fn = get_file_from_substring([self.model, "best_vertices.csv"], self.cx_dir)
self.vert_info = VertexInfo(self.vert_fn, subject=self.subject, hemi=self.hemi)
# fetch target vertex parameters
if hasattr(self, "vert_info"):
self.target_params = self.return_prf_params(hemi=self.hemi)
self.target_vertex = self.return_target_vertex(hemi=self.hemi)
# create pRF if settings were specified
if hasattr(self, "settings"):
self.prf_stim = stimulus.PRFStimulus2D(screen_size_cm=self.settings['screen_size_cm'], screen_distance_cm=self.settings['screen_distance_cm'], design_matrix=self.design_matrix,TR=self.settings['TR'])
self.prf_array = prf.make_prf(self.prf_stim, size=self.target_params[2], mu_x=self.target_params[0], mu_y=self.target_params[1])
try:
self.normalization_params_df = pd.read_csv(get_file_from_substring([f"hemi-{self.hemi_tag}", "normalization", "csv"], self.cx_dir), index_col=0)
self.normalization_params = np.load(get_file_from_substring([f"hemi-{self.hemi_tag}", "normalization", "npy"], self.cx_dir))
if self.correct_screen:
self.normalization_params = self.normalization_params*1.08
except:
self.normalization_params_df = None
self.normalization_params = None
if self.prf_dir != None:
self.modelling = prf.pRFmodelFitting(self.func_data_lr,
design_matrix=self.design_matrix,
settings=self.analysis_yaml,
verbose=self.verbose)
if self.model == "gauss":
if hasattr(self, "gauss_iter_pars"):
self.pars = self.gauss_iter_pars.copy()
else:
raise AttributeError("Could not find 'gauss_iter_pars' attribute")
else:
self.pars = self.normalization_params.copy()
self.modelling.load_params(self.pars, model=self.model, stage="iter")
def return_prf_params(self, hemi="lh"):
"""return pRF parameters from :class:`linescanning.utils.VertexInfo`"""
return self.vert_info.get('prf', hemi=hemi)
def return_target_vertex(self, hemi="lh"):
"""return the vertex ID of target vertex"""
return self.vert_info.get('index', hemi=hemi)
def target_prediction_prf(self, xkcd=False, line_width=1, freq_spectrum=None, save_as=None, **kwargs):
_, self.prediction, _ = self.modelling.plot_vox(vox_nr=self.target_vertex,
model=self.model,
stage='iter',
make_figure=True,
xkcd=xkcd,
title='pars',
transpose=True,
line_width=line_width,
freq_spectrum=freq_spectrum,
freq_type="fft",
save_as=save_as,
**kwargs)
class CurveFitter():
"""CurveFitter
Simple class to perform a quick curve fitting procedure on `y_data`. You can either specify your own function with `func`, or select a polynomial of order `order` (currently up until 3rd-order is included). Internally uses `lmfit.Model` to perform the fitting, allowing for access to confidence intervals.
Parameters
----------
y_data: np.ndarray
Data-points to perform fitting on
x: np.ndarray, optional
Array describing the x-axis, by default None. If `None`, we'll take `np.arange` of `y_data.shape[0]`.
func: <function> object, optional
Use custom function describing the behavior of the fit, by default None. If `none`, we'll assume either a linear or polynomial fit (up to 3rd order)
order: str, int, optional
Order of polynomial fit, by default "3rd". Can either be '1st'|1, '2nd'|2, or '3rd'|3
verbose: bool, optional
Print summary of fit, by default True
interpolate: str, optional
Method of interpolation for an upsampled version of the predicted data (default = 1000 samples)
Raises
----------
NotImplementedError
If `func=None` and no valid polynomial order (see above) was specified
Example
----------
>>> # imports
>>> from linescanning import utils
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> # define data points
>>> data = np.array([5.436, 5.467, 5.293, 0.99 , 2.603, 1.902, 2.317])
>>> # create instantiation of CurveFitter
>>> cf = utils.CurveFitter(data, order=3, verbose=False)
>>> # initiate figure with axis to be fed into LazyPlot
>>> fig, axs = plt.subplots(figsize=(8,8))
>>> # plot original data points
>>> axs.plot(cf.x, data, 'o', color="#DE3163", alpha=0.6)
>>> # plot upsampled fit with 95% confidence intervals as shaded error
>>> plotting.LazyPlot(cf.y_pred_upsampled,
>>> xx=cf.x_pred_upsampled,
>>> error=cf.ci_upsampled,
>>> axs=axs,
>>> color="#cccccc",
>>> x_label="x-axis",
>>> y_label="y-axis",
>>> title="Curve-fitting with polynomial (3rd-order)",
>>> set_xlim_zero=False,
>>> sns_trim=True,
>>> line_width=1,
>>> font_size=20)
>>> plt.show()
"""
def __init__(self, y_data, x=None, func=None, order=1, verbose=True, interpolate='linear'):
self.y_data = y_data
self.func = func
self.order = order
self.x = x
self.verbose = verbose
self.interpolate = interpolate
if self.func == None:
self.guess = True
if isinstance(self.order, int):
if self.order == 1:
self.pmodel = lmfit.models.LinearModel()
elif self.order == 2:
self.pmodel = lmfit.models.QuadraticModel()
else:
self.pmodel = lmfit.models.PolynomialModel(order=self.order)
elif isinstance(self.order, str):
if self.order == 'gauss' or self.order == 'gaussian':
self.pmodel = lmfit.models.GaussianModel()
elif self.order == 'exp' or self.order == 'exponential':
self.pmodel = lmfit.models.ExponentialModel()
else:
raise NotImplementedError(f"Model {self.order} is not implemented because I'm lazy..")
else:
self.guess = False
self.pmodel = lmfit.Model(self.func)
if self.x is None:
self.x = np.arange(self.y_data.shape[0])
# self.params = self.pmodel.make_params(a=1, b=1, c=1, d=1)
if self.guess:
self.params = self.pmodel.guess(self.y_data, self.x)
else:
self.params = self.pmodel.make_params(a=1, b=1, c=1, d=1)
self.result = self.pmodel.fit(self.y_data, self.params, x=self.x)
if self.verbose:
print(self.result.fit_report())
# create predictions & confidence intervals that are compatible with LazyPlot
self.y_pred = self.result.best_fit
self.x_pred_upsampled = np.linspace(self.x[0], self.x[-1], 1000)
self.y_pred_upsampled = self.result.eval(x=self.x_pred_upsampled)
self.ci = self.result.eval_uncertainty()
self.ci_upsampled = glm.resample_stim_vector(self.ci, len(self.x_pred_upsampled), interpolate=self.interpolate)
def first_order(x, a, b):
return a * x + b
def second_order(x, a, b, c):
return a * x + b * x**2 + c
def third_order(x, a, b, c, d):
return (a * x) + (b * x**2) + (c * x**3) + d
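# Note: first_order/second_order/third_order are template curves whose signature
# (x first, followed by the free parameters) matches what `lmfit.Model` expects;
# they can presumably be passed via the `func` argument of CurveFitter.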
class NideconvFitter():
"""NideconvFitter
Wrapper class around :class:`nideconv.GroupResponseFitter` to promote reproducibility, avoid annoyances with pandas indexing, and add flexibility when performing multiple deconvolutions in an analysis. Works fluently with :class:`linescanning.dataset.Dataset` and :func:`linescanning.utils.select_from_df`. Because our data format generally involves ~720 voxels, we can specify the range which represents the grey matter ribbon around our target vertex, e.g., `[355,364]`, and select that subset of the main functional dataframe to use as input for this class (see also example).
Main inputs are the dataframe with fMRI-data and the onset timings, followed by specific settings for the deconvolution. Ridge-regression is not yet available as a method, because 2D-dataframes aren't supported yet. This is a work-in-progress.
Parameters
----------
func: pd.DataFrame
Dataframe as per the output of :func:`linescanning.dataset.Datasets.fetch_fmri()`, containing the fMRI data indexed on subject, run, and t.
onsets: pd.DataFrame
Dataframe as per the output of :func:`linescanning.dataset.Datasets.fetch_onsets()`, containing the onset timings data indexed on subject, run, and event_type.
TR: float, optional
Repetition time, by default 0.105. Use to calculate the sampling frequency (1/TR)
confounds: pd.DataFrame, optional
Confound dataframe with the same format as `func`, by default None
basis_sets: str, optional
Type of basis sets to use, by default "fourier". Should be 'fourier' or 'fir'.
fit_type: str, optional
Type of minimization strategy to employ, by default "ols". Should be 'ols' or 'ridge' (though the latter isn't implemented properly yet)
n_regressors: int, optional
Number of regressors to use, by default 9
add_intercept: bool, optional
Fit the intercept, by default False
verbose: bool, optional
Print progress information during fitting, by default False
lump_events: bool, optional
If multiple events are present in the onset dataframe, we can lump the events together and consider all onset times as 1 event, by default False
interval: list, optional
Interval to fit the regressors over, by default [0,12]
Example
----------
>>> from linescanning import utils, dataset
>>> func_file
>>> ['sub-003_ses-3_task-SR_run-3_bold.mat',
>>> 'sub-003_ses-3_task-SR_run-4_bold.mat',
>>> 'sub-003_ses-3_task-SR_run-6_bold.mat']
>>> ribbon = [356,363]
>>> window = 19
>>> order = 3
>>>
>>> ## window 5 TR poly 2
>>> data_obj = dataset.Dataset(func_file,
>>> deleted_first_timepoints=50,
>>> deleted_last_timepoints=50,
>>> window_size=window,
>>> high_pass=True,
>>> tsv_file=exp_file,
>>> poly_order=order,
>>> use_bids=True)
>>>
>>> df_func = data_obj.fetch_fmri()
>>> df_onsets = data_obj.fetch_onsets()
>>>
>>> # pick out the voxels representing the GM-ribbon
>>> df_ribbon = utils.select_from_df(df_func, expression='ribbon', indices=ribbon)
>>> nd_fit = utils.NideconvFitter(df_ribbon,
>>> df_onsets,
>>> confounds=None,
>>> basis_sets='fourier',
>>> n_regressors=19,
>>> lump_events=False,
>>> TR=0.105,
>>> interval=[0,12],
>>> add_intercept=True,
>>> verbose=True)
Notes
---------
Several plotting options are available:
* `plot_average_per_event`: for each event, average over the voxels present in the dataframe
* `plot_average_per_voxel`: for each voxel, plot the response to each event
* `plot_hrf_across_depth`: for each voxel, fetch the peak HRF response and fit a 3rd-order polynomial to the points (utilizes :class:`linescanning.utils.CurveFitter`)
See also https://linescanning.readthedocs.io/en/latest/examples/nideconv.html for more details.
"""
def __init__(self, func, onsets, TR=0.105, confounds=None, basis_sets="fourier", fit_type="ols", n_regressors=9, add_intercept=False, verbose=False, lump_events=False, interval=[0,12], **kwargs):
self.func = func
self.onsets = onsets
self.confounds = confounds
self.basis_sets = basis_sets
self.fit_type = fit_type
self.n_regressors = n_regressors
self.add_intercept = add_intercept
self.verbose = verbose
self.lump_events = lump_events
self.TR = TR
self.fs = 1/self.TR
self.interval = interval
if self.lump_events:
self.lumped_onsets = self.onsets.copy().reset_index()
self.lumped_onsets['event_type'] = 'stim'
self.lumped_onsets = self.lumped_onsets.set_index(['subject', 'run', 'event_type'])
self.used_onsets = self.lumped_onsets.copy()
else:
self.used_onsets = self.onsets.copy()
# update kwargs
self.__dict__.update(kwargs)
# get the model
self.define_model()
# specify the events
self.define_events()
# fit
self.fit()
def timecourses_condition(self):
# get the condition-wise timecourses
if self.fit_type == "ols":
self.tc_condition = self.model.get_conditionwise_timecourses()
# get the standard error of mean
self.tc_sem = self.model.get_subjectwise_timecourses().groupby(level=['event type', 'covariate', 'time']).sem()
self.tc_std = self.model.get_subjectwise_timecourses().groupby(level=['event type', 'covariate', 'time']).std()
# rename 'event type' to 'event_type'
tmp = self.tc_std.reset_index().rename(columns={"event type": "event_type"})
self.tc_std = tmp.set_index(['event_type', 'covariate', 'time'])
tmp = self.tc_sem.reset_index().rename(columns={"event type": "event_type"})
self.tc_sem = tmp.set_index(['event_type', 'covariate', 'time'])
# get r2
self.rsq_ = self.model.get_rsq()
elif self.fit_type == "ridge":
# here we need to stitch stuff back together
if not hasattr(self, 'ridge_models'):
raise ValueError("Ridge regression not yet performed")
tc = []
rsq = []
for vox in list(self.ridge_models.keys()):
tc.append(self.ridge_models[vox].get_timecourses())
rsq.append(self.ridge_models[vox].get_rsq())
self.tc_condition = pd.concat(tc, axis=1)
self.rsq_ = | pd.concat(rsq, axis=1) | pandas.concat |
'''
NHL season scraper of single season player stats
Converts the data into a pandas dataframe, and generates /PG and /60 metrics
<NAME>
'''
import pandas as pd
from bs4 import BeautifulSoup as BS
import re
from urllib import request
import json
from googlesearch import search
from datetime import datetime
import numpy as np
# helper function: convert an 'MM:SS' time string into a float number of minutes
#str -> float
def time_convert(timestring):
if ":" in timestring:
time = timestring.split(":")
return int(time[0]) + int(time[1])/60
else:
return int(timestring)
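# e.g. time_convert("12:30") -> 12.5 (12 minutes and 30 seconds expressed in minutes)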
class NHLSeasonScraper():
def __init__(self):
self.player_data = pd.DataFrame()
self.freeagent_data = pd.DataFrame()
#scrape current team rosters (train set) from NHL API
def scraperosters(self,year):
'''
scrapes the rosters and creates a new dataframe (i.e. overwrites any existing one)
'''
year = str(int(year)-1) + year
team_ids = [i for i in range(1,11)] + [i for i in range(12,27)] + [28,29,30,52,53,54,55]
player_list = []
print("1. Scraping rosters...")
for team in team_ids:
#team rosters
url = "https://statsapi.web.nhl.com/api/v1/teams/" + str(team) + "/roster"
html = request.urlopen(url).read()
soup = BS(html,'html.parser')
site_json=json.loads(soup.text)
roster = site_json.get("roster")
#scraping the player stats
for player in roster:
player_dict = {}
if player["position"]["code"] != "G":
try:
url = "https://statsapi.web.nhl.com/api/v1/people/" + str(player['person']['id']) + "/stats?stats=statsSingleSeason&season=" + str(year)
html = request.urlopen(url).read()
soup = BS(html,'html.parser')
site_json=json.loads(soup.text)
player_dict = site_json.get("stats")[0].get('splits')[0].get("stat")
player_dict['id'] = player['person']['id']
player_dict['name'] = player['person']['fullName']
url = "https://statsapi.web.nhl.com/api/v1/people/" + str(player_dict['id'])
html = request.urlopen(url).read()
soup = BS(html,'html.parser')
site_json=json.loads(soup.text)
player_dict['position'] = site_json.get('people')[0].get("primaryPosition").get("abbreviation")
today = datetime.today()
bday = datetime.strptime(site_json.get('people')[0].get('birthDate'), '%Y-%m-%d')
player_dict['age'] = (today - bday).days/365
#scraping cap hits
url = "https://www.capfriendly.com/players/" + player_dict['name'].replace(" ", "-").lower()
html = request.urlopen(url).read()
soup = BS(html,'html.parser')
cap = soup.find("div",attrs={"class":"c"}, text=re.compile("Cap Hit:"))
dollars = int(cap.string[9:].replace("$","").replace(",",""))
millions = round(dollars/1000000,2)
player_dict['cap'] = "$" + str(millions) + "M"
if round(float(player_dict['cap'][1:-1])) == 0:
player_dict['cap'] = "RFA"
player_list.append(player_dict)
except:
pass
else:
pass
#reorder columns
print("2. Feature cleaning/engineering")
df = pd.DataFrame(player_list)
df['C'] = df['position'].apply(lambda k: 1 if k == "C" else 0)
df['LW'] = df['position'].apply(lambda k: 1 if k == "LW" else 0)
df['RW'] = df['position'].apply(lambda k: 1 if k == "RW" else 0)
df['D'] = df['position'].apply(lambda k: 1 if k == "D" else 0)
df['gp'] = df['games']
df['rounded_age'] = df['age'].apply(lambda k: round(np.floor(k)))
df = df[['id','name', 'position', "rounded_age", "games", 'cap'] + [col for col in df.columns if col not in ['id','name','position', "rounded_age","games",'cap']]]
#convert time to floats
df['powerPlayTimeOnIce'] = df['powerPlayTimeOnIce'].apply(lambda x: time_convert(x))
df['timeOnIce'] = df['timeOnIce'].apply(lambda x: time_convert(x))
df['evenTimeOnIce'] = df['evenTimeOnIce'].apply(lambda x: time_convert(x))
df['penaltyMinutes'] = df['penaltyMinutes'].apply(lambda x: time_convert(x))
df['shortHandedTimeOnIce'] = df['shortHandedTimeOnIce'].apply(lambda x: time_convert(x))
df['timeOnIcePerGame'] = df['timeOnIcePerGame'].apply(lambda x: time_convert(x))
df['evenTimeOnIcePerGame'] = df['evenTimeOnIcePerGame'].apply(lambda x: time_convert(x))
df['shortHandedTimeOnIcePerGame'] = df['shortHandedTimeOnIcePerGame'].apply(lambda x: time_convert(x))
df['powerPlayTimeOnIcePerGame'] = df['powerPlayTimeOnIcePerGame'].apply(lambda x: time_convert(x))
#feature engineering - per 60 metrics
df["assists_60"] = df["assists"]/(0.0000001+df['timeOnIce'])*60
df["goals_60"] = df["goals"]/(0.0000001+df['timeOnIce'])*60
df["pim_60"] = df["pim"]/(0.0000001+df['timeOnIce'])*60
df["shots_60"] = df["shots"]/(0.0000001+df['timeOnIce'])*60
df["hits_60"] = df["hits"]/(0.0000001+df['timeOnIce'])*60
df["powerPlayGoals_60"] = df["powerPlayGoals"]/(0.0000001+df['powerPlayTimeOnIce'])*60
df["powerPlayPoints_60"] = df["powerPlayPoints"]/(0.0000001+df['powerPlayTimeOnIce'])*60
df["gameWinningGoals_60"] = df["gameWinningGoals"]/(0.0000001+df['timeOnIce'])*60
df["overTimeGoals_60"] = df["overTimeGoals"]/(0.0000001+df['timeOnIce'])*60
df["shortHandedGoals_60"] = df["shortHandedGoals"]/(0.0000001+df['shortHandedTimeOnIce'])*60
df["shortHandedPoints_60"] = df["shortHandedPoints"]/(0.0000001+df['shortHandedTimeOnIce'])*60
df["blocked_60"] = df["blocked"]/(0.0000001+df['timeOnIce'])*60
df["points_60"] = df["points"]/(0.0000001+df['timeOnIce'])*60
df["shifts_60"] = df["shifts"]/(0.0000001+df['timeOnIce'])*60
self.player_data = df
print("3. Complete!")
#used to get list of viable free agent targets (i.e. test set) from CapFriendly
def scrapeFA(self, year):
#scrape names from capfriendly
year = str(int(year)+1)
print("1. Getting the FAs from Capfriendly...")
url = "https://www.capfriendly.com/browse/free-agents/" + year + "/caphit/all/all/ufa?hide=goalie-stats&limits=gp-10-90"
html = request.urlopen(url).read()
soup = BS(html,'html.parser')
table = soup.find('table')
table_rows = table.find_all("tr")
#get list of names
name_list = []
for tr in table_rows:
td = tr.find_all('td')
row = [tr.text for tr in td]
if row != [] and row[3] != "G":
name = row[0]
true_name = name.split(". ")[1]
name_list.append(true_name)
#now, use a google search to find player ids as the API for season stats is off
print("2. Obtaining player ids from Google...")
player_id_dict = {}
for name in name_list:
search_list = list(search(name + " nhl", stop=10))
final_url = [url for url in search_list if "www.nhl.com/player" in url][0]
player_id_dict[name] = final_url[-7:]
#now scrape from NHL API
print("3. Scraping FA data from NHL API...")
player_list = []
year_new = str(int(year)-2) + str(int(year)-1)
for player in player_id_dict:
url = "https://statsapi.web.nhl.com/api/v1/people/" + str(player_id_dict[player]) + "/stats?stats=statsSingleSeason&season=" + year_new
html = request.urlopen(url).read()
soup = BS(html,'html.parser')
site_json=json.loads(soup.text)
player_dict = site_json.get("stats")[0].get('splits')[0].get("stat")
player_dict['id'] = str(player_id_dict[player])
player_dict['name'] = player
url = "https://statsapi.web.nhl.com/api/v1/people/" + str(player_id_dict[player])
html = request.urlopen(url).read()
soup = BS(html,'html.parser')
site_json=json.loads(soup.text)
player_dict['position'] = site_json.get('people')[0].get("primaryPosition").get("abbreviation")
today = datetime.today()
bday = datetime.strptime(site_json.get('people')[0].get('birthDate'), '%Y-%m-%d')
player_dict['age'] = (today - bday).days/365
#no cap hit
player_dict['cap'] = "UFA"
player_list.append(player_dict)
#reorder columns
print("4. Feature cleaning/engineering")
df = | pd.DataFrame(player_list) | pandas.DataFrame |
"""This module contains function solely related to the estimation of the model."""
import shutil
import copy
import os
from statsmodels.tools.eval_measures import rmse as get_rmse
from scipy.optimize import minimize
import pandas as pd
import numpy as np
from trempy.shared.shared_auxiliary import get_optimal_compensations
from trempy.shared.shared_auxiliary import dist_class_attributes
from trempy.shared.shared_auxiliary import char_floats
from trempy.config_trempy import PREFERENCE_PARAMETERS
from trempy.config_trempy import NEVER_SWITCHERS
from trempy.custom_exceptions import MaxfunError
from trempy.simulate.simulate import simulate
from trempy.config_trempy import SMALL_FLOAT
from trempy.config_trempy import HUGE_FLOAT
from trempy.shared.clsBase import BaseCls
class StartClass(BaseCls):
"""This class manages all issues about the model estimation."""
def __init__(self, questions, m_optimal_obs, start_fixed,
start_utility_paras, version, **version_specific):
"""Init class."""
self.attr = dict()
self.attr['version'] = version
# Handle version-specific objects
if version in ['scaled_archimedean']:
# assert all(x in version_specific.keys() for x in ['marginals', 'upper'])
self.attr['marginals'] = version_specific['marginals']
self.attr['upper'] = version_specific['upper']
elif version in ['nonstationary']:
pass
# Initialization attributes
self.attr['start_utility_paras'] = start_utility_paras
self.attr['m_optimal_obs'] = m_optimal_obs
self.attr['start_fixed'] = start_fixed
self.attr['questions'] = questions
# Housekeeping attributes
self.attr['f_current'] = HUGE_FLOAT
self.attr['f_start'] = HUGE_FLOAT
self.attr['f_step'] = HUGE_FLOAT
self.attr['num_eval'] = 0
def evaluate(self, x_vals):
"""Evalute. This will be the criterion function."""
if self.attr['num_eval'] > 10:
return HUGE_FLOAT
version = self.attr['version']
if version in ['scaled_archimedean']:
marginals = self.attr['marginals']
upper = self.attr['upper']
version_specific = {'upper': upper, 'marginals': marginals}
elif version in ['nonstationary']:
version_specific = dict()
start_utility_paras = self.attr['start_utility_paras']
m_optimal_obs = self.attr['m_optimal_obs']
start_fixed = self.attr['start_fixed']
questions = self.attr['questions']
# Override non-fixed values in the para_obj with the xvals.
utility_cand = copy.deepcopy(start_utility_paras)
para_obj = utility_cand.attr['para_objs']
j = 0
nparas_econ = start_utility_paras.attr['nparas_econ']
for i in range(nparas_econ):
if start_fixed[i]:
continue
else:
para_obj[i].attr['value'] = x_vals[j]
j += 1
# Update para_obj in utility candidate
utility_cand.attr['para_objs'] = para_obj
m_optimal_cand = get_optimal_compensations(
version=version, paras_obj=utility_cand,
questions=questions, **version_specific)
m_optimal_cand = np.array([m_optimal_cand[q] for q in questions])
# We need to ensure that we only compare values if the mean is not missing.
np_stats = np.tile(np.nan, (len(questions), 2))
for i, _ in enumerate(questions):
np_stats[i, :] = [m_optimal_obs[i], m_optimal_cand[i]]
np_stats = np_stats[~np.isnan(np_stats).any(axis=1)]
fval = np.mean((np_stats[:, 0] - np_stats[:, 1]) ** 2)
# Update class attributes
self.attr['num_eval'] += 1
self._update_evaluation(fval, x_vals)
return fval
def _update_evaluation(self, fval, x_vals):
"""Update all attributes based on the new evaluation and write some information to files."""
self.attr['f_current'] = fval
# Determine special events
is_start = self.attr['num_eval'] == 1
is_step = fval < self.attr['f_step']
# Record information at start
if is_start:
self.attr['x_vals_start'] = x_vals
self.attr['f_start'] = fval
# Record information at step
if is_step:
self.attr['x_vals_step'] = x_vals
self.attr['f_step'] = fval
if self.attr['num_eval'] == 100:
raise MaxfunError
def get_automatic_starting_values(paras_obj, df_obs, questions, version, **version_specific):
"""Update the container for the parameters with the automatic starting values."""
def _adjust_bounds(value, bounds):
"""Adjust the starting values to meet the requirements of the bounds."""
lower, upper = bounds
if value <= bounds[0]:
value = lower + 2 * SMALL_FLOAT
elif value >= bounds[1]:
value = upper - 2 * SMALL_FLOAT
else:
pass
return value
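# e.g. _adjust_bounds(0.0, (0.01, 0.99)) returns a value nudged just inside the lower bound,
# so the optimizer never starts exactly on (or outside) the admissible interval.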
# During testing it might occur that we in fact run an estimation on a dataset that does not
# contain any interior observations for any question. This results in a failure of the
# automatic determination of the starting values and is thus ruled out here from the
# beginning. In that case, we simply use the starting values from the initialization file.
cond = df_obs['Compensation'].isin([NEVER_SWITCHERS])
df_mask = df_obs['Compensation'].mask(cond)
if df_mask.isnull().all():
return paras_obj
# We first get the observed average compensation from the data.
m_optimal_obs = [df_mask.loc[slice(None), q].mean() for q in questions]
m_optimal_obs = np.array(m_optimal_obs)
# Now we gather information about the utility parameters and prepare for the interface to the
# optimization algorithm.
# start_utility_paras = paras_obj.get_values('econ', 'all')[:5
start_paras, start_bounds, start_fixed = [], [], []
for label in PREFERENCE_PARAMETERS[version]:
value, is_fixed, bounds = paras_obj.get_para(label)
start_fixed += [is_fixed]
# Get list of labels that are not fixed
if is_fixed:
continue
start_paras += [value]
start_bounds += [bounds]
# We minimize the squared distance between the observed and theoretical average
# compensations. This is only a valid request if there are any free preference parameters.
if len(start_paras) > 0:
args = [questions, m_optimal_obs, start_fixed, copy.deepcopy(paras_obj), version]
start_obj = StartClass(*args, **version_specific)
try:
minimize(start_obj.evaluate, start_paras, method='L-BFGS-B', bounds=start_bounds)
except MaxfunError:
pass
start_utility = start_obj.get_attr('x_vals_step').tolist()
# We construct the relevant set of free economic starting values.
x_econ_free_start = []
for label in PREFERENCE_PARAMETERS[version] + questions:
value, is_fixed, bounds = paras_obj.get_para(label)
if is_fixed:
continue
else:
if label in PREFERENCE_PARAMETERS[version]:
x_econ_free_start += [_adjust_bounds(start_utility.pop(0), bounds)]
else:
cond = df_obs['Compensation'].isin([NEVER_SWITCHERS])
value = df_obs['Compensation'].mask(cond).loc[slice(None), label].std()
# If there are no individuals observed without truncation for a particular
# question, we start with 0.1.
if pd.isnull(value):
x_econ_free_start += [_adjust_bounds(0.1, bounds)]
else:
x_econ_free_start += [_adjust_bounds(value, bounds)]
paras_obj.set_values('econ', 'free', x_econ_free_start)
return paras_obj
def estimate_cleanup():
"""Ensure that we start the estimation with a clean slate."""
# We remove the directories that contain the simulated choice menus at the start.
for dirname in ['start', 'stop']:
if os.path.exists(dirname):
shutil.rmtree(dirname)
# We remove the information from earlier estimation runs.
for fname in ['est.trempy.info', 'est.trempy.log', '.stop.trempy.scratch']:
if os.path.exists(fname):
os.remove(fname)
def estimate_simulate(which, points, model_obj, df_obs):
"""Allow the user to easily simulate samples at the beginning and the end of the estimation."""
paras_obj, questions, version = \
dist_class_attributes(model_obj, 'paras_obj', 'questions', 'version')
if version in ['scaled_archimedean']:
upper, marginals = dist_class_attributes(model_obj, 'upper', 'marginals')
version_specific = {'upper': upper, 'marginals': marginals}
elif version in ['nonstationary']:
version_specific = dict()
m_optimal = get_optimal_compensations(version, paras_obj, questions, **version_specific)
os.mkdir(which)
os.chdir(which)
sim_model = copy.deepcopy(model_obj)
sim_model.attr['sim_file'] = which
sim_model.update('optim', 'free', points)
sim_model.write_out(which + '.trempy.ini')
simulate(which + '.trempy.ini')
compare_datasets(which, df_obs, questions, m_optimal)
os.chdir('../')
def compare_datasets(which, df_obs, questions, m_optimal):
"""Compare the estimation dataset with a simulated one using the estimated parameter vector."""
df_sim = | pd.read_pickle(which + '.trempy.pkl') | pandas.read_pickle |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-d matrix with shape (2, 3) used as input. `empty` makes sized
        # objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
        # an all-masked frame is all NaN, so elementwise frame == frame is False everywhere
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
        data = OrderedDict()
import numpy as np
import pandas as pd
from src.core import print_label
import streamlit as st
from streamlit_autorefresh import st_autorefresh
import os
import src
from datetime import datetime
def main():
PATH = str(os.path.realpath('..')) + '/HopeCheckIn/'
DATA_PATH = PATH + 'data/'
st.set_page_config(layout="wide")
st.title("Hope Anglican Welcome Team Print Station")
# Run the autorefresh about every 2000 milliseconds (2 seconds) and stop
# after it's been refreshed 100 times.
st_autorefresh(interval=2000, limit=1800, key="counter")
df_people = src.load_table_no_cache("s3://hope-bucket/all_people_directory.csv")
    df_checked_in = df_people[pd.to_datetime(df_people["Checked In"]).notna()]  # assumed filter; the original line was truncated after the to_datetime call
# --------------------------------------------------------------------#
# --------------------------------------------------------------------#
# ---------- Made by <NAME> @ircam on 11/2015
# ---------- Analyse audio and return soudn features
# ---------- to us this don't forget to include these lines before your script:
# ----------
# ----------
# --------------------------------------------------------------------#
# --------------------------------------------------------------------#
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from pandas import DataFrame
import os
#sdif
import eaSDIF
import sys
from fileio.sdif.FSdifLoadFile import FSdifLoadFile
from six.moves import range
def get_f0_info(f0file) :
"""
load f0 from ascii or SDIF file
return tuple of np.arrays as follows
return f0times, f0harm, f0val
    f0harm will be None if the input file is in ASCII format
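    example (hypothetical file path):
        f0times, f0harm, f0val = get_f0_info("voice_f0.sdif")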
"""
try:
(f0times, f0data) = FSdifLoadFile(f0file)
f0times = np.array(f0times)
dd = np.array(f0data)
if len(dd.shape) > 1 and dd.shape[1] > 2 :
f0harm = dd[:,2]
else:
f0harm = None
f0val = np.array(f0data)[:,0]
except RuntimeError as rr :
print("failed reading "+f0file+" as sdif try reading as txt!")
f0times_data = np.loadtxt(f0file)
f0times = np.array(f0times_data)[:,0]
f0val = np.array(f0times_data)[:,1]
f0harm = None
return f0times, f0harm, f0val
def get_nb_formants(formant_file):
"""
get the number of formants of an sdif file
"""
try:
ent = eaSDIF.Entity()
ent.OpenRead(formant_file)
frame = eaSDIF.Frame()
        # take the length of the fifth matrix; sometimes the first ones don't have the correct number of formants
ent.ReadNextSelectedFrame(frame)
ent.ReadNextSelectedFrame(frame)
ent.ReadNextSelectedFrame(frame)
ent.ReadNextSelectedFrame(frame)
ent.ReadNextSelectedFrame(frame)
try :
mat = frame.GetMatrixWithSig("1RES")
except IndexError :
pass
par_mat = mat.GetMatrixData()
return len(par_mat)
except EOFError :
pass
return 0
def get_formants_info(formant_file):
"""
load formant_file from SDIF file
    return an array of pandas data frames with the formants in the sdif file
return: Array of pandas data frames with formants
"""
ts = [] # analysis times
cols_names = ("Frequency", "Amplitude", "Bw", "Saliance")
nb_formants = get_nb_formants(formant_file)
try:
formants = []
for i in range(nb_formants):
            formant = DataFrame(columns=cols_names)
# -*- encoding: utf-8 -*-
import os
from collections import defaultdict
from copy import deepcopy
from typing import Union, Any, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
from frozendict import frozendict
from sklearn.preprocessing import LabelEncoder
from autoflow.constants import AUXILIARY_FEATURE_GROUPS, NAN_FEATURE_GROUPS, UNIQUE_FEATURE_GROUPS
from autoflow.data_container import DataFrameContainer
from autoflow.data_container import NdArrayContainer
from autoflow.data_container.base import get_container_data
from autoflow.utils.data import is_nan, is_cat, is_highR_nan, to_array, is_highC_cat, is_date, is_text, \
is_target_need_label_encode
from autoflow.utils.dataframe import get_unique_col_name
from autoflow.utils.klass import StrSignatureMixin
from autoflow.utils.logging_ import get_logger
from autoflow.utils.ml_task import MLTask, get_ml_task_from_y
from autoflow.workflow.components.utils import stack_Xs
def pop_if_exists(df: Union[pd.DataFrame, DataFrameContainer], col: str) -> Optional[pd.DataFrame]:
if df is None:
return None
if isinstance(df, DataFrameContainer):
df = df.data
if col in df.columns:
return df.pop(col)
else:
return None
class DataManager(StrSignatureMixin):
'''
    DataManager is a dataset manager that stores the pattern (schema and feature groups) of a dataset.
'''
def __init__(
self,
resource_manager=None,
X_train: Union[pd.DataFrame, DataFrameContainer, np.ndarray, None, str] = None,
y_train: Union[pd.Series, np.ndarray, None] = None,
X_test: Union[pd.DataFrame, DataFrameContainer, np.ndarray, None, str] = None,
y_test: Union[pd.Series, np.ndarray, None] = None,
dataset_metadata: Dict[str, Any] = frozendict(),
column_descriptions: Dict[str, Union[List[str], str]] = frozendict(),
highR_nan_threshold: float = 0.5,
highC_cat_threshold: int = 4,
consider_ordinal_as_cat=False,
upload_type="fs"
):
'''
Parameters
----------
X_train: :class:`numpy.ndarray` or :class:`pandas.DataFrame`
y_train: :class:`numpy.ndarray`
X_test: :class:`numpy.ndarray` or :class:`pandas.DataFrame`
y_test: :class:`numpy.ndarray`
dataset_metadata: dict
column_descriptions: dict
``column_descriptions`` is a dict, key is ``feature_group``,
value is column (column name) or columns (list of column names).
This is a list of some frequently-used built-in ``feature_group``
* ``id`` - id of this table.
* ``ignore`` - some columns which contains irrelevant information.
* ``target`` - column in the dataset is what your model will learn to predict.
* ``nan`` - Not a Number, a column contain missing values.
* ``num`` - numerical features, such as [1, 2, 3].
* ``cat`` - categorical features, such as ["a", "b", "c"].
* ``num_nan`` - numerical features contains missing values. such as [1, 2, NaN].
* ``cat_nan`` - categorical features contains missing values. such as ["a", "b", NaN].
            * ``highR_nan`` - columns with a high NaN ratio. Explained in :class:`autoflow.hdl.hdl_constructor.HDL_Constructor`
            * ``lowR_nan`` - columns with a low NaN ratio. Explained in :class:`autoflow.hdl.hdl_constructor.HDL_Constructor`
            * ``highC_cat`` - high-cardinality categorical columns. Explained in :class:`autoflow.hdl.hdl_constructor.HDL_Constructor`
            * ``lowR_cat`` - low-cardinality categorical columns. Explained in :class:`autoflow.hdl.hdl_constructor.HDL_Constructor`
highR_nan_threshold: float
            high NaN-ratio threshold; you can find examples and practice in :class:`autoflow.hdl.hdl_constructor.HDL_Constructor`
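        Examples
        --------
        A minimal sketch (hypothetical DataFrame and column names) showing how
        columns are mapped to feature groups; the target column lives inside
        ``X_train`` itself::
            dm = DataManager(
                X_train=df_train,
                column_descriptions={
                    "id": "sample_id",
                    "num": ["age", "income"],
                    "cat": ["city"],
                    "target": "label",
                },
                highR_nan_threshold=0.5,
            )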
'''
self.upload_type = upload_type
from autoflow.resource_manager.base import ResourceManager
self.logger = get_logger(self)
if resource_manager is None:
self.logger.warning(
"In DataManager __init__, resource_manager is None, create a default local resource_manager.")
resource_manager = ResourceManager()
self.resource_manager: ResourceManager = resource_manager
self.resource_manager = resource_manager
self.highC_cat_threshold = highC_cat_threshold
self.consider_ordinal_as_cat = consider_ordinal_as_cat
dataset_metadata = dict(dataset_metadata)
self.highR_nan_threshold = highR_nan_threshold
self.dataset_metadata = dataset_metadata
self.column_descriptions = dict(column_descriptions)
# --load data to container---------------------------------
self.X_test, self.input_test_hash = self.parse_data_container("TestSet", X_test, y_test)
        # the train set is parsed last, so its column_descriptions take precedence
self.X_train, self.input_train_hash = self.parse_data_container("TrainSet", X_train, y_train)
# --migrate column descriptions------------------------------
# if X is dataset_id , remote data_container's column_descriptions will assigned to final_column_descriptions
if self.final_column_descriptions is not None:
self.column_descriptions = deepcopy(self.final_column_descriptions)
# --column descriptions------------------------------
self.parse_column_descriptions()
        # note: feature_groups and columns are no longer matched one-to-one here, since auxiliary feature groups have been dropped
# ---check target-----------------------------------------------------
assert "target" in self.column_descriptions
self.target_col_name = self.column_descriptions["target"]
        # todo: handle the case of predicting on the test set
# --final column descriptions------------------------------
        # neither user-defined column descriptions nor remotely downloaded ones should contain nan entries
# update `column2essential_feature_groups` to `final_column_descriptions`
if self.final_column_descriptions is None:
final_column_descriptions = defaultdict(list)
final_column_descriptions.update(self.column_descriptions)
            # first, normalize non-unique feature groups to lists
for feat_grp, cols in final_column_descriptions.items():
if feat_grp not in UNIQUE_FEATURE_GROUPS:
if isinstance(cols, str):
final_column_descriptions[feat_grp] = [cols]
            # then apply the updates
for column, essential_feature_group in self.column2feature_groups.items():
if column not in final_column_descriptions[essential_feature_group]:
final_column_descriptions[essential_feature_group].append(column)
self.final_column_descriptions = final_column_descriptions
self.final_column_descriptions = dict(self.final_column_descriptions)
# ---set column descriptions, upload to dataset-----------------------------------------------------
if self.X_train is not None:
self.X_train.set_column_descriptions(self.final_column_descriptions)
self.X_train.upload(self.upload_type)
self.logger.info(f"TrainSet's DataSet ID = {self.X_train.dataset_id}")
if self.X_test is not None:
self.X_test.set_column_descriptions(self.final_column_descriptions)
self.X_test.upload(self.upload_type)
self.logger.info(f"TestSet's DataSet ID = {self.X_test.dataset_id}")
# ---origin hash-----------------------------------------------------
self.train_set_id = self.X_train.get_hash() if self.X_train is not None else ""
self.test_set_id = self.X_test.get_hash() if self.X_test is not None else ""
if self.input_train_hash:
assert self.input_train_hash == self.train_set_id
if self.input_test_hash:
assert self.input_test_hash == self.test_set_id
# ---pop auxiliary columns-----------------------------------------------------
y_train, y_test = self.pop_auxiliary_feature_groups()
        # --verify that X_train and X_test have identical columns
if self.X_test is not None and self.X_train is not None:
assert self.X_train.shape[1] == self.X_test.shape[1]
assert np.all(self.X_train.columns == self.X_test.columns)
        # --set feature_groups--
if self.X_train is not None:
self.X_train.set_feature_groups(self.feature_groups)
if self.X_test is not None:
self.X_test.set_feature_groups(self.feature_groups)
        # --set parameters--
y_train = to_array(y_train)
y_test = to_array(y_test)
# encode label
assert y_train is not None, ValueError(f"{self.target_col_name} does not exist!")
self.label_encoder = None
if is_target_need_label_encode(y_train):
self.label_encoder = LabelEncoder()
y_train = self.label_encoder.fit_transform(y_train)
y_test = self.encode_label(y_test)
if y_train is not None:
y_train = NdArrayContainer("TrainLabel", dataset_instance=y_train,
resource_manager=self.resource_manager)
y_train.upload()
if y_test is not None:
y_test = NdArrayContainer("TestLabel", dataset_instance=y_test,
resource_manager=self.resource_manager)
y_test.upload()
self.ml_task: MLTask = get_ml_task_from_y(y_train.data)
self.y_train = y_train
self.y_test = y_test
self.train_label_id = self.y_train.get_hash() if self.y_train is not None else ""
self.test_label_id = self.y_test.get_hash() if self.y_test is not None else ""
if self.X_train is not None:
self.columns = self.X_train.columns
else:
self.columns = self.X_test.columns
        # todo: a user-defined validation set could be specified via RandomShuffle or mlxtend
        # fixme: multilabel is not supported
def encode_label(self, y):
if self.label_encoder is not None:
try:
return self.label_encoder.transform(y) if y is not None else None
except Exception as e:
return y
return y
def pop_auxiliary_feature_groups(self):
y_train = pop_if_exists(self.X_train, self.target_col_name)
y_test = pop_if_exists(self.X_test, self.target_col_name)
        # --determine the id column--
if "id" in self.column_descriptions:
            id_col = self.column_descriptions["id"]  # there should be only one id column
self.train_id_seq = pop_if_exists(self.X_train, id_col)
self.test_id_seq = pop_if_exists(self.X_test, id_col)
        # --determine the ignored columns--
if "ignore" in self.column_descriptions:
ignore_cols = self.column_descriptions["ignore"]
if not isinstance(ignore_cols, (list, tuple)):
ignore_cols = [ignore_cols]
for ignore_col in ignore_cols:
pop_if_exists(self.X_train, ignore_col)
pop_if_exists(self.X_test, ignore_col)
return y_train, y_test
def concat_y(self, X, y):
# if isinstance(y,)
if isinstance(y, (np.ndarray, pd.Series)):
            # add y as the target column, merge it with X, and update column_descriptions
y = pd.Series(y)
target_col_name = get_unique_col_name(X.columns, "target")
            y = pd.DataFrame(y, columns=[target_col_name])
# -*- coding: utf-8 -*-#
#-------------------------------------------------------------------------------
# Name: HLK-20C02(4℃)
# Description:
# Author: shichao
# Date: 2019/7/17
#-------------------------------------------------------------------------------
import os
import numpy as np
import pandas as pd
import time
import datetime
# core setup: raise pandas' max display columns/width so printed frames are not truncated with ellipses
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', 1000)
# read the data
def read_df(file_name):
file_dir = './raw_data/'
file_path = os.path.join(file_dir, file_name)
file_path = open(file_path)
df_file = pd.read_csv(file_path)
df_file['DateTime'] = pd.to_datetime(df_file['DateTime'])
df_file = df_file.sort_values(by='DateTime')
return df_file
# add features to x: date_ymd (year-month-day) and a Unix timestamp in seconds derived from the full datetime
def add_features(df_file):
date_list = []
for date in list(df_file['DateTime']):
date_str = str(date).split(' ')[0]
date_list.append(date_str)
df_file['date_ymd'] = date_list
time_stamp_list = []
for time_stamp in list(df_file['DateTime']):
time_s = time.mktime(time.strptime(str(time_stamp), '%Y-%m-%d %H:%M:%S'))
# time_s = time.mktime(time.strptime(time_stamp, '%Y/%m/%d %H:%M:%S'))
time_stamp_list.append(time_s)
df_file['time_stamp'] = time_stamp_list
# date_ymdh_list = []
# for time_stamp in list(df_file['DateTime']):
# date_ymdh = str(time_stamp).split(':')[0]
# date_ymdh_list.append(date_ymdh)
# df_file['date_ymdh'] = date_ymdh_list
return df_file
# plot: after filling missing values, plot and compare with the original series
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def draw_plot(dataset_12_plot):
#date_time = 'date_ymd'
temperature = 'Temperature'
date_ymd = 'date_ymd'
plt.figure(1, figsize=(26, 13))
    # get the current axes
ax = plt.gca()
#plt.plot(dataset_12_plot[date_time], dataset_12_plot[temperature], 'red', marker='o')
plt.plot(dataset_12_plot[temperature], 'red', marker='o')
for label in ax.get_xticklabels():
        # rotate the x-axis labels by 30°
label.set_rotation(30)
label.set_horizontalalignment('right')
    # show the legend
plt.legend(loc='upper left')
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))  # set the time label display format
    ax.xaxis.set_major_locator(mdates.HourLocator())  # x-axis ticks at hourly intervals
png_dir = './repair_png/'
date_ymd = str(dataset_12_plot[date_ymd][1])
png_path = os.path.join(png_dir, date_ymd + '_' + str(len(dataset_12_plot)) + '.png')
plt.savefig(png_path)
plt.show()
# interpolate missing temperature readings
def repair_tem(df_data_by_date, sample_frequency):
"""
:param df_data_by_date:
    :param sample_frequency: sampling frequency (expected interval between readings, in minutes)
:return:
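    Gap-filling rule (as implemented below): if two consecutive readings are more
    than (sample_frequency + 3) minutes apart, the gap is filled with the mean of
    the two neighbouring readings, one value per missing sample; runs missing at
    the start or end of the day are padded with the daily mean temperature.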
"""
    # drop duplicates; by default all columns are assumed to contain no duplicate records
#df_data_by_date.duplicated()
df_data_by_date = df_data_by_date.reset_index(drop=True)
term_list_1 = list(df_data_by_date['Temperature'])
term_list_date = list(df_data_by_date['date_ymd'])
n = len(term_list_1)
date_list = []
temp_temp_list = []
    # sampling frequency
    time_n = 3  # time interval, 3 minutes
for i in range(n):
if (i >= 0 and i + 1 <= n - 1):
temp_temp_list.append(term_list_1[i])
date_list.append(term_list_date[i])
# 对中间缺失的温度值进行插补
if (df_data_by_date.loc[i + 1]['time_stamp'] - df_data_by_date.loc[i]['time_stamp'] >= (sample_frequency + time_n) * 60):
#n_temp = int(np.ceil((df_data_by_date.loc[i + 1]['time_stamp'] - df_data_by_date.loc[i]['time_stamp']) / (sample_frequency * 60.0)))
                # round to the nearest integer
n_temp = int(((df_data_by_date.loc[i + 1]['time_stamp'] - df_data_by_date.loc[i]['time_stamp']) / (sample_frequency * 60.0)) + 0.5)
for j in range(n_temp - 1):
temp_temp = (df_data_by_date.loc[i + 1]['Temperature'] + df_data_by_date.loc[i]['Temperature']) / 2
temp_temp_list.append(temp_temp)
date_list.append(term_list_date[-1])
temp_temp_list.append(term_list_1[-1])
date_list.append(term_list_date[-1])
    # if the run of missing values at the start is less than 30%, fill it with the mean
df_data_by_date = df_data_by_date.reset_index(drop=True)
#date_ = term_list_date[1]
    # check whether the series is still shorter than a full day after mid-series filling
if(len(temp_temp_list) < int(24*60/sample_frequency)):
        # missing values at the start
continue_list = []
        time_s = time.mktime(time.strptime(str(term_list_date[1]), '%Y-%m-%d'))  # start of the day, 00:00:00
if(df_data_by_date.loc[0]['time_stamp'] - time_s > (sample_frequency + time_n) * 60):
            # the start of the day is missing
n_temp = int(np.ceil((df_data_by_date.loc[0]['time_stamp'] - time_s) / (sample_frequency * 60.0)))
for j in range(n_temp - 1):
#for j in range(int(24*60/sample_frequency) - len(term_list_1)):
continue_list.append(round(np.mean(term_list_1), 2))
date_list.append(term_list_date[-1])
continue_list.extend(temp_temp_list)
temp_temp_list = continue_list
        # missing values at the end
        # get the next day's date
date_end = pd.to_datetime(term_list_date[1]) + datetime.timedelta(days=1)
time_end = time.mktime(time.strptime(str(date_end), '%Y-%m-%d %X'))
if(time_end - df_data_by_date.loc[len(df_data_by_date)-1]['time_stamp'] >= (sample_frequency + time_n) * 60):
            # the end of the day is missing
for j in range(int(24*60/sample_frequency) - len(term_list_1)):
continue_list.append(round(np.mean(term_list_1), 2))
date_list.append(term_list_date[-1])
temp_temp_list.extend(continue_list)
df_repair = pd.DataFrame()
df_repair['date_ymd'] = date_list
df_repair['Temperature'] = temp_temp_list
return df_repair
# piecewise-constant approximation of the temperature series (downsampling)
def constant_appro_low(df_data_by_date_tem):
df_data_by_date_tem = df_data_by_date_tem.reset_index(drop=True)
df_appro = pd.DataFrame()
date_index = pd.date_range(end = '01/01/2019', periods=len(df_data_by_date_tem), freq='D')
temperature = 'Temperature'
date_ymd = 'date_ymd'
df_appro[temperature] = df_data_by_date_tem[temperature]
df_appro.index = date_index
    # downsample by taking the mean
    df_appro_low = pd.DataFrame()
import argparse
import pandas as pd
parser = argparse.ArgumentParser(description="Automated normalization of proteolysis products in PeakView SWATH output")
parser.add_argument("-s",
"-swath",
type=str,
help="Filepath to PeakView peptide output file in csv format", dest="s")
parser.add_argument("-f",
"-fasta",
type=str,
help="Filepath to fasta file containing original sequences of the protein before being digested", dest="f")
parser.add_argument("-o",
"-output",
type=str,
help="Filepath to output", dest="o")
args = parser.parse_args()
input_file = args.s
input_fasta = args.f
output_file = args.o
def fasta_reader(fasta):
df = []
with open(fasta, "rt") as fasta_file:
id = ""
seq = ""
for line in fasta_file:
line = line.strip()
if line.startswith(">"):
if id:
df.append([id[:], seq[:]])
stop = line.find(" ")
id = line[1:stop+1].strip()
seq = ""
else:
seq += line
if id:
df.append([id[:], seq[:]])
return pd.DataFrame(df, columns=["id", "sequence"])
def digest(seq):
tryptic = []
length = len(seq)
current_position = 0
for i in range(length):
if seq[i] in "KR":
tryptic.append([current_position, seq[current_position:i+1], i+1 - current_position])
current_position = i+1
final_seq = seq[current_position:length]
    if final_seq and final_seq[0] != "*":  # guard against an empty tail when the sequence ends in K/R
tryptic.append([current_position, seq[current_position:length-1], length-1-current_position])
if not tryptic:
tryptic.append([current_position, seq[current_position:length - 1], length - 1 - current_position])
return pd.DataFrame(tryptic, columns=["position", "seq", "length"])
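# Example (hypothetical sequence): digest("MKWVTFISLLFLFSSAYSR*") returns one row per
# tryptic peptide, here [0, "MK", 2] and [2, "WVTFISLLFLFSSAYSR", 17], splitting after
# each K/R and dropping the trailing "*" stop marker.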
if __name__ == "__main__":
df = pd.read_csv(input_file)
fasta = fasta_reader(input_fasta)
not_found = []
df = df[~(df["Peptide"].str.contains("[", regex=False))]
samples = df.columns[5:]
data = []
for i, g in df.groupby("Protein", sort=False):
print(i)
d = fasta[fasta["id"] == i]
print(d)
if d.empty:
print(i)
not_found.append(i)
else:
tryptic = digest(d["sequence"].values[0])
for i2, r in g.iterrows():
tryptic_locate = tryptic[tryptic["seq"].str.contains(r["Peptide"], regex=False)]
if not tryptic_locate.empty:
g.at[i2, "tryptic_seq"] = tryptic_locate["seq"].values[0]
g.at[i2, "tryptic_location"] = tryptic_locate["position"].values[0]
else:
print(r["Peptide"])
if "tryptic_location" in g.columns:
for i3, g2 in g.groupby("tryptic_location"):
total_dict = {k: g2[k].sum() for k in samples}
for i4, r2 in g2.iterrows():
for c in total_dict:
g2.at[i4, c] = g2.at[i4, c]/total_dict[c]
data.append(g2)
else:
print(d["sequence"].values[0])
#columns = [df.columns[0], "tryptic_seq", "tryptic_location"] + [c for c in df.columns[1:]]
#print(columns)
result = pd.concat(data, ignore_index=True)
result = result.astype({"tryptic_location": "int32"})
result = result.set_index(["Protein", "tryptic_seq", "tryptic_location", "Peptide"])
    with pd.ExcelWriter(output_file) as writer:
        result.to_excel(writer)  # assumed continuation; the original was truncated after opening the writer
import pandas as pd
import ray.tune
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.cloud_io import load as pl_load
import json
import pytorch_lightning as pl
import pandas as pd
import sklearn
from ray import tune
import numpy as np
import seaborn
import matplotlib.pyplot as plt
import argparse
import os
from ray.tune import CLIReporter
from ray.tune.schedulers import ASHAScheduler, PopulationBasedTraining
from ray.tune.integration.pytorch_lightning import TuneReportCallback
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.optim import SGD, Adam
from torchvision import transforms
import MLmodels as m
from ray.tune.integration.pytorch_lightning import TuneReportCheckpointCallback
from ray.tune.suggest.bayesopt import BayesOptSearch
class ATTENTION_VAE(pl.LightningModule):
def __init__(self,
config,
pretrained=True,
resnet_model=152,
image_channels=1,
hidden_dims=[8, 16, 32, 64, 128],
out_image_channels=1,
output_size=4,
fcl_layers=[]):
super(ATTENTION_VAE, self).__init__()
#Lightning Additions
self.criterion = m.SmoothCrossEntropyLoss(smoothing=0.01, reduction='sum')
self.eps = 1.0
self.replicas = 4
self.__dict__.update(locals())
optimizers = {'adam': Adam, 'sgd': SGD}
self.optimizer = optimizers['adam']
# hyperparameters
self.lr = config['lr']
self.batch_size = config['batch_size']
# for importing different versions of the data
self.datatype = config['datatype']
self.dr = config['dr']
kld = 1./self.batch_size
self.train_criterion = m.SymmetricMSE(1.0, 0.3, kld)
if 'B' in self.datatype and '20' not in self.datatype:
self.data_length = 40
else:
self.data_length = 20
self.training_data = None
self.validation_data = None
self.image_channels = image_channels
self.hidden_dims = hidden_dims
self.z_dim = config['z_dim']
self.out_image_channels = out_image_channels
self.encoder = None
self.decoder = None
self.pretrained = pretrained
# self.resnet_model = resnet_model
# if self.resnet_model == 18:
# resnet = models.resnet18(pretrained=self.pretrained)
# elif self.resnet_model == 34:
# resnet = models.resnet34(pretrained=self.pretrained)
# elif self.resnet_model == 50:
# resnet = models.resnet50(pretrained=self.pretrained)
# elif self.resnet_model == 101:
# resnet = models.resnet101(pretrained=self.pretrained)
# elif self.resnet_model == 152:
# resnet = models.resnet152(pretrained=self.pretrained)
self.conv = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=(3, 3), padding=(2, 2)), # 64@96*96
nn.ReLU(inplace=True),
)
self.encoder_block1 = self.encoder_block(
64, 48, (4,4), 1, (1,1))
self.encoder_atten1 = m.Self_Attention(48)
self.encoder_block2 = self.encoder_block(
48, 32, (4,4), 1, (1,1))
self.encoder_atten2 = m.Self_Attention(32)
self.encoder_block3 = self.encoder_block(
32, 16, (5, 15), (1,2), (1,1))
self.encoder_atten3 = m.Self_Attention(16)
# Add extra output channel if we are using Matt's physical constraint
if self.out_image_channels > 1:
self.hidden_dims = [
self.out_image_channels * x for x in self.hidden_dims
]
self.fc1 = self.make_fcn(128, self.z_dim, [128, 112], self.dr)
self.fc2 = self.make_fcn(128, self.z_dim, [128, 112], self.dr)
self.fc3 = self.make_fcn(self.z_dim, 128, [128, 128], self.dr)
self.fcn = self.make_fcn(self.z_dim, output_size, [128, 64], self.dr)
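        # fc1/fc2 project the flattened encoder features to the latent mean and
        # log-variance, fc3 maps a latent sample back to the decoder input size,
        # and fcn is the classification head applied to the latent code
        # (see bottleneck(), decode() and forward() below).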
self.decoder_block2 = self.decoder_block(
self.hidden_dims[4], self.hidden_dims[3], (4, 5), (1, 5), 0)
self.decoder_atten2 = m.Self_Attention(self.hidden_dims[3])
self.decoder_block3 = self.decoder_block(
self.hidden_dims[3], self.hidden_dims[2], (1, 2), (1, 2), 0)
self.decoder_atten3 = m.Self_Attention(self.hidden_dims[2])
self.decoder_block4 = self.decoder_block(
self.hidden_dims[2], self.hidden_dims[1], (1, 2), (1, 2), 0)
self.decoder_atten4 = m.Self_Attention(self.hidden_dims[1])
self.decoder_block5 = self.decoder_block(
self.hidden_dims[1], self.out_image_channels, (1, 1), (1, 1), 0)
self.decoder_atten5 = m.Self_Attention(self.hidden_dims[0])
# self.load_weights()
def encoder_block(self, dim1, dim2, kernel_size, stride, padding):
return nn.Sequential(
m.SpectralNorm(
nn.Conv2d(dim1, dim2, kernel_size=kernel_size,
stride=stride, padding=padding)
),
nn.BatchNorm2d(dim2),
nn.LeakyReLU()
)
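    # Each block wraps its convolution in m.SpectralNorm before batch norm and
    # LeakyReLU; spectral normalization constrains the layer's Lipschitz constant,
    # a common trick for stabilizing attention-based generative models.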
def decoder_block(self, dim1, dim2, kernel_size, stride, padding, sigmoid=False):
return nn.Sequential(
m.SpectralNorm(
nn.ConvTranspose2d(
dim1, dim2, kernel_size=kernel_size, stride=stride, padding=padding)
),
nn.BatchNorm2d(dim2),
nn.LeakyReLU() if not sigmoid else nn.Sigmoid()
)
def make_fcn(self, input_size, output_size, fcl_layers, dr):
if len(fcl_layers) > 0:
fcn = [
nn.Dropout(dr),
nn.Linear(input_size, fcl_layers[0]),
nn.BatchNorm1d(fcl_layers[0]),
torch.nn.LeakyReLU()
]
if len(fcl_layers) == 1:
fcn.append(nn.Linear(fcl_layers[0], output_size))
else:
for i in range(len(fcl_layers) - 1):
fcn += [
nn.Linear(fcl_layers[i], fcl_layers[i + 1]),
nn.BatchNorm1d(fcl_layers[i + 1]),
torch.nn.LeakyReLU(),
nn.Dropout(dr)
]
fcn.append(nn.Linear(fcl_layers[i + 1], output_size))
else:
fcn = [
nn.Dropout(dr),
nn.Linear(input_size, output_size)
]
if output_size > 1:
fcn.append(torch.nn.LogSoftmax(dim=1))
return nn.Sequential(*fcn)
def reparameterize(self, mu, logvar):
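        # Reparameterization trick: draw eps ~ N(0, I) and return z = mu + std * eps
        # with std = exp(0.5 * logvar), so the sampling step stays differentiable
        # with respect to mu and logvar.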
std = logvar.mul(0.5).exp_()
# return torch.normal(mu, std)
esp = torch.randn(*mu.size()).to(std.device)
z = mu + std * esp
return z
def bottleneck(self, h):
mu, logvar = self.fc1(h), self.fc2(h)
z = self.reparameterize(mu, logvar)
return z, mu, logvar
def encode(self, x):
h = self.conv(x)
h = self.encoder_block1(h)
h, att_map1 = self.encoder_atten1(h)
h = self.encoder_block2(h)
h, att_map2 = self.encoder_atten2(h)
h = self.encoder_block3(h)
h, att_map3 = self.encoder_atten3(h)
h = h.view(h.size(0), -1) # flatten
z, mu, logvar = self.bottleneck(h)
return h, z, mu, logvar, [att_map1, att_map2, att_map3]
def decode(self, z):
z = self.fc3(z)
z = z.view(z.size(0), self.hidden_dims[-1], 1, 1) # flatten/reshape
z = self.decoder_block2(z)
z, att_map2 = self.decoder_atten2(z)
z = self.decoder_block3(z)
z, att_map3 = self.decoder_atten3(z)
z = self.decoder_block4(z)
z, att_map4 = self.decoder_atten4(z)
z = self.decoder_block5(z)
return z, [att_map2, att_map3, att_map4]
def forward(self, x):
h, z, mu, logvar, encoder_att = self.encode(x)
out = self.fcn(z)
z, decoder_att = self.decode(z)
return out, z, mu, logvar
# def load_weights(self):
# # Load weights if supplied
# if os.path.isfile(self.weights):
# # Load the pretrained weights
# model_dict = torch.load(
# self.weights,
# map_location=lambda storage, loc: storage
# )
# self.load_state_dict(model_dict["model_state_dict"])
# return
#
# for p in self.parameters():
# if p.dim() > 1:
# nn.init.xavier_uniform_(p)
#
# return
#Lightning Methods
def configure_optimizers(self):
return self.optimizer(self.parameters(), lr=self.lr)
def prepare_data(self):
# import our data
train, validate, weights = m.get_rawdata(self.datatype, 10, 5, round=8)
_train = train.copy()
_validate = validate.copy()
# Assigns labels for learning
_train["binary"] = _train["affinity"].apply(m.bi_labelM)
#print(_train[_train["binary"] == 1].count())
#print(_train[_train["binary"] == 0].count())
_validate["binary"] = _validate["affinity"].apply(m.bi_labelM)
#print(_validate[_validate["binary"] == 1].count())
#print(_validate[_validate["binary"] == 0].count())
_weights = torch.FloatTensor(weights)
# instantiate loss criterion, need weights so put this here
self.criterion = m.SmoothCrossEntropyLoss(weight=_weights, smoothing=0.01, reduction='sum')
self.training_data = _train
self.validation_data = _validate
def train_dataloader(self):
# Data Loading
train_reader = m.NAReader(self.training_data, shuffle=True)
train_loader = torch.utils.data.DataLoader(
train_reader,
batch_size=self.batch_size,
# batch_size=self.batch_size,
collate_fn=m.my_collate,
num_workers=4,
# pin_memory=True,
shuffle=True
)
return train_loader
def training_step(self, batch, batch_idx):
seq, x, y = batch
# get output from the model, given the inputs
predictions, xp, mu, logvar = self(x)
xpp = torch.where(xp > 0.5, 1.0, 0.0)
recon_acc = (x == xpp).float().mean()
seq_acc = recon_acc.item()
loss = self.criterion(predictions, y)
vae_loss, bce, kld = self.train_criterion(x, xpp, mu, logvar)
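        # Anneal the mixing weight between the two objectives: _eps starts near 1
        # (training dominated by the VAE reconstruction/KLD term) and decays each
        # epoch, shifting the emphasis toward the classification loss.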
_epoch = self.current_epoch+1 # lightning module member
_eps = self.eps / (1 + 0.06 * _epoch)
train_loss = (1 - _eps) * loss + _eps * vae_loss
# Convert to labels
preds = torch.argmax(predictions, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = list(y.detach().cpu().numpy())
train_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# perform logging
self.log("ptl/train_loss", train_loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log("ptl/train_accuracy", train_acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log("ptl/train_seq_accuracy", seq_acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return train_loss
def val_dataloader(self):
# Data Loading
# train_reader = m.NAContrast(_train, n=n, shuffle=True)
val_reader = m.NAReader(self.validation_data, shuffle=False)
val_loader = torch.utils.data.DataLoader(
val_reader,
batch_size=self.batch_size,
collate_fn=m.my_collate,
num_workers=4,
# pin_memory=True,
shuffle=False
)
return val_loader
def validation_step(self, batch, batch_idx):
seq, x, y = batch
seq_aves = []
pred_aves = []
for _ in range(self.replicas):
predictions, xp, mu, logvar = self(x)
seq_aves.append(xp)
pred_aves.append(predictions)
predictions = torch.mean(torch.stack(pred_aves, dim=0), dim=0)
xp = torch.mean(torch.stack(seq_aves, dim=0), dim=0)
xpp = torch.where(xp > 0.5, 1.0, 0.0)
recon_acc = (x == xpp).float().mean()
seq_acc = recon_acc.item()
# get loss for the predicted output
val_loss = torch.nn.CrossEntropyLoss(reduction='sum')(predictions, y)
vae_loss, bce, kld = self.train_criterion(x, xp, mu, logvar)
# Convert to labels
preds = torch.argmax(predictions, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = list(y.detach().cpu().numpy())
val_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# perform logging
self.log("ptl/val_loss", val_loss, on_epoch=True, prog_bar=True, logger=True)
self.log("ptl/val_accuracy", val_acc, on_epoch=True, prog_bar=True, logger=True)
self.log("ptl/val_seq_accuracy", seq_acc, on_epoch=True, prog_bar=True, logger=True)
return {"val_loss": val_loss, "val_acc": val_acc}
### Train the model
def train_vae(config, checkpoint_dir=None, num_epochs=10, num_gpus=0):
trainer = pl.Trainer(
# default_root_dir="~/ray_results/",
max_epochs=num_epochs,
# If fractional GPUs passed in, convert to int.
gpus=num_gpus,
logger=TensorBoardLogger(
save_dir=tune.get_trial_dir(), name="", version="."),
progress_bar_refresh_rate=0,
callbacks=[
TuneReportCheckpointCallback(
metrics={
"loss": "ptl/val_loss",
"acc": "ptl/val_accuracy",
"train_seq_acc": "ptl/train_seq_accuracy",
"val_seq_acc": "ptl/val_seq_accuracy"
},
filename="checkpoint",
on="validation_end")
]
)
if checkpoint_dir:
# Workaround:
ckpt = pl_load(
os.path.join(checkpoint_dir, "checkpoint"),
map_location=lambda storage, loc: storage)
model = ATTENTION_VAE._load_model_state(
ckpt, config=config)
trainer.current_epoch = ckpt["epoch"]
else:
model = ATTENTION_VAE(config, True, 152, image_channels=1,
hidden_dims=[128, 128, 128, 128, 128], out_image_channels=1, output_size=2, fcl_layers=[])
trainer.fit(model)
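# Note (illustrative): train_vae expects to run inside a Ray Tune trial, since the
# TensorBoardLogger above calls tune.get_trial_dir(). A minimal standalone launch
# therefore still goes through tune.run; the "datatype" value below is a placeholder.
#
#   tune.run(tune.with_parameters(train_vae, num_epochs=1, num_gpus=0),
#            config={"lr": 1e-4, "batch_size": 64, "dr": 0.1, "z_dim": 100,
#                    "datatype": "<your_datatype>"},
#            num_samples=1)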
def tune_asha(datatype, num_samples=10, num_epochs=10, gpus_per_trial=0, cpus_per_trial=1):
config = {
"lr": tune.loguniform(1e-5, 1e-3),
"batch_size": tune.choice([32, 64, 128]),
"dr": tune.loguniform(0.005, 0.5),
"z_dim": tune.choice([10, 100, 200]),
"datatype": datatype
}
scheduler = ASHAScheduler(
max_t=num_epochs,
grace_period=5,
reduction_factor=2)
reporter = CLIReporter(
parameter_columns=["lr", "batch_size"],
metric_columns=["loss", "acc", "training_iteration", "train_seq_accuracy", "val_seq_accuracy"])
analysis = tune.run(
tune.with_parameters(
train_vae,
num_epochs=num_epochs,
num_gpus=gpus_per_trial),
resources_per_trial={
"cpu": cpus_per_trial,
"gpu": gpus_per_trial
},
metric="acc",
mode="max",
config=config,
num_samples=num_samples,
local_dir="./ray_results/",
scheduler=scheduler,
progress_reporter=reporter,
name="tune_vae_asha")
print("Best hyperparameters found were: ", analysis.best_config)
# analysis.to_csv('~/ray_results/' + config['datatype'])
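# Example invocation (values are placeholders, not from the original source):
#   tune_asha("<your_datatype>", num_samples=20, num_epochs=10,
#             gpus_per_trial=1, cpus_per_trial=4)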
def tune_asha_search(datatype, num_samples=10, num_epochs=10, gpus_per_trial=0, cpus_per_trial=1):
config = {
"lr": tune.uniform(1e-5, 1e-3),
"batch_size": 64,
"dr": tune.uniform(0.005, 0.5),
"z_dim": 100,
"datatype": datatype
}
scheduler = ASHAScheduler(
max_t=num_epochs,
grace_period=5,
reduction_factor=2)
reporter = CLIReporter(
parameter_columns=["lr", "batch_size"],
metric_columns=["loss", "acc", "training_iteration", "train_seq_accuracy", "val_seq_accuracy"])
bayesopt = BayesOptSearch()  # metric/mode are supplied by tune.run() below; "mean_acc" is never reported
analysis = tune.run(
tune.with_parameters(
train_vae,
num_epochs=num_epochs,
num_gpus=gpus_per_trial),
resources_per_trial={
"cpu": cpus_per_trial,
"gpu": gpus_per_trial
},
metric="acc",
mode="max",
local_dir="./ray_results/",
config=config,
num_samples=num_samples,
search_alg=bayesopt,
scheduler=scheduler,
progress_reporter=reporter,
name="tune_vae_bayopt")
print("Best hyperparameters found were: ", analysis.best_config)
class CustomStopper(tune.Stopper):
def __init__(self):
self.should_stop = False
def __call__(self, trial_id, result):
max_iter = 100
if not self.should_stop and result["acc"] > 0.96:
self.should_stop = True
return self.should_stop or result["training_iteration"] >= max_iter
def stop_all(self):
return self.should_stop
def pbt_vae(datatype, num_samples=10, num_epochs=10, gpus_per_trial=0, cpus_per_trial=1):
config = {
"lr": tune.uniform(1e-5, 1e-3),
"batch_size": 64,
"dr": tune.uniform(0.005, 0.5),
"z_dim": 100,
"datatype": datatype
}
scheduler = PopulationBasedTraining(
time_attr="training_iteration",
perturbation_interval=5,
hyperparam_mutations={
# distribution for resampling
"lr": lambda: np.random.uniform(0.00001, 0.001),
"dr": lambda: np.random.uniform(0.005, 0.5),
# allow perturbations within this set of categorical values
# "momentum": [0.8, 0.9, 0.99],
})
reporter = CLIReporter(
parameter_columns=["lr", "dr"],
metric_columns=["loss", "acc", "training_iteration"])
stopper = CustomStopper()
analysis = tune.run(
tune.with_parameters(
train_vae,
num_epochs=num_epochs,
num_gpus=gpus_per_trial),
resources_per_trial={
"cpu": cpus_per_trial,
"gpu": gpus_per_trial
},
metric="acc",
mode="max",
local_dir="./ray_results/",
config=config,
num_samples=num_samples,
name="tune_vae_pbt",
scheduler=scheduler,
progress_reporter=reporter,
verbose=1,
stop=stopper,
# export_formats=[ExportFormat.MODEL],
checkpoint_score_attr="acc",
keep_checkpoints_num=4)
print("Best hyperparameters found were: ", analysis.best_config)
def exp_results_check(checkpoint_path, result_path, title):
# example
# checkpoint_file = './ray_results/tune_vae_asha/train_vae_a45d1_00000_0_batch_size=64,dr=0.029188,lr=0.0075796,z_dim=10_2021-07-13_12-50-57/checkpoints/epoch=28-step=15891.ckpt'
checkpoint_file = checkpoint_path
param_file = open(result_path, 'r')
check_epoch = int(checkpoint_file.split("epoch=", 1)[1].split('-', 1)[0])
resultjsons = param_file.read().split('\n')
results = json.loads(resultjsons[check_epoch+1])
params = results['config']
lr = params['lr']
dr = params['dr']
batch_size = params['batch_size']
datatype = params['datatype']
z_dim = params['z_dim']
con = {'lr': lr, 'dr': dr, 'batch_size': batch_size, 'z_dim': z_dim, 'datatype': datatype}
model = ATTENTION_VAE(con, True, 152, image_channels=1, hidden_dims=[128, 128, 128, 128, 128], out_image_channels=1, output_size=2, fcl_layers=[])
checkpoint = torch.load(checkpoint_file)
model.criterion.weight = torch.tensor([0., 0.]) # need to add as this is saved by the checkpoint file
model.load_state_dict(checkpoint['state_dict'])
model.eval()
test_set = m.test_set_corr
verdict = {'sequence':list(test_set.keys()), 'binary':list(test_set.values())}
_verification = pd.DataFrame(verdict)
import math
import pandas as pd
import geopandas as gpd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.sources.census.etl_utils import (
check_census_data_source,
)
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)
class GeoScoreETL(ExtractTransformLoad):
"""
A class used to generate per state and national GeoJson files with the score baked in
"""
def __init__(self, data_source: str = None):
self.SCORE_GEOJSON_PATH = self.DATA_PATH / "score" / "geojson"
self.SCORE_LOW_GEOJSON = self.SCORE_GEOJSON_PATH / "usa-low.json"
self.SCORE_HIGH_GEOJSON = self.SCORE_GEOJSON_PATH / "usa-high.json"
self.SCORE_CSV_PATH = self.DATA_PATH / "score" / "csv"
self.TILE_SCORE_CSV = self.SCORE_CSV_PATH / "tiles" / "usa.csv"
self.DATA_SOURCE = data_source
self.CENSUS_USA_GEOJSON = (
self.DATA_PATH / "census" / "geojson" / "us.json"
)
self.TARGET_SCORE_NAME = "Score G"
self.TARGET_SCORE_RENAME_TO = "G_SCORE"
self.NUMBER_OF_BUCKETS = 10
self.geojson_usa_df: gpd.GeoDataFrame
self.score_usa_df: pd.DataFrame
self.geojson_score_usa_high: gpd.GeoDataFrame
self.geojson_score_usa_low: gpd.GeoDataFrame
def extract(self) -> None:
# check census data
check_census_data_source(
census_data_path=self.DATA_PATH / "census",
census_data_source=self.DATA_SOURCE,
)
logger.info("Reading US GeoJSON (~6 minutes)")
self.geojson_usa_df = gpd.read_file(
self.CENSUS_USA_GEOJSON,
dtype={"GEOID10": "string"},
usecols=["GEOID10", "geometry"],
low_memory=False,
)
self.geojson_usa_df.head()
logger.info("Reading score CSV")
self.score_usa_df = pd.read_csv(
self.TILE_SCORE_CSV,
dtype={"GEOID10": "string"},
low_memory=False,
)
def transform(self) -> None:
logger.info("Pruning Census GeoJSON")
fields = ["GEOID10", "geometry"]
self.geojson_usa_df = self.geojson_usa_df[fields]
logger.info("Merging and compressing score CSV with USA GeoJSON")
self.geojson_score_usa_high = self.score_usa_df.merge(
self.geojson_usa_df, on="GEOID10", how="left"
)
self.geojson_score_usa_high = gpd.GeoDataFrame(
self.geojson_score_usa_high, crs="EPSG:4326"
)
usa_simplified = self.geojson_score_usa_high[
["GEOID10", self.TARGET_SCORE_NAME, "geometry"]
].reset_index(drop=True)
usa_simplified.rename(
columns={self.TARGET_SCORE_NAME: self.TARGET_SCORE_RENAME_TO},
inplace=True,
)
logger.info("Aggregating into tracts (~5 minutes)")
usa_tracts = self._aggregate_to_tracts(usa_simplified)
usa_tracts = gpd.GeoDataFrame(
usa_tracts,
columns=[self.TARGET_SCORE_RENAME_TO, "geometry"],
crs="EPSG:4326",
)
logger.info("Creating buckets from tracts")
usa_bucketed = self._create_buckets_from_tracts(
usa_tracts, self.NUMBER_OF_BUCKETS
)
logger.info("Aggregating buckets")
usa_aggregated = self._aggregate_buckets(usa_bucketed, agg_func="mean")
compressed = self._breakup_multipolygons(
usa_aggregated, self.NUMBER_OF_BUCKETS
)
self.geojson_score_usa_low = gpd.GeoDataFrame(
compressed,
columns=[self.TARGET_SCORE_RENAME_TO, "geometry"],
crs="EPSG:4326",
)
# round to 2 decimals
decimals = pd.Series([2], index=[self.TARGET_SCORE_RENAME_TO])
#%%
import pandas as pd
import tqdm
#%% Load the schmidt data to set up a dictionary of cog letter to sub and super
#class
schmidt_data = pd.read_csv('../../../data/schmidt2016_longform.csv')
# Omid55
from __future__ import print_function
import numpy as np
import pandas as pd
import math
import seaborn as sns
import sklearn as sk
import sklearn.metrics  # ensure sk.metrics is available for accuracy_score below
import matplotlib.pyplot as plt
from time import time
#% matplotlib inline
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import AgglomerativeClustering
from sklearn import manifold
from sklearn.neighbors import KNeighborsClassifier
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import SGDClassifier
from sklearn.dummy import DummyClassifier
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def visualize_hierarchical_clusters(data, NUMBER_OF_CLUSTERS, metric='cosine'):
X = np.asmatrix(data.iloc[:, :-1])
y = np.asanyarray(data['label'])
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.nipy_spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
if metric == 'euclidean':
linkages = ['ward', 'average', 'complete']
else:
linkages = ['average', 'complete']
for linkage in linkages:
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=NUMBER_OF_CLUSTERS)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
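# Minimal usage sketch on synthetic data (assumes the 'label'-as-last-column
# convention used throughout this module):
#
#   toy = pd.DataFrame(np.random.rand(60, 4), columns=list("abcd"))
#   toy["label"] = np.random.randint(0, 3, 60)
#   visualize_hierarchical_clusters(toy, NUMBER_OF_CLUSTERS=3, metric="euclidean")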
def normalize_the_dataset(dataset):
# MIN MAX Normalization
#x = dataset.values #returns a numpy array
#min_max_scaler = preprocessing.MinMaxScaler()
#x_scaled = min_max_scaler.fit_transform(x)
#dataset = pd.DataFrame(x_scaled)
# Standard Normalization (x-mean(x) / std(x))
x = dataset.values[:,:-1] #returns a numpy array
min_max_scaler = StandardScaler()
x_scaled = min_max_scaler.fit_transform(x)
dataset = pd.DataFrame(np.column_stack((x_scaled,dataset.values[:,-1])), columns=dataset.columns)
return dataset
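# Minimal usage sketch: standardizes every feature column and leaves the trailing
# 'label' column untouched.
#
#   toy = pd.DataFrame({"f1": [1.0, 2.0, 3.0], "f2": [10.0, 20.0, 30.0],
#                       "label": [0, 1, 0]})
#   toy_std = normalize_the_dataset(toy)   # f1/f2 now have zero mean, unit variance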
def plot_data(data, has_label=True):
if not has_label:
data = data.copy()
data['label'] = np.zeros(len(data))
LIMIT = 4000
if data.shape[0] > LIMIT:
dt = data.sample(n=LIMIT, replace=False)
X = dt.iloc[:, :-1]
labels = dt.iloc[:, -1]
else:
X = data.iloc[:, :-1]
labels = data.iloc[:, -1]
tsne_model = TSNE(n_components=2, random_state=0)
np.set_printoptions(suppress=True)
points1 = tsne_model.fit_transform(X)
df1 = pd.DataFrame(data=np.column_stack([points1,labels]), columns=["x","y","class"])
sns.lmplot("x", "y", data=df1, hue='class', fit_reg=False, palette=sns.color_palette('colorblind'))
sns.plt.title('T-SNE')
pca = PCA(n_components=2)
pca.fit(X)
points2 = pca.transform(X)
df2 = pd.DataFrame(data=np.column_stack([points2,labels]), columns=["x","y","class"])
sns.lmplot("x", "y", data=df2, hue='class', fit_reg=False, palette=sns.color_palette('colorblind'))
sns.plt.title('PCA')
def dropnans_from_dataset(dataset):
to_be_deleted = []
for idx,item in enumerate(dataset.as_matrix()):
if np.isnan(item).any():
to_be_deleted.append(idx)
dataset = dataset.drop(to_be_deleted)
return dataset
def do_classification(dataset):
if type(dataset) is pd.core.frame.DataFrame:
dataset = dataset.as_matrix()
#clf = MLPClassifier(algorithm='l-bfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
models = [#(DummyClassifier(strategy='stratified'), 'Dummy (Stratified)'),
(DummyClassifier(strategy='uniform'), 'Dummy (Uniform)'),
(SGDClassifier(), 'Stochastic Gradient Descent Classifier'),
(LogisticRegression(), 'Logistic Regression'),
(GaussianNB(), 'Naive Bayes'),
(svm.LinearSVC(C=1.0, verbose=False), 'Linear SVM'),
(svm.SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False), 'SVM with RBF kernel'),
(RandomForestClassifier(n_estimators=100), 'Random Forest'),
(KNeighborsClassifier(n_neighbors=8), 'KNN'),
(DecisionTreeClassifier(max_depth=5), 'Decision Tree'),
(AdaBoostClassifier(), 'AdaBoost'),
(LinearDiscriminantAnalysis(), 'Linear Discriminant Analysis'),
(QuadraticDiscriminantAnalysis(), 'Quadratic Discriminant Analysis')]
#print(print_class_ratio(dataset))
#print('~~~~~~~~~~~~~~~~~~~~~~~~~~')
# applying the models
n_folds = 10
k_fold = model_selection.KFold(n_splits=n_folds, shuffle=False)
accuracy = {}
train_accuracy = {}
for train_index, test_index in k_fold.split(dataset):
X_train = dataset[train_index,:-1]
y_train = dataset[train_index,-1]
X_test = dataset[test_index,:-1]
y_test = dataset[test_index,-1]
for clf, name in models:
clf.fit(X_train, y_train)
# test data accuracy
y_pred = clf.predict(X_test)
accuracy_percent = 100 * sk.metrics.accuracy_score(y_test, y_pred) / n_folds
if name not in accuracy:
accuracy[name] = accuracy_percent
else:
accuracy[name] += accuracy_percent
# train data accuracy
y_train_pred = clf.predict(X_train)
train_accuracy_percent = 100 * sk.metrics.accuracy_score(y_train, y_train_pred) / n_folds
if name not in train_accuracy:
train_accuracy[name] = train_accuracy_percent
else:
train_accuracy[name] += train_accuracy_percent
result = []
for key in accuracy.keys():
result.append([key, round(accuracy[key],2), round(train_accuracy[key],2)])
result_accuracy_df = pd.DataFrame(data=result, columns=['classifier', 'test acc', 'train acc'])
return result_accuracy_df
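# Minimal usage sketch with synthetic data (last column is the label, as
# do_classification assumes):
#
#   X = np.random.rand(200, 5)
#   y = np.random.randint(0, 2, size=(200, 1))
#   scores = do_classification(np.column_stack([X, y]))
#   print(scores.sort_values("test acc", ascending=False))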
def run(file_name):
original_data = pd.read_csv(file_name)
print('Data size: ', original_data.shape)
original_data = original_data.drop(original_data.columns[0], 1)
original_data = dropnans_from_dataset(original_data)
sampled_data = original_data #original_data.sample(n=500)
# Adding label and swapping 50% of winners and losers ;Thus:
# label 0 == winner + loser
# label 1 == loser + winner
dataset = sampled_data.copy()
dataset['label'] = np.zeros(len(dataset))
dt = dataset.as_matrix()
idx = np.random.choice(len(dt), int(len(dt)/2), replace=False)
tf = math.floor(dt.shape[1]/2)
tmp = dt[idx,tf:2*tf]
dt[idx,tf:2*tf] = dt[idx,:tf]
dt[idx,:tf] = tmp
dt[idx,-1] = 1
dataset = pd.DataFrame(data=dt, columns=dataset.columns)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
"""
If payment has not been received within 45 days of the close of a deal, the vendor
sends a reminder to the customer. By region, what is the current (where "today" is
December 11, 2018) total value of contracts to be collected that are more than 45
days past close? More than 90 days? More than 135 days? How does this compare to
contracts closed in 2017?
Contracts in 2017 by region:
APAC 87399.0
Africa 23178.0
EMEA 166514.0
Latin America 21355.0
North America 151625.0
"""
import pandas as pd
import numpy as np
import datetime
from datetime import datetime, timedelta
contracts = pd.DataFrame(pd.read_csv('contracts.csv'))
accounts = pd.DataFrame(pd.read_csv('accounts.csv'))
contracts['closingDate'] = pd.to_datetime(contracts['closingDate'])
contracts['paymentDate'] = pd.to_datetime(contracts['paymentDate'])
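# Sketch of one way to answer the question in the docstring above. The column
# names 'value' and 'region' and the join key 'accountID' are assumptions about
# the CSV schemas, not taken from the source files.
#
#   today = datetime(2018, 12, 11)
#   open_contracts = contracts[contracts['paymentDate'].isna()]
#   days_past = (today - open_contracts['closingDate']).dt.days
#   merged = open_contracts.assign(days_past=days_past).merge(accounts, on='accountID')
#   for cutoff in (45, 90, 135):
#       past_due = merged.loc[merged['days_past'] > cutoff]
#       print(cutoff, past_due.groupby('region')['value'].sum())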
"""
Prepare training and testing datasets as CSV dictionaries 2.0
Created on 04/26/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
tile_path = "../tiles/"
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
def tile_ids_in(slide, label, root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
dira = os.path.isdir(root_dir + 'level0')
dirb = os.path.isdir(root_dir + 'level1')
dirc = os.path.isdir(root_dir + 'level2')
if dira and dirb and dirc:
ids = []
for level in range(3):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append([slide, label, level, dirr+'/'+id])
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path'])
idsa = ids.loc[ids['level'] == 0]
idsb = ids.loc[ids['level'] == 1]
idsc = ids.loc[ids['level'] == 2]
idss = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
idss['slide'] = idsa['slide']
idss['label'] = idsa['label']
idss['L0path'] = idsa['path']
idss = idss.reset_index(drop=True)
idsb = idsb.reset_index(drop=True)
idsc = idsc.reset_index(drop=True)
idss['L1path'] = idsb['path']
idss['L2path'] = idsc['path']
idss = sku.shuffle(idss)
idss = idss.fillna(method='ffill')
idss = idss.fillna(method='bfill')
else:
idss = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
return idss
# Get all svs images with its label as one file; level is the tile resolution level
def big_image_sum(pmd, path='../tiles/', ref_file='../dummy_His_MUT_joined.csv'):
if not os.path.isdir(path):
os.mkdir(path)
import Cutter
Cutter.cut()
allimg = image_ids_in(path)
ref = pd.read_csv(ref_file, header=0)
big_images = []
if pmd == 'subtype':
ref = ref.loc[ref['subtype_0NA'] == 0]
MSIimg = intersection(ref.loc[ref['subtype_MSI'] == 1]['name'].tolist(), allimg)
EMimg = intersection(ref.loc[ref['subtype_Endometrioid'] == 1]['name'].tolist(), allimg)
SLimg = intersection(ref.loc[ref['subtype_Serous-like'] == 1]['name'].tolist(), allimg)
POLEimg = intersection(ref.loc[ref['subtype_POLE'] == 1]['name'].tolist(), allimg)
for i in MSIimg:
big_images.append([i, 0, path + "{}/".format(i)])
for i in EMimg:
big_images.append([i, 1, path + "{}/".format(i)])
for i in SLimg:
big_images.append([i, 2, path + "{}/".format(i)])
for i in POLEimg:
big_images.append([i, 3, path + "{}/".format(i)])
elif pmd == 'histology':
ref = ref.loc[ref['histology_0NA'] == 0]
EMimg = intersection(ref.loc[ref['histology_Endometrioid'] == 1]['name'].tolist(), allimg)
Serousimg = intersection(ref.loc[ref['histology_Serous'] == 1]['name'].tolist(), allimg)
Mixedimg = intersection(ref.loc[ref['histology_Mixed'] == 1]['name'].tolist(), allimg)
for i in EMimg:
big_images.append([i, 0, path + "{}/".format(i)])
for i in Serousimg:
big_images.append([i, 1, path + "{}/".format(i)])
for i in Mixedimg:
big_images.append([i, 2, path + "{}/".format(i)])
elif pmd in ['Endometrioid', 'MSI', 'Serous-like', 'POLE']:
ref = ref.loc[ref['subtype_0NA'] == 0]
negimg = intersection(ref.loc[ref['subtype_{}'.format(pmd)] == 0]['name'].tolist(), allimg)
posimg = intersection(ref.loc[ref['subtype_{}'.format(pmd)] == 1]['name'].tolist(), allimg)
for i in negimg:
big_images.append([i, 0, path + "{}/".format(i)])
for i in posimg:
big_images.append([i, 1, path + "{}/".format(i)])
elif pmd in ['histology_Endometrioid', 'histology_Serous', 'histology_Mixed']:
ref = ref.loc[ref['histology_0NA'] == 0]
negimg = intersection(ref.loc[ref[pmd] == 0]['name'].tolist(), allimg)
posimg = intersection(ref.loc[ref[pmd] == 1]['name'].tolist(), allimg)
for i in negimg:
big_images.append([i, 0, path + "{}/".format(i)])
for i in posimg:
big_images.append([i, 1, path + "{}/".format(i)])
else:
negimg = intersection(ref.loc[ref[pmd] == 0]['name'].tolist(), allimg)
posimg = intersection(ref.loc[ref[pmd] == 1]['name'].tolist(), allimg)
for i in negimg:
big_images.append([i, 0, path + "{}/".format(i)])
for i in posimg:
big_images.append([i, 1, path + "{}/".format(i)])
datapd = pd.DataFrame(big_images, columns=['slide', 'label', 'path'])
return datapd
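# Minimal usage sketch (the label argument must match one of the branches above):
#
#   alll = big_image_sum('histology', path=tile_path)
#   print(alll['label'].value_counts())   # slides per class before splitting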
# seperate into training and testing; each type is the same separation ratio on big images
# test and train csv files contain tiles' path.
def set_sep(alll, path, cls, cut=0.2):
trlist = []
telist = []
valist = []
for i in range(cls):
subset = alll.loc[alll['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq)*cut/2)]
valist.append(subset[subset['slide'].isin(validation)])
test = unq[int(len(unq)*cut/2):int(len(unq)*cut)]
telist.append(subset[subset['slide'].isin(test)])
train = unq[int(len(unq)*cut):]
trlist.append(subset[subset['slide'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
train_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
# Copyright © 2021. TIBCO Software Inc.
# This file is subject to the license terms contained
# in the license file that is distributed with this file.
"""Functions to import data tables into Python from SBDF files and export data from Python to SBDF files."""
import collections.abc
import datetime
import decimal
import enum
import struct
import tempfile
import typing
import warnings
import bitstring
import pandas as pd
import numpy as np
from spotfire import _utils
try:
import geopandas as gpd
import shapely
import shapely.geometry.base as shp_geom
except ImportError:
gpd = None
shapely = None
shp_geom = None
try:
import matplotlib
import matplotlib.figure
except ImportError:
matplotlib = None
try:
import PIL
import PIL.Image
except ImportError:
PIL = None
try:
import seaborn
except ImportError:
seaborn = None
__all__ = ["import_data", "export_data"]
# Public Functions
def import_data(sbdf_file: typing.Union[str, bytes, int]) -> pd.DataFrame:
"""Import data from an SBDF file and create a 'pandas' DataFrame.
:param sbdf_file: the filename of the SBDF file to import
:return: the DataFrame containing the imported data
:raises SBDFError: if a problem is encountered during import
"""
# Open the SBDF file
with open(sbdf_file, "rb") as file:
# Read the file header
version_major, version_minor = _FileHeader.read(file)
if version_major != _FileHeader.Major_Version or version_minor != _FileHeader.Minor_Version:
raise SBDFError(f"unsupported file version {version_major}.{version_minor}")
# Read the table metadata
tmeta = _TableMetadata.read(file)
# Process table metadata
table_metadata_dict = _import_table_metadata(tmeta)
# Process column metadata
pd_data, pd_dtypes, column_metadata_dict, column_names = _import_column_metadata(tmeta)
# Read the table slices
_import_table_slices(file, column_names, pd_data, tmeta)
# Construct the pandas DataFrame and return the final results
columns = []
for col in column_names:
columns.append(pd.Series(pd_data[col], dtype=pd_dtypes[col], name=col))
dataframe = pd.concat(columns, axis=1)
for col in column_names:
dataframe[col].spotfire_column_metadata = column_metadata_dict[col]
if gpd is not None and table_metadata_dict.get('MapChart.IsGeocodingTable'):
dataframe = _data_frame_to_geo_data_frame(dataframe, table_metadata_dict)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
dataframe.spotfire_table_metadata = table_metadata_dict
return dataframe
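# Minimal usage sketch ("example.sbdf" and "SomeColumn" are placeholders):
#
#   df = import_data("example.sbdf")
#   print(df.dtypes)
#   print(df.spotfire_table_metadata)                  # table-level metadata dict
#   print(df["SomeColumn"].spotfire_column_metadata)   # per-column metadata dict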
def _import_table_metadata(tmeta: '_TableMetadata') -> typing.Dict[str, typing.Any]:
table_metadata_dict = {}
for i in range(tmeta.table_metadata.count()):
table_metadata_dict[tmeta.table_metadata.names[i]] = tmeta.table_metadata.values[i].data
return table_metadata_dict
def _import_column_metadata(tmeta: '_TableMetadata') -> typing.Tuple[typing.Dict[str, typing.List],
typing.Dict[str, str],
typing.Dict[str, typing.Dict[str, typing.Any]],
typing.List[str]]:
pd_data = {}
pd_dtypes = {}
column_metadata_dict = {}
column_names = []
for i in range(tmeta.column_count()):
cmeta = tmeta.column_metadata[i]
# get the column name
cm_column_name = _ColumnMetadata.get_name(cmeta)
column_names.append(cm_column_name)
# add a new list to hold the column
pd_data[cm_column_name] = []
# get the pandas dtype for constructing this column
pd_dtypes[cm_column_name] = _ColumnMetadata.get_type(cmeta).to_dtype_name()
# get the remaining column metadata
cm_dict = {}
for j in range(cmeta.count()):
if cmeta.names[j] in (_ColumnMetadata.Property_Name, _ColumnMetadata.Property_DataType):
continue
cm_dict[cmeta.names[j]] = cmeta.values[j].data
column_metadata_dict[cm_column_name] = cm_dict
return pd_data, pd_dtypes, column_metadata_dict, column_names
def _import_table_slices(file: typing.BinaryIO, column_names: typing.List[str],
pd_data: typing.Dict[str, typing.List], tmeta: '_TableMetadata') -> None:
while True:
tslice = _TableSlice.read(file, tmeta, None)
if tslice is None:
break
for i in range(tslice.column_count()):
cslice = tslice.columns[i]
cs_values = cslice.values.get_values()
cs_invalid_prop = cslice.get_property(_ColumnSlice.ValueProperty_IsInvalid)
if cs_invalid_prop is None:
cs_invalid = [False] * cs_values.get_count()
else:
cs_invalid = cs_invalid_prop.get_values().data
for value, invalid in zip(cs_values.data, cs_invalid):
pd_data[column_names[i]].append(None if invalid else value)
def export_data(obj: typing.Any, sbdf_file: typing.Union[str, bytes, int], default_column_name: str = "x") -> None:
"""Export data to an SBDF file.
:param obj: the data object to export
:param sbdf_file: the filename to export the data to
:param default_column_name: the column name to use when exporting data objects without intrinsic names (such as
lists or scalar values)
:raises SBDFError: if a problem is encountered during export
"""
columns, column_names, column_types, table_metadata, column_metadata = _export_columnize_data(obj,
default_column_name)
# Open the SBDF file
with open(sbdf_file, "wb") as file:
# Write the file header
_FileHeader.write(file)
# Write the table and column metadata
tmeta = _export_table_metadata(table_metadata)
row_count = _export_column_metadata(columns, column_names, column_types, column_metadata, tmeta)
tmeta.write(file)
# Write out the table and column slices
_export_table_slices(columns, column_names, column_types, file, row_count, tmeta)
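# Minimal round-trip sketch ("out.sbdf" is a placeholder path):
#
#   df = pd.DataFrame({"x": [1, 2, 3], "y": ["a", "b", None]})
#   export_data(df, "out.sbdf")
#   assert import_data("out.sbdf").shape == (3, 2)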
def _export_columnize_data(obj: typing.Any, default_column_name: str) -> \
typing.Tuple[typing.Dict[str, typing.List[typing.Any]],
typing.List[str],
typing.Dict[str, '_ValueTypeId'],
typing.Dict[str, typing.List[typing.Any]],
typing.Dict[str, typing.Dict[str, typing.List[typing.Any]]]
]:
# pylint: disable=too-many-branches,too-many-statements
table_metadata = {}
column_metadata = {}
if isinstance(obj, pd.DataFrame):
# Extract the table and column metadata from the data frame
try:
table_metadata = obj.spotfire_table_metadata
except AttributeError:
pass
for col in obj.columns.tolist():
try:
col_meta = obj[col].spotfire_column_metadata
column_metadata[col] = col_meta
except AttributeError:
column_metadata[col] = {}
# Convert geopandas geodataframe to Spotfire's native geocoding format
if gpd is not None and isinstance(obj, gpd.GeoDataFrame):
obj = _geo_data_frame_to_data_frame(obj, table_metadata, column_metadata)
if len({str(x) for x in obj.keys()}) != len(obj.columns):
raise SBDFError("obj does not have unique column names")
# columns = obj.to_dict("list")
columns = obj
column_names = obj.columns.tolist()
column_types = {str(k): _ValueTypeId.infer_from_dtype(v, f"column '{str(k)}'") for (k, v) in obj.items()}  # DataFrame.items(); iteritems() was removed in pandas 2.0
elif isinstance(obj, pd.Series):
# Handle series as columnar data
series_name = default_column_name if obj.name is None else obj.name
series_description = "series" if obj.name is None else f"series '{obj.name}'"
# Extract the column metadata from the series
try:
column_metadata = {series_name: obj.spotfire_column_metadata}
except AttributeError:
pass
columns = {series_name: obj.tolist()}
column_names = [series_name]
column_types = {series_name: _ValueTypeId.infer_from_dtype(obj, series_description)}
elif isinstance(obj, dict):
# Handle dicts of lists as tabular data
if not all(isinstance(x, list) for x in obj.values()):
raise SBDFError("obj is not a dict of lists")
if len({str(x) for x in obj.keys()}) != len(obj):
raise SBDFError("obj does not have unique column names")
column_metadata = {col: {} for col in obj.keys()}
columns = obj
column_names = obj.keys()
column_types = {str(k): _ValueTypeId.infer_from_type(v, f"column '{str(k)}'") for (k, v) in obj.items()}
elif isinstance(obj, (str, bytes, bytearray)):
# Handle strings and bytes as scalar data
column_metadata[default_column_name] = {}
columns = {default_column_name: [obj]}
column_names = [default_column_name]
column_types = {default_column_name: _ValueTypeId.infer_from_type([obj], "value")}
elif isinstance(obj, collections.abc.Iterable):
# Handle all iterable items as columnar data
column_metadata[default_column_name] = {}
columns = {default_column_name: list(obj)}
column_names = [default_column_name]
column_types = {default_column_name: _ValueTypeId.infer_from_type(list(obj), "list")}
else:
# Handle various image formats
if matplotlib is not None and isinstance(obj, matplotlib.figure.Figure):
obj = _pyplot_to_binary(obj)
elif seaborn is not None and isinstance(obj, seaborn.axisgrid.Grid):
obj = _seaborn_plot_to_binary(obj)
elif PIL is not None and isinstance(obj, PIL.Image.Image):
obj = _pil_image_to_binary(obj)
# If all else fails, treat this as scalar data
column_metadata[default_column_name] = {}
columns = {default_column_name: [obj]}
column_names = [default_column_name]
column_types = {default_column_name: _ValueTypeId.infer_from_type([obj], "value")}
return columns, column_names, column_types, table_metadata, column_metadata
def _export_table_metadata(table_metadata: typing.Dict[str, typing.Any]) -> '_TableMetadata':
metadata = _Metadata()
if isinstance(table_metadata, dict):
for key, val in table_metadata.items():
obj_val = val if isinstance(val, list) else [val]
obj_valuetype = _ValueTypeId.infer_from_type(obj_val, "table metadata")
obj = _SbdfObject(obj_valuetype, obj_val)
metadata.add(key, obj)
tmeta = _TableMetadata(metadata)
return tmeta
def _export_column_metadata(columns: typing.Dict[str, typing.List], column_names: typing.List[str],
column_types: typing.Dict[str, '_ValueTypeId'],
column_metadata: typing.Dict[str, typing.Dict[str, typing.Any]],
tmeta: '_TableMetadata') -> int:
row_count = None
for colname in column_names:
metadata = _Metadata()
if isinstance(column_metadata, dict):
colmeta = column_metadata.get(colname, {})
if not isinstance(colmeta, dict):
raise SBDFError("column_metadata is not a dict of dicts")
for key, val in colmeta.items():
obj_val = val if isinstance(val, list) else [val]
obj_valuetype = _ValueTypeId.infer_from_type(obj_val, "column metadata")
obj = _SbdfObject(obj_valuetype, obj_val)
metadata.add(key, obj)
if row_count is None:
row_count = len(columns[colname])
else:
if row_count != len(columns[colname]):
raise SBDFError(f"column '{colname}' has inconsistent column length")
_ColumnMetadata.set_values(metadata, colname, column_types.get(str(colname)))
tmeta.add(metadata)
return row_count
def _export_table_slices(columns: typing.Dict[str, typing.List], column_names: typing.List[str],
column_types: typing.Dict[str, '_ValueTypeId'], file: typing.BinaryIO,
row_count: int, tmeta: '_TableMetadata') -> None:
# max_rows_per_slice = max(10, 100000 // max(1, len(column_names)))
max_rows_per_slice = 50000
row_offset = 0
while row_offset < row_count:
slice_row_count = min(max_rows_per_slice, row_count - row_offset)
tslice = _TableSlice(tmeta)
for colname in column_names:
if isinstance(columns, pd.DataFrame):
dataslice = columns[colname].iloc[row_offset:row_offset + slice_row_count]  # iloc gives a half-open slice; .loc's inclusive end label duplicates the boundary row
else:
dataslice = columns[colname][row_offset:row_offset + slice_row_count]
obj = _SbdfObject(column_types.get(str(colname)), dataslice)
cslice = _ColumnSlice(_ValueArray(_ValueArrayEncoding.PLAIN_ARRAY, obj))
if isinstance(dataslice, pd.Series):
invalid = dataslice.isna()
if invalid.sum() > 0:
obj_vt = _ValueType(column_types.get(str(colname)))
obj.data = dataslice.fillna(obj_vt.missing_value())  # update the object's data, not just the local variable
obj_empty = _SbdfObject(_ValueTypeId.BOOL, invalid.tolist())
va_empty = _ValueArray(_ValueArrayEncoding.BIT_ARRAY, obj_empty)
cslice.add_property(_ColumnSlice.ValueProperty_IsInvalid, va_empty)
else:
invalid = [pd.isnull(x) for x in obj.data]
if any(invalid):
obj_vt = _ValueType(column_types.get(str(colname)))
obj.data = [obj_vt.missing_value() if missing else val for val, missing in zip(obj.data, invalid)]
obj_empty = _SbdfObject(_ValueTypeId.BOOL, invalid)
va_empty = _ValueArray(_ValueArrayEncoding.BIT_ARRAY, obj_empty)
cslice.add_property(_ColumnSlice.ValueProperty_IsInvalid, va_empty)
tslice.add(cslice)
tslice.write(file)
row_offset += slice_row_count
_TableSlice.write_end(file)
# Exceptions
class SBDFError(Exception):
"""An exception that is raised to indicate a problem during import or export of SBDF files."""
# File Headers
class _FileHeader:
Major_Version = 1
Minor_Version = 0
Version_String = "1.0"
@staticmethod
def write(file: typing.BinaryIO) -> None:
"""writes the current sbdf fileheader to file"""
_section_write(file, _SectionTypeId.FILEHEADER)
_write_int8(file, _FileHeader.Major_Version)
_write_int8(file, _FileHeader.Minor_Version)
@staticmethod
def read(file: typing.BinaryIO) -> typing.Tuple[int, int]:
"""reads the sbdf fileheader from file"""
_section_expect(file, _SectionTypeId.FILEHEADER)
major = _read_int8(file)
minor = _read_int8(file)
return major, minor
# Column Slices
class _ColumnSlice:
ValueProperty_IsInvalid = "IsInvalid"
ValueProperty_ErrorCode = "ErrorCode"
ValueProperty_ReplacedValue = "HasReplacedValue"
def __init__(self, values: '_ValueArray') -> None:
"""creates a column slice and stores a reference to the valuearray in it"""
self.values = values
self.property_names = []
self.property_values = []
def __repr__(self) -> str:
return f"<{_utils.type_name(type(self))} object: {self.values!r}>"
def add_property(self, name: str, values: '_ValueArray') -> None:
"""stores a named value property reference in the given column slice"""
if name in self.property_names:
raise SBDFError("the property with the given name already exists")
self.property_names.append(name)
self.property_values.append(values)
def get_property(self, name: str) -> typing.Optional['_ValueArray']:
"""gets a value property reference with the given name"""
if name in self.property_names:
index = self.property_names.index(name)
return self.property_values[index]
return None
def get_property_count(self) -> int:
"""gets the number of value properties in the column slice"""
return len(self.property_names)
def get_row_count(self) -> int:
"""gets the number of rows of the values in the column slice"""
return self.values.row_count()
def write(self, file: typing.BinaryIO) -> None:
"""writes a value array to file"""
_section_write(file, _SectionTypeId.COLUMNSLICE)
self.values.write(file)
_write_int32(file, self.get_property_count())
for i in range(self.get_property_count()):
_write_string(file, self.property_names[i])
self.property_values[i].write(file)
@classmethod
def read(cls, file: typing.BinaryIO) -> '_ColumnSlice':
"""reads a value array from file"""
_section_expect(file, _SectionTypeId.COLUMNSLICE)
values = _ValueArray.read(file)
cslice = cls(values)
val = _read_int32(file)
for _ in range(val):
name = _read_string(file)
prop = _ValueArray.read(file)
cslice.add_property(name, prop)
return cslice
@classmethod
def skip(cls, file: typing.BinaryIO) -> None:
"""skips a value array in the file"""
raise NotImplementedError # sbdf_cs_skip
# Table Slices
class _TableSlice:
def __init__(self, table_metadata: '_TableMetadata') -> None:
"""creates a table slice, storing a reference to the table metadata"""
self.table_metadata = table_metadata
self.columns = []
def __repr__(self) -> str:
return f"<{_utils.type_name(type(self))} object: {self.columns!r}>"
def column_count(self) -> int:
"""get the number of columns in the table slice"""
return len(self.columns)
def add(self, column_slice: _ColumnSlice) -> None:
"""adds a column slice reference to the table slice"""
self.columns.append(column_slice)
def write(self, file: typing.BinaryIO) -> None:
"""writes a table slice to file"""
_section_write(file, _SectionTypeId.TABLESLICE)
no_columns = self.column_count()
_write_int32(file, no_columns)
for i in range(no_columns):
self.columns[i].write(file)
@staticmethod
def write_end(file: typing.BinaryIO) -> None:
"""writes the end-of-table marker to the file"""
_section_write(file, _SectionTypeId.TABLEEND)
@classmethod
def read(cls, file: typing.BinaryIO, table_metadata: '_TableMetadata',
subset: typing.Optional[typing.List[bool]]) -> typing.Optional['_TableSlice']:
"""reads a table slice from file. returns None when the end of the table is reached"""
val = _section_read(file)
if val == _SectionTypeId.TABLEEND:
return None
if val != _SectionTypeId.TABLESLICE:
raise SBDFError("unexpected section id")
column_count = _read_int32(file)
if column_count < 0:
raise SBDFError("the number of elements is incorrect")
if column_count != table_metadata.column_count():
raise SBDFError("the number of the columnslice doesn't match the number of the columns of the metadata")
tslice = cls(table_metadata)
for i in range(column_count):
if subset is None or subset[i]:
tslice.add(_ColumnSlice.read(file))
else:
_ColumnSlice.skip(file)
return tslice
@classmethod
def skip(cls, file: typing.BinaryIO, table_metadata: '_TableMetadata') -> None:
"""skips a table slice in file"""
subset = []
for _ in range(table_metadata.column_count()):
subset.append(False)
cls.read(file, table_metadata, subset)
# Column Metadata
class _ColumnMetadata:
Property_Name = "Name"
Property_DataType = "DataType"
@staticmethod
def set_values(metadata: '_Metadata', column_name: str, data_type) -> None:
"""sets the column metadata values name and data type for the previously allocated metadata head"""
metadata.add_str(_ColumnMetadata.Property_Name, column_name)
obj = _ValueType(data_type).as_sbdfobject()
metadata.add(_ColumnMetadata.Property_DataType, obj)
@staticmethod
def get_name(metadata: '_Metadata') -> str:
"""gets the name of the column metadata"""
name = metadata.get(_ColumnMetadata.Property_Name)
if name.valuetype != _ValueTypeId.STRING:
raise SBDFError("the metadata is incorrect")
return name.data[0]
@staticmethod
def get_type(metadata: '_Metadata') -> '_ValueTypeId':
"""gets the value type of the column metadata"""
obj = metadata.get(_ColumnMetadata.Property_DataType)
if obj.valuetype != _ValueTypeId.BINARY or obj.get_count() != 1:
raise SBDFError("the metadata is incorrect")
return _ValueTypeId(obj.data[0][0])
# Table Metadata
class _TableMetadata:
def __init__(self, table_metadata: '_Metadata') -> None:
"""creates the table metadata, storing a copy the given table information"""
self.table_metadata = table_metadata
self.table_metadata.set_immutable()
self.column_metadata = []
def __repr__(self) -> str:
return f"<{_utils.type_name(type(self))} object: {self.table_metadata!r} {self.column_metadata!r}>"
def column_count(self) -> int:
"""return the number of columns in this table"""
return len(self.column_metadata)
def add(self, column_metadata: '_Metadata') -> None:
"""adds column metadata to the table metadata"""
self.column_metadata.append(column_metadata)
def write(self, file: typing.BinaryIO) -> None:
"""writes table metadata to file"""
_section_write(file, _SectionTypeId.TABLEMETADATA)
_write_int32(file, self.table_metadata.count())
for i in range(self.table_metadata.count()):
_write_string(file, self.table_metadata.names[i])
_write_int8(file, self.table_metadata.values[i].valuetype)
_write_optional_value(file, self.table_metadata.values[i])
_write_optional_value(file, self.table_metadata.default_values[i])
_write_int32(file, self.column_count())
# fold duplicate values
cm_default = {}
cm_types = {}
for cmeta in self.column_metadata:
for i in range(cmeta.count()):
cm_name = cmeta.names[i]
if cm_name in cm_default and cm_default[cm_name] != cmeta.default_values[i]:
raise SBDFError("the metadata is incorrect")
cm_default[cm_name] = cmeta.default_values[i]
if cmeta.values[i] is not None:
cm_type = cmeta.values[i].valuetype
else:
cm_type = cmeta.default_values[i].valuetype
if cm_name in cm_types and cm_types[cm_name] != cm_type:
raise SBDFError("the metadata is incorrect")
cm_types[cm_name] = cm_type
_write_int32(file, len(cm_default))
# write names, data types, and default values
for i in sorted(cm_default):
_write_string(file, i)
_ValueType(cm_types[i]).write(file)
_write_optional_value(file, cm_default[i])
# write column values
for cmeta in self.column_metadata:
for i in sorted(cm_default):
val = cmeta.get(i)
_write_optional_value(file, val)
@classmethod
def read(cls, file: typing.BinaryIO) -> '_TableMetadata':
"""reads table metadata from file"""
_section_expect(file, _SectionTypeId.TABLEMETADATA)
metadata_count = _read_int32(file)
if metadata_count < 0:
raise SBDFError("the number of elements is incorrect")
metadata = _Metadata()
for _ in range(metadata_count):
name = _read_string(file)
valtype = _ValueType.read(file)
value_present = _read_int8(file)
value = _SbdfObject.read(file, valtype) if value_present else None
value_present = _read_int8(file)
default_value = _SbdfObject.read(file, valtype) if value_present else None
metadata.add(name, value, default_value)
tmeta = _TableMetadata(metadata)
column_count = _read_int32(file)
metadata_count = _read_int32(file)
md_name = []
md_type = []
md_default = []
for i in range(metadata_count):
md_name.append(_read_string(file))
md_type.append(_ValueType.read(file))
value_present = _read_int8(file)
md_default.append(_SbdfObject.read(file, md_type[i]) if value_present else None)
for i in range(column_count):
metadata = _Metadata()
for j in range(metadata_count):
value_present = _read_int8(file)
if value_present:
value = _SbdfObject.read(file, md_type[j])
metadata.add(md_name[j], value, md_default[j])
tmeta.add(metadata)
return tmeta
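# Layout implied by _TableMetadata.write()/read(): a TABLEMETADATA section, the
# table-level entries (name, value type, optional value, optional default),
# then the column count, then the de-duplicated set of column-metadata names
# with their value types and default values, and finally one optional value per
# (column, metadata name) pair in the same sorted name order.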
# Metadata
class _Metadata:
def __init__(self) -> None:
"""creates an empty metadata structure"""
self.modifiable = True
self.names = []
self.values = []
self.default_values = []
def __repr__(self) -> str:
return f"<{_utils.type_name(type(self))} object: {self.names!r} -> {self.values!r}>"
def add_str(self, name: str, value: str, default_value: str = None) -> None:
"""adds a named string metadata value and default value to out"""
val = _SbdfObject(_ValueTypeId.STRING, [str(value)])
if default_value is not None:
default = _SbdfObject(_ValueTypeId.STRING, [str(default_value)])
else:
default = None
self.add(name, val, default)
def add_int(self, name: str, value: int, default_value: int = None) -> None:
"""adds a named integer metadata value and default value to out"""
val = _SbdfObject(_ValueTypeId.INT, [value])
if default_value is not None:
default = _SbdfObject(_ValueTypeId.INT, [default_value])
else:
default = None
self.add(name, val, default)
def add(self, name: str, value: '_SbdfObject', default_value: '_SbdfObject' = None) -> None:
"""adds a named metadata value and default value to out"""
if not self.modifiable:
raise SBDFError("the metadata is readonly and may not be modified")
if default_value is not None and value.valuetype != default_value.valuetype:
raise SBDFError("the valuetypes of the arguments must be equal")
if name in self.names:
raise SBDFError("the metadata with the given name already exists")
self.names.append(name)
self.values.append(value)
self.default_values.append(default_value)
def remove(self, name: str) -> None:
"""removes the named metadata value from out"""
if not self.modifiable:
raise SBDFError("the metadata is readonly and may not be modified")
if name in self.names:
index = self.names.index(name)
del self.names[index]
del self.values[index]
del self.default_values[index]
def get(self, name: str) -> typing.Optional['_SbdfObject']:
"""gets a copy of the named metadata value"""
if name in self.names:
index = self.names.index(name)
return self.values[index]
return None
def get_default(self, name: str) -> typing.Optional['_SbdfObject']:
"""gets a copy of the named default metadata value"""
if name in self.names:
index = self.names.index(name)
return self.default_values[index]
return None
def count(self) -> int:
"""returns the number of metadata entries pointed to by head"""
return len(self.names)
def exists(self, name: str) -> bool:
"""returns True if the named metadata exists. False is returned if the metadata doesn't exist"""
return name in self.names
def set_immutable(self) -> None:
"""sets the metadata immutable so that it may not be modified by subsequent operations"""
self.modifiable = False
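# Illustrative sketch (hypothetical names, not part of the original module):
#   meta = _Metadata()
#   meta.add_str("Description", "demo table")
#   meta.add_int("Version", 1)
#   meta.exists("Version")            # -> True
#   meta.get("Description").data[0]   # -> "demo table"
#   meta.set_immutable()              # further add()/remove() calls raise SBDFError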
# Objects
class _SbdfObject:
def __init__(self, valuetype: '_ValueTypeId', data: typing.List) -> None:
self.valuetype = valuetype
self.data = data
def __repr__(self) -> str:
return f"<{_utils.type_name(type(self))} object ({self.valuetype!r}): {self.data!r}>"
def get_count(self) -> int:
"""get the number of items in the object"""
return len(self.data)
def write_array(self, file: typing.BinaryIO) -> None:
"""writes the array information to the specified file. valuetype information is not written"""
count = len(self.data)
_write_int32(file, count)
self._write_n(file, count, True)
def write(self, file: typing.BinaryIO) -> None:
"""writes the object to the specified file. valuetype information is not written"""
self._write_n(file, 1, False)
# pylint: disable=too-many-branches
def _write_n(self, file: typing.BinaryIO, n: int, packed: bool) -> None:
valtype = _ValueType(self.valuetype)
if valtype.is_array():
byte_size = 0
# packed: no need to write 7bit packed int32
if packed:
if isinstance(self.data, pd.Series) and self.valuetype is not _ValueTypeId.BINARY:
barr = self.data.values.astype('U')
_write_int32(file, sum(_get_7bit_packed_length(len(s.encode("utf-8"))) +
len(s.encode("utf-8")) for s in barr))
for bstr in barr:
bstr = bstr.encode("utf-8")
_write_7bit_packed_int32(file, len(bstr))
if len(bstr):
_write_bytes(file, bstr)
else:
saved_bytes = []
for i in range(n):
saved_bytes.append(valtype.to_bytes(self.data[i]))
length = len(saved_bytes[i])
byte_size += _get_7bit_packed_length(length) + length
_write_int32(file, byte_size)
for i in range(n):
length = len(saved_bytes[i])
_write_7bit_packed_int32(file, length)
if length:
_write_bytes(file, saved_bytes[i])
else:
if isinstance(self.data, pd.Series):
barr = self.data.values.astype('S')
for bstr in barr:
_write_7bit_packed_int32(file, len(bstr))
if len(bstr):
_write_bytes(file, bstr)
else:
for i in range(n):
valtype_bytes = valtype.to_bytes(self.data[i])
length = len(valtype_bytes)
_write_int32(file, length)
if length:
_write_bytes(file, valtype_bytes)
else:
size = valtype.get_packed_size()
if size is None:
raise SBDFError("unknown typeid")
if isinstance(self.data, pd.Series) and isinstance(self.data.values, np.ndarray):
_write_bytes(file, self.data.values.tobytes())
else:
for i in range(n):
valtype_bytes = valtype.to_bytes(self.data[i])
_write_bytes(file, valtype_bytes)
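    # Layout notes for _write_n above: fixed-size types are written back to back
    # using their packed size; array ("dynamic") types written in packed mode get
    # an int32 total byte size followed by each element as a 7-bit packed length
    # plus its raw bytes (e.g. the UTF-8 string "ab" is stored as 02 61 62), while
    # a single dynamic value written via write() carries a plain int32 length.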
@classmethod
def read_array(cls, file: typing.BinaryIO, valuetype: '_ValueType') -> '_SbdfObject':
"""reads an array object with the given value type from the specified file"""
count = _read_int32(file)
return cls._read_n(file, count, valuetype, True)
@classmethod
def read(cls, file: typing.BinaryIO, valuetype: '_ValueType') -> '_SbdfObject':
"""reads an object with the given valuetype from the file."""
return cls._read_n(file, 1, valuetype, False)
@classmethod
def _read_n(cls, file: typing.BinaryIO, n: int, valuetype: '_ValueType', packed: bool) -> '_SbdfObject':
data = []
if valuetype.is_array():
# read byte size and ignore it
if packed:
_read_int32(file)
for _ in range(n):
if packed:
length = _read_7bit_packed_int32(file)
else:
length = _read_int32(file)
if length < 0:
raise SBDFError("the number of elements is incorrect")
dest = _read_bytes(file, length)
data.append(valuetype.to_python(dest))
else:
size = valuetype.get_packed_size()
if size is None:
raise SBDFError("unknown typeid")
for _ in range(n):
dest = _read_bytes(file, size)
data.append(valuetype.to_python(dest))
return cls(valuetype.type_id, data)
@classmethod
def skip_array(cls, file: typing.BinaryIO, valuetype: '_ValueType') -> None:
"""skips an array with the given valuetype"""
raise NotImplementedError # sbdf_obj_skip_arr
@classmethod
def skip(cls, file: typing.BinaryIO, valuetype: '_ValueType') -> None:
"""skips an object with the given valuetype"""
raise NotImplementedError # sbdf_obj_skip
# Value arrays
class _ValueArrayEncoding(enum.IntEnum):
PLAIN_ARRAY = 0x1
RUN_LENGTH = 0x2
BIT_ARRAY = 0x3
class _ValueArray:
def __init__(self, array_encoding: _ValueArrayEncoding, array: typing.Optional[_SbdfObject]) -> None:
"""creates a value array from the specified values"""
self.encoding = _ValueArrayEncoding(array_encoding)
self.val1 = None
self.obj1 = None
self.obj2 = None
self.valuetype = None
if array is None:
pass
elif array_encoding == _ValueArrayEncoding.PLAIN_ARRAY:
self._create_plain(array)
elif array_encoding == _ValueArrayEncoding.RUN_LENGTH:
self._create_rle(array)
elif array_encoding == _ValueArrayEncoding.BIT_ARRAY:
self._create_bit(array)
else:
raise SBDFError("unknown valuearray encoding")
def _create_plain(self, array: _SbdfObject) -> None:
self.valuetype = array.valuetype
self.obj1 = array
def _create_rle(self, array: _SbdfObject) -> None:
raise NotImplementedError # sbdf_va_create_rle
def _create_bit(self, array: _SbdfObject) -> None:
self.valuetype = _ValueTypeId.BOOL
self.val1 = array.get_count()
bits = bitstring.BitArray(array.data)
while bits.len % 8 != 0:
bits.append('0b0')
self.obj1 = _SbdfObject(_ValueTypeId.BINARY, [bits.bytes])
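    # _create_bit packs one bit per boolean, padding with zero bits up to a byte
    # boundary; val1 keeps the logical count so the padding can be ignored when
    # _get_bit() reads the values back.  e.g. [True, False, True] -> val1 = 3 and
    # a single byte 0b10100000 (0xA0).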
def __repr__(self) -> str:
if self.encoding == _ValueArrayEncoding.PLAIN_ARRAY:
arr = self.obj1
elif self.encoding == _ValueArrayEncoding.RUN_LENGTH:
arr = "..."
elif self.encoding == _ValueArrayEncoding.BIT_ARRAY:
arr = self.obj1.data[0]
else:
arr = "unknown encoding"
return f"<{_utils.type_name(type(self))} object ({self.encoding!r}): {arr!r}>"
def get_values(self) -> _SbdfObject:
"""extracts the values from the array"""
if self.encoding == _ValueArrayEncoding.PLAIN_ARRAY:
return self.obj1
if self.encoding == _ValueArrayEncoding.RUN_LENGTH:
return self._get_rle()
if self.encoding == _ValueArrayEncoding.BIT_ARRAY:
return self._get_bit()
raise SBDFError("unknown valuearray encoding")
def _get_rle(self) -> _SbdfObject:
raise NotImplementedError # sbdf_get_rle_values
def _get_bit(self) -> _SbdfObject:
obj = _SbdfObject(_ValueTypeId.BOOL, [])
bits = bitstring.BitArray(bytes=self.obj1.data[0], length=self.val1)
for i in bits:
obj.data.append(i)
return obj
def row_count(self) -> int:
"""returns the number of rows stored in the value array"""
if self.encoding == _ValueArrayEncoding.PLAIN_ARRAY:
return self.obj1.get_count()
if self.encoding == _ValueArrayEncoding.RUN_LENGTH:
return self.val1
if self.encoding == _ValueArrayEncoding.BIT_ARRAY:
return self.val1
raise SBDFError("unknown valuearray encoding")
def write(self, file: typing.BinaryIO) -> None:
"""writes the value array to the current file position"""
_write_int8(file, self.encoding)
_ValueType(self.valuetype).write(file)
if self.encoding == _ValueArrayEncoding.PLAIN_ARRAY:
self.obj1.write_array(file)
elif self.encoding == _ValueArrayEncoding.RUN_LENGTH:
_write_int32(file, self.val1)
self.obj1.write_array(file)
self.obj2.write_array(file)
elif self.encoding == _ValueArrayEncoding.BIT_ARRAY:
_write_int32(file, self.val1)
_write_bytes(file, self.obj1.data[0])
else:
raise SBDFError("unknown valuearray encoding")
@classmethod
def read(cls, file: typing.BinaryIO) -> '_ValueArray':
"""reads the value array from the current file position"""
encoding = _read_int8(file)
valtype = _ValueType.read(file)
handle = cls(_ValueArrayEncoding(encoding), None)
handle.valuetype = valtype
if encoding == _ValueArrayEncoding.PLAIN_ARRAY:
handle.obj1 = _SbdfObject.read_array(file, valtype)
elif encoding == _ValueArrayEncoding.RUN_LENGTH:
handle.val1 = _read_int32(file)
handle.obj1 = _SbdfObject.read_array(file, _ValueType(_ValueTypeId.INTERNAL_BYTE))
handle.obj2 = _SbdfObject.read_array(file, valtype)
elif encoding == _ValueArrayEncoding.BIT_ARRAY:
val = _read_int32(file)
handle.val1 = val
packed_size = val // 8 + (1 if val % 8 else 0)
bits = _read_bytes(file, packed_size)
handle.obj1 = _SbdfObject(_ValueTypeId.BINARY, [bits])
else:
raise SBDFError("unknown valuearray encoding")
return handle
@classmethod
def skip(cls, file: typing.BinaryIO) -> None:
"""skips the value array at the current file position"""
encoding = _read_int8(file)
valtype = _ValueType.read(file)
if encoding == _ValueArrayEncoding.PLAIN_ARRAY:
_SbdfObject.skip_array(file, valtype)
elif encoding == _ValueArrayEncoding.RUN_LENGTH:
_SbdfObject.skip_array(file, _ValueType(_ValueTypeId.INTERNAL_BYTE))
_SbdfObject.skip_array(file, valtype)
elif encoding == _ValueArrayEncoding.BIT_ARRAY:
val = _read_int32(file)
packed_size = val // 8 + (1 if val % 8 else 0)
file.seek(packed_size, 1)
else:
raise SBDFError("unknown valuearray encoding")
# Value types
class _ValueTypeId(enum.IntEnum):
UNKNOWN = 0x00
BOOL = 0x01 # C type is char
INT = 0x02 # C type is 32-bit int
LONG = 0x03 # C type is 64-bit int
FLOAT = 0x04 # C type is float
DOUBLE = 0x05 # C type is double
DATETIME = 0x06 # C representation is milliseconds since 01/01/01, 00:00:00, stored in a 64-bit int
DATE = 0x07 # C representation is milliseconds since 01/01/01, 00:00:00, stored in a 64-bit int
TIME = 0x08 # C representation is milliseconds since 01/01/01, 00:00:00, stored in a 64-bit int
TIMESPAN = 0x09 # C representation is milliseconds, stored in a 64-bit int
STRING = 0x0a # C representation is char-ptr
BINARY = 0x0c # C representation is void-ptr
DECIMAL = 0x0d # C representation is IEEE754 decimal128 Binary Integer Decimals
INTERNAL_BYTE = 0xfe
def to_typename_string(self) -> str:
"""convert this valuetype id to the type name used by Spotfire"""
return {
_ValueTypeId.BOOL: "Boolean",
_ValueTypeId.INT: "Integer",
_ValueTypeId.LONG: "LongInteger",
_ValueTypeId.FLOAT: "SingleReal",
_ValueTypeId.DOUBLE: "Real",
_ValueTypeId.DATETIME: "DateTime",
_ValueTypeId.DATE: "Date",
_ValueTypeId.TIME: "Time",
_ValueTypeId.TIMESPAN: "TimeSpan",
_ValueTypeId.STRING: "String",
_ValueTypeId.BINARY: "Binary",
_ValueTypeId.DECIMAL: "Currency"
}.get(self, "unknown")
def to_dtype_name(self) -> str:
"""convert this valuetype id to the dtype name used by Pandas"""
return {
_ValueTypeId.INT: "Int32",
_ValueTypeId.LONG: "Int64",
_ValueTypeId.FLOAT: "float32",
_ValueTypeId.DOUBLE: "float64"
}.get(self, "object")
@staticmethod
def infer_from_type(values, value_description: str) -> '_ValueTypeId':
"""determine the proper valuetype id from the Python types in a column"""
# Remove any None (or other none-ish things) from values
if isinstance(values, pd.Series):
vals = values.dropna().tolist()
else:
vals = [x for x in values if not pd.isnull(x)]
# Check if any values remain
if not vals:
raise SBDFError(f"cannot determine type for {value_description}; all values are missing")
# Check to make sure only one type remains
vals_type = type(vals[0])
if not all(isinstance(i, vals_type) for i in vals):
raise SBDFError(f"types in {value_description} do not match")
# Determine the right type id
typeid = {
bool: _ValueTypeId.BOOL,
np.int32: _ValueTypeId.INT,
int: _ValueTypeId.LONG,
np.int64: _ValueTypeId.LONG,
np.float32: _ValueTypeId.FLOAT,
float: _ValueTypeId.DOUBLE,
np.float64: _ValueTypeId.DOUBLE,
datetime.datetime: _ValueTypeId.DATETIME,
pd.Timestamp: _ValueTypeId.DATETIME,
datetime.date: _ValueTypeId.DATE,
datetime.time: _ValueTypeId.TIME,
datetime.timedelta: _ValueTypeId.TIMESPAN,
pd.Timedelta: _ValueTypeId.TIMESPAN,
str: _ValueTypeId.STRING,
bytes: _ValueTypeId.BINARY,
decimal.Decimal: _ValueTypeId.DECIMAL,
}.get(vals_type, None)
if typeid is None:
raise SBDFError(f"unknown type '{_utils.type_name(vals_type)}' in {value_description}")
return typeid
@staticmethod
def infer_from_dtype(series: pd.Series, series_description: str) -> '_ValueTypeId':
"""determine the proper valuetype id from the Pandas dtype of a series"""
dtype = series.dtype.name
if dtype == "object":
return _ValueTypeId.infer_from_type(series, series_description)
if dtype == "category":
return _ValueTypeId.infer_from_dtype(series.astype(series.cat.categories.dtype), series_description)
typeid = {
"bool": _ValueTypeId.BOOL,
"int32": _ValueTypeId.INT,
"Int32": _ValueTypeId.INT,
"int64": _ValueTypeId.LONG,
"Int64": _ValueTypeId.LONG,
"float32": _ValueTypeId.FLOAT,
"float64": _ValueTypeId.DOUBLE,
"datetime64[ns]": _ValueTypeId.DATETIME,
"timedelta64[ns]": _ValueTypeId.TIMESPAN,
"string": _ValueTypeId.STRING,
}.get(dtype, None)
if typeid is None:
raise SBDFError(f"unknown dtype '{dtype}' in {series_description}")
return typeid
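# Illustrative sketch (hypothetical series, not part of the original module):
#   _ValueTypeId.infer_from_dtype(pd.Series([1.5, 2.5]), "column 'x'")    # float64 -> DOUBLE
#   _ValueTypeId.infer_from_dtype(pd.Series([1, 2]), "column 'y'")        # int64   -> LONG
#   _ValueTypeId.infer_from_dtype(pd.Series(["a", None]), "column 'z'")   # object  -> infer_from_type -> STRING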
class _ValueType:
def __init__(self, type_id: int) -> None:
self.type_id = _ValueTypeId(type_id)
def __repr__(self) -> str:
return str(self.type_id)
def __cmp__(self, other: '_ValueType') -> int:
return self.type_id - other.type_id
def write(self, file: typing.BinaryIO) -> None:
"""writes a valuetype to the current file position"""
_write_int8(file, self.type_id)
@classmethod
def read(cls, file: typing.BinaryIO) -> '_ValueType':
"""reads a valuetype from the current file position"""
return cls(_read_int8(file))
def is_array(self) -> bool:
"""determines if this valuetype is an array type index"""
return self.type_id in (_ValueTypeId.STRING, _ValueTypeId.BINARY)
def get_packed_size(self) -> int:
"""returns the packed byte size (on disk) of a valuetype"""
return {
_ValueTypeId.BOOL: 1,
_ValueTypeId.INT: 4,
_ValueTypeId.LONG: 8,
_ValueTypeId.FLOAT: 4,
_ValueTypeId.DOUBLE: 8,
_ValueTypeId.DATETIME: 8,
_ValueTypeId.DATE: 8,
_ValueTypeId.TIME: 8,
_ValueTypeId.TIMESPAN: 8,
_ValueTypeId.STRING: 0, # size is dynamic
_ValueTypeId.BINARY: 0, # size is dynamic
_ValueTypeId.DECIMAL: 16
}.get(self.type_id, None)
_DATETIME_EPOCH = datetime.datetime(1, 1, 1)
_DECIMAL_EXPONENT_BIAS = 12320
@staticmethod
def _to_python_bool(data: bytes) -> bool:
return struct.unpack("?", data)[0]
@staticmethod
def _to_python_int(data: bytes) -> int:
return struct.unpack("<i", data)[0]
@staticmethod
def _to_python_long(data: bytes) -> int:
return struct.unpack("<q", data)[0]
@staticmethod
def _to_python_float(data: bytes) -> float:
return struct.unpack("<f", data)[0]
@staticmethod
def _to_python_double(data: bytes) -> float:
return struct.unpack("<d", data)[0]
@staticmethod
def _to_python_datetime(data: bytes) -> datetime.datetime:
timestamp = struct.unpack("<q", data)[0]
return _ValueType._DATETIME_EPOCH + datetime.timedelta(milliseconds=timestamp)
@staticmethod
def _to_python_date(data: bytes) -> datetime.date:
timestamp = struct.unpack("<q", data)[0]
date = _ValueType._DATETIME_EPOCH + datetime.timedelta(milliseconds=timestamp)
return date.date()
@staticmethod
def _to_python_time(data: bytes) -> datetime.time:
timestamp = struct.unpack("<q", data)[0]
date = _ValueType._DATETIME_EPOCH + datetime.timedelta(milliseconds=timestamp)
return date.timetz()
@staticmethod
def _to_python_timespan(data: bytes) -> datetime.timedelta:
timespan = struct.unpack("<q", data)[0]
return datetime.timedelta(milliseconds=timespan)
@staticmethod
def _to_python_string(data: bytes) -> str:
return data.decode("utf-8")
@staticmethod
def _to_python_binary(data: bytes) -> bytes:
return data
@staticmethod
def _to_python_decimal(data: bytes) -> decimal.Decimal:
bits = bitstring.BitArray(bytes=data)
# pylint: disable=unbalanced-tuple-unpacking
coefficient, biased_exponent_bits_high, sign_bit, biased_exponent_bits_low = \
bits.unpack('uintle:96,pad:17,bits:7,bool,bits:7')
# un-bias the exponent
biased_exponent_bits = bitstring.BitArray('0b00')
biased_exponent_bits.append(biased_exponent_bits_high)
biased_exponent_bits.append(biased_exponent_bits_low)
exponent = biased_exponent_bits.uintle - _ValueType._DECIMAL_EXPONENT_BIAS
# break up the coefficient into its digits
digits = []
while coefficient != 0:
digits.insert(0, coefficient % 10)
coefficient //= 10
# construct the decimal value
return decimal.Decimal((1 if sign_bit else 0, tuple(digits), exponent))
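    # _to_python_decimal above unpacks the 128-bit value as a 96-bit little-endian
    # coefficient, 17 padding bits, the high 7 exponent bits, the sign bit and the
    # low 7 exponent bits; the exponent is reassembled from the two 7-bit fields,
    # un-biased by _DECIMAL_EXPONENT_BIAS, and the coefficient digits are fed to
    # decimal.Decimal() together with the sign.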
def to_python(self, data: bytes) -> typing.Any:
"""return a Python representation of the raw data"""
return getattr(self, "_to_python_" + self.type_id.name.lower(), lambda x: None)(data)
@staticmethod
def _to_bytes_bool(obj: bool) -> bytes:
return struct.pack("?", obj)
@staticmethod
def _to_bytes_int(obj: int) -> bytes:
return struct.pack("<i", obj)
@staticmethod
def _to_bytes_long(obj: int) -> bytes:
return struct.pack("<q", obj)
@staticmethod
def _to_bytes_float(obj: float) -> bytes:
return struct.pack("<f", obj)
@staticmethod
def _to_bytes_double(obj: float) -> bytes:
return struct.pack("<d", obj)
@staticmethod
def _to_bytes_datetime(obj: datetime.datetime) -> bytes:
if isinstance(obj, pd.Timestamp):
obj_dt = obj.to_pydatetime()
else:
obj_dt = obj
td_after_epoch = obj_dt - _ValueType._DATETIME_EPOCH
timespan = int(td_after_epoch / datetime.timedelta(milliseconds=1))
return struct.pack("<q", timespan)
@staticmethod
def _to_bytes_date(obj: datetime.date) -> bytes:
td_after_epoch = obj - _ValueType._DATETIME_EPOCH.date()
timespan = int(td_after_epoch / datetime.timedelta(milliseconds=1))
return struct.pack("<q", timespan)
@staticmethod
def _to_bytes_time(obj: datetime.time) -> bytes:
obj_td = datetime.datetime.combine(datetime.datetime.min, obj) - datetime.datetime.min
timestamp = obj_td // datetime.timedelta(milliseconds=1)
return struct.pack("<q", timestamp)
@staticmethod
def _to_bytes_timespan(obj: datetime.timedelta) -> bytes:
if isinstance(obj, pd.Timedelta):
obj_td = obj.to_pytimedelta()
        elif pd.isnull(obj):
import os
import time
import math
import json
import hashlib
import datetime
import pandas as pd
import numpy as np
from run_pyspark import PySparkMgr
graph_type = "loan_agent/"
def make_md5(x):
md5 = hashlib.md5()
md5.update(x.encode('utf-8'))
return md5.hexdigest()
def make_node_schema(entity_name, entity_df, comp_index_properties = None, mix_index_properties = None):
properties = {"propertyKeys": []}
for col in entity_df.columns:
        if np.issubdtype(entity_df[col].dtype, np.floating):
prop = {"name": col, "dataType": "Float", "cardinality": "SINGLE"}
        elif np.issubdtype(entity_df[col].dtype, np.integer):
prop = {"name": col, "dataType": "Integer", "cardinality": "SINGLE"}
else:
prop = {"name": col, "dataType": "String", "cardinality": "SINGLE"}
properties["propertyKeys"].append(prop)
vertexLabels = {"vertexLabels": []}
vertexLabels["vertexLabels"].append({"name": entity_name})
vertexIndexes = {"vertexIndexes": []}
if comp_index_properties is not None:
for prop in comp_index_properties:
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_" + prop + "_comp",
"propertyKeys" : [ prop ],
"composite" : True,
"unique" : False
})
if mix_index_properties is not None:
for prop in mix_index_properties:
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_" + prop + "_mixed",
"propertyKeys" : [ prop ],
"composite" : False,
"unique" : False,
"mixedIndex" : "search"
})
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_graph_label_mixed",
"propertyKeys" : [ "graph_label" ],
"composite" : False,
"unique" : False,
"mixedIndex" : "search"
})
return {**properties, **vertexLabels, **vertexIndexes}
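# Illustrative sketch (toy DataFrame, not part of the original script):
#   person_df = pd.DataFrame({"name": ["a"], "age": [30]})
#   make_node_schema("person", person_df, comp_index_properties=["name"])
# returns a dict with "propertyKeys" typed from the dtypes (name -> String,
# age -> Integer), one vertexLabel "person", a composite index
# "person_name_comp" and the always-added mixed index "person_graph_label_mixed".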
def make_node_mapper(entity_name, entity_df):
entity_file = "gra_" + entity_name + ".csv"
vertexMap = {"vertexMap": {entity_file: {}}}
vertexMap["vertexMap"][entity_file] = {
"[VertexLabel]" : entity_name
}
for col in entity_df.columns:
vertexMap["vertexMap"][entity_file][col] = col
return vertexMap
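# For the same toy "person" frame, make_node_mapper("person", person_df) yields
#   {"vertexMap": {"gra_person.csv": {"[VertexLabel]": "person",
#                                     "name": "name", "age": "age"}}}
# i.e. every column of the entity CSV is mapped onto a graph property of the
# same name.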
def make_vertex_centric_schema(edge_name, index_property, direction, order):
if direction not in ["BOTH", "IN", "OUT"]:
print("direction should be in {}".format(["BOTH", "IN", "OUT"]))
return None
if order not in ["incr", "decr"]:
print("order should be in {}".format(["incr", "decr"]))
return None
vertexCentricIndexes = {"vertexCentricIndexes": []}
vertexCentricIndexes["vertexIndexes"].append({
"name" : edge_name + "_" + index_property,
"edge" : edge_name,
"propertyKeys" : [ index_property ],
"order": order,
"direction": direction
})
return vertexCentricIndexes
def make_edge_schema(relation_df = None, relation_comp_index_properties = None, relation_mix_index_properties = None):
properties = {"propertyKeys": []}
relation_columns = relation_df.columns.tolist()
if "Left" not in relation_columns or "Right" not in relation_columns:
print("relation df lacks Left and Right columns ")
for col in relation_df.columns:
if col in ["Left", "Right", "Type"]:
continue
        if np.issubdtype(relation_df[col].dtype, np.floating):
prop = {"name": col, "dataType": "Float", "cardinality": "SINGLE"}
        elif np.issubdtype(relation_df[col].dtype, np.integer):
prop = {"name": col, "dataType": "Integer", "cardinality": "SINGLE"}
else:
prop = {"name": col, "dataType": "String", "cardinality": "SINGLE"}
properties["propertyKeys"].append(prop)
relation_names = relation_df["Type"].value_counts().index.tolist()
edgeLabels = {"edgeLabels": []}
for relation in relation_names:
edgeLabels["edgeLabels"].append({
"name": relation,
"multiplicity": "MULTI",
"unidirected": False
})
edgeIndexes = {"edgeIndexes": []}
for relation_name in relation_names:
if relation_comp_index_properties is not None:
for prop in relation_comp_index_properties:
edgeIndexes["edgeIndexes"].append({
"name": relation_name + "_" + prop + "_comp",
"propertyKeys": [ prop ],
"composite": True,
"unique": False,
"indexOnly": relation_name
})
if relation_mix_index_properties is not None:
for prop in relation_mix_index_properties:
edgeIndexes["edgeIndexes"].append({
"name" : relation_name + "_" + prop + "_mixed",
"propertyKeys": [ prop ],
"composite": False,
"unique": False,
"mixedIndex": "search",
"indexOnly": relation_name
})
return {**properties, **edgeLabels, **edgeIndexes}
def make_edge_mapper(entity_relations, relation_df=None, specific_relation=None):
edgeMap = {"edgeMap": {}}
for relation_name, entity_pairs in entity_relations.items():
if specific_relation is not None and relation_name != specific_relation:
continue
for pair in entity_pairs:
relation_file = "gra_" + relation_name + ".csv"
edge = {"[edge_left]": {"Left": pair[0]},
"[EdgeLabel]": relation_name,
"[edge_right]": {"Right": pair[1]}}
if relation_df is not None:
relation_columns = relation_df.columns.tolist()
if "Left" not in relation_columns or "Right" not in relation_columns:
print("relation df lacks Left and Right columns ")
for col in relation_df.columns:
if col in ["Left", "Right", "Type"]:
continue
edge[col] = col
edgeMap["edgeMap"][relation_file] = edge
return edgeMap
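# Illustrative sketch (hypothetical relation, not part of the original script):
#   entity_relations = {"apply_for": [("person", "loan")]}
#   make_edge_mapper(entity_relations)
# -> {"edgeMap": {"gra_apply_for.csv": {"[edge_left]": {"Left": "person"},
#                                       "[EdgeLabel]": "apply_for",
#                                       "[edge_right]": {"Right": "loan"}}}}
# Any relation_df columns other than Left/Right/Type are copied through as
# edge properties.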
def dump_schema(schema, datamapper, folder):
if not os.path.exists(graph_type + folder):
os.makedirs(graph_type + folder)
f = open(graph_type + folder + "/schema.json", 'w')
f.write(json.dumps(schema))
f.close()
f = open(graph_type + folder + "/datamapper.json", 'w')
f.write(json.dumps(datamapper))
f.close()
spark_args = {}
pysparkmgr = PySparkMgr(spark_args)
_, spark, sc = pysparkmgr.start('xubin.xu')
# Credit (quota) application table
apply_loan_df = spark.sql("select * from adm.adm_credit_apply_quota_doc").toPandas()
# Loan drawdown (disbursement) table
zhiyong_loan_df = spark.sql("select * from adm.adm_credit_loan_apply_doc").toPandas()
zhiyong_loan_df.quota_apply_id = zhiyong_loan_df.quota_apply_id.astype("int")
# Overdue table
overdue_sql = """select
*
from adm.adm_credit_apply_quota_doc t1
--overdue join: one customer can have several applications at different times, and each application can carry a different overdue status
--current overdue days and historical maximum overdue days
left join
(
select
quota_apply_id,
max(overdue_days_now) as overdue_days_now,
max(his_max_overdue_days) as his_max_overdue_days
from
(
select
c4.quota_apply_id,
c3.overdue_days_now,
c3.his_max_overdue_days
from
adm.adm_credit_loan_apply_doc c4
left join
(
select
c2.business_id,
max(overdue_days_now) as overdue_days_now,
max(overdue_day_calc) as his_max_overdue_days
from
(
select
c1.*,
(case when (overdue_day_calc>0 and latest_actual_repay_date is not null) then 0 else overdue_day_calc end) as overdue_days_now
FROM adm.adm_credit_rpt_risk_overdue_bill c1
) c2
group by c2.business_id
) c3
on c4.loan_no=c3.business_id
) c5
group by quota_apply_id
) t4
on t1.quota_apply_id=t4.quota_apply_id
--first-payment overdue (FPD) days: current FPD days and historical maximum FPD days ----------------------------------------------------------
left join
(
select
quota_apply_id,
max(fpd) as fpd,
max(fpd_ever) as fpd_ever
from
(
select
a1.*,a2.*
from
adm.adm_credit_loan_apply_doc a1
left join
(
select
c1.business_id,
(case when (overdue_day_calc>0 and latest_actual_repay_date is null) then overdue_day_calc else 0 end) as fpd,--current first-payment overdue days
c1.overdue_day_calc as fpd_ever--historical first-payment overdue days
from
adm.adm_credit_rpt_risk_overdue_bill c1
where periods=1
) a2
on a1.loan_no=a2.business_id
) a3
group by quota_apply_id
) t5
on t1.quota_apply_id=t5.quota_apply_id"""
overday_df = spark.sql(overdue_sql).toPandas()
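# overday_df now holds one row per credit application (adm_credit_apply_quota_doc)
# with, where available, the current overdue days, the historical maximum overdue
# days and the current/historical first-payment overdue metrics (fpd / fpd_ever),
# all joined on quota_apply_id.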
# Build the borrower entity
def make_borrower_entity():
shouxin_zhiyong_df = pd.merge(apply_loan_df, zhiyong_loan_df[
["quota_apply_id", "apply_id", "apply_status_risk", "loan_status", "loan_amount", "repayment_principal"]],
how='left', on='quota_apply_id')
borrower_basic_df = shouxin_zhiyong_df[
["name", "uus_id", "employee_no", "identity_no", "sex", "age", "zociac", "educate_level", "marital_status",
"city", "access_role", "entry_date",
"resign_date", "on_job_status", "current_working_days", "uc_job_level_name", "store_city", "apply_id",
"team_code", "shop_code", "area_code", "marketing_code", "region_code"]]
borrower = shouxin_zhiyong_df.groupby("identity_no")
borrower_ext_df = pd.DataFrame([], columns=["identity_no", "累计贷款笔数", "未结清贷款笔数", "累计贷款金额", "当前贷款余额"])
idx = 0
for group, df in borrower:
loans_cnt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功")].apply_id.count()
unclosed_loans_cnt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功") & (
df.loan_status == "REPAYING")].apply_id.count()
loans_amt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功")].loan_amount_y.sum()
unpayed_amt = loans_amt - df[
(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功")].repayment_principal.sum()
borrower_ext_df.loc[idx] = {"identity_no": group, "累计贷款笔数": loans_cnt, "未结清贷款笔数": unclosed_loans_cnt,
"累计贷款金额": loans_amt, "当前贷款余额": unpayed_amt}
idx += 1
borrower_basic_df.drop_duplicates(borrower_basic_df.columns, keep='first', inplace=True)
borrower_entity_df = pd.merge(borrower_basic_df, borrower_ext_df, on="identity_no")
borrower_entity_df = borrower_entity_df.fillna(0)
overday_gp = overday_df[(~pd.isnull(overday_df.overdue_days_now))].groupby("identity_no")["overdue_days_now"].max()
overday_now_df = pd.DataFrame({"identity_no": overday_gp.index, "overdue_days_now": overday_gp.values})
borrower_entity_df = pd.merge(borrower_entity_df, overday_now_df, how="left", on="identity_no")
    his_overday_gp = overday_df[(~pd.isnull(overday_df.his_max_overdue_days))].groupby("identity_no")["his_max_overdue_days"].max()
#!/usr/bin/env python
"""Generate kmer specific, position specific, and modification specific accuracy results"""
########################################################################
# File: plot_multiple_variant_accuracy.py
# executable: plot_multiple_variant_accuracy.py
#
# Author: <NAME>
# History: Created 04/02/20
########################################################################
# py3helpers
from py3helpers.seq_tools import ReferenceHandler
from py3helpers.classification import ClassificationMetrics
from py3helpers.utils import load_json, create_dot_dict, list_dir, merge_lists
# other libs
import pandas as pd
import numpy as np
# std libs
import os
import sys
import platform
import shutil
from argparse import ArgumentParser
from timeit import default_timer as timer
# matplotlib
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
if platform.system() == "Darwin":
mpl.use("macosx")
def parse_args():
parser = ArgumentParser(description=__doc__)
# required arguments
parser.add_argument('--config', '-c', action='store',
dest='config', required=True, type=str, default=None,
help="Path to json config file")
args = parser.parse_args()
return args
def load_positions_file(path_to_positions_file):
assert os.path.exists(path_to_positions_file), "Path to positions file does not exist: {}".format(
path_to_positions_file)
return pd.read_csv(path_to_positions_file, sep="\t", names=["contig", "reference_index", "strand", "base", "label"])
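# Illustrative sketch (hypothetical file contents): the positions file is a
# header-less TSV with one labelled genomic position per line, e.g.
#   chr1    100    +    C    E
# which load_positions_file() reads into the columns
# [contig, reference_index, strand, base, label].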
def load_sa2bed_variant_data(path_to_variant_data):
assert os.path.exists(path_to_variant_data), "Path to variant data does not exist: {}".format(path_to_variant_data)
return pd.read_csv(path_to_variant_data)
def get_prob_and_label(variants):
# create probability columns
variant_strings = list(set(variants["variants"]))
assert len(variant_strings) > 0, "No variant data passed into function get_prob_and_label"
n_variants = len(variant_strings[0])
lengths = [len(x) for x in variant_strings]
if len(variant_strings) > 1:
assert np.sum(lengths) == lengths[0] * len(lengths), "All modifications must have the " \
"same number of possible variants"
prob = variants[["prob"+str(x+1) for x in range(n_variants)]].rename(
columns={"prob" + str(i + 1): value for i, value in enumerate([str(i) for i in range(n_variants)])})
label = variants[["prob"+str(x+1)+"_label" for x in range(n_variants)]].rename(
columns={"prob" + str(i + 1)+"_label": value for i, value in enumerate([str(i) for i in range(n_variants)])})
else:
prob = variants[["prob"+str(x+1) for x in range(n_variants)]].rename(
columns={"prob" + str(i + 1): value for i, value in enumerate(variants["variants"].iloc[0])})
label = variants[["prob"+str(x+1)+"_label" for x in range(n_variants)]].rename(
columns={"prob" + str(i + 1)+"_label": value for i, value in enumerate(variants["variants"].iloc[0])})
label_ids = ["_".join([str(y) for y in x]) for x in zip(variants["read_id"],
variants["contig"],
variants["reference_index"],
variants["strand"])]
return label, prob, label_ids
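# get_prob_and_label() renames the probN / probN_label columns to the variant
# characters (e.g. prob1/prob2 -> "C"/"E" when the variants string is "CE") and
# builds one id per called site of the form
# "<read_id>_<contig>_<reference_index>_<strand>".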
def create_master_table(positions, variants):
n_variants = len(variants.columns)-5
label_data = variants[["prob"+str(x+1) for x in range(n_variants)]] * 0
label_data.columns = ["prob"+str(x+1)+"_label" for x in range(n_variants)]
variants2 = pd.concat([variants, label_data], axis=1)
labelled_dfs = []
pd.set_option('mode.chained_assignment', None)
for x, y in variants2.groupby(['contig', 'reference_index', "strand", "variants"], as_index=False):
label_row = positions[(positions['contig'] == x[0])
& (positions['reference_index'] == x[1])
& (positions['strand'] == x[2])]
if len(label_row) == 0:
continue
first_row = y.iloc[0]
label = label_row.iloc[0]["label"]
index = first_row["variants"].find(label)
assert index != -1, "Variant label is not in variants at this position. Check model file and positions file"
y.loc[:, "prob"+str(index+1)+"_label"] = 1
labelled_dfs.append(y)
    complete_table = pd.concat(labelled_dfs)
    return complete_table
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas.compat import (
pa_version_under2p0,
pa_version_under4p0,
)
from pandas.errors import PerformanceWarning
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
isna,
)
import pandas._testing as tm
@pytest.mark.parametrize("pattern", [0, True, Series(["foo", "bar"])])
def test_startswith_endswith_non_str_patterns(pattern):
# GH3485
ser = Series(["foo", "bar"])
msg = f"expected a string object, not {type(pattern).__name__}"
with pytest.raises(TypeError, match=msg):
ser.str.startswith(pattern)
with pytest.raises(TypeError, match=msg):
ser.str.endswith(pattern)
def assert_series_or_index_equal(left, right):
if isinstance(left, Series):
tm.assert_series_equal(left, right)
else: # Index
tm.assert_index_equal(left, right)
def test_iter():
# GH3638
strs = "google", "wikimedia", "wikipedia", "wikitravel"
ser = Series(strs)
with tm.assert_produces_warning(FutureWarning):
for s in ser.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ser.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, str) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == "l"
def test_iter_empty(any_string_dtype):
ser = Series([], dtype=any_string_dtype)
i, s = 100, 1
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(any_string_dtype):
ser = Series(["a"], dtype=any_string_dtype)
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
assert not i
tm.assert_series_equal(ser, s)
def test_iter_object_try_string():
ser = Series(
[
slice(None, np.random.randint(10), np.random.randint(10, 20))
for _ in range(4)
]
)
i, s = 100, "h"
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
assert i == 100
assert s == "h"
# test integer/float dtypes (inferred by constructor) and mixed
def test_count(any_string_dtype):
ser = Series(["foo", "foofoo", np.nan, "foooofooofommmfoo"], dtype=any_string_dtype)
result = ser.str.count("f[o]+")
expected_dtype = np.float64 if any_string_dtype == "object" else "Int64"
expected = Series([1, 2, np.nan, 4], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_count_mixed_object():
ser = Series(
["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],
dtype=object,
)
result = ser.str.count("a")
expected = Series([1, np.nan, 0, np.nan, np.nan, 0, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_repeat(any_string_dtype):
ser = Series(["a", "b", np.nan, "c", np.nan, "d"], dtype=any_string_dtype)
result = ser.str.repeat(3)
expected = Series(
["aaa", "bbb", np.nan, "ccc", np.nan, "ddd"], dtype=any_string_dtype
)
tm.assert_series_equal(result, expected)
result = ser.str.repeat([1, 2, 3, 4, 5, 6])
expected = Series(
["a", "bb", np.nan, "cccc", np.nan, "dddddd"], dtype=any_string_dtype
)
tm.assert_series_equal(result, expected)
def test_repeat_mixed_object():
ser = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])
result = ser.str.repeat(3)
expected = Series(
["aaa", np.nan, "bbb", np.nan, np.nan, "foofoofoo", np.nan, np.nan, np.nan]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg, repeat", [[None, 4], ["b", None]])
def test_repeat_with_null(any_string_dtype, arg, repeat):
# GH: 31632
ser = Series(["a", arg], dtype=any_string_dtype)
result = ser.str.repeat([3, repeat])
expected = Series(["aaa", np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
def test_empty_str_methods(any_string_dtype):
empty_str = empty = Series(dtype=any_string_dtype)
if any_string_dtype == "object":
empty_int = Series(dtype="int64")
empty_bool = Series(dtype=bool)
else:
empty_int = Series(dtype="Int64")
empty_bool = Series(dtype="boolean")
empty_object = Series(dtype=object)
empty_bytes = Series(dtype=object)
empty_df = DataFrame()
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert "" == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.contains("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.startswith("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.endswith("a"))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.replace("a", "b"))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.match("^a"))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=any_string_dtype),
empty.str.extract("()", expand=True),
)
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=any_string_dtype),
empty.str.extract("()()", expand=True),
)
tm.assert_series_equal(empty_str, empty.str.extract("()", expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=any_string_dtype),
empty.str.extract("()()", expand=False),
)
tm.assert_frame_equal(empty_df, empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(""))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_object, empty_str.str.findall("a"))
tm.assert_series_equal(empty_int, empty.str.find("a"))
tm.assert_series_equal(empty_int, empty.str.rfind("a"))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_object, empty.str.split("a"))
tm.assert_series_equal(empty_object, empty.str.rsplit("a"))
tm.assert_series_equal(empty_object, empty.str.partition("a", expand=False))
tm.assert_frame_equal(empty_df, empty.str.partition("a"))
tm.assert_series_equal(empty_object, empty.str.rpartition("a", expand=False))
tm.assert_frame_equal(empty_df, empty.str.rpartition("a"))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.strip())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.lstrip())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_object, empty_bytes.str.decode("ascii"))
tm.assert_series_equal(empty_bytes, empty.str.encode("ascii"))
# ismethods should always return boolean (GH 29624)
tm.assert_series_equal(empty_bool, empty.str.isalnum())
tm.assert_series_equal(empty_bool, empty.str.isalpha())
tm.assert_series_equal(empty_bool, empty.str.isdigit())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under2p0,
):
tm.assert_series_equal(empty_bool, empty.str.isspace())
tm.assert_series_equal(empty_bool, empty.str.islower())
tm.assert_series_equal(empty_bool, empty.str.isupper())
tm.assert_series_equal(empty_bool, empty.str.istitle())
tm.assert_series_equal(empty_bool, empty.str.isnumeric())
tm.assert_series_equal(empty_bool, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize("NFC"))
table = str.maketrans("a", "b")
tm.assert_series_equal(empty_str, empty.str.translate(table))
@pytest.mark.parametrize(
"method, expected",
[
("isalnum", [True, True, True, True, True, False, True, True, False, False]),
("isalpha", [True, True, True, False, False, False, True, False, False, False]),
(
"isdigit",
[False, False, False, True, False, False, False, True, False, False],
),
(
"isnumeric",
[False, False, False, True, False, False, False, True, False, False],
),
(
"isspace",
[False, False, False, False, False, False, False, False, False, True],
),
(
"islower",
[False, True, False, False, False, False, False, False, False, False],
),
(
"isupper",
[True, False, False, False, True, False, True, False, False, False],
),
(
"istitle",
[True, False, True, False, True, False, False, False, False, False],
),
],
)
def test_ismethods(method, expected, any_string_dtype):
ser = Series(
["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", " "], dtype=any_string_dtype
)
expected_dtype = "bool" if any_string_dtype == "object" else "boolean"
expected = Series(expected, dtype=expected_dtype)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]"
and pa_version_under2p0
and method == "isspace",
):
result = getattr(ser.str, method)()
tm.assert_series_equal(result, expected)
# compare with standard library
expected = [getattr(item, method)() for item in ser]
assert list(result) == expected
@pytest.mark.parametrize(
"method, expected",
[
("isnumeric", [False, True, True, False, True, True, False]),
("isdecimal", [False, True, False, False, False, True, False]),
],
)
def test_isnumeric_unicode(method, expected, any_string_dtype):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 Em 3
ser = Series(["A", "3", "¼", "★", "፸", "3", "four"], dtype=any_string_dtype)
expected_dtype = "bool" if any_string_dtype == "object" else "boolean"
expected = Series(expected, dtype=expected_dtype)
result = getattr(ser.str, method)()
tm.assert_series_equal(result, expected)
# compare with standard library
expected = [getattr(item, method)() for item in ser]
assert list(result) == expected
@pytest.mark.parametrize(
"method, expected",
[
("isnumeric", [False, np.nan, True, False, np.nan, True, False]),
("isdecimal", [False, np.nan, False, False, np.nan, True, False]),
],
)
def test_isnumeric_unicode_missing(method, expected, any_string_dtype):
values = ["A", np.nan, "¼", "★", np.nan, "3", "four"]
ser = Series(values, dtype=any_string_dtype)
expected_dtype = "object" if any_string_dtype == "object" else "boolean"
expected = Series(expected, dtype=expected_dtype)
result = getattr(ser.str, method)()
tm.assert_series_equal(result, expected)
def test_split_join_roundtrip(any_string_dtype):
ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = ser.str.split("_").str.join("_")
expected = ser.astype(object)
tm.assert_series_equal(result, expected)
def test_split_join_roundtrip_mixed_object():
ser = Series(
["a_b", np.nan, "asdf_cas_asdf", True, datetime.today(), "foo", None, 1, 2.0]
)
result = ser.str.split("_").str.join("_")
expected = Series(
["a_b", np.nan, "asdf_cas_asdf", np.nan, np.nan, "foo", np.nan, np.nan, np.nan]
)
tm.assert_series_equal(result, expected)
def test_len(any_string_dtype):
ser = Series(
["foo", "fooo", "fooooo", np.nan, "fooooooo", "foo\n", "あ"],
dtype=any_string_dtype,
)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
result = ser.str.len()
expected_dtype = "float64" if any_string_dtype == "object" else "Int64"
expected = Series([3, 4, 6, np.nan, 8, 4, 1], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_len_mixed():
ser = Series(
["a_b", np.nan, "asdf_cas_asdf", True, datetime.today(), "foo", None, 1, 2.0]
)
result = ser.str.len()
expected = Series([3, np.nan, 13, np.nan, np.nan, 3, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method,sub,start,end,expected",
[
("index", "EF", None, None, [4, 3, 1, 0]),
("rindex", "EF", None, None, [4, 5, 7, 4]),
("index", "EF", 3, None, [4, 3, 7, 4]),
("rindex", "EF", 3, None, [4, 5, 7, 4]),
("index", "E", 4, 8, [4, 5, 7, 4]),
("rindex", "E", 0, 5, [4, 3, 1, 4]),
],
)
def test_index(method, sub, start, end, index_or_series, any_string_dtype, expected):
obj = index_or_series(
["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype
)
expected_dtype = np.int64 if any_string_dtype == "object" else "Int64"
expected = index_or_series(expected, dtype=expected_dtype)
result = getattr(obj.str, method)(sub, start, end)
if index_or_series is Series:
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
# compare with standard library
expected = [getattr(item, method)(sub, start, end) for item in obj]
assert list(result) == expected
def test_index_not_found_raises(index_or_series, any_string_dtype):
obj = index_or_series(
["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype
)
with pytest.raises(ValueError, match="substring not found"):
obj.str.index("DE")
@pytest.mark.parametrize("method", ["index", "rindex"])
def test_index_wrong_type_raises(index_or_series, any_string_dtype, method):
obj = index_or_series([], dtype=any_string_dtype)
msg = "expected a string object, not int"
with pytest.raises(TypeError, match=msg):
getattr(obj.str, method)(0)
@pytest.mark.parametrize(
"method, exp",
[
["index", [1, 1, 0]],
["rindex", [3, 1, 2]],
],
)
def test_index_missing(any_string_dtype, method, exp):
ser = Series(["abcb", "ab", "bcbe", np.nan], dtype=any_string_dtype)
expected_dtype = np.float64 if any_string_dtype == "object" else "Int64"
result = getattr(ser.str, method)("b")
expected = Series(exp + [np.nan], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_pipe_failures(any_string_dtype):
# #2119
ser = Series(["A|B|C"], dtype=any_string_dtype)
result = ser.str.split("|")
expected = Series([["A", "B", "C"]], dtype=object)
tm.assert_series_equal(result, expected)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
result = ser.str.replace("|", " ", regex=False)
expected = Series(["A B C"], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"start, stop, step, expected",
[
(2, 5, None, ["foo", "bar", np.nan, "baz"]),
(0, 3, -1, ["", "", np.nan, ""]),
(None, None, -1, ["owtoofaa", "owtrabaa", np.nan, "xuqzabaa"]),
(3, 10, 2, ["oto", "ato", np.nan, "aqx"]),
(3, 0, -1, ["ofa", "aba", np.nan, "aba"]),
],
)
def test_slice(start, stop, step, expected, any_string_dtype):
ser = Series(["aafootwo", "aabartwo", np.nan, "aabazqux"], dtype=any_string_dtype)
result = ser.str.slice(start, stop, step)
expected = Series(expected, dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"start, stop, step, expected",
[
(2, 5, None, ["foo", np.nan, "bar", np.nan, np.nan, np.nan, np.nan, np.nan]),
(4, 1, -1, ["oof", np.nan, "rab", np.nan, np.nan, np.nan, np.nan, np.nan]),
],
)
def test_slice_mixed_object(start, stop, step, expected):
ser = Series(["aafootwo", np.nan, "aabartwo", True, datetime.today(), None, 1, 2.0])
result = ser.str.slice(start, stop, step)
expected = Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"start,stop,repl,expected",
[
(2, 3, None, ["shrt", "a it longer", "evnlongerthanthat", "", np.nan]),
(2, 3, "z", ["shzrt", "a zit longer", "evznlongerthanthat", "z", np.nan]),
(2, 2, "z", ["shzort", "a zbit longer", "evzenlongerthanthat", "z", np.nan]),
(2, 1, "z", ["shzort", "a zbit longer", "evzenlongerthanthat", "z", np.nan]),
(-1, None, "z", ["shorz", "a bit longez", "evenlongerthanthaz", "z", np.nan]),
(None, -2, "z", ["zrt", "zer", "zat", "z", np.nan]),
(6, 8, "z", ["shortz", "a bit znger", "evenlozerthanthat", "z", np.nan]),
(-10, 3, "z", ["zrt", "a zit longer", "evenlongzerthanthat", "z", np.nan]),
],
)
def test_slice_replace(start, stop, repl, expected, any_string_dtype):
ser = Series(
["short", "a bit longer", "evenlongerthanthat", "", np.nan],
dtype=any_string_dtype,
)
expected = Series(expected, dtype=any_string_dtype)
result = ser.str.slice_replace(start, stop, repl)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
["strip", ["aa", "bb", np.nan, "cc"]],
["lstrip", ["aa ", "bb \n", np.nan, "cc "]],
["rstrip", [" aa", " bb", np.nan, "cc"]],
],
)
def test_strip_lstrip_rstrip(any_string_dtype, method, exp):
ser = Series([" aa ", " bb \n", np.nan, "cc "], dtype=any_string_dtype)
result = getattr(ser.str, method)()
expected = Series(exp, dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
["strip", ["aa", np.nan, "bb"]],
["lstrip", ["aa ", np.nan, "bb \t\n"]],
["rstrip", [" aa", np.nan, " bb"]],
],
)
def test_strip_lstrip_rstrip_mixed_object(method, exp):
ser = Series([" aa ", np.nan, " bb \t\n", True, datetime.today(), None, 1, 2.0])
result = getattr(ser.str, method)()
expected = Series(exp + [np.nan, np.nan, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
["strip", ["ABC", " BNSD", "LDFJH "]],
["lstrip", ["ABCxx", " BNSD", "LDFJH xx"]],
["rstrip", ["xxABC", "xx BNSD", "LDFJH "]],
],
)
def test_strip_lstrip_rstrip_args(any_string_dtype, method, exp):
ser = Series(["xxABCxx", "xx BNSD", "LDFJH xx"], dtype=any_string_dtype)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
result = getattr(ser.str, method)("x")
expected = Series(exp, dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"prefix, expected", [("a", ["b", " b c", "bc"]), ("ab", ["", "a b c", "bc"])]
)
def test_removeprefix(any_string_dtype, prefix, expected):
ser = Series(["ab", "a b c", "bc"], dtype=any_string_dtype)
result = ser.str.removeprefix(prefix)
ser_expected = Series(expected, dtype=any_string_dtype)
tm.assert_series_equal(result, ser_expected)
@pytest.mark.parametrize(
"suffix, expected", [("c", ["ab", "a b ", "b"]), ("bc", ["ab", "a b c", ""])]
)
def test_removesuffix(any_string_dtype, suffix, expected):
ser = Series(["ab", "a b c", "bc"], dtype=any_string_dtype)
result = ser.str.removesuffix(suffix)
ser_expected = Series(expected, dtype=any_string_dtype)
    tm.assert_series_equal(result, ser_expected)
#%% [markdown]
## Project Name: covid_misinformation
### Program Name: CoronaV_Trends.py
### Purpose: To download google trends data related to coronavirus.
##### Date Created: Apr 8th 2020
####
# Pytrends Documentation:https://github.com/GeneralMills/pytrends
#%% [markdown]
from IPython import get_ipython
get_ipython().magic('reset -sf')
import datetime
from datetime import datetime as dt
from datetime import date
import os
import pathlib
import colorlover as cl
import plotly.graph_objs as go
import chart_studio.plotly as py
import plotly.express as px
import pandas as pd
from pytrends.request import TrendReq
APP_PATH = str(pathlib.Path(__file__).parent.resolve())
pytrends = TrendReq(hl='en-US', tz=360, retries=2, backoff_factor=0.1)
#%% [markdown]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Bat soup theory~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
today=datetime.date(2020,4,26)
search_time='2020-01-01 '+str(today)
searches_bat=[
'bat soup',
'coronavirus bat soup',
'china bat soup',
'chinese bat soup',
'wuhan bat soup',
'bat soup virus',
]
groupkeywords = list(zip(*[iter(searches_bat)]*1))
groupkeywords = [list(x) for x in groupkeywords]
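# The zip(*[iter(searches_bat)]*1) idiom chunks the search terms into groups of
# one keyword each, so every pytrends payload below compares a single term
# (Google Trends accepts at most five terms per comparison request).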
# Download search interest of bat key words
dicti = {}
i = 1
for trending in groupkeywords:
pytrends.build_payload(
trending,
timeframe = search_time,
)
dicti[i] = pytrends.interest_over_time()
i+=1
result = pd.concat(dicti, axis=1)
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
                return PeriodIndex(data, copy=copy, name=name, **kwargs)
# -*- coding: utf-8 -*-
"""
Created on Tue May 29 12:32:16 2018
@author: jonatha.costa
"""
import requests
from bs4 import BeautifulSoup
import pandas as pd
import datetime
import re
import numpy as np
from readability.readability import Document
import time
def get_date(k):
soup = BeautifulSoup(k,'lxml')
date = soup.findAll("span",{'class':'date-display-single'})[0]
date = date.text
try:
date = datetime.datetime.strptime(date, "%d/%m/%Y").strftime('%Y-%m-%d')
except ValueError:
date = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S").strftime('%Y-%m-%d')
return(date)
def get_manchete(k):
soup = BeautifulSoup(k, 'lxml')
#manchete = soup.findAll('h1',{'class':'content-head__title'})
manchete = soup.findAll('h1',{'property':'na:headline'})
try:
manchete_ok = manchete[0].text
except IndexError:
page_content = Document(k)
manchete_ok = page_content.title()
return(manchete_ok)
def boilerpipe_api_article_extract(k):
soup = BeautifulSoup(k, 'lxml')
text = soup.find_all('p')
texto = ""
for news in range(len(text)):
#print('concatenate part '+ str(news) + ' of ' + str(len(text)))
aux = text[news].text
texto = texto + aux
return(texto)
url = 'http://blogdoibre.fgv.br/home?page='
page = 0
df_links = pd.DataFrame(columns = ["links_brutos"])
url_extract = url + str(page)
r = requests.get(url_extract)
while(r.status_code == 200 and page < 27):
print("get page:" + str(page))
url_extract = url + str(page)
r = requests.get(url_extract)
soup = BeautifulSoup(r.content, 'lxml')
teste = soup.findAll('a')
time.sleep(1)
for i in range(len(teste)):
if('http://blogdoibre.fgv.br/posts/' in teste[i].attrs['href'] and '?page=' not in teste[i].attrs['href']):
df_links = df_links.append({'links_brutos': teste[i].attrs['href']},ignore_index=True)
page = page + 1
df_links = df_links.drop_duplicates()
df_links = df_links.reset_index(drop=True )
df_html = pd.DataFrame(columns = ["html"])
for i in range(len(df_links)):
print("get html:" + str(i) + ' of ' + str(len(df_links)))
r = requests.get(df_links['links_brutos'][i])
time.sleep(2)
df_html = df_html.append({'html': r.content},ignore_index=True)
df_links = pd.concat([df_links, df_html], axis=1)
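# Hedged sketch (not in the original fragment): apply the parsers defined above
# to every downloaded page; the column names 'date', 'manchete' and 'texto' are
# illustrative choices, not taken from the source.
# parsed = pd.DataFrame({
#     'date': df_links['html'].apply(get_date),
#     'manchete': df_links['html'].apply(get_manchete),
#     'texto': df_links['html'].apply(boilerpipe_api_article_extract),
# })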
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # data visualization library
import matplotlib.pyplot as plt
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score,confusion_matrix
from sklearn.metrics import accuracy_score
import time
from subprocess import check_output
data = pd.read_csv('/home/pborovska/Downloads/data.csv')
data.head()
col = data.columns
print(col)
y = data.diagnosis
drop_cols = ['Unnamed: 32', 'id', 'diagnosis']  # columns not used as features
x = data.drop(drop_cols, axis=1)
x.head()
ax = sns.countplot(y,label="Count")
B, M = y.value_counts()
print('Number of Benign: ',B)
print('Number of Malignant : ',M)
print(x.describe())
data_dia = y
data = x
data_n_2 = (data - data.mean()) / (data.std())
data = pd.concat([y,data_n_2.iloc[:,0:10]],axis=1)
data = pd.melt(data,id_vars="diagnosis",
var_name="features",
value_name='value')
plt.figure(figsize=(10,10))
sns.violinplot(x="features", y="value", hue="diagnosis", data=data,split=True, inner="quart")
plt.xticks(rotation=90)
data = pd.concat([y,data_n_2.iloc[:,10:20]],axis=1)
data = pd.melt(data,id_vars="diagnosis",
var_name="features",
value_name='value')
plt.figure(figsize=(10,10))
sns.violinplot(x="features", y="value", hue="diagnosis", data=data,split=True, inner="quart")
plt.xticks(rotation=90)
data = pd.concat([y,data_n_2.iloc[:,20:31]],axis=1)
data = pd.melt(data,id_vars="diagnosis",
var_name="features",
value_name='value')
plt.figure(figsize=(10,10))
sns.violinplot(x="features", y="value", hue="diagnosis", data=data,split=True, inner="quart")
plt.xticks(rotation=90)
plt.figure(figsize=(10,10))
sns.boxplot(x="features", y="value", hue="diagnosis", data=data)
plt.xticks(rotation=90)
sns.jointplot(x.loc[:,'concavity_worst'], x.loc[:,'concave points_worst'], kind="reg", color="#ce1414")
sns.set(style="white")
df = x.loc[:,['radius_worst','perimeter_worst','area_worst']]
g = sns.PairGrid(df, diag_sharey=False)
g.map_lower(sns.kdeplot, cmap="Blues_d")
g.map_upper(plt.scatter)
g.map_diag(sns.kdeplot, lw=3)
sns.set(style="whitegrid", palette="muted")
data_dia = y
data = x
data_n_2 = (data - data.mean()) / (data.std())
data = pd.concat([y,data_n_2.iloc[:,0:10]],axis=1)
data = pd.melt(data,id_vars="diagnosis",
var_name="features",
value_name='value')
plt.figure(figsize=(10,10))
tic = time.time()
sns.swarmplot(x="features", y="value", hue="diagnosis", data=data)
plt.xticks(rotation=90)
data = pd.concat([y,data_n_2.iloc[:,10:20]],axis=1)
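# Hedged continuation (assumption; the source fragment is truncated here): the
# melt + swarmplot pattern used for the first ten features is presumably
# repeated for this second block.
# data = pd.melt(data, id_vars="diagnosis", var_name="features", value_name='value')
# plt.figure(figsize=(10,10))
# sns.swarmplot(x="features", y="value", hue="diagnosis", data=data)
# plt.xticks(rotation=90)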
from __future__ import division
from __future__ import print_function
import math
import numpy
import pandas
from scipy.stats.stats import pearsonr
from sklearn.neighbors import NearestNeighbors
from commonutils import map_on_cluster, check_sample_weight
__author__ = '<NAME>'
# About
# This module contains procedures to generate Toy Monte-Carlo
# by using modified SMOTE approach
# TODO test whether we really need to symmetrize, in the other case everything can be simplified
# TODO use classifier-based measure of quality
# TODO random state
def count_probabilities(primary_weights, secondary_weights, knn):
"""Computes probabilities of all points to be chosen as the second point in pair
:type primary_weights: numpy.array, shape = [n_samples],
the first event is generated according to these weights
:type secondary_weights: numpy.array, shape = [n_samples],
the second event is chosen between knn of first according to this weights
:type knn: dict, {event_id: list of neighbors ids}.
:rtype: numpy.array, shape = [n_samples], the probabilities
"""
size = len(knn)
primary_weights /= numpy.sum(primary_weights)
secondary_weights = numpy.array(secondary_weights)
probabilities = numpy.zeros(size, dtype=float)
for index, neighbours in knn.iteritems():
knn_weights = numpy.take(secondary_weights, neighbours)
knn_proba = knn_weights / numpy.sum(knn_weights) * primary_weights[index]
probabilities[neighbours] += knn_proba
return probabilities
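# Hedged usage sketch (not part of the module): with three points that are all
# mutual neighbours and uniform weights, every point is equally likely to be
# picked as the second point of a pair.
# toy_knn = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
# count_probabilities(numpy.ones(3), numpy.ones(3), toy_knn)  # -> array([1/3, 1/3, 1/3])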
def generate_toymc(data, size, knn=4, symmetrize=True, power=2.0, reweighting_iterations=5, sample_weight=None):
"""Generates toy Monte-Carlo, the dataset with distribution very close to the original one
:type data: numpy.array | pandas.DataFrame, the original distribution
:type size: int, the number of events to generate
:type knn: int | None, how many neighbours should we consider
:type symmetrize: bool, if symmetrize==True, knn will be computed in symmetric way: if a in knn of b,
then b in knn of a, this helps to fight covariance shrinking
:type power: float, instead of uniform distribution, makes the point to tend to one of initial points
(the greater, the closer to first point)
:type reweighting_iterations: int, an iterative algorithm is used, which changes the probabilities
so that all the points have equal probability to be chosen as neighbour
:rtype: (pandas.DataFrame, int), returns the generated toymc and the number of events
that were copied from original data set.
"""
data = pandas.DataFrame(data)
input_length = len(data)
sample_weight = check_sample_weight(data, sample_weight=sample_weight)
sample_weight /= numpy.sum(sample_weight)
if input_length <= 2:
# unable to generate new events with only one-two given
return data, len(data)
if knn is None:
knn = int(math.pow(input_length, 0.33) / 2)
knn = max(knn, 2)
knn = min(knn, 25)
knn = min(knn, input_length)
assert knn > 0, "knn should be positive"
# generating knn
neighbors_helper = NearestNeighbors(n_neighbors=knn, algorithm='ball_tree', )
neighbors_helper.fit(data)
neighbours = neighbors_helper.kneighbors(data, return_distance=False)
two_side_neighbours = {}
for i, neighbours_i in enumerate(neighbours):
two_side_neighbours[i] = list(neighbours_i[1:])
if symmetrize:
# symmetrization goes here
for i in range(len(neighbours)):
for n in neighbours[i]:
two_side_neighbours[n].append(i)
# removing duplicates in neighbors
old_neighbours = two_side_neighbours
two_side_neighbours = {}
for i, neighbours_i in old_neighbours.iteritems():
two_side_neighbours[i] = numpy.unique(neighbours_i)
secondary_weights = numpy.ones(len(neighbours), dtype=float)
for _ in range(reweighting_iterations):
probabilities = count_probabilities(sample_weight, secondary_weights, two_side_neighbours)
secondary_weights *= ((sample_weight / probabilities) ** 0.5)
# generating indices and weights
k_1 = numpy.random.choice(input_length, p=sample_weight, size=size)
# randint(0, input_length, size)
t_1 = 0.6 * numpy.random.random(size) ** power
t_2 = 1. - t_1
k_2 = numpy.zeros(size, dtype=int)
for i in range(size):
neighs = two_side_neighbours[k_1[i]]
neigh_weights = numpy.take(secondary_weights, neighs)
neigh_weights /= numpy.sum(neigh_weights)
# selected_neigh = getRandom(neighs, weights)
k_2[i] = numpy.random.choice(neighs, p=neigh_weights)
numpied_df = data.values
first = numpy.multiply(t_1[:, numpy.newaxis], numpied_df[k_1, :])
second = numpy.multiply(t_2[:, numpy.newaxis], numpied_df[k_2, :])
return pandas.DataFrame(numpy.add(first, second), columns=data.columns), 0
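# Hedged usage sketch (illustrative, not part of the module): draw a toy MC
# sample twice the size of a random 2-D dataset.
# original = pandas.DataFrame(numpy.random.normal(size=(500, 2)), columns=['x', 'y'])
# toy, n_copied = generate_toymc(original, size=1000, knn=5)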
def prepare_toymc(group, clustering_features, stayed_features, size_factor):
"""This procedure prepares one block of data, written specially for parallel execution
:type group: pandas.grouping = (group_key, group_data), the data used to generate monte-carlo
:type clustering_features: the features that were used to split data
:type stayed_features: the names of other features, needed to reconstruct
:type size_factor: float, the size of generated toymc is about size_factor * len(group)
:rtype: (pandas.DataFrame, int)
"""
group_values, df = group
toymc_part, n_copied = generate_toymc(df[stayed_features], int(len(df) * size_factor), knn=None)
for i, col in enumerate(clustering_features):
toymc_part[col] = group_values[i]
return toymc_part, n_copied
def generate_toymc_with_special_features(data, size, clustering_features=None, integer_features=None,
ipc_profile=None):
"""Generate the toymc.
:type data: numpy.array | pandas.DataFrame, from which data is generated
:type size: int, how many events to generate
:type clustering_features: the events with different values of this feature can not be mixed together.
For instance: is_signal, number of jets / muons.
:type integer_features: this features are treated as usual,
but after toymc is generated, they are rounded to the closest integer value
:type ipc_profile: toymc can be generated on the cluster,
provided there is at least one clustering feature
:rtype: pandas.DataFrame with result,
all the columns should be the same as in input
"""
if integer_features is None:
integer_features = []
if clustering_features is None:
clustering_features = []
stayed_features = [col for col in data.columns if col not in clustering_features]
size_factor = float(size) / len(data)
copied_groups = 0
if len(clustering_features) == 0:
result, copied = generate_toymc(data, size=size, knn=None)
else:
grouped = data.groupby(clustering_features)
print("Generating ...")
n_groups = len(grouped)
results = map_on_cluster(ipc_profile, prepare_toymc, grouped, [clustering_features] * n_groups,
[stayed_features] * n_groups, [size_factor] * n_groups)
toymc_parts, copied_list = zip(*results)
copied = numpy.sum(copied_list)
copied_groups = numpy.sum(numpy.array(copied_list) != 0)
result = pandas.concat(toymc_parts)
for col in integer_features:
result[col] = result[col].astype(numpy.int)
if copied > 0:
print("Copied %i events in %i groups from original file. Totally generated %i rows " %
(copied, copied_groups, len(result)))
return result
# def compare_covariance_3d(data, toy_data, n_features=6):
# import pylab
# data_cov = numpy.cov(data.T)[:n_features,:n_features]
# toy_cov = numpy.cov(toy_data.T)[:n_features,:n_features]
#
# pylab.figure(figsize=(12, 5))
# assert data_cov.shape == toy_cov.shape, "different size of matrices"
# vars1, vars2 = data_cov.shape
# x, y = range(vars1), range(vars2)
# X, Y = numpy.meshgrid(x, y)
# X = X.flatten()
# Y = Y.flatten()
# Z_min = numpy.zeros_like(X)
# Z_max_left = data_cov.flatten()
# Z_max_right = toy_cov.flatten()
#
# maximal_cov = max(numpy.max(toy_cov), numpy.max(data_cov))
#
# pylab.subplot(121, projection='3d')
# pylab.bar3d(X - 0.5, Y - 0.5, Z_min, 1, 1, Z_max_left, color='b', zsort='average')
# pylab.zlim(0, maximal_cov)
# pylab.title("Original MC")
# pylab.view_init(35, 225 + 30)
#
# pylab.subplot(122, projection='3d')
# pylab.bar3d(X - 0.5, Y - 0.5, Z_min, 1, 1, Z_max_right, color='b', zsort='average')
# pylab.zlim(0, maximal_cov)
# pylab.title("Toy MC")
# pylab.view_init(35, 225 + 30)
# pylab.show()
def test_on_dataframe(df, excluded_features=None, clustering_features=None, integer_features=None):
"""Prints comparison of distributions: original one and toymc.
:type excluded_features: features we absolutely don't take into account
:type clustering_features: list | None, very close to integer ones, usually have some bool or integer values,
but events with different values in these columns should not be mixed together
example: 'isSignal', number of muons
:type integer_features: list | None, features that have some discrete values, but events can be mixed together
if they have different values in these columns, the result will be integer
example: some weight of event, which should be integer due to technical restrictions
"""
from IPython.display import display_html
import pylab
if excluded_features is None:
excluded_features = []
else:
print("\nEXCLUDED columns:\n", list(excluded_features))
selected_columns = [col for col in df.columns if col not in excluded_features]
print("\nSTAYED columns:\n", selected_columns)
if integer_features is None:
integer_features = []
else:
print("\nINTEGER columns:\n", list(integer_features))
if clustering_features is None:
clustering_features = []
else:
print("\nCLUSTERING columns:\n", list(clustering_features))
data = df[selected_columns]
toy_data = generate_toymc_with_special_features(data, len(data), clustering_features=clustering_features,
integer_features=integer_features)
numpy.set_printoptions(precision=4, suppress=True)
n_cols = 3
n_rows = (len(data.columns) + n_cols - 1) // n_cols
pylab.figure(figsize=(18, 5 * n_rows))
for i, column in enumerate(data.columns):
pylab.subplot(n_rows, n_cols, i + 1)
pylab.title(column)
pylab.hist([data[column], toy_data[column]], histtype='step', bins=20)
pylab.show()
print("\nMEANS AND STD")
mean_index = []
mean_rows = []
for column in data.columns:
mean_index.append(column)
mean1 = numpy.mean(data[column])
mean2 = numpy.mean(toy_data[column])
std1 = numpy.std(data[column])
std2 = numpy.std(toy_data[column])
mean_rows.append([mean1, mean2, mean2 - mean1, abs((mean1-mean2) * 100. / mean1),
std1, std2, std2 - std1, abs((std2 - std1) * 100. / std1)])
    display_html(pandas.DataFrame(
        mean_rows, index=mean_index,
        columns=['mean orig', 'mean toy', 'difference', 'error, %',
                 'std orig', 'std toy', 'difference', 'error, %']))
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
        tm.assert_series_equal(result, exp)
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
from multiprocessing.dummy import Pool as ThreadPool
import os
import time
import pandas as pd
import nltk
import numpy as np
import re
import spacy
from sklearn.feature_extraction.text import CountVectorizer
import progressbar as bar
import extractUnique as xq
import tristream_processor as stream
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier # 248
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score
#_start = time.time()
testB = pd.read_csv("CSV/Restaurants_Test_Data_phaseB.csv")
trainB = pd.read_csv("CSV/Restaurants_Train_v2.csv")
trainB_1 = trainB.iloc[:, [0, 7, 5]]
testB_1 = testB.iloc[:, [0, 5, 4]]
del testB
fullB = pd.concat([trainB_1, testB_1], axis=0, ignore_index=True)
dataset = fullB # MAJOR DATA-SET
# --------------------- FUNCTIONS --------------------------
def check_dep_parse(token_dep):
dep_str = token_dep
# if dep_str.startswith('nsub'):
# pass
# elif dep_str.startswith('amod'):
# pass
# elif dep_str.startswith('rcmod'):
# pass
# elif dep_str.startswith('dobj'):
# pass
# elif dep_str.startswith('neg'):
# pass
if dep_str.startswith('det'):
pass
else:
return False
return True
def streamers(full_dataset):
dataset = full_dataset
# --------------------- STREAM INITIALIZER ----------------------------
PoS_Tag_sent = list()
S1_corpus = [] # CORPUS (For Collecting Lemmas)
corpora = '' # CORPORA (For Collecting Corpora of single sentence)
S2_super_corpus = [] # CORPUS (For Collecting Bigrams sentence wise)
# --------------------- SPACY SPECS ------------------------
nlp_en = spacy.load('en_core_web_sm')
plot_nlp = 0 # For Plotting of Dependency chart
S3_dep_corpus = [] # CORPUS (For Collecting Dependency Relations)
# ---------------------------------------------------------- STREAM 1 - LEMMATIZATION
stream1 = stream.lemmatize(dataset)
# ----------------------------------------------------------- STREAM 2 - BIGRAMS
stream2 = stream.bigram(dataset)
# ----------------------------------------------------------- STREAM 3 - DEPENDENCY FEATURES (spaCy)
stream3 = stream.dep_rel(dataset)
stream1.to_csv('Wave2/stream1.csv', index=False)
stream2.to_csv('Wave2/stream2.csv', index=False)
stream3.to_csv('Wave2/stream3.csv', index=False)
del S1_corpus, S2_super_corpus, S3_dep_corpus
return stream1, stream2, stream3
def sheet_generator(s1, s2, s3):
stream1 = s1
stream2 = s2
stream3 = s3
df = pd.concat([stream1, stream2, stream3], axis=1)
df = df.rename(columns={0: 'lemmas', 1: 'bigrams', 2: 'depenrel'})
df.to_csv('Wave2/FeatureSet.csv', index=False)
df = pd.read_csv('Wave2/FeatureSet.csv', sep=',')
del df
# try:
# pool = ThreadPool(2)
# pool.map(os.system('firefox localhost:5000 &'), spacy.displacy.serve(plot_nlp, style='dep')).join()
# exit(0)
# except OSError:
# print("Browser must start with Graph. If doesn't please make sure to use Ubuntu with Firefox")
# except TypeError:
# print("Browser must start with Graph. If doesn't please make sure to use Ubuntu with Firefox")
# Get Unique Features from Bi-grams, Dependency Rel
whole_df = pd.concat([dataset.iloc[0:, 0], stream1, stream2, stream3, dataset.iloc[0:, 2]], axis=1)
whole_df = whole_df.rename(columns={'text': 'reviews', 0: 'lemmas', 1: 'bigrams', 2: 'depenrel',
'aspectCategories/aspectCategory/0/_category': 'aspectCategory'})
whole_df.to_csv('Wave2/WholeSet.csv', index=False)
whole_df = pd.read_csv('Wave2/WholeSet.csv', sep=',')
u_feat = list()
try:
u_feat = xq.unique(whole_df=whole_df, bigram_col=2, dep_rel_col=3)
print("Unique Features Extracted")
except KeyboardInterrupt:
print("[STAGE 3] Manual Interrupt to Unique Features")
exit(0)
except Exception as e:
print('[STAGE 3] Improper Termination due to:', e)
exit(0)
# DF with Review, Lemmas, U_feat, Aspect Cat
Feature_df = whole_df[['reviews', 'lemmas']][0:]
Feature_df = pd.concat([Feature_df, pd.Series(u_feat), whole_df.iloc[0:, -1]], axis=1)
Feature_df = Feature_df.rename(columns={0: 'ufeat'})
Feature_df.to_csv('Wave2/Feature.csv', index=False)
del whole_df,
# Aspect Cat, Lemmas + U_feat (from All sentences)
c_list = list()
try:
Feature_df = Feature_df.dropna()
c_list = xq.combiner(Feature_df=Feature_df, lemma_col=1, uniqueFeat_col=2, use_ast=True)
except KeyboardInterrupt:
print("[STAGE 4] Manual Interrupt to Combiner")
exit(0)
except Exception as e:
print("[STAGE 4] Improper Termination due to:", e)
exit(0)
return Feature_df, c_list
def corrector(combined_features_list):
c_list = combined_features_list
ngram_list = list()
try:
st = time.time()
ngram_list = xq.get_correct_spell(word_list=c_list, split_by=';')
#syn_list = stream.syns_of_ngrams(ngram_list)
#ngram_list+=syn_list
et = time.time()
print('Time elapsed %.3f' % float(((et-st)/60)/60))
except ValueError:
print("[STAGE 5] Spell Checker | Interrupted")
except TypeError:
print("[STAGE 5] Spell Checker | Multi-threading issue")
except AttributeError:
print("[STAGE 5] Spell Checker | Attrition")
except KeyboardInterrupt:
print("[STAGE 5] Spell Checker | Forced Drop")
pd.Series(ngram_list).to_csv('Wave2/ngram_list.csv', index=False)
return ngram_list
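# Hedged usage sketch (illustrative): chain the steps defined above on the
# restaurant review dataset loaded at the top of the script.
# s1, s2, s3 = streamers(dataset)
# Feature_df, combined = sheet_generator(s1, s2, s3)
# ngram_list = corrector(combined)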
# Creating Bag of Words Model
def creating_bow(corrected_list, features_dataframe, max_features=33433):
ngram_list = list(corrected_list)
Feature_df = features_dataframe
max_ft = max_features
cv = CountVectorizer(max_features=max_ft, ngram_range=(1, 2))
# key_Book = pd.DataFrame(itemDict, index=range(itemDict.__len__()))
# key_Book.to_csv('key_Book.csv', index=True, sep=',')
# ============================== Preparing Train set =============================
# ML with Bag of Words to Aspect Categories
X = cv.fit_transform(ngram_list).toarray()
y = Feature_df['aspectCategory']
del ngram_list
return X, y, cv.vocabulary_
def split_train_test(X, y):
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
return X_train, X_test, y_train, y_test
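# Hedged usage sketch (illustrative): build the bag-of-words matrix from the
# corrected n-grams and split it for model training.
# X, y, vocab = creating_bow(ngram_list, Feature_df)
# X_train, X_test, y_train, y_test = split_train_test(X, y)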
def evaluator(prf, li2, total):
li = ['Precision', 'Recall\t', 'F1 Measure']
print("EVALUATION RESULTS".center(60,'_'))
cmx = [[73.6, 81.3, 90.9, 89.9, 92.2, 87.5],
[66.1, 70.5, 83.3, 95.2, 89.0, 80.3],
[69.6, 75.5, 86.9, 92.4, 90.5, 83.5]]
print('\t\t %s %.8s \t %s \t %s \t %s %s' % (li2[0], li2[1], li2[2], li2[3], li2[4], li2[5]))
for i in range(len(prf) - 1):
x = prf[i] * 100.0
y = cmx[i]
print('%s \t %r \t\t %r \t %r \t %r \t %r \t %r' % (li[i], x[0] >= y[0], x[1] >= y[1], x[2] >= y[2],
x[3] >= y[3], x[4] >= y[4], total[i] >= y[5]))
def prf_to_csv(prf, fileName):
PRF = np.array(prf)
    PRF_DF = pd.DataFrame(PRF, index=['Precision', 'Recall', 'F1 Measure', 'Support'])
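    # The source fragment ends here; presumably the frame is then written out,
    # e.g. (assumption, not in the original): PRF_DF.to_csv(fileName)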
import pandas as pd
import joblib
import numpy as np
def permutation(data, all_features, stores, d_store_id, baseline_rmse, dict_error, FOLDER_MODEL):
''' Simulate the impact on RMSE if we remove each feature once at a time'''
# All validation predition
df_validall = pd.DataFrame()
# Validation Set
valid = data[(data['d']>=1914) & (data['d']<1942)][['id','d','sold']]
# Validation Prediction
valid_set = valid['sold']
for col in all_features:
        # Validation + Prediction for all stores by step
        df_validpred = pd.DataFrame()
from __future__ import division
import matplotlib
matplotlib.use('TkAgg')
import multiprocessing as mp
import itertools
import numpy as np
from scipy import interpolate
from pylab import flipud
import pandas as pd
try:
from pandas import Categorical
except ImportError:
from pandas.core.categorical import Categorical
import re
from collections import defaultdict
from multiflexxlib import plotting
from multiflexxlib import ub
from multiflexxlib.ub import UBMatrix, etok, ktoe, angle_to_q
import pyclipper
import matplotlib.pyplot as plt
import matplotlib.patches as mpl_patches
import matplotlib.path as mpl_path
from matplotlib.collections import PatchCollection
from matplotlib.colors import LogNorm
from matplotlib.widgets import Button
from mpl_toolkits.axisartist import Subplot
from mpl_toolkits.axisartist.grid_helper_curvelinear import GridHelperCurveLinear
import pickle
import sys
import os
import pkg_resources
from multiflexxlib._version import __version__
try:
import tkinter
from tkinter import filedialog
except ImportError:
import Tkinter as tkinter
import tkFileDialog as filedialog
import logging
logger = logging.getLogger()
logger.setLevel('INFO')
logger.addHandler(logging.StreamHandler(sys.stdout))
BIN_ADAPTIVE = 'adaptive'
BIN_REGULAR = 'regular'
NUM_CHANNELS = 31
EF_LIST = [2.5, 3.0, 3.5, 4.0, 4.5]
CHANNEL_SEPARATION = 2.5
NORM_FACTOR = [1.0, 1.16, 1.23, 1.30, 1.27]
# Apeture angle correction
try:
DETECTOR_WORKING = np.loadtxt(pkg_resources.resource_filename(__name__, 'res/alive.csv'))
except IOError:
print('Dead detector map not found - assuming all working.')
DETECTOR_WORKING = np.ones([NUM_CHANNELS, len(EF_LIST)])
try:
WEIGHTS = np.loadtxt(pkg_resources.resource_filename(__name__, 'res/weights.csv'), delimiter=',')
except IOError:
print('Boundary angle channel strategy not defined - assuming equal weights.')
WEIGHTS = np.ones([NUM_CHANNELS, len(EF_LIST)])
try:
INTENSITY_COEFFICIENT = np.loadtxt(pkg_resources.resource_filename(__name__, 'res/int_corr.csv'), delimiter=',')
except IOError:
print('Intensity correction matrix not found - assuming all ones.')
INTENSITY_COEFFICIENT = np.ones([NUM_CHANNELS, len(EF_LIST)])
# TODO: do something with this abomination
INTENSITY_COEFFICIENT = INTENSITY_COEFFICIENT / NORM_FACTOR
def _nan_float(string):
try:
return float(string)
except ValueError:
if '*' in string:
return np.NaN
else:
raise
def _nan_int(string):
try:
return int(string)
except ValueError:
if '*' in string:
return np.NaN
else:
raise
def _extract_ki_from_header(en, fx, kfix):
e_fix = ktoe(kfix)
if fx == 2:
ei = e_fix + en
return etok(ei)
elif fx == 1:
ei = e_fix - en
return etok(ei)
else:
raise ValueError('Invalid FX value: 2 for fix kf, 1 for fix ki, got %d' % fx)
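# Worked example (illustrative numbers): with KFIX = 1.55 A^-1 (E_fix ~ 5.0 meV),
# FX = 2 (fixed kf) and an energy transfer EN = 1.0 meV, the incident energy is
# Ei = E_fix + EN ~ 6.0 meV, so the function returns etok(6.0) ~ 1.70 A^-1.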
def _number_to_scan(num):
if isinstance(num, int):
return '{:06d}'.format(num)
else:
return num
def _parse_flatcone_line(line):
data = np.array([_nan_int(x) for x in line.split()])
array = np.reshape(data, (-1, len(EF_LIST)))[0: -1, :] # throws out last line which is only artifact
ang_channels = np.asarray([np.arange(1, NUM_CHANNELS + 1)]).T # starts at 1 to match stickers
array_with_ch_no = np.hstack([ang_channels, array])
dataframe_flatcone = pd.DataFrame(data=array_with_ch_no, columns=['aCh', 'e1', 'e2', 'e3', 'e4', 'e5'])
dataframe_flatcone.set_index('aCh', inplace=True)
return dataframe_flatcone
def _parse_param_line(line):
line_name = line[0:5]
line_body = line[6:].strip()
if line_name == 'COMND':
no_points = int(re.findall('(?<=NP)[\s\t0-9]*', line_body)[0].strip())
return line_name, {'value': line_body, 'NP': no_points}
elif '=' not in line_body:
return line_name, line_body
else:
equations = line_body.split(',')
line_dict = {}
for eq in equations:
param_name, value_raw = [x.strip() for x in eq.split('=')]
try:
value = _nan_float(value_raw)
except ValueError:
value = value_raw
line_dict[param_name] = value
return line_name, line_dict
def parse_ill_data(file_object, start_flag='DATA_:\n'):
"""
Parses ILL TASMAD scan files.
    :param file_object: Handle to an opened file or stream, or alternatively a path to the scan file.
:param start_flag: Start flag of data section. Omit for default.
:return: (header_dict, dataframe)
"""
# first parse headers
try:
file_object.seek(0, 0)
except AttributeError:
file_object = open(file_object, 'r')
text_data = file_object.read()
headers = re.findall('^[A-Z_]{5}:.*', text_data, re.MULTILINE)
header_dict = defaultdict(dict)
for line in headers:
line_name, line_body = _parse_param_line(line)
if type(line_body) is dict:
header_dict[line_name].update(line_body)
else:
header_dict[line_name].update({'value': line_body})
# then parse scan parameters and counts
data_section = text_data[text_data.find(start_flag) + len(start_flag) + 1:]
column_names = data_section.splitlines()[0].split()
# line only w 0-9, . -, spc, tab
parameters_text_lines = re.findall('^[0-9*\-\s\t.]+?$', data_section, re.MULTILINE)
parameters_value_array = np.asarray([[_nan_float(num) for num in line.split()] for line in parameters_text_lines])
data_frame = pd.DataFrame(data=parameters_value_array, columns=column_names)
data_frame['PNT'] = data_frame['PNT'].astype('int16')
df_clean = data_frame.T.drop_duplicates().T
# parse flatcone data if present
flat_all = re.findall('(?<=flat: )[0-9w\s\t\n*]+(?=endflat)', text_data, re.MULTILINE)
flat_number_lines = len(flat_all)
if len(df_clean) == 0:
        raise ValueError('file %s does not contain any data.' % file_object.name)
if len(df_clean) - flat_number_lines <= 1: # sanity check: only 1 missing flatcone line is acceptable
flat_frames = []
for nth, line in enumerate(flat_all):
try:
flat_frames.append(_parse_flatcone_line(line))
except ValueError:
raise ValueError('point %d in file %s is faulty.' % (nth + 1, file_object.name))
if len(df_clean) - flat_number_lines == 1:
df_clean.drop(df_clean.index[-1], inplace=True) # if only one line is missing then just drop last line
df_clean = df_clean.assign(flat=flat_frames)
else:
pass
return dict(header_dict), df_clean
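# Hedged usage sketch (illustrative file name): parse a TASMAD scan file and
# inspect the planned number of points and the scan columns.
# header, frame = parse_ill_data('068577')
# print(header['COMND']['NP'], list(frame.columns))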
def ub_from_header(scan_header):
# type: ((dict, Scan)) -> UBMatrix
"""
Make a UBMatrix object from TASMAD scan header.
:param scan_header:
:return: UBMatrix object
"""
if isinstance(scan_header, Scan):
scan_header = scan_header.header
param = scan_header['PARAM']
lattice_parameters = [param['AS'], param['BS'], param['CS'], param['AA'], param['BB'], param['CC']]
hkl1 = [float(param['AX']), float(param['AY']), float(param['AZ'])]
hkl2 = [float(param['BX']), float(param['BY']), float(param['BZ'])]
ub_matrix = UBMatrix(lattice_parameters, hkl1, hkl2)
return ub_matrix
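# Hedged usage sketch (illustrative): derive the UB matrix directly from a
# parsed header instead of constructing it by hand.
# header, _ = parse_ill_data('068577')
# u = ub_from_header(header)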
class Scan(object):
"""
Reads a TASMAD scan file, extracts metadata and do essential conversions. Assumes const-Ei scan!
Usually not instantiated on its own. Use read_mf_scan() or read_mf_scans() instead.
"""
def __init__(self, file_name, ub_matrix=None, intensity_matrix=None, a3_offset=0.0, a4_offset=0.0):
"""
Scan object.
:param file_name: File name of TASMAD scan file.
:param ub_matrix: UBMatrix object to be used. Omit to generate from file header.
:param intensity_matrix: Intensity correction matrix to be used. Omit to use default.
:return: Scan object.
Examples:
>>> import multiflexxlib as mfl
>>> s1 = mfl.Scan('068577') # opens scan file 068577
>>> s2 = mfl.Scan(68577) # also possible to provide filename in number form. Will be padded to full length.
>>> u = mfl.UBMatrix([4.05, 4.05, 4.05, 90, 90, 90], [1, 0, 0], [0, 0, 1])
>>> s3 = mfl.Scan(68577, ub_matrix=u, a3_offset=1.2) # Applies a custom UBMatrix and add 1.2 degrees to all A3
angles.
>>> s3.a3_offset = 1.95 # a3_offset and a4_offset can be set after creation.
"""
file_name = _number_to_scan(file_name)
f = open(file_name)
self.header, self.data = parse_ill_data(f)
self.file_name = os.path.abspath(file_name)
self._a3_offset = a3_offset
self._a4_offset = a4_offset
self._apply_offsets(a3_offset, a4_offset)
if 'flat' not in self.data.columns:
raise AttributeError('%s does not contain MultiFLEXX data.' % file_name)
elif 'A3' not in self.header['STEPS'].keys():
raise AttributeError('%s is not A3 scan.' % file_name)
elif 'EI' in self.header['STEPS'].keys():
raise AttributeError('%s is not a const-E scan.' % file_name)
if intensity_matrix:
self.intensity_matrix = intensity_matrix
else:
self.intensity_matrix = INTENSITY_COEFFICIENT
if not ub_matrix:
self.ub_matrix = ub_from_header(self.header)
else:
self.ub_matrix = ub_matrix
self.converted_dataframes = []
self._update_data_array()
print('finished loading %s, a3_offset = %.2f, a4_offset = %.2f' %
(file_name, self.a3_offset, self.a4_offset))
@property
def ki(self):
try:
ki = self.data.iloc[0]['KI']
except KeyError:
try:
ki = etok(self.data.iloc[0]['EI'])
except KeyError:
ki = _extract_ki_from_header(self.header['POSQE']['EN'], self.header['PARAM']['FX'],
self.header['PARAM']['KFIX'])
return ki
@property
def tt(self):
try:
tt = self.data.iloc[-1]['TT'] # takes final value as signature value for the scan
except KeyError:
tt = None
return tt
@property
def mag(self):
try:
mag = self.data.iloc[-1]['MAG']
except KeyError:
mag = None
return mag
@property
def ei(self):
"""
Initial Energy (Ei) of scan.
:return: Ei in meV
"""
return ktoe(self.ki)
@property
def np_planned(self):
"""
Total planned points in scan based on command.
:return: Integer steps.
"""
return self.header['COMND']['NP']
@property
def np_actual(self):
"""
Actual finished points. Different from planned if scan is unfinished.
:return: Integer steps.
"""
return len(self.data)
@property
def scan_number(self):
"""
Scan number.
:return: String of scan file name, which should be numeric for TASMAD files.
"""
return os.path.split(self.file_name)[1]
@property
def a3_offset(self):
return self._a3_offset
@property
def a4_offset(self):
return self._a4_offset
@a3_offset.setter
def a3_offset(self, value):
a3_offset_old = self.a3_offset
a3_offset_new = value
a3_add = a3_offset_new - a3_offset_old
self._apply_offsets(a3_add, 0.0)
self._update_data_array()
self._a3_offset = a3_offset_new
@a4_offset.setter
def a4_offset(self, value):
a4_offset_old = self.a4_offset
a4_offset_new = value
a4_add = a4_offset_new - a4_offset_old
self._apply_offsets(0.0, a4_add)
self._update_data_array()
self._a4_offset = a4_offset_new
@property
def planned_locus_list(self):
kf_list = [etok(e) for e in EF_LIST]
a3_start, a3_end_actual, a3_end_planned = self.a3_ranges
a4_start, a4_end_actual, a4_end_planned = self.a4_ranges
return [calculate_locus(self.ki, kf, a3_start, a3_end_planned, a4_start, a4_end_planned,
self.ub_matrix, expand_a3=True) for kf in kf_list]
@property
def actual_locus_list(self):
kf_list = [etok(e) for e in EF_LIST]
a3_start, a3_end_actual, a3_end_planned = self.a3_ranges
a4_start, a4_end_actual, a4_end_planned = self.a4_ranges
return [calculate_locus(self.ki, kf, a3_start, a3_end_actual, a4_start, a4_end_actual,
self.ub_matrix) for kf in kf_list]
def _apply_offsets(self, a3_offset, a4_offset):
self.data.A3 = self.data.A3 + a3_offset
self.data.A4 = self.data.A4 + a4_offset
def _update_data_array(self):
num_ch = NUM_CHANNELS
channel_separation = CHANNEL_SEPARATION
num_flat_frames = len(self.data)
# a numpy array caching a3, a4 angles and monitor counts, shared across all energy channels
a3_a4_mon_array = np.zeros([num_flat_frames * num_ch, 3])
a4_angle_mask = np.linspace(-channel_separation * (num_ch - 1) / 2,
channel_separation * (num_ch - 1) / 2, num_ch)
for i in range(num_flat_frames):
a3_a4_mon_array[i * num_ch: (i + 1) * num_ch, 0] = self.data.loc[i, 'A3']
a3_a4_mon_array[i * num_ch: (i + 1) * num_ch, 1] = self.data.loc[i, 'A4'] + a4_angle_mask
a3_a4_mon_array[i * num_ch: (i + 1) * num_ch, 2] = self.data.loc[i, 'M1']
data_template = pd.DataFrame(index=range(num_flat_frames * num_ch),
columns=['A3', 'A4', 'MON', 'px', 'py', 'pz', 'h', 'k', 'l',
'counts', 'valid', 'coeff', 'ach', 'point'], dtype='float64')
data_template.loc[:, ['A3', 'A4', 'MON']] = a3_a4_mon_array
self.converted_dataframes = [data_template.copy() for _ in range(len(EF_LIST))]
for ef_channel_num, ef in enumerate(EF_LIST):
qs = self.ub_matrix.angle_to_q(self.ki, etok(ef), a3_a4_mon_array[:, 0], a3_a4_mon_array[:, 1])
self.converted_dataframes[ef_channel_num].loc[:, ['px', 'py', 'pz']] = self.ub_matrix.convert(qs, 'sp').T
self.converted_dataframes[ef_channel_num].loc[:, ['h', 'k', 'l']] = self.ub_matrix.convert(qs, 'sr').T
coefficient = INTENSITY_COEFFICIENT
detector_working = DETECTOR_WORKING
for ef_channel_num in range(len(EF_LIST)):
dataframe = self.converted_dataframes[ef_channel_num]
counts = np.zeros(num_ch * num_flat_frames, dtype='float64')
valid = np.zeros(num_ch * num_flat_frames, dtype='float64')
coeff = np.zeros(num_ch * num_flat_frames, dtype='float64')
point = np.zeros(num_ch * num_flat_frames, dtype='float64')
ach = np.zeros(num_ch * num_flat_frames, dtype='float64')
for point_num in range(num_flat_frames):
flatcone_array = np.asarray(self.data.loc[point_num, 'flat'])
# START direct access to DataFrame
# rows = slice(point_num * num_ch, (point_num + 1) * num_ch - 1, None)
# dataframe.at[rows, 'counts'] = flatcone_array[:, ef_channel_num]
# dataframe.at[rows, 'valid'] = detector_working[:, ef_channel_num]
# dataframe.at[rows, 'coeff'] = coefficient[:, ef_channel_num]
# dataframe.at[rows, 'point'] = self.data.loc[point_num, 'PNT']
# dataframe.at[rows, 'ach'] = range(1, num_ch + 1)
# END direct access to DataFrame
# Buffer results into ndarray first, 4x faster than direct access, for some reason.
rows = slice(point_num * num_ch, (point_num + 1) * num_ch, None)
counts[rows] = flatcone_array[:, ef_channel_num]
valid[rows] = detector_working[:, ef_channel_num]
coeff[rows] = coefficient[:, ef_channel_num]
point[rows] = self.data.loc[point_num, 'PNT']
ach[rows] = range(1, num_ch + 1)
dataframe.counts = counts
dataframe.valid = valid
dataframe.coeff = coeff
dataframe.point = point
dataframe.ach = ach
@property
def a3_ranges(self):
a3_start = self.data.iloc[0]['A3']
a3_end_actual = self.data.iloc[-1]['A3']
try:
a3_end_planned = self.header['VARIA']['A3'] + \
self.header['STEPS']['A3'] * (self.header['COMND']['NP'] - 1) + self._a3_offset
except KeyError:
a3_end_planned = a3_end_actual
return a3_start, a3_end_actual, a3_end_planned
@property
def a4_ranges(self):
a4_start = self.header['VARIA']['A4'] + self._a4_offset # A4 is not necessarily outputted in data
if 'A4' not in self.header['STEPS']:
a4_end_planned = a4_start
a4_end_actual = a4_start
else:
a4_end_planned = self.header['VARIA']['A4'] + \
self.header['STEPS']['A4'] * (self.header['COMND']['NP'] - 1) + self._a4_offset
a4_end_actual = self.data.iloc[-1]['A4']
return a4_start, a4_end_actual, a4_end_planned
def to_csv(self, file_name=None, channel=None):
raise NotImplementedError('Not yet implemented, please export from BinnedData class instead.')
def make_bin_edges(values, tolerance=0.2, strategy=BIN_ADAPTIVE, detect_diffuse=True):
# type: ((list, pd.Series), float) -> list
"""
:param values: An iterable list of all physical quantities, repetitions allowed.
:param tolerance: maximum difference in value for considering two points to be the same.
:param strategy: (str, iterable) 'adaptive' to bin points based on proximity, 'regular' to bin points into a regular
set of bins. Provide an iterable to manually set bin EDGES.
:param detect_diffuse: Raise an exception if a bin is striding over a diffuse group of points.
:return: a list of bin edges
Walks through the sorted unique values; if a point is further than tolerance away from the next, a bin edge is
placed between the two points, otherwise no bin edge is added. A beginning and an ending edge are added at
tolerance / 2 beyond either end.
"""
if isinstance(strategy, str):
if strategy == BIN_ADAPTIVE:
values_array = np.asarray(values).ravel()
unique_values = np.asarray(list(set(values_array)))
unique_values.sort()
bin_edges = [unique_values[0] - tolerance / 2] # First bin edge should be to the 'left' of smallest value.
current_walk = 0
for i in range(len(unique_values) - 1):
if unique_values[i+1] - unique_values[i] > tolerance: # New bin edge if two points further than tol.
bin_edges.append((unique_values[i] + unique_values[i+1]) / 2)
current_walk = 0
else:
# Keep track of how much this bin is spanning.
current_walk = current_walk + unique_values[i+1] - unique_values[i]
if current_walk > 2 * tolerance and detect_diffuse:
raise ValueError('Bin edge creation failed due to diffuse clustering of values.')
bin_edges.append(unique_values[-1] + tolerance / 2)
return bin_edges
elif strategy == BIN_REGULAR:
values_array = np.asarray(values).ravel()
unique_values = np.asarray(list(set(values_array)))
unique_values.sort()
bin_edges = list(np.arange(unique_values[0] - tolerance / 2, unique_values[-1], tolerance))
bin_edges.append(unique_values[-1] + tolerance / 2)
return bin_edges
else:
raise ValueError('Invalid binning strategy provided: (\'%s \', \'%s\', list) expected, got %s' %
(BIN_ADAPTIVE, BIN_REGULAR, strategy))
else: # if strategy is not a string
return [x for x in strategy] # it will at least be an iterable
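# Usage sketch of the adaptive strategy described in the docstring above (values and
# tolerance are illustrative, not from real data): points closer than `tolerance`
# share a bin; a gap larger than `tolerance` opens a new edge halfway between them.
# >>> make_bin_edges([0.0, 0.05, 1.0, 1.05], tolerance=0.2)
# [-0.1, 0.525, 1.15]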
def _merge_locus(locus_list):
clipper = pyclipper.Pyclipper()
for locus in locus_list:
clipper.AddPath(pyclipper.scale_to_clipper(locus), pyclipper.PT_SUBJECT)
merged_locus = pyclipper.scale_from_clipper(clipper.Execute(pyclipper.CT_UNION, pyclipper.PFT_NONZERO))
return merged_locus
def _merge_scan_points(data_frames, a3_tolerance=0.2, a4_tolerance=0.2, a3_bins=BIN_ADAPTIVE, a4_bins=BIN_ADAPTIVE):
"""
Bins actual detector counts together from multiple runs.
:param data_frames: Pandas data frames from Scan objects.
:param a3_tolerance: Max angle difference before two A3 angles are considered discrete.
:param a4_tolerance: See a3_tolerance.
:return: A DataFrame of binned points (summed counts, mean angles and positions, Poisson errors and per-monitor rates).
"""
joined_frames = pd.concat(data_frames, axis=0, ignore_index=True)
joined_frames = joined_frames.assign(counts_norm=joined_frames.counts/joined_frames.coeff)
joined_frames = joined_frames.drop(joined_frames[joined_frames.valid != 1].index) # delete dead detectors
a3_cuts = bin_and_cut(joined_frames.A3, tolerance=a3_tolerance, strategy=a3_bins)
try:
a4_cuts = bin_and_cut(joined_frames.A4, tolerance=a4_tolerance, strategy=a4_bins)
result = _decoupled_angle_merge(joined_frames, a3_cuts, a4_cuts)
return result
except ValueError as err: # If A4 is diffused across entire range due to small yet non-zero A4 step.
if type(a4_bins) is str:
if a4_bins == BIN_ADAPTIVE: # Decided not to rely on 'and' condition shortcut.
result = _coupled_angle_merge(joined_frames, a3_tolerance, a3_bins, a4_tolerance, a4_bins)
return result
raise err
def _decoupled_angle_merge(joined_frames, a3_cuts, a4_cuts):
# helper function for merging scan points. Used if A3 and A4 angles can be binned independently.
group = joined_frames.groupby([a3_cuts, a4_cuts])
sums = group[['counts', 'counts_norm', 'MON']].sum()
means = group[['A3', 'A4', 'px', 'py', 'pz', 'h', 'k', 'l']].mean()
error_bars = np.sqrt(sums.counts)
per_monitor = sums.counts_norm / sums.MON
result = pd.concat([sums, means], axis=1)
result = result.assign(err=error_bars, permon=per_monitor)
result = result.dropna().reset_index(drop=True)
return result
def _coupled_angle_merge(joined_frames, a3_tolerance, a3_bins, a4_tolerance, a4_bins):
# Used if A4 angle has a non-zero step that is smaller than precision, and there are enough steps to make A4 angles
# seem 'continuous'. MUCH SLOWER than decoupled binning!
a3_bin_edges = make_bin_edges(joined_frames.A3, tolerance=a3_tolerance, strategy=a3_bins)
fragments = []
for i in range(len(a3_bin_edges) - 1):
a3_left = a3_bin_edges[i]
a3_right = a3_bin_edges[i+1]
filtered = joined_frames.loc[joined_frames.A3.between(a3_left, a3_right)]
a4_cuts = bin_and_cut(filtered.A4, tolerance=a4_tolerance, strategy=a4_bins)
group = filtered.groupby([a4_cuts])
sums = group[['counts', 'counts_norm', 'MON']].sum()
means = group[['A3', 'A4', 'px', 'py', 'pz', 'h', 'k', 'l']].mean()
error_bars = np.sqrt(sums.counts)
per_monitor = sums.counts_norm / sums.MON
fragment = pd.concat([sums, means], axis=1)
fragment = fragment.assign(err=error_bars, permon=per_monitor)
fragment = fragment.dropna().reset_index(drop=True)
fragments.append(fragment)
result = pd.concat(fragments, axis=0).reset_index(drop=True)
return result
def bin_and_cut(data, tolerance=0.2, strategy=BIN_ADAPTIVE, detect_diffuse=True):
# type: (pd.Series, float) -> Categorical
"""
Applies adaptive binning and returns a pandas.Categorical cut object
:param data: a series or list of numbers. Repetition allowed.
:param tolerance: Binning tolerance.
:param strategy: 'adaptive', 'regular' or a list describing bin edges.
:param detect_diffuse: Detect if the values are semi-continuous and cannot be cut into bins using adaptive mode.
:return: pd.cut
"""
bin_edges = make_bin_edges(data, tolerance, strategy=strategy, detect_diffuse=detect_diffuse)
cut = pd.cut(data, bin_edges)
return cut
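# Illustrative example of how the returned cut is consumed downstream (the values
# are made up; `pd` is the pandas import already used in this module):
# >>> s = pd.Series([10.0, 10.1, 12.0, 12.05])
# >>> cuts = bin_and_cut(s, tolerance=0.2)      # two bins, around 10 and around 12
# >>> s.groupby(cuts).mean()                    # same pattern as in _merge_scan_points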
def series_to_binder(items):
"""
Helper function for converting a list to a _DataBinder object. The _DataBinder class exists only to override the str method.
:param items: Anything that makes sense with list(items).
:return:
"""
# type: (pd.Series)->_DataBinder
return _DataBinder(list(items))
def bin_scans(list_of_data, # type: ['Scan']
nan_fill=0, ignore_ef=False,
en_tolerance=0.05, tt_tolerance=1.0, mag_tolerance=0.05, a3_tolerance=0.2, a4_tolerance=0.2,
en_bins=BIN_ADAPTIVE, tt_bins=BIN_ADAPTIVE, mag_bins=BIN_ADAPTIVE, a3_bins=BIN_ADAPTIVE,
a4_bins=BIN_ADAPTIVE,
angle_voronoi=False):
# type: (...)-> BinnedData
"""
Bin raw Scan objects into BinnedData object.
:param list_of_data: a list of Scan objects.
:param nan_fill: how to deal with NaNs in metadata such as temperature. Default is to fill with 0.
:param ignore_ef: Not implemented, default is False.
:param en_tolerance: Energy binning tolerance.
:param tt_tolerance: Temperature binning tolerance.
:param mag_tolerance: Magnetic field binning tolerance.
:param a3_tolerance: A3 angle binning tolerance of data points.
:param a4_tolerance: A4 angle binning tolerance of data points.
:param en_bins: (str, iterable) Strategy for bin creation. 'adaptive' to bin points based on proximity; 'regular'
creates a regular grid of bins. Provide an iterable to manually set bin EDGES.
:param mag_bins: see en_bins.
:param tt_bins: see en_bins.
:param a3_bins: see en_bins.
:param a4_bins: see en_bins.
:param angle_voronoi: Performs Voronoi partition in angle plane instead of reciprocal plane.
:return: BinnedData object.
"""
all_data = pd.DataFrame(index=range(len(list_of_data) * len(EF_LIST)),
columns=['name', 'ei', 'ef', 'en', 'tt', 'mag', 'points', 'locus_a', 'locus_p'],
dtype=np.object)
file_names = [data.file_name for data in list_of_data]
for i, scan in enumerate(list_of_data):
for j in range(len(EF_LIST)):
ef = EF_LIST[j]
all_data.loc[i * len(EF_LIST) + j, ['name', 'ei', 'ef', 'en']] = [scan.file_name, scan.ei, ef, scan.ei - ef]
all_data.loc[i * len(EF_LIST) + j, ['tt', 'mag']] = [scan.tt, scan.mag]
all_data.loc[i * len(EF_LIST) + j]['points'] = scan.converted_dataframes[j]
all_data.loc[i * len(EF_LIST) + j]['locus_a'] = scan.actual_locus_list[j]
all_data.loc[i * len(EF_LIST) + j]['locus_p'] = scan.planned_locus_list[j]
all_data = all_data.fillna(nan_fill)
cut_ei = bin_and_cut(all_data.ei, en_tolerance, strategy=en_bins)
cut_en = bin_and_cut(all_data.en, en_tolerance, strategy=en_bins)
cut_tt = bin_and_cut(all_data.tt, tt_tolerance, strategy=tt_bins)
cut_mag = bin_and_cut(all_data.mag, mag_tolerance, strategy=mag_bins)
if ignore_ef:
raise NotImplementedError('For the love of god do not try to mix data from different final energies!')
else:
grouped = all_data.groupby([cut_ei, cut_en, cut_tt, cut_mag])
grouped_meta = grouped[['ei', 'ef', 'en', 'tt', 'mag']].mean()
grouped_data = grouped['points'].\
apply(series_to_binder).\
apply(lambda x: _MergedDataPoints(x, a3_tolerance, a4_tolerance, a3_bins, a4_bins) if np.any(pd.notna(x))
# -*- coding: utf-8 -*-
import pandas as pd
class BaseRecommender(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def recommend(self, n=10, **kwargs):
""" Recommend items
:param n: number of items to recommend (int). Default 10.
If zero or None, all items will be returned.
:rtype: pandas.DataFrame
:returns: DataFrame with recommendations
"""
return pd.DataFrame()
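# A minimal subclass sketch of the interface above; illustrative only. The item
# DataFrame and its 'score' column are assumptions, not part of the original code.
class TopNRecommender(BaseRecommender):
    def __init__(self, items, **kwargs):
        super(TopNRecommender, self).__init__(**kwargs)
        self.items = items  # assumed: a pandas.DataFrame with a 'score' column
    def recommend(self, n=10, **kwargs):
        ranked = self.items.sort_values('score', ascending=False)
        return ranked if not n else ranked.head(n)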
#####################################################.
# This file stores all the functions #
# used in CSEARCH #
#####################################################.
import os
import sys
import subprocess
from pathlib import Path
from pkg_resources import resource_filename
import pandas as pd
import ast
import numpy as np
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import rdDistGeom, rdMolAlign
from aqme.utils import (
get_info_input,
get_conf_RMS,
mol_from_sdf_or_mol_or_mol2,
read_xyz_charge_mult,
)
TEMPLATES_PATH = Path(resource_filename("aqme", "templates"))
# Auxiliar functions of this module
def load_template(complex_type, log):
"""
Checks if the templates are reachable and if so returns the molecule object.
Returns
-------
rdkit.Chem.Mol
The molecule file of the corresponding template.
"""
type2template = dict()
type2template["squareplanar"] = "template-4-and-5.sdf"
type2template["squarepyramidal"] = "template-4-and-5.sdf"
type2template["linear"] = "template-2.sdf"
type2template["trigonalplanar"] = "template-3.sdf"
folder = TEMPLATES_PATH
if not folder.exists():
log.write(
"x The templates folder was not found, probably due to a problem while installing AQME"
)
log.finalize()
sys.exit()
file_template = folder / Path(type2template[complex_type])
templates = Chem.SDMolSupplier(str(file_template))
template = templates[
-1
] # RAUL: I'm assuming that there is only one molecule per template and in case there are several, it's the last one
return template
def calc_neighbours(molecule, metals_idx):
"""
Changes the atomic number (and charge) of the first metal found
and returns its neighbour atoms (an empty list if no metal is found).
Parameters
----------
molecule : rdkit.Chem.Mol
Molecule containing the metal centre whose neighbours are requested.
metals_idx : list
List containing the Idx of the metals. The first match is the only one
considered.
Returns
-------
list
list of neighbour atoms
"""
bonds2AtNum = dict()
bonds2AtNum[5] = 14
bonds2AtNum[4] = 14
bonds2AtNum[3] = 53
bonds2AtNum[2] = 53
for atom in molecule.GetAtoms():
idx = atom.GetIdx()
if idx in metals_idx:
n_bonds = len(atom.GetBonds())
AtNum = bonds2AtNum[n_bonds]
atom.SetAtomicNum(AtNum)
if n_bonds == 5:
atom.SetFormalCharge(1)
neighbours = atom.GetNeighbors()
return neighbours
return []
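# In effect (per bonds2AtNum above): a 4- or 5-coordinate metal is rewritten as
# Si (Z=14, with a +1 formal charge in the 5-coordinate case), while a 2- or
# 3-coordinate metal is rewritten as I (Z=53), presumably to match the
# placeholder atoms in the corresponding SDF templates.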
def get_mappings(molecule, template, conformer_id=-1):
match = molecule.GetSubstructMatch(template)
conformer = template.GetConformer(conformer_id)
coordMap = {}
algMap = []
for i, atomidx in enumerate(match):
algMap.append((atomidx, i))
coordMap[atomidx] = conformer.GetAtomPosition(i)
return coordMap, algMap
def get_distance_constrains(coordMap):
atom_idxs = list(coordMap.keys())
constrains = []
for k, i in enumerate(atom_idxs):
for j in atom_idxs[k + 1 :]:
d = coordMap[i].Distance(coordMap[j])
constrains.append((i, j, d))
return constrains
def template_embed_optimize(target, template, maxsteps, log):
"""
Embeds a new conformation into a molecule, optimizes it using UFF and
realigns it.
Parameters
----------
target : rdkit.Chem.Mol
Molecule where you want to embed the new conformation
template : rdkit.Chem.Mol
Template molecule to identify the core of the molecule that will have
its distances frozen in the optimization.
maxsteps : int
Number of maximum optimization steps in RDKit.
log : Logger
[description]
Returns
-------
molecule, coordMap, algMap, conf_id
molecule embedded, mapping to the atom instances,
list of tuples with position and id and int with the conformer id.
"""
seed = -1
force_constant = 10000
coord_map, alg_map = get_mappings(target, template, conformer_id=-1)
# add H's to molecule
molecule = Chem.AddHs(target)
conf_id = rdDistGeom.EmbedMolecule(molecule, coordMap=coord_map, randomSeed=seed)
if conf_id < 0:
log.write("Could not embed molecule.")
return molecule, None, None, conf_id
forcefield = Chem.UFFGetMoleculeForceField(molecule, confId=conf_id)
constraints = get_distance_constrains(coord_map)
for i, j, d in constraints:
forcefield.AddDistanceConstraint(i, j, d, d, force_constant)
forcefield.Initialize()
forcefield.Minimize(maxIts=maxsteps)
# rotate the embedded conformation onto the core_mol:
rdMolAlign.AlignMol(molecule, template, atomMap=alg_map, reflect=True, maxIters=100)
return molecule, coord_map, alg_map, conf_id
def filter_template_mol(molecule_new, mol_objects, heavyonly, max_matches):
"""
Returns whether a mol object should be kept or not.
Parameters
----------
molecule_new : [type]
[description]
mol_objects : [type]
[description]
heavyonly : bool
If True only non-H atoms are considered for the RMS calculation
max_matches : int
Maximum number of matches in the RMSD?
Returns
-------
bool
Returns True when the molecule should be kept and False when it should
be discarded.
"""
if not mol_objects:
return True
# check if the molecule already exists in mol_objects
for mol in mol_objects:
rms = get_conf_RMS(mol, molecule_new, -1, -1, heavyonly, max_matches)
if rms < 0.5:
return False
return True
# Decorators for documentation
def doc_parameters(f):
"""
Decorator that adds the "Parameters" section at the end of the
docstring of the decorated function. Take care to apply this decorator 'below' the
doc_returns decorator.
Parameters
----------
f : function
function to decorate.
Returns
-------
function
returns the same function with the docstring modified.
"""
description = f.__doc__
parameters = [
("molecule", "rdkit.Chem.Mol", "Molecule to be embedded "),
("template", "rdkit.Chem.Mol", "Template molecule to do the embedding"),
("neighbours", "list", "Idx of the atoms neighbouring the metal"),
("name_input", "str", "Base name for the embedded molecules"),
("maxsteps", "int", "Maximum number of optimization steps run with rdkit."),
("log", "Logger", "[description]"),
]
item_fmt = "{} : {}\n {}".format
params_txt = "\n".join([item_fmt(*items) for items in parameters])
f.__doc__ = f"{description}\nParameters\n----------\n{params_txt}\n"
return f
def doc_returns(f):
"""
Decorator that adds the "Returns" section at the end of the
docstring of the decorated function.
Parameters
----------
f : function
function to decorate.
Returns
-------
function
returns the same function with the docstring modified.
"""
description = f.__doc__
item_fmt = "{} : {}\n {}".format
outputs = [
("mol_objects", "list", "Embedded molecules."),
("name_returns", "list", "Names for the embedded molecules"),
(
"coord_maps",
"list",
"Mappings to the Idx of the atoms afected by the embedding",
),
("alg_maps", "list", "Mappings to the Idx of the core to do alignments"),
("mol_templates", "list", "List of template molecules used"),
]
outs_txt = "\n".join([item_fmt(*items) for items in outputs])
f.__doc__ = f"{description}\nReturns\n-------\n{outs_txt}\n"
return f
# Embedding functions
@doc_returns
@doc_parameters
def two_embed(molecule, template, neighbours, name, maxsteps, log):
"""
Embedding function for linear geometries. Requires the 'template-2.sdf' template.
"""
template.GetAtomWithIdx(0).SetAtomicNum(neighbours[0].GetAtomicNum())
template.GetAtomWithIdx(1).SetAtomicNum(neighbours[1].GetAtomicNum())
template.GetAtomWithIdx(2).SetAtomicNum(53)
# assigning and embedding onto the core
mol_obj, coord_map, alg_map, ci = template_embed_optimize(
molecule, template, maxsteps, log
)
if ci >= 0: # writing to mol_object file
return [mol_obj], [name], [coord_map], [alg_map], [template]
return [], [], [], [], []
@doc_returns
@doc_parameters
def three_embed(molecule, template, neighbours, name, maxsteps, log):
"""
Embedding function for trigonal planar geometry. Requires the
'template-3.sdf' template.
"""
template.GetAtomWithIdx(0).SetAtomicNum(53)
template.GetAtomWithIdx(1).SetAtomicNum(neighbours[0].GetAtomicNum())
template.GetAtomWithIdx(2).SetAtomicNum(neighbours[1].GetAtomicNum())
template.GetAtomWithIdx(3).SetAtomicNum(neighbours[2].GetAtomicNum())
# assigning and embedding onto the core
mol_obj, coord_map, alg_map, conf_id = template_embed_optimize(
molecule, template, maxsteps, log
)
if conf_id >= 0: # writing to mol_object file
return [mol_obj], [name], [coord_map], [alg_map], [template]
return [], [], [], [], []
@doc_returns
@doc_parameters
def four_embed(molecule, template, neighbours, name, maxsteps, log):
"""
Embedding function for squareplanar geometry. Requires 'template-4-and-5.sdf'
template. Attempts 3 embeddings.
"""
mol_objects = []
name_return = []
coord_maps = []
alg_maps = []
mol_templates = []
# Remove F atoms from the template
for atom in template.GetAtoms():
if atom.GetSymbol() == "F":
template = Chem.RWMol(template)
template.RemoveAtom(atom.GetIdx())
template = template.GetMol()
# three cases for square planar
atn0 = neighbours[0].GetAtomicNum()
atn1 = neighbours[1].GetAtomicNum()
atn2 = neighbours[2].GetAtomicNum()
atn3 = neighbours[3].GetAtomicNum()
replacements_list = [
(atn0, atn1, atn2, atn3, 14),
(atn0, atn2, atn3, atn1, 14),
(atn0, atn3, atn1, atn2, 14),
]
for i, replacements in enumerate(replacements_list):
# Create a copy of the mol object
mol = Chem.Mol(template)
# Assign atomic numbers to neighbour atoms
for idx, atn in enumerate(replacements):
mol.GetAtomWithIdx(idx).SetAtomicNum(atn)
# embedding of the molecule onto the core
mol_obj, coord_map, alg_map, conf_id = template_embed_optimize(
molecule, mol, maxsteps, log
)
if conf_id >= 0:
name_out = f"{name.split()[0]}_{i}"
mol_objects.append(mol_obj)
name_return.append(name_out)
coord_maps.append(coord_map)
alg_maps.append(alg_map)
mol_templates.append(mol)
return mol_objects, name_return, coord_maps, alg_maps, mol_templates
@doc_returns
@doc_parameters
def five_embed(molecule, mol_template, neighbours, name, maxsteps, log):
"""
Embedding function for squarepyramidal geometry. Requires
'template-4-and-5.sdf' template. Attempts 15 embeddings.
"""
mol_objects = []
name_return = []
coord_maps = []
alg_maps = []
mol_templates = []
counter = 0
atomic_numbers = [mol_template.GetAtomWithIdx(i).GetAtomicNum() for i in neighbours]
replacements = [
[4, 0, 1, 2, 3],
[4, 0, 2, 3, 1],
[4, 0, 3, 1, 2],
[3, 0, 1, 2, 4],
[3, 0, 2, 4, 1],
[3, 0, 4, 1, 2],
[2, 0, 1, 4, 3],
[2, 0, 4, 3, 1],
[2, 0, 4, 1, 3],
[1, 0, 4, 2, 3],
[1, 0, 2, 3, 4],
[1, 0, 3, 4, 2],
[0, 4, 1, 2, 3],
[0, 4, 2, 3, 1],
[0, 4, 3, 1, 2],
]
for replacement in replacements:
at0, at1, at2, at3, at4 = [atomic_numbers[r] for r in replacement]
mol_template.GetAtomWithIdx(0).SetAtomicNum(at0)
mol_template.GetAtomWithIdx(1).SetAtomicNum(at1)
mol_template.GetAtomWithIdx(2).SetAtomicNum(at2)
mol_template.GetAtomWithIdx(3).SetAtomicNum(at3)
mol_template.GetAtomWithIdx(4).SetAtomicNum(at4)
mol_template.GetAtomWithIdx(5).SetAtomicNum(14)
mol_template.GetAtomWithIdx(5).SetFormalCharge(1)
# assigning and embedding onto the core
mol_obj, coord_map, alg_map, conf_id = template_embed_optimize(
molecule, mol_template, maxsteps, log
)
if conf_id >= 0:
name_out = f"{name}_{counter}"
mol_objects.append(mol_obj)
name_return.append(name_out)
coord_maps.append(coord_map)
alg_maps.append(alg_map)
mol_templates.append(mol_template)
counter += 1
return mol_objects, name_return, coord_maps, alg_maps, mol_templates
def template_embed(self, mol, complex_type, metal_idx, maxsteps, heavyonly, maxmatches):
"""
Wrapper function to automatically select the appropriate embedding function
depending on the number of neighbours of the metal center.
"""
embed_functions = dict()
embed_functions[2] = two_embed
embed_functions[3] = three_embed
embed_functions[4] = four_embed
embed_functions[5] = five_embed
template = load_template(complex_type, self.args.log)
# Generate the embeddings
neighbours = calc_neighbours(mol, metal_idx)
embed = embed_functions[len(neighbours)]
items = embed(mol, template, neighbours, self.args.name, maxsteps, self.args.log)
# Filter the results
molecules = items[0]
if len(molecules) > 1:
ignored = []
for i, mol_filter in enumerate(molecules):
has_big_rmsd = filter_template_mol(
mol_filter, molecules, heavyonly, maxmatches
)
if has_big_rmsd:
ignored.append(i)
items = [item for i, item in enumerate(items) if i not in ignored]
return items
def creation_of_dup_csv_csearch(program):
"""
Generates a pandas.DataFrame object with the appropriate columns for the
conformational search and the minimization.
Parameters
----------
program : str
Conformational search method. Current valid methods are:
['rdkit', 'fullmonte', 'summ', 'crest']
Returns
-------
pandas.DataFrame
"""
# Boolean aliases for the selected program
is_rdkit = program == "rdkit"
is_fullmonte = program == "fullmonte"
is_crest = program == "crest"
is_summ = program == "summ"
# column blocks definitions
base_columns = [
"Molecule",
"RDKit-Initial-samples",
"RDKit-energy-window",
"RDKit-initial_energy_threshold",
"RDKit-RMSD-and-energy-duplicates",
"RDKit-Unique-conformers",
]
end_columns_no_min = ["CSEARCH time (seconds)", "Overall charge"]
fullmonte_columns = [
"FullMonte-Unique-conformers",
]
#'FullMonte-conformers',
#'FullMonte-energy-window',
#'FullMonte-initial_energy_threshold',
#'FullMonte-RMSD-and-energy-duplicates']
summ_columns = [
"summ-conformers",
"summ-energy-window",
"summ-initial_energy_threshold",
"summ-RMSD-and-energy-duplicates",
"summ-Unique-conformers",
]
crest_columns = ["Molecule", "crest-conformers"]
# Check Conformer Search method
if is_rdkit:
columns = base_columns
elif is_fullmonte:
columns = base_columns + fullmonte_columns
elif is_summ:
columns = base_columns + summ_columns
elif is_crest:
columns = crest_columns
else:
return None
columns += end_columns_no_min
return pd.DataFrame(columns=columns)
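# Usage sketch (illustrative): for the RDKit workflow this returns an empty frame
# with the bookkeeping columns that CSEARCH later fills in per molecule.
# >>> dup_data = creation_of_dup_csv_csearch("rdkit")
# >>> list(dup_data.columns)[:2]
# ['Molecule', 'RDKit-Initial-samples']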
def constraint_fix(
constraints_atoms, constraints_dist, constraints_angle, constraints_dihedral
):
# this avoids problems when running AQME through command lines
if pd.isnull(constraints_atoms):
constraints_atoms = []
else:
if not isinstance(constraints_atoms, list):
constraints_atoms = ast.literal_eval(constraints_atoms)
if pd.isnull(constraints_dist):
constraints_dist = []
else:
if not isinstance(constraints_dist, list):
constraints_dist = ast.literal_eval(constraints_dist)
    if pd.isnull(constraints_angle):
        constraints_angle = []
    else:
        if not isinstance(constraints_angle, list):
            constraints_angle = ast.literal_eval(constraints_angle)
    if pd.isnull(constraints_dihedral):
        constraints_dihedral = []
    else:
        if not isinstance(constraints_dihedral, list):
            constraints_dihedral = ast.literal_eval(constraints_dihedral)
    return constraints_atoms, constraints_dist, constraints_angle, constraints_dihedral
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 11:41:47 2020
@author: CheikhMoctar
"""
import pandas as pd
import numpy as np
import utils
from functools import reduce
file = "../premium/Ross - Premium Allocation.xlsx"
sov = pd.read_excel('final_Overlay_07-13.xlsx')
cleaned_dfs = {}
def headerFunc(d):
empty_cols = [col for col in d.columns if d[col].isnull().all()]
empty_rows = d.index[d.isnull().all(1)]
d.drop(empty_cols , axis=1,inplace=True)
d.drop(d.index[empty_rows], inplace=True)
d[~d.isin(['Unnamed:'])].dropna(how='all')
if 'Unnamed:' in list(d.columns) :
d.columns = d.iloc[0]
d[~d.isin(['Unnamed:'])].dropna(how='all')
cleaned_dfs.append(d)
return d
elif len(list(d.columns))>0:
pass
else:
return d
sheets = {'BRE Hotels Allocation':12, 'Mfg Homes Allocation':1, 'GLP Allocation':17, 'Office Retail Allocation':6, 'Link Allocation':15,
'Space Center Allocation':0, 'LivCor Allocation':1}
#df = pd.read_excel(file, None);
# Data Cleaning and Address normalization functions
def replace_last(source_string, replace_what, replace_with):
head, _sep, tail = source_string.rpartition(replace_what)
return head + replace_with + tail
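# Example of the behaviour (illustrative values):
# replace_last('123 north main street', 'street', 'st') -> '123 north main st'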
def replace_second(source_string, dict_):
s = source_string.split(' ')
if len(s) >1 :
if s[1].replace('.', '').lower() in dict_.keys():
s[1] = dict_[s[1].replace('.', '')]
return ' '.join(s)
else:
return source_string
else:
return source_string
#def similar(a, b):
#return SequenceMatcher(None, a, b).ratio()
""" Retained columns from each file
====================================================================================================================="""
Us_States = utils.us_state_abbrev
st_dict = utils.st_dict
dir_dict = utils.dir_dict
sov['State'] = sov['State'].apply(lambda x: Us_States[x] if x in Us_States.keys() else Us_States[x.capitalize()])
Add_cols = ['Address', 'City', 'State', 'Zip']
""" Adding geolocation into the MDS
====================================================================================================================="""
def lower_No_spaces(df, col):
df[col] = [(' '.join(str(d).split())).lower().strip() for d in df[col]]
df[col] = [d.replace(' ', '') for d in df[col]]
return df[col].astype(str)
def Address_Normal(df):
#k1 = df.columns[4]
#df['State'] = df['State'].apply(lambda x: Us_States[x])
df['Address'] = [(' '.join(str(d).split())).lower().strip() for d in df['Address']]
df['st_add'] = [replace_last(d, d.split(' ')[-1], st_dict[d.split(' ')[-1].replace('.','')]) if d.split(' ')[-1].replace('.','') in st_dict.keys() else d for d in df['Address']]
df['st_add'] = [replace_second(d,dir_dict) for d in df['st_add']]
df['Add'] = [d.replace(' ', '') for d in df['st_add']] + lower_No_spaces(df, 'City') + lower_No_spaces(df, 'State')
return df
def merging(df1, df2):
df = pd.merge(Address_Normal(df1), Address_Normal(df2), on = 'Add', how='outer', suffixes=('_x', '_y'), indicator=True)
return df
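# Note: with indicator=True the merged frame gains a '_merge' column taking the
# values 'both', 'left_only' and 'right_only'; the found / left_only / right_only
# tallies further below are built from that column.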
xl = pd.ExcelFile(file)
for sh in xl.sheet_names:
if sh in sheets.keys():
var = sh.split(' ')[0]
df = xl.parse(sh, skiprows=sheets[sh])
empty_rows = df.index[df.isnull().all(1)]
df.drop(df.index[empty_rows], inplace=True)
cleaned_dfs[var] = df
overlay = {k: merging(sov, df) for k, df in cleaned_dfs.items()}
found = {k:len(df[df['_merge'] == 'both']) for k, df in overlay.items()}
left_only = {k:len(df[df['_merge'] == 'left_only']) for k, df in overlay.items()}
right_only = {k:len(df[df['_merge'] == 'right_only']) for k, df in overlay.items()}
dfs = [Address_Normal(df) for df in cleaned_dfs.values()]
#dfs.insert(0, Address_Normal(sov))
#final = reduce(lambda df1, df2: df1.merge(df2, on="Add", how='outer', indicator=True), dfs)
#dfs = [Address_Normal(df).set_index("Add", drop=True) for df in cleaned_dfs.values()]
#final = pd.concat(dfs, axis=1, keys=range(len(dfs)), join='outer', copy=False)
merg1 = pd.merge(Address_Normal(sov), Address_Normal(dfs[0]), on = 'Add', how='outer', suffixes=('', '_z') )
merg1['Track'] = np.nan
merg1['Track'] = np.where(pd.notnull(merg1['Potential Total'])
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
reader = StataReader(path)
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path), InvalidColumnName)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(KeyError,
lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path),
PossiblePrecisionLoss)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified['s1'] = Series(modified['s1'], dtype=np.int16)
modified['s2'] = Series(modified['s2'], dtype=np.int32)
modified['s3'] = Series(modified['s3'], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path, {0: 'tc'}),
InvalidColumnName)
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ['_0']
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_date_export_formats(self):
columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
conversions = dict(((c, c) for c in columns))
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = 'index'
expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1)] # Year
expected = DataFrame([expected_values], columns=columns)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_bool_uint(self):
s0 = Series([0, 1, True], dtype=np.bool)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
's4': s4, 's5': s5, 's6': s6})
original.index.name = 'index'
expected = original.copy()
expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
np.int32, np.float64)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
sr_115 = StataReader(self.dta16_115).variable_labels()
sr_117 = StataReader(self.dta16_117).variable_labels()
keys = ('var1', 'var2', 'var3')
labels = ('label1', 'label2', 'label3')
for k,v in compat.iteritems(sr_115):
self.assertTrue(k in sr_117)
self.assertTrue(v == sr_117[k])
self.assertTrue(k in keys)
self.assertTrue(v in labels)
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
sr = StataReader(path)
typlist = sr.typlist
variables = sr.varlist
formats = sr.fmtlist
for variable, fmt, typ in zip(variables, formats, typlist):
self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
self.assertTrue(int(variable[1:]) == typ)
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.assertRaises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_missing_value_generator(self):
types = ('b','h','l')
df = DataFrame([[0.0]],columns=['float_'])
with tm.ensure_clean() as path:
df.to_stata(path)
valid_range = StataReader(path).VALID_RANGE
expected_values = ['.' + chr(97 + i) for i in range(26)]
expected_values.insert(0, '.')
for t in types:
offset = valid_range[t][1]
for i in range(0,27):
val = StataMissingValue(offset+1+i)
self.assertTrue(val.string == expected_values[i])
# Test extremes for floats
val = StataMissingValue(struct.unpack('<f',b'\x00\x00\x00\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<f',b'\x00\xd0\x00\x7f')[0])
self.assertTrue(val.string == '.z')
# Test extremes for doubles
val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
self.assertTrue(val.string == '.z')
def test_missing_value_conversion(self):
columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
smv = StataMissingValue(101)
keys = [key for key in iterkeys(smv.MISSING_VALUES)]
keys.sort()
data = []
for i in range(27):
row = [StataMissingValue(keys[i+(j*27)]) for j in range(5)]
data.append(row)
expected = DataFrame(data,columns=columns)
parsed_113 = read_stata(self.dta17_113, convert_missing=True)
parsed_115 = read_stata(self.dta17_115, convert_missing=True)
parsed_117 = read_stata(self.dta17_117, convert_missing=True)
tm.assert_frame_equal(expected, parsed_113)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
def test_big_dates(self):
yr = [1960, 2000, 9999, 100, 2262, 1677]
mo = [1, 1, 12, 1, 4, 9]
dd = [1, 1, 31, 1, 22, 23]
hr = [0, 0, 23, 0, 0, 0]
mm = [0, 0, 59, 0, 0, 0]
ss = [0, 0, 59, 0, 0, 0]
expected = []
for i in range(len(yr)):
row = []
for j in range(7):
if j == 0:
row.append(
datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
elif j == 6:
row.append(datetime(yr[i], 1, 1))
else:
row.append(datetime(yr[i], mo[i], dd[i]))
expected.append(row)
expected.append([NaT] * 7)
columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
'date_th', 'date_ty']
# Fixes for weekly, quarterly, half and yearly formats
expected[2][2] = datetime(9999,12,24)
expected[2][3] = datetime(9999,12,1)
expected[2][4] = datetime(9999,10,1)
expected[2][5] = datetime(9999,7,1)
expected[4][2] = datetime(2262,4,16)
expected[4][3] = expected[4][4] = datetime(2262,4,1)
expected[4][5] = expected[4][6] = datetime(2262,1,1)
expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677,10,1)
expected[5][5] = expected[5][6] = datetime(1678,1,1)
expected = DataFrame(expected, columns=columns, dtype=np.object)
parsed_115 = read_stata(self.dta18_115)
parsed_117 = read_stata(self.dta18_117)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
date_conversion = dict((c, c[-2:]) for c in columns)
#{c : c[-2:] for c in columns}
with tm.ensure_clean() as path:
expected.index.name = 'index'
expected.to_stata(path, date_conversion)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_dtype_conversion(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
no_conversion = read_stata(self.dta15_117,
convert_dates=True)
tm.assert_frame_equal(expected, no_conversion)
conversion = read_stata(self.dta15_117,
convert_dates=True,
preserve_dtypes=False)
# read_csv types are the same
expected = self.read_csv(self.csv15)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
tm.assert_frame_equal(expected, conversion)
def test_drop_column(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
columns = ['byte_', 'int_', 'long_']
expected = expected[columns]
dropped = read_stata(self.dta15_117, convert_dates=True,
columns=columns)
tm.assert_frame_equal(expected, dropped)
with tm.assertRaises(ValueError):
columns = ['byte_', 'byte_']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
with tm.assertRaises(ValueError):
columns = ['byte_', 'int_', 'long_', 'not_found']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
def test_categorical_writing(self):
original = DataFrame.from_records(
[
["one", "ten", "one", "one", "one", 1],
["two", "nine", "two", "two", "two", 2],
["three", "eight", "three", "three", "three", 3],
["four", "seven", 4, "four", "four", 4],
["five", "six", 5, np.nan, "five", 5],
["six", "five", 6, np.nan, "six", 6],
["seven", "four", 7, np.nan, "seven", 7],
["eight", "three", 8, np.nan, "eight", 8],
["nine", "two", 9, np.nan, "nine", 9],
["ten", "one", "ten", np.nan, "ten", 10]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled', 'unlabeled'])
expected = original.copy()
# these are all categoricals
original = pd.concat([original[col].astype('category') for col in original], axis=1)
expected['incompletely_labeled'] = expected['incompletely_labeled'].apply(str)
expected['unlabeled'] = expected['unlabeled'].apply(str)
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
expected.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
# Silence warnings
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), expected)
def test_categorical_warnings_and_errors(self):
# Warning for non-string labels
# Error for labels too long
original = pd.DataFrame.from_records(
[['a' * 10000],
['b' * 10000],
['c' * 10000],
['d' * 10000]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
with tm.ensure_clean() as path:
tm.assertRaises(ValueError, original.to_stata, path)
original = pd.DataFrame.from_records(
[['a'],
['b'],
['c'],
['d'],
[1]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
with warnings.catch_warnings(record=True) as w:
original.to_stata(path)
tm.assert_equal(len(w), 1) # should get a warning for mixed content
def test_categorical_with_stata_missing_values(self):
values = [['a' + str(i)] for i in range(120)]
values.append([np.nan])
original = pd.DataFrame.from_records(values, columns=['many_labels'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
def test_categorical_order(self):
# Directly construct using expected codes
# Format is is_cat, col_name, labels (in order), underlying data
expected = [(True, 'ordered', ['a', 'b', 'c', 'd', 'e'], np.arange(5)),
(True, 'reverse', ['a', 'b', 'c', 'd', 'e'], np.arange(5)[::-1]),
(True, 'noorder', ['a', 'b', 'c', 'd', 'e'], np.array([2, 1, 4, 0, 3])),
(True, 'floating', ['a', 'b', 'c', 'd', 'e'], np.arange(0, 5)),
(True, 'float_missing', ['a', 'd', 'e'], np.array([0, 1, 2, -1, -1])),
(False, 'nolabel', [1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
(True, 'int32_mixed', ['d', 2, 'e', 'b', 'a'], np.arange(5))]
cols = []
for is_cat, col, labels, codes in expected:
if is_cat:
cols.append((col, pd.Categorical.from_codes(codes, labels)))
else:
cols.append((col, pd.Series(labels, dtype=np.float32)))
expected = DataFrame.from_items(cols)
# Read with and without categoricals, ensure order is identical
parsed_115 = read_stata(self.dta19_115)
parsed_117 = read_stata(self.dta19_117)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
# Check identity of codes
for col in expected:
if is_categorical_dtype(expected[col]):
tm.assert_series_equal(expected[col].cat.codes,
parsed_115[col].cat.codes)
tm.assert_index_equal(expected[col].cat.categories,
parsed_115[col].cat.categories)
def test_categorical_sorting(self):
parsed_115 = read_stata(self.dta20_115)
parsed_117 = read_stata(self.dta20_117)
# Sort based on codes, not strings
parsed_115 = parsed_115.sort("srh")
parsed_117 = parsed_117.sort("srh")
# Don't sort index
parsed_115.index = np.arange(parsed_115.shape[0])
parsed_117.index = np.arange(parsed_117.shape[0])
codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
expected = pd.Series(pd.Categorical.from_codes(codes=codes,
categories=categories))
tm.assert_series_equal(expected, parsed_115["srh"])
tm.assert_series_equal(expected, parsed_117["srh"])
def test_categorical_ordering(self):
parsed_115 = read_stata(self.dta19_115)
parsed_117 = read_stata(self.dta19_117)
parsed_115_unordered = read_stata(self.dta19_115,
order_categoricals=False)
parsed_117_unordered = read_stata(self.dta19_117,
order_categoricals=False)
for col in parsed_115:
if not is_categorical_dtype(parsed_115[col]):
continue
tm.assert_equal(True, parsed_115[col].cat.ordered)
tm.assert_equal(True, parsed_117[col].cat.ordered)
tm.assert_equal(False, parsed_115_unordered[col].cat.ordered)
tm.assert_equal(False, parsed_117_unordered[col].cat.ordered)
def test_read_chunks_117(self):
files_117 = [self.dta1_117, self.dta2_117, self.dta3_117,
self.dta4_117, self.dta14_117, self.dta15_117,
self.dta16_117, self.dta17_117, self.dta18_117,
self.dta19_117, self.dta20_117]
for fname in files_117:
for chunksize in 1,2:
for convert_categoricals in False, True:
for convert_dates in False, True:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(fname, convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
itr = read_stata(fname, iterator=True)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos:pos+chunksize, :]
try:
tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
except AssertionError:
# datetime.datetime and pandas.tslib.Timestamp may hold
# equivalent values but fail assert_frame_equal
assert(all([x == y for x, y in zip(from_frame, chunk)]))
pos += chunksize
def test_iterator(self):
fname = self.dta3_117
parsed = read_stata(fname)
itr = read_stata(fname, iterator=True)
chunk = itr.read(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
itr = read_stata(fname, chunksize=5)
chunk = list(itr)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])
itr = read_stata(fname, iterator=True)
chunk = itr.get_chunk(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
itr = read_stata(fname, chunksize=5)
from FINE.component import Component, ComponentModeling
from FINE import utils
import warnings
import pyomo.environ as pyomo
import pandas as pd
class Transmission(Component):
"""
Doc
"""
def __init__(self, esM, name, commodity, losses=0, distances=None,
hasCapacityVariable=True, capacityVariableDomain='continuous', capacityPerPlantUnit=1,
hasIsBuiltBinaryVariable=False, bigM=None,
operationRateMax=None, operationRateFix=None, tsaWeight=1,
locationalEligibility=None, capacityMin=None, capacityMax=None, sharedPotentialID=None,
capacityFix=None, isBuiltFix=None,
investPerCapacity=0, investIfBuilt=0, opexPerOperation=0, opexPerCapacity=0,
opexIfBuilt=0, interestRate=0.08, economicLifetime=10):
# TODO add unit checks
# Set general component data
utils.checkCommodities(esM, {commodity})
self._name, self._commodity = name, commodity
self._distances = utils.checkAndSetDistances(esM, distances)
self._losses = utils.checkAndSetTransmissionLosses(esM, losses, distances)
# Set design variable modeling parameters
utils.checkDesignVariableModelingParameters(capacityVariableDomain, hasCapacityVariable,
hasIsBuiltBinaryVariable, bigM)
self._hasCapacityVariable = hasCapacityVariable
self._capacityVariableDomain = capacityVariableDomain
self._capacityPerPlantUnit = capacityPerPlantUnit
self._hasIsBuiltBinaryVariable = hasIsBuiltBinaryVariable
self._bigM = bigM
# Set economic data
self._investPerCapacity = utils.checkAndSetCostParameter(esM, name, investPerCapacity, '2dim')
self._investIfBuilt = utils.checkAndSetCostParameter(esM, name, investIfBuilt, '2dim')
self._opexPerOperation = utils.checkAndSetCostParameter(esM, name, opexPerOperation, '2dim')
self._opexPerCapacity = utils.checkAndSetCostParameter(esM, name, opexPerCapacity, '2dim')
self._opexIfBuilt = utils.checkAndSetCostParameter(esM, name, opexIfBuilt, '2dim')
self._interestRate = utils.checkAndSetCostParameter(esM, name, interestRate, '2dim')
self._economicLifetime = utils.checkAndSetCostParameter(esM, name, economicLifetime, '2dim')
self._CCF = self.getCapitalChargeFactor()
# Set location-specific operation parameters
if operationRateMax is not None and operationRateFix is not None:
operationRateMax = None
warnings.warn('If operationRateFix is specified, the operationRateMax parameter is not required.\n' +
'The operationRateMax time series was set to None.')
utils.checkOperationTimeSeriesInputParameters(esM, operationRateMax, locationalEligibility, '2dim')
utils.checkOperationTimeSeriesInputParameters(esM, operationRateFix, locationalEligibility, '2dim')
self._fullOperationRateMax = utils.setFormattedTimeSeries(operationRateMax)
self._aggregatedOperationRateMax = None
self._operationRateMax = utils.setFormattedTimeSeries(operationRateMax)
self._fullOperationRateFix = utils.setFormattedTimeSeries(operationRateFix)
self._aggregatedOperationRateFix = None
self._operationRateFix = utils.setFormattedTimeSeries(operationRateFix)
self._tsaWeight = tsaWeight
# Set location-specific design parameters
self._sharedPotentialID = sharedPotentialID
utils.checkLocationSpecficDesignInputParams(esM, hasCapacityVariable, hasIsBuiltBinaryVariable,
capacityMin, capacityMax, capacityFix,
locationalEligibility, isBuiltFix, sharedPotentialID,
'2dim')
self._capacityMin, self._capacityMax, self._capacityFix = capacityMin, capacityMax, capacityFix
self._isBuiltFix = isBuiltFix
# Set locational eligibility
operationTimeSeries = operationRateFix if operationRateFix is not None else operationRateMax
self._locationalEligibility = utils.setLocationalEligibility(esM, locationalEligibility, capacityMax,
capacityFix, isBuiltFix,
hasCapacityVariable, operationTimeSeries,
'2dim')
# Variables at optimum (set after optimization)
self._capacityVariablesOptimum = None
self._isBuiltVariablesOptimum = None
self._operationVariablesOptimum = None
def getCapitalChargeFactor(self):
""" Computes and returns capital charge factor (inverse of annuity factor) """
return 1 / self._interestRate - 1 / (pow(1 + self._interestRate, self._economicLifetime) * self._interestRate)
def addToEnergySystemModel(self, esM):
esM._isTimeSeriesDataClustered = False
if self._name in esM._componentNames:
if esM._componentNames[self._name] == TransmissionModeling.__name__:
warnings.warn('Component identifier ' + self._name + ' already exists. Data will be overwritten.')
else:
raise ValueError('Component name ' + self._name + ' is not unique.')
else:
esM._componentNames.update({self._name: TransmissionModeling.__name__})
mdl = TransmissionModeling.__name__
if mdl not in esM._componentModelingDict:
esM._componentModelingDict.update({mdl: TransmissionModeling()})
esM._componentModelingDict[mdl]._componentsDict.update({self._name: self})
def setTimeSeriesData(self, hasTSA):
self._operationRateMax = self._aggregatedOperationRateMax if hasTSA else self._fullOperationRateMax
self._operationRateFix = self._aggregatedOperationRateFix if hasTSA else self._fullOperationRateFix
def getDataForTimeSeriesAggregation(self):
fullOperationRate = self._fullOperationRateFix if self._fullOperationRateFix is not None \
else self._fullOperationRateMax
if fullOperationRate is not None:
fullOperationRate = fullOperationRate.copy()
uniqueIdentifiers = [self._name + "_operationRate_" + locationIn + '_' + locationOut
for locationIn, locationOut in fullOperationRate.columns]
compData = pd.DataFrame(index=fullOperationRate.index, columns=uniqueIdentifiers)
compDict = {}
for locationIn, locationOut in fullOperationRate.columns:
uniqueIdentifier = self._name + "_operationRate_" + locationIn + '_' + locationOut
compData[uniqueIdentifier] = fullOperationRate.pop((locationIn, locationOut))
compDict.update({uniqueIdentifier: self._tsaWeight})
return compData, compDict
else:
return None, {}
def setAggregatedTimeSeriesData(self, data):
fullOperationRate = self._fullOperationRateFix if self._fullOperationRateFix is not None \
else self._fullOperationRateMax
if fullOperationRate is not None:
uniqueIdentifiers = [self._name + "_operationRate_" + locationIn + '_' + locationOut
for locationIn, locationOut in fullOperationRate.columns]
compData = data[uniqueIdentifiers].copy()
compData = pd.DataFrame(index=data.index, columns=fullOperationRate.columns)
import os
import sys
import multiprocessing as mp
import string
import platform
import shutil
import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
import calendar
import pyemu
import flopy
# some global config for plotting
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
abet = string.ascii_uppercase
# some global config for path/directory structure
old_h_dir = os.path.join("..", "ver")
h_nam_file = "eaa_ver.nam"
h_dir = "history"
h_start_datetime = "1-1-2001"
h_end_datetime = "12-31-2015"
old_s_dir = os.path.join("..", "pred")
s_dir = "scenario"
s_nam_file = "eaa_pred.nam"
# history and scenario simulation start datetimes
s_start_datetime = "1-1-1947"
s_end_datetime = "12-31-1958"
# files with history and scenario observation locations and states
h_hds_file = os.path.join("_data", "reformatted_head_obs.smp")
h_drn_file = os.path.join("_data", "springflow_obs.smp")
h_crd_file = os.path.join("_data", "head_obs.crd")
s_hds_file = os.path.join("_data", "pred_head_obs.smp")
s_drn_file = os.path.join("_data", "pred_springflow_obs.smp")
s_crd_file = os.path.join("_data", "pred_head_obs.crd")
# value of dry cells
hdry = -1.0e+20
# platform-specific binary information
exe_name = "mf2005"
ies_name = "pestpp-ies"
if "window" in platform.platform().lower():
bin_path = os.path.join("bin", "win")
exe_name = exe_name + ".exe"
ies_name = ies_name + ".exe"
elif "darwin" in platform.platform().lower():
bin_path = os.path.join("bin", "mac")
else:
bin_path = os.path.join("bin", "linux")
# the numeric IDs of J-17 and J-27
j17_id = 6837203
j27_id = 6950302
def _setup_model(old_dir, new_dir, start_datetime, nam_file, run=False):
"""load an existing model (either history or scenario) and configure it for
PEST interface construction
Args:
old_dir (str): directory location where the original model resides
new_dir (str): directory location where the new model files will be written
start_datetime (str): string rep of model starting datetime
nam_file (str): MODFLOW-2005 nam file
run (bool): flag to run the model once it is written to new_dir. Default is False
"""
# load the existing model and set some attributes
m = flopy.modflow.Modflow.load(nam_file, model_ws=old_dir, check=False,
verbose=True, forgive=False)
m.start_datetime = start_datetime
m.lpf.hdry = hdry
m.bas6.hnoflo = hdry
# change the workspace to new_dir
m.change_model_ws(new_dir, reset_external=True)
# set the external path so that arrays and lists are outside of the
# terrible MODFLOW file formats
m.external_path = "."
# write the inputs
m.write_input()
# run?
if run:
shutil.copy2(os.path.join(bin_path, exe_name), os.path.join(new_dir, exe_name))
pyemu.os_utils.run("{0} {1}".format(exe_name, nam_file), cwd=new_dir)
def _rectify_wel(model_ws, nam_file, run=True):
"""rectify the stress period WEL file entries so that every
stress period has the same entries (filling missing wells with
"dummy" entries with zero pumping)
Args:
model_ws (str): model workspace
nam_file (str): MODFLOW-2005 nam file
run (bool): flag to run model once the WEL file has been rectified.
Default is True.
"""
# load the model
m = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False,
verbose=True, forgive=False)
# get the current WEL file datasets
spd = m.wel.stress_period_data
df_dict = {}
all_kij = set()
# run thru all stress periods to get the union of well locations
for kper in range(m.nper):
ra = spd[kper]
df = pd.DataFrame.from_records(ra)
df.loc[:, "kij"] = df.apply(lambda x: (x.k, x.i, x.j), axis=1)
df.loc[:, "kij_str"] = df.kij.apply(lambda x: "{0:01.0f}_{1:03.0f}_{2:03.0f}".format(*x))
df.index = df.kij_str
all_kij.update(set(df.kij_str.tolist()))
print(kper)
df_dict[kper] = df
# work up fast-lookup containers for well location indices
new_index = list(all_kij)
new_k = {s: int(s.split('_')[0]) for s in new_index}
new_i = {s: int(s.split('_')[1]) for s in new_index}
new_j = {s: int(s.split('_')[2]) for s in new_index}
new_index.sort()
# process each stress period
new_spd = {}
for kper, df in df_dict.items():
# reindex with the full kij locations index
df = df.reindex(new_index)
# map the new kijs to the old kijs
for f, d in zip(["k", "i", "j"], [new_k, new_i, new_j]):
isna = df.loc[:, f].isna()
df.loc[isna, f] = [d[kij] for kij in df.loc[isna, :].index.values]
# fill the nans with 0.0
isna = df.flux.isna()
df.loc[isna, "flux"] = 0.0
# deal with the platform numpy int casting issue
if "window" in platform.platform():
df.loc[:, "i"] = df.i.astype(np.int32)
df.loc[:, "j"] = df.j.astype(np.int32)
df.loc[:, "k"] = df.k.astype(np.int32)
else:
df.loc[:, "i"] = df.i.astype(np.int)
df.loc[:, "j"] = df.j.astype(np.int)
df.loc[:, "k"] = df.k.astype(np.int)
spd[kper] = df.loc[:, ["k", "i", "j", "flux"]].to_records(index=False)
# create a new WEL package and replace the old one
flopy.modflow.ModflowWel(m, stress_period_data=spd, ipakcb=m.wel.ipakcb)
# write to a new model_ws with a "_wel" suffix
m.change_model_ws("{0}_wel".format(model_ws))
m.external_path = '.'
m.write_input()
# run?
if run:
shutil.copy2(os.path.join(bin_path, exe_name), os.path.join("{0}_wel".format(model_ws), exe_name))
pyemu.os_utils.run("{0} {1}".format(exe_name, nam_file), cwd="{0}_wel".format(model_ws))
# just to make sure the model ran
new_lst = flopy.utils.MfListBudget(os.path.join("{0}_wel".format(model_ws), nam_file.replace(".nam", ".list")))
def build_rch_zone_array(model_ws, nam_file, plot=False):
"""build a recharge zone integer array for zone-based parameters
using unique values in the in recharge arrays
Args:
model_ws (str): model workspace
nam_file (str): MODFLOW-2005 nam file
plot (bool): flag to plot the zone array. Default is False
"""
m = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, load_only=["rch"], check=False,
verbose=True, forgive=False)
arr = m.rch.rech[0].array
full_arr = m.rch.rech.array
mn = full_arr.mean(axis=0)[0, :, :]
mn_u, mn_c = np.unique(mn, return_counts=True)
zn_arr = np.zeros_like(arr, dtype=np.int)
for i, u_val in enumerate(mn_u):
# this conditional makes sure we keep zeros as zero in the zone array
if u_val == 0.0:
continue
zn_arr[mn == u_val] = i
np.savetxt(os.path.join("_data", "rch_zn_arr.dat"), zn_arr, fmt="%3d")
if plot:
zn_arr = zn_arr.astype(np.float)
zn_arr[zn_arr == 0] = np.NaN
cb = plt.imshow(zn_arr)
plt.colorbar(cb)
plt.show()
def _setup_pst(org_model_ws, new_model_ws, nam_file):
"""construct the PEST interface, set parameter bounds and
generate the prior ensemble
Args:
org_model_ws (str): original model workspace
new_model_ws (str): new model workspace/directory where the
PEST interface will be constructed
nam_file (str): MODFLOW-2005 nam file
"""
# make sure the model simulated heads file exists - need this for observations
if not os.path.exists(os.path.join(org_model_ws, nam_file.replace(".nam", ".hds"))):
raise Exception("need to call _setup_model()")
# load the model from org_model_ws
m= flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws,
load_only=["dis"], check=False,
verbose=True, forgive=False)
# load the recharge zone array
rch_zn_arr = np.loadtxt(os.path.join("_data", "rch_zn_arr.dat"), dtype=np.int)
# array-based model inputs to parameterize by layer (zero-based)
props = [["lpf.hk", 0], ["lpf.ss", 0], ["lpf.sy", 0], ["bas6.strt", 0]]
# copy to constant (global props)
const_props = props.copy()
# fill a zone-based array inputs container with recharge
# zone pars for each stress period
zone_props = []
zone_props.extend([["rch.rech", kper] for kper in range(m.nper)])
# extend the global parameter container with recharge for each stress period
const_props.extend([["rch.rech", kper] for kper in range(m.nper)])
# include the final simulated groundwater level in every active
# model cell as an "observation" in PEST interface
hds_kperk = [[m.nper - 1, 0]]
# parameterize WEL flux and DRN cond spatially (one par for each entry)
spatial_bc_props = [["wel.flux", 0], ["drn.cond", 0]]
# parameterize WEL flux with a single global multiplier for each stress period
temporal_bc_props = [["wel.flux", kper] for kper in range(m.nper)]
#create the pest interface...
ph = pyemu.helpers.PstFromFlopyModel(nam_file, org_model_ws=org_model_ws, new_model_ws=new_model_ws,
grid_props=props,
hds_kperk=hds_kperk, zone_props=zone_props, hfb_pars=True,
remove_existing=True, build_prior=False, k_zone_dict={0: rch_zn_arr},
spatial_bc_props=spatial_bc_props, temporal_bc_props=temporal_bc_props,
model_exe_name=exe_name, pp_props=props, pp_space=30, const_props=const_props)
# set the parameter bounds to Edwards-based physically-plausible values
_set_par_bounds(ph.pst, nam_file)
# geostatistcal draws from the prior
pe = ph.draw(num_reals=300, use_specsim=True)
#add the control file initial values as a realization
pe.add_base()
# enforce parameter bounds on the ensemble
pe.enforce()
# save the ensemble to compressed (PEST extended binary) format
pe.to_binary(os.path.join(new_model_ws, "prior.jcb"))
# save the control file
ph.pst.write(os.path.join(new_model_ws, nam_file.replace(".nam", ".pst")))
# read the array parameter multiplier config file and set a hard upper bound
# on specific yield
df = pd.read_csv(os.path.join(new_model_ws, "arr_pars.csv"))
df.loc[:, "upper_bound"] = np.NaN
df.loc[:, "lower_bound"] = np.NaN
df.loc[df.org_file.apply(lambda x: "sy_" in x), "upper_bound"] = 0.25
df.to_csv(os.path.join(new_model_ws, "arr_pars.csv"))
# put the MODFLOW-2005 and PESTPP-IES binaries in the new_model_ws
shutil.copy2(os.path.join(bin_path, exe_name), os.path.join(new_model_ws, exe_name))
shutil.copy2(os.path.join(bin_path, ies_name), os.path.join(new_model_ws, ies_name))
def _set_par_bounds(pst, nam_file):
"""set the parameter bounds to expert-knowledge-based
ranges
Args:
pst (pyemu.Pst): PEST control file instance
nam_file (str): MODFLOW-2005 nam file
"""
par = pst.parameter_data
# special case for WEL flux pars: more recent time has metering, so less uncertainty
names = par.loc[par.pargp.apply(lambda x: "welflux" in x), "parnme"]
if nam_file == h_nam_file:
par.loc[names, "parlbnd"] = 0.9
par.loc[names, "parubnd"] = 1.1
else:
par.loc[names, "parlbnd"] = 0.7
par.loc[names, "parubnd"] = 1.3
# DRN conductance
names = par.loc[par.pargp.apply(lambda x: "drncond" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.5
par.loc[names, "parubnd"] = 1.5
# initial conditions
names = par.loc[par.pargp.apply(lambda x: "strt" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.9
par.loc[names, "parubnd"] = 1.1
# recharge
names = par.loc[par.pargp.apply(lambda x: "rech" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.8
par.loc[names, "parubnd"] = 1.2
# HK
names = par.loc[par.pargp.apply(lambda x: "hk" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.01
par.loc[names, "parubnd"] = 100
def _add_smp_obs_to_pst(org_model_ws, new_model_ws, pst_name, nam_file, hds_crd_file):
"""add observations to the control file for the locations where groundwater levels
have been measured. The actual value of the observations will be set elsewhere
Args:
org_model_ws (str): original model workspace
new_model_ws (str): new model workspace
pst_name (str): PEST control file name
nam_file (str): MODFLOW-2005 nam file
hds_crd_file (str): PEST-style coordinate file that has been processed
to include k,i,j indices
"""
# make sure the control file exists
pst_name = os.path.join(new_model_ws, pst_name)
assert os.path.exists(pst_name)
# load the model
m = flopy.modflow.Modflow.load(nam_file, model_ws=new_model_ws,
load_only=["dis"], check=False,
forgive=False)
# load the control file
pst = pyemu.Pst(pst_name)
# load GW level location dataframe
crd_df = pd.read_csv(hds_crd_file + ".csv")
#load DRN location dataframe
drn_df = pd.read_csv(os.path.join("_data", "DRN_dict.csv"), delim_whitespace=True,
header=None, names=["name", "k", "i", "j"])
# build a dict of name-index location for DRN locations
kij_dict = {n: [0, i, j] for n, i, j in zip(drn_df.name, drn_df.i, drn_df.j)}
# the name of the DRN budget file
cbd_file = nam_file.replace(".nam", ".cbd")
# get one from the org model workspace and update the path to it
shutil.copy2(os.path.join(org_model_ws, cbd_file), os.path.join(new_model_ws, cbd_file))
cbd_file = os.path.join(new_model_ws, cbd_file)
# setup the forward run DRN budget post processor
prec = "double"
if "win" not in platform.platform().lower(): # not win or darwin
prec = "singl"
cbd_frun, cbd_df = pyemu.gw_utils.setup_hds_timeseries(cbd_file, kij_dict, prefix="drn",
include_path=True, fill=-1.0e+30,
text="drains", precision=prec,
model=m)
# make sure the new DRN instruction file exists
ins_file = "{0}_timeseries.processed.ins".format(cbd_file)
assert os.path.exists(ins_file), ins_file
# add the new DRN observations to the control file
pst.add_observations(ins_file=ins_file, pst_path=".")
# set meaningful obs group names
pst.observation_data.loc[cbd_df.index, "obgnme"] = cbd_df.obgnme
# build a dict of name-index locations for the GW level observations locations
kij_dict = {n: [0, i, j] for n, i, j in zip(crd_df.name, crd_df.i, crd_df.j)}
# setup GW level post processor
hds_file = os.path.join(new_model_ws, nam_file.replace(".nam", ".hds"))
assert os.path.exists(hds_file)
hds_frun, hds_df = pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, prefix="hds",
include_path=True, fill=-1.0e+30, model=m)
# make sure the GW level instruction file exists
ins_file = "{0}_timeseries.processed.ins".format(hds_file)
assert os.path.exists(ins_file), ins_file
# add the GW level obs to the control file and set meaningful
# obs group names
pst.add_observations(ins_file=ins_file, pst_path=".")
pst.observation_data.loc[hds_df.index, "obgnme"] = hds_df.obgnme
# write the updated control file
pst.write(pst_name)
# add the post processor commands to the forward run script
frun_file = os.path.join(new_model_ws, "forward_run.py")
with open(frun_file, 'r') as f:
lines = f.readlines()
idx = None
for i, line in enumerate(lines):
if "__name__" in line:
idx = i
assert idx is not None
lines.insert(idx, " " + cbd_frun + '\n')
lines.insert(idx, " " + hds_frun + '\n')
with open(frun_file, 'w') as f:
for line in lines:
f.write(line)
def add_ij_to_hds_smp(crd_file):
"""intersect the GW level observation coordinates against the
model grid to get k,i,j index information
Args:
crd_file (str): PEST-style "bore coordinates" file
"""
from shapely.geometry import Point
# read the bore coord file
crd_df = pd.read_csv(crd_file, delim_whitespace=True, header=None, names=["name", "x", "y", "layer"])
# set a shapely point attribute
crd_df.loc[:, "pt"] = crd_df.apply(lambda x: Point(x.x, x.y), axis=1)
# load the history model
m = flopy.modflow.Modflow.load(h_nam_file, model_ws=h_dir,
load_only=["dis"], check=False,
forgive=False)
# use the flopy grid intersect functionality
gi = flopy.utils.GridIntersect(m.modelgrid)
crd_df.loc[:, 'ij'] = crd_df.pt.apply(lambda x: gi.intersect_point(x)[0][0])
# split out the i and j indices
crd_df.loc[:, 'i'] = crd_df.ij.apply(lambda x: x[0])
crd_df.loc[:, 'j'] = crd_df.ij.apply(lambda x: x[1])
# remove extra columns
crd_df.pop("ij")
crd_df.pop("pt")
# save the new dataframe to a CSV file
crd_df.to_csv(crd_file + ".csv")
def _set_obsvals(d, nam_file, hds_file, drn_file, pst_file, run=True):
"""samples the groundwater and spring discharge observations to
the model stress periods and sets the "obsval" attribute in the control
file. Also plots up org obs and sampled obs in a multipage pdf
Args:
d (str): directory where the control file exists
nam_file (str): MODFLOW-2005 nam file
hds_file (str): PEST-style site sample file with groundwater
level observations
drn_file (str): PEST-style site sample file with spring discharge
observations
pst_file (str): PEST control file
run (bool): flag to run PESTPP-IES with NOPTMAX=0 after the
observation values have been updated. Default is True.
"""
# load the model
m = flopy.modflow.Modflow.load(nam_file, model_ws=d, load_only=["dis"],
check=False, forgive=False)
# work out the stress period ending datetime
sp_end_dts = pd.to_datetime(m.start_datetime) + pd.to_timedelta(np.cumsum(m.dis.perlen.array), unit='d')
# cast the model start_datetime from a str to a datetime instance
start_datetime = pd.to_datetime(m.start_datetime)
# load the gw level and spring discharge site sample files
# into pandas dataframes
hds_df = pyemu.smp_utils.smp_to_dataframe(hds_file)
drn_df = pyemu.smp_utils.smp_to_dataframe(drn_file)
# plotting limits
xmn, xmx = pd.to_datetime(start_datetime), pd.to_datetime(sp_end_dts[-1])
ymn, ymx = hds_df.value.min(), hds_df.value.max()
# containers for the sampled observation series
hds_sampled_dfs = []
drn_sampled_dfs = []
# a function to sample each observation in a given site
# dataframe to the model stress period ending datetimes
# uses nearest neighbor
def sample_to_model(udf):
d, v = [], []
for dt, val in zip(udf.index.values, udf.value.values):
# difference between this obs datetime and the
# stress period end datetime
diff = (sp_end_dts - dt).map(np.abs).values
# the index of the minimum diff (nearest neighbor)
idxmin = np.argmin(diff)
# minimum diff in days
day_diff = diff[idxmin].astype('timedelta64[D]')
# the diff is greater than a month, something is wrong...
if day_diff > np.timedelta64(31, 'D'):
print(idxmin, sp_end_dts[idxmin], dt, day_diff)
continue
# save the datetime and value
d.append(sp_end_dts[idxmin])
v.append(val)
# form a new dataframe and return
udf_mod = pd.DataFrame({"value": v}, index=d)
return udf_mod
# save a multipage PDF for inspection
with PdfPages(os.path.join("_data", "obs.pdf")) as pdf:
ax_per_page = 10
fig, axes = plt.subplots(ax_per_page, 1, figsize=(8.5, 11))
ax_count = 0
# process each unique GW level site entry
for usite in hds_df.name.unique():
print(usite)
# get a dataframe of just this site
udf = hds_df.loc[hds_df.name == usite, ["datetime", "value"]].copy()
# set the index to datetime
udf.index = udf.pop("datetime")
# sample to stress period ending datetimes
udf_mod = sample_to_model(udf)
#set a name attribute
udf_mod.loc[:, "name"] = usite
# store new sample site dataframe
hds_sampled_dfs.append(udf_mod)
# plot
ax = axes[ax_count]
ax.plot(udf.index, udf.value, lw=0.5, marker='.', color='0.5', ms=5, alpha=0.5)
ax.plot(udf_mod.index, udf_mod.value, lw=0.5, marker='.', color='b', ms=5, alpha=0.5)
ax.set_title("site:{0}, org count:{1}, reindexed count:{2}".format(usite, udf.shape[0], udf_mod.shape[0]),
loc="left")
ax.set_xlim(xmn, xmx)
# ax.set_ylim(ymn,ymx)
ax_count += 1
if ax_count >= ax_per_page:
plt.tight_layout()
pdf.savefig()
plt.close(fig)
fig, axes = plt.subplots(ax_per_page, 1, figsize=(8.5, 11))
ax_count = 0
#process each unique DRN site entry
for usite in drn_df.name.unique():
print(usite)
# get a dataframe of just this site
udf = drn_df.loc[drn_df.name == usite, ["datetime", "value"]].copy()
# use the datetime as the index
udf.index = udf.pop("datetime")
# sample to stress period ending datetime
udf_mod = sample_to_model(udf)
# set a name attribute
udf_mod.loc[:, "name"] = usite
# store
drn_sampled_dfs.append(udf_mod)
# plot
ax = axes[ax_count]
ax.plot(udf.index, udf.value, lw=0.5, marker='.', color='0.5', ms=5, alpha=0.5)
ax.plot(udf_mod.index, udf_mod.value, lw=0.5, marker='.', color='b', ms=5, alpha=0.5)
ax.set_title("site:{0}, org count:{1}, reindexed count:{2}".format(usite, udf.shape[0], udf_mod.shape[0]),
loc="left")
ax.set_xlim(xmn, xmx)
ax_count += 1
if ax_count >= ax_per_page:
plt.tight_layout()
pdf.savefig()
plt.close(fig)
fig, axes = plt.subplots(ax_per_page, 1, figsize=(8.5, 11))
ax_count = 0
plt.tight_layout()
pdf.savefig()
# concatenate the sampled GW level dataframes into one large dataframe
hds_df = pd.concat(hds_sampled_dfs)
# set the datetime index as a column
hds_df.loc[:, "datetime"] = hds_df.index
# set a generic and nonduplicated index
hds_df.index = np.arange(hds_df.shape[0])
# save the sampled dataframe
pyemu.smp_utils.dataframe_to_smp(hds_df, hds_file.replace(".smp", "_sampled.smp"))
# concatenate the sample spring discharge dataframes into one large dataframe
drn_df = pd.concat(drn_sampled_dfs)
# set the datetime index as a column
drn_df.loc[:, "datetime"] = drn_df.index
# set a generic and nonduplicated index
drn_df.index = np.arange(drn_df.shape[0])
# save the sampled dataframe
pyemu.smp_utils.dataframe_to_smp(drn_df, drn_file.replace(".smp", "_sampled.smp"))
# build up observation names ("obsnme") in the sampled GW level dataframe
# these are the same names that are in the control file
hds_df.loc[:, "dt_str"] = hds_df.datetime.apply(lambda x: x.strftime("%Y%m%d"))
hds_df.loc[:, "site_name"] = hds_df.name
hds_df.loc[:, "obsnme"] = hds_df.apply(lambda x: "hds_{0}_{1}".format(str(x.site_name), x.dt_str), axis=1)
hds_df.loc[:, "obsnme"] = hds_df.obsnme.apply(str.lower)
hds_df.index = hds_df.obsnme
# load the control file
pst = pyemu.Pst(os.path.join(d, pst_file))
obs = pst.observation_data
# set all observations to zero weight
obs.loc[:, "weight"] = 0.0
# get set containers for observation names in the
# control file and in the GW level dataframe
pnames = set(list(obs.obsnme.values))
snames = set(list(hds_df.obsnme.values))
# make sure all GW level dataframe names are in the
# control file
print(snames - pnames)
assert len((snames - pnames)) == 0
# set the obsval attribute for space-time locations where
# we have actual GW level observations
obs.loc[hds_df.obsnme, "obsval"] = hds_df.value
# set a generic non-zero weight for the actual
# GW level observation locations
obs.loc[hds_df.obsnme, "weight"] = 1.0
# build up observation names ("obsnme") in the sampled spring discharge dataframe
# these are the same names that are in the control file
drn_df.loc[:, "dt_str"] = drn_df.datetime.apply(lambda x: x.strftime("%Y%m%d"))
drn_df.loc[:, "site_name"] = drn_df.name
drn_df.loc[:, "obsnme"] = drn_df.apply(lambda x: "drn_{0}_{1}".format(str(x.site_name), x.dt_str), axis=1)
drn_df.loc[:, "obsnme"] = drn_df.obsnme.apply(str.lower)
drn_df.index = drn_df.obsnme
# get set container for observation names in the
# spring discharge dataframe
snames = set(list(drn_df.obsnme.values))
# make sure all spring discharge dataframe names are in the
# control file
print(snames - pnames)
assert len((snames - pnames)) == 0
# set the obsval attribute for space-time locations where
# we have actual spring discharge observations
# negative 1 since drn out is negative, convert from cfs to cfd
obs.loc[drn_df.obsnme, "obsval"] = -1.0 * drn_df.value * (60. * 60. * 24.)
# set a generic non-zero weight
obs.loc[drn_df.obsnme, "weight"] = 1.0
# set noptmax to 0 for testing
pst.control_data.noptmax = 0
# save the updated control file
pst.write(os.path.join(d, pst_file))
# run PESTPP-IES?
if run:
pyemu.os_utils.run("pestpp-ies {0}".format(pst_file), cwd=d)
def run_local(b_d, m_d, pst_name, num_workers=10):
"""run PESTPP-IES in parallel on the current machine
Args:
b_d (str): "base" directory that contains all the files needed
to run PESTPP-IES (MODFLOW file and PEST interface files)
m_d (str): "master" directory that will be created and where the
PESTPP-IES master instance will be started
pst_name (str): control file name. Must exist in b_d
num_workers (int): number of parallel workers to start.
Default is 10.
"""
pyemu.os_utils.start_workers(b_d, "pestpp-ies", pst_name, num_workers=num_workers,
master_dir=m_d, worker_root=".",
reuse_master=True)
def plot_obs_vs_sim_case(m_d, case="eaa_ver", post_iter=None,
plt_name="obs_v_sim.pdf", focus=False):
"""plot ensemble-based observed vs simulated GW level and spring discharge time
series for a given PEST "case".
Args:
m_d (str): "master" directory that holds the simulated output ensembles
case (str): the PEST "case" name. Default is "eaa_ver". various suffixes are
appended to this case to form control file and ensemble file names
post_iter (int): the PESTPP-IES iteration to use as the "posterior" ensemble.
If None, no posterior will be plotted. If True, only the maximum of the
prior is plotted (to help with figure "busy-ness"). Default is None.
plt_name (str): the name of the multi-page PDF to create. It is written in the
m_d directory. Default is "obs_v_sim.pdf".
focus (bool): flag to plot only the four locations of management interest. If
True, then only 4 axes are plotted - this creates the figures shown in the
manuscript. If False, all locations are plotted - this creates the
multipage PDFs shown in the supplementary material
Notes:
calls plot_obs_vs_sim()
"""
pst = pyemu.Pst(os.path.join(m_d, case + ".pst"))
base_en = pyemu.ObservationEnsemble.from_binary(pst=pst,
filename=os.path.join(m_d, case + ".base.obs.jcb"))
pr_en = pyemu.ObservationEnsemble.from_binary(pst=pst,
filename=os.path.join(m_d, case + ".0.obs.jcb"))
pt_en = None
if post_iter is not None:
pt_en = pyemu.ObservationEnsemble.from_binary(pst=pst,
filename=os.path.join(m_d, "{0}.{1}.obs.jcb". \
format(case, post_iter)))
if "eaa_ver" in case:
s, e = h_start_datetime, h_end_datetime
elif "eaa_pred" in case:
s, e = s_start_datetime, s_end_datetime
else:
raise Exception()
plot_obs_vs_sim(pst=pst, start_datetime=s, end_datetime=e,
base_en=base_en, pr_en=pr_en, pt_en=pt_en,
plt_name=os.path.join(m_d, plt_name), focus=focus)
def plot_obs_vs_sim(pst, start_datetime, end_datetime, base_en=None, pr_en=None, pt_en=None,
plt_name="obs_v_sim.pdf", mask_invalid=True, focus=False):
"""plot ensemble-based observed vs simulated
Args:
pst (pyemu.Pst): control file instance
start_datetime (str): model start datetime string
end_datetime (str): model end datetime string
base_en (pyemu.ObservationEnsemble): the observed plus noise ensemble.
Default is None (dont plot)
pr_en (pyemu.ObservationEnsemble): prior simulated output ensemble.
Default is None (dont plot)
pt_en (pyemu.ObservationEnsemble): posterior simulated output ensemble.
Default is None (dont plot)
plt_name (str): name of plot to generate. Default is "obs_v_sim.pdf"
mask_invalid (bool): flag to mask invalid values in the simulated output
ensembles (defined by hdry). Default is True.
focus (bool): flag to plot only the four locations of management interest. If
True, then only 4 axes are plotted - this creates the figures shown in the
manuscript. If False, all locations are plotted - this creates the
multipage PDFs shown in the supplementary material
"""
# get the non-zero observation data
obs = pst.observation_data
nz_obs = obs.loc[pst.nnz_obs_names, :].copy()
# set the datetimes for each non-zero observation
nz_obs.loc[:, "datetime"] = pd.to_datetime(nz_obs.obsnme.apply(lambda x: x.split('_')[-1]))
# spring discharge obs names
drn_names = nz_obs.loc[nz_obs.obsnme.apply(lambda x: "drn" in x), "obsnme"]
# convert from model units to (positive) CFS for plotting
nz_obs.loc[drn_names, "obsval"] *= -1.0 / (60.0 * 60.0 * 24.0)
# unique nonzero observation groups (site names)
nz_grps = nz_obs.obgnme.unique()
# if focus is True, drop the non-focus sites
focus_sites = ["comal", "sanmar", str(j17_id), "j-17", str(j27_id), "j-27"]
focus_labels = ["Comal", "<NAME>", "J-17", "J-17", "J-27", "J-27"]
nz_grps.sort()
if focus:
keep = []
labels = []
for nz_grp in nz_grps:
for fs, lab in zip(focus_sites, focus_labels):
print(nz_grp, fs, fs in nz_grp)
if fs in nz_grp:
keep.append(nz_grp)
labels.append(lab)
nz_grps = keep
with PdfPages(plt_name) as pdf:
xmn, xmx = pd.to_datetime(start_datetime), pd.to_datetime(end_datetime)
"""count_docs.py.
Count the number of documents per unique source per year in a given project.
For use with count_documents.ipynb v 2.0.
Last update: 2020-06-25
"""
import csv
import os
import string
import unidecode
import json
import re
import shutil
import collections
import operator
import pandas as pd
from collections import defaultdict
from pymongo import MongoClient
import qgrid
from IPython.display import display, HTML
grid_options = {
# SlickGrid options
'fullWidthRows': True,
'syncColumnCellResize': True,
'forceFitColumns': False,
'defaultColumnWidth': 110,
'rowHeight': 28,
'enableColumnReorder': True,
'enableTextSelectionOnCells': False,
'editable': False,
'autoEdit': False,
'explicitInitialization': True,
# Qgrid options
'maxVisibleRows': 15,
'minVisibleRows': 8,
'sortable': True,
'filterable': True,
'highlightSelectedCell': False,
'highlightSelectedRow': True
}
def source_from_filename(row):
'''Given a row from dfr-browser's metadata file (i.e., one document in the model), return the name of the source
of the document using the document filename. For use when Lexis Nexis does not provide publication source
information.'''
# Grab the filename.
filename = row['id']
# If the filename begins with a digit, look in specific place for the source name.
if re.match("json/\d", filename):
try:
match = re.search("_\d+_([a-z]+)_", filename)
title = match.group(1)
except AttributeError as err:
title = 'unknown'
# If the filename begins with 'we1schomp' or 'chomp', look in another place.
if re.match("json/we1schomp_", filename) or re.match("json/chomp_", filename):
try:
match = re.search("_([a-z]+)_", filename)
title = match.group(1)
except AttributeError as err:
title = 'unknown'
return title
def year_from_row(row):
'''Given a row from dfr-browser's metadata file (i.e., one document in the model), return the publication year
of the document using the document filename. For use when Lexis Nexis does not provide publication date
information.'''
# Grab the filename.
filename = row['id']
# Default to 'unknown' in case neither filename pattern below matches.
year = 'unknown'
# If the filename begins with a digit, look in specific place for the publication year.
if re.match("json/\d", filename):
try:
match = re.search("_(\d\d\d\d)-\d\d-\d\d", filename)
year = match.group(1)
# Use 'unknown' as publication year if we can't determine it.
except AttributeError as err:
year = 'unknown'
# If the filename begins with 'we1schomp' or 'chomp', look in another place.
if re.match("json/we1schomp_", filename) or re.match("json/chomp_", filename):
try:
match = re.search("_(\d\d\d\d)\d\d\d\d_", filename)
year = match.group(1)
# Use 'unknown' as publication year if we can't determine it.
except AttributeError as err:
year = 'unknown'
return year
def year_from_fpath(file):
'''Given a json filename, return the publication year of the document using the document filename. For use when Lexis Nexis does not provide publication date information.'''
# If the filename begins with a digit, grab the relevant publication year from filename.
# If we can't do it, we assign it as 'unknown.'
year = ''
if re.match("^\d", file):
try:
match = re.search("_(\d\d\d\d)-\d\d-\d\d", file)
year = match.group(1)
except AttributeError as err:
year = 'unknown'
# If the filename begins with `we1schomp` or `chomp`, grab the pub year from filename.
# If we can't do it, we assign it as 'unknown'.
if re.match("^we1schomp_", file) or re.match("^chomp_", file):
try:
match = re.search("_(\d\d\d\d)\d\d\d\d_", file)
year = match.group(1)
except AttributeError as err:
year = 'unknown'
if year == '':
year = 'unknown'
return year
def year_from_pubdate(pubdate):
'''Grab just the publication year from Lexis Nexis UTC pubdates.'''
# Grab the first four digits of the given Lexis Nexis UTC pubdate.
try:
match = re.search('(\d\d\d\d)', pubdate)
year = match.group(1)
# If you can't do it, stamp year as 'unknown'
except AttributeError as err:
year = 'unknown'
return year
def source_count_by_year(mode, md_file, json_dir, title_field, date_field):
'''Count the number of articles per source and per publication year in a given project.
Can use dfr-browser's metadata file or a directory of json files. Returns a dataframe where each row is a unique source and
each column is a unique year.'''
# Define variables.
count_title = 0
count_date = 0
d = defaultdict(list)
sourceyear_list = []
year_list = []
# Open up dfr-browser metadata file and read it in as csv.
if mode == 'dfr-browser':
try:
with open(md_file) as mdf:
mdreader = csv.DictReader(mdf)
# Grab the source name and the pubdate from the metadata file.
for row in mdreader:
source = row['journaltitle']
pubdate = row['pubdate']
# If there's not a source name, call source_from_filename function to find it.
if not source:
source = source_from_filename(row)
# Find the publication year, depending on the info we have.
# If all options fail, the below functions will stamp the pubdate as 'unknown'
if re.match('\d\d\d\d', pubdate):
year = year_from_pubdate(pubdate)
if not pubdate:
year = year_from_row(row)
if pubdate == '':
year = year_from_row(row)
if pubdate == 'none':
year = year_from_row(row)
if pubdate == 'unknown':
year = year_from_row(row)
if year == None:
year = 'unknown'
if year == '':
year = 'unknown'
# coerce into string
year = str(year)
# Add the publication year to a list of all publication years for counting purposes.
year_list.append(year)
# Create a source,year tuple.
syt = (source, year)
# Add the source,year tuple to a list of all source,year tuples.
sourceyear_list.append(syt)
except FileNotFoundError:
display(HTML('<p style="color:#FF0000";>Dfr-browser metadata file not found. Dfr-browser may not exist for this project. See `md_file` value under Settings. Use `json` mode instead.</p>'))
return
# Take all of the publication years from all of the documents and create a list where each unique year only appears
# once.
unique_years = sorted(set(year_list))
# Take the list of all source,year tuples and add them to a dictionary of dictionaries where the key is each source
# and the value is the year.
for k,v in sourceyear_list:
d[k].append(v)
# Then for each unique source, count the number of documents published in that source for each unique year.
# Store this in the dictionary of dictionaries.
for k, v in d.items():
new_cols = []
for year in unique_years:
num = v.count(year)
new_cols.append(num)
d[k] = new_cols
if mode == 'json':
# start with a directory
for file in os.listdir(json_dir):
# json check!
if file.endswith('.json'):
# take one file at a time
fpath = os.path.join(json_dir, file)
with open(fpath) as f:
# read that file and load its data
json_decoded = json.loads(f.read())
# try to grab the document's source
try:
source = json_decoded[title_field]
# if a document doesn't have the specified title_file, the source = unkown
except KeyError as err:
count_title += 1
source = 'unknown'
# if the source still isn't set, it's unknown
if source == '':
source = 'unknown'
# check to see if the document has the specified date field to grab the publication year
try:
json_date = json_decoded[date_field]
# if it doesn't, mark date as 'unknown'
except KeyError as err:
count_date += 1
json_date = 'unknown'
# check if date is already a 4-digit year:
result = isinstance(json_date, int)
if result == True:
year = json_date
# if not, try to derive the year of publication from the full publication UTC format date
else:
if json_date is not None:
year = year_from_pubdate(json_date)
else:
year = 'unknown'
# if the date is unknown, try to get it from the filename (will probably only work with WE1S data)
if json_date == 'unknown' or year == None or year == 'unknown':
year = year_from_fpath(file)
# last try (will only work if using WE1S data): if the publication year is listed as 'unknown', try to
# get an accurate year from the `pub_date` field.
# if that doesn't work, keep it at 'unknown'
if year == 'unknown':
try:
pubdate = json_decoded['pub_date']
match = re.search('(\d\d\d\d)', pubdate)
year = match.group(1)
except:
year = 'unknown'
if year == None:
year = 'unknown'
if year == '':
year = 'unknown'
# coerce into string
year = str(year)
# Add the publication year to a list of all publication years for counting purposes.
year_list.append(year)
# Create a source,year tuple.
syt = (source, year)
# Add the source,year tuple to a list of all source,year tuples.
sourceyear_list.append(syt)
# Take all of the publication years from all of the documents and create a list where each unique year only appears once.
unique_years = sorted(set(year_list))
# Take the list of all source,year tuples and add them to a dictionary of dictionaries where the key is each source
# and the value is the year.
for k,v in sourceyear_list:
d[k].append(v)
# Then for each unique source, count the number of documents published in that source for each unique year.
# Store this in the dictionary of dictionaries.
for k, v in d.items():
new_cols = []
for year in unique_years:
num = v.count(year)
new_cols.append(num)
d[k] = new_cols
# Convert dict of dicts to a pandas dataframe for easy viewing in the notebook and create a 'Total' column that displays
# the total number of documents for each unique source.
df = pd.DataFrame.from_dict(d, orient='index', columns=unique_years)
df = df.fillna('unknown')
df['Total'] = df.sum(axis=1)
if count_title > 0:
display(HTML('<p style="color:#FF0000";>Check title_field variable. Specified title field does not exist in 1 or more documents.</p>'))
if count_date > 0:
display(HTML('<p style="color:#FF0000";>Check date_field variable. Specified date field does not exist in 1 or more documents.</p>'))
return df
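# A hypothetical usage sketch (the paths and field names below are assumptions,
# not part of this script): count documents per source per year from a
# directory of json files and display the resulting dataframe with qgrid.
#
# counts_df = source_count_by_year(mode='json', md_file=None,
#                                  json_dir='project_data/json',
#                                  title_field='pub', date_field='pub_date')
# qgrid.show_grid(counts_df, grid_options=grid_options)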
def docs_by_field(json_dir, field):
'''Counts number of documents per given field. Returns a dataframe of counts by field and lists of docs that
can't be opened or that don't contain the given field.'''
# Define variables.
bad_jsons = []
no_field = []
count = 0
count_dict = {}
df = 'did not count'
forbidden = ['custom', 'vectors', 'speed', 'language_model', 'features', 'bag_of_words']
# if the user enters a field this function isn't designed to use, print out statement and return values and stop.
if field in forbidden:
print("This function cannot count the values associated with this field. If you would like to count this field, talk to Lindsay.")
return bad_jsons, no_field, df
# otherwise, start with the json directory
for file in os.listdir(json_dir):
# json check!
if file.endswith('.json'):
# open each document in the directory
fpath = os.path.join(json_dir, file)
with open(fpath) as fin:
# load that document's json data; if it can't be loaded, append filename to list of files that can't be opened.
try:
json_data = json.loads(fin.read())
except ValueError as err:
bad_jsons.append(file)
continue
# see if the document has the field of interest. if it does, count.
try:
json_field = json_data[field]
if field == 'tags' or field == 'readability_scores':
for tag in json_field:
if tag in count_dict:
count_dict[tag] = count_dict[tag] + 1
else:
count_dict[tag] = 1
else:
if json_field in count_dict:
count_dict[json_field] = count_dict[json_field] + 1
else:
count_dict[json_field] = 1
# if a document doesn't have that field, add it to the appropriate list and keep going.
except KeyError as err:
no_field.append(file)
continue
# calculate number of files in json directory
json_dir_files = [file for file in os.listdir(json_dir) if file.endswith('.json')]
json_length = len(json_dir_files)
# turn counting dict into a pandas dataframe, define header and first row
# first row is just number of total documents in json directory
df = pd.DataFrame(list(count_dict.items()), columns= ['value', 'total number of docs'])
row = pd.DataFrame([['total docs in project', json_length]], columns=['value', 'total number of docs'])
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.models import load_model
from keras.callbacks import TensorBoard
# Import Dataset
dataset_train = pd.read_csv('Google_Stock_Price_Train.csv')
#!/usr/bin/env python
# coding: utf-8
# # Multiple Linear Regression
# ## Chapter Introduction
# In the previous chapter, we discussed similarity and clustering as a data pre-processing step. It is an important organizational step that helps analyze the data as a whole before moving into regression analysis and identifying the direct impact of certain variables on others. In this chapter, we’ll go over the basics of that regression analysis, focusing on one of the most popular models for making predictions when we have a numerical outcome variable, Multiple Linear Regression (MLR).
#
# In this chapter, we’ll go over:
# 1. What is MLR?
# 2. Assumptions behind MLR
# 3. Steps to MLR
# 4. MLR + Predictions
# ### What is MLR
#
# 
# Recall from Chapter 2 that data mining methods fall under two main categories: supervised learning and unsupervised learning. These learning methods are classified based on the presence and type of outcome variable; in supervised learning, we predict a previously determined outcome variable, and in unsupervised learning, we identify patterns within the data. Under unsupervised learning, the main method we’ll cover in this class is clustering, and under supervised learning, the two branches we’ll cover are regression and classification.
#
# The goal of **Regression** is to predict numerical outcomes and can be used to predict values in various situations. For example, predicting the amount spent on a fraudulent transaction would be a regression problem.
#
# Regression can be used for explanatory or predictive modeling.
#
# **Explanatory Modeling** - Creating a model that fits well with the existing data (the data on which it was trained)
# - Uses the entire data set to maximize information, with the focus on the coefficients
# - Represents the relationship between existing dependent and independent variables, rather than predicting new values for the dependent variable
#
# **Predictive Modeling** - Creating a model that has high predictive accuracy on new data
# - Partitions the data to assess predictive performance and avoid overfitting, with the focus on predictions
# - Accurately predicts new values for the dependent variable, rather than describing associations between existing dependent and independent variables
#
# The goal of **Classification** is to predict categorical outcomes, such as whether or not an outcome will be true or the probability that an outcome will occur. Within classification, there are two types of outcomes: class membership and propensity. Class membership refers to the categorical variable itself, while propensity refers to the probability of the class membership. For example, predicting whether or not a transaction is fraudulent would be a classification problem.
#
# Here are some more examples:
#
# How much will a house sell for? - Regression (numerical outcome)
#
# Will a student pass a test or not? - Classification (class membership)
#
# What is the probability of a certain student passing a class? - Classification (propensity)
#
# One method of predicting numerical outcomes under regression is Linear Regression, which examines the relationship between a dependent variable (also referred to as the outcome variable, response, or target) and independent variable(s) (also referred to as the predictor(s), input variable(s), regressor(s), or covariate(s)) assuming a linear relationship. Simple Linear Regression (SLR) examines a single independent variable, while Multiple Linear Regression (MLR) examines multiple independent variables.
#
# The equation below represents a linear relationship between multiple predictors and the outcome. We assume that this is the true relationship between the predictor variables and outcome variable. We will go over some examples and explanations to the different parts of this equation in the rest of this chapter.
#
# y = β0 + β1x1 + β2x2 +⋯+ βpxp + ϵ
#
# y: outcome variable
# x1, ..., xp: predictor variables, with p = total number of predictors
# β0: intercept
# β1, ..., βp: coefficients with p = total number of predictors
# ϵ: error (factors affecting y other than predictors)
# The above equation is a generic model for the relationship between multiple predictor variables and the outcome variable. However, in explanatory modeling, the focus is on the existing coefficients (β), and in predictive modeling, the focus is on predicting the new outcome variable (Y).
#
# The remainder of this chapter will focus on predictive modeling, the more common use of the two in data mining. Recall that in a predictive MLR model, we estimate values for the coefficients, or betas, to create a model that has high predictive accuracy on new data. So, we estimate the betas (β̂) using ordinary least squares (OLS). This method minimizes the sum of squared deviations between the actual and predicted outcome variable values (Y and Ŷ) based on the given model.
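#
# As a quick, hedged illustration (separate from the chapter's Toyota example), the sketch below estimates the betas by least squares on a small synthetic data set; all numbers and variable names are made up for demonstration.
# In[ ]:
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 2))                            # two synthetic predictors
y = 3 + 1.5 * X[:, 0] - 2.0 * X[:, 1] + rng.normal(scale=0.5, size=100)

X_design = np.column_stack([np.ones(len(X)), X])         # add an intercept column
beta_hat, *_ = np.linalg.lstsq(X_design, y, rcond=None)  # minimizes the sum of squared deviations
print(beta_hat)                                          # approximately [3, 1.5, -2.0]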
# ## Assumptions about MLR
# When dealing with multiple linear regression, we must always assume that there is a linear relationship between multiple predictors and the outcome. If a linear relationship is not present, a non-linear regression model must be utilized.
#
# We must also assume the independent variables are not highly correlated with one another; high correlation among them creates the issue of multicollinearity.
#
# When converting categorical variables to dummy variables, we must drop one dummy variable to avoid multicollinearity. For example, using the Toyota dealership example, if we were to dummy code the variable Fuel, we would get Fuel_Petrol, Fuel_Diesel, and Fuel_CNG. If Fuel_Petrol and Fuel_Diesel are both 0, then the car in question must run on CNG. Including the variable Fuel_CNG in our model means that any one of the Fuel dummy variables can be determined by the remaining two dummy variables. This is an example of perfect multicollinearity. Therefore, as shown in the image above, Fuel_CNG must be dropped from the model. It doesn’t matter which dummy variable is dropped, as long as one of them is.
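#
# A minimal dummy-coding sketch (the data values are hypothetical; the column names follow the Fuel example above). With drop_first=True, pandas drops one level, here 'CNG', so a CNG car is encoded implicitly by Fuel_Diesel = 0 and Fuel_Petrol = 0.
# In[ ]:
import pandas as pd

fuel = pd.DataFrame({'Fuel': ['Petrol', 'Diesel', 'CNG', 'Petrol']})
print(pd.get_dummies(fuel, drop_first=True))   # only Fuel_Diesel and Fuel_Petrol remain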
#
# By fitting the model to our data, we estimate the coefficients of the predictor variables and use them to predict outcome values. We can then use those predicted outcome values to calculate residuals (also called errors). Estimates remain unbiased if the mean of the error term is 0, and we additionally assume the variance of the error term is constant, or homoskedastic.
# 
# ## MLR + Prediction
# In data mining, predictive accuracy is measured on new data sets, not on the data the model was trained on. There are 5 common predictive accuracy measures we will discuss in this chapter: Mean Error (ME), Root-Mean-Squared Error (RMSE), Mean Absolute Error (MAE), Mean Percentage Error (MPE), and Mean Absolute Percentage Error (MAPE).
# 
# **Mean Absolute Error (MAE)**: This measure gives the absolute magnitude of the distance between your prediction and the outcome variable. For example, if your MAE is 12, your predictions are on average 12 units off from the actual value.
#
# **Mean Error (ME)**: This measure gives an indication of whether predictions overpredict or underpredict the outcome variable on average. It is similar to MAE, but does not take the absolute value of the error. A small mean error does NOT indicate good performance, but tells us whether the model consistently over- or underestimates outcomes.
#
# **Root-Mean-Squared Error (RMSE)**: This measure tells you how concentrated the data is around the line of best fit determined by the regression method. It is similar to the standard error of estimate in linear regression, but computed on the validation data.
#
# **Mean Absolute Percentage Error (MAPE)**: This measure gives the percentage score of how predictions deviate from the actual values on average.
#
# **Mean Percentage Error (MPE)**: This measure is similar to MAPE, but does not take the absolute value of the error.
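#
# The five measures can be computed directly from the validation-set errors. A minimal sketch with made-up actual and predicted values (here error = actual - predicted; sign conventions for ME and MPE vary between textbooks):
# In[ ]:
import numpy as np

y_actual = np.array([10.0, 12.0, 15.0, 20.0])
y_pred = np.array([11.0, 11.5, 16.0, 18.0])
err = y_actual - y_pred

ME = err.mean()
RMSE = np.sqrt((err ** 2).mean())
MAE = np.abs(err).mean()
MPE = 100 * (err / y_actual).mean()
MAPE = 100 * np.abs(err / y_actual).mean()
print(ME, RMSE, MAE, MPE, MAPE)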
# ## MLR Steps (Predictive Modeling)
# In Predictive MLR, the data are split into two sets or partitions: training and validation (also called the holdout set). The training set is used to estimate the model and obtain the beta values, and the validation set is used to assess performance on new data. We first split the data into training and validation sets using given percentages (e.g., 60% and 40%). Then, we use the training set to fit a multiple linear regression model between the outcome variable and the other variables. Recall from Chapter 1 and earlier in this chapter that we create dummy variables for each categorical predictor variable and drop one to avoid multicollinearity. We then apply the estimated beta values to the validation set to get predicted y values, which are compared to the actual y values to get error values and predictive accuracies. Lastly, we can use the predictive accuracy methods outlined above to choose the best model.
#
# 1. Split the data into training and validation sets
# 2. Apply MLR to the training partition to get estimated 𝛽0, ..., 𝛽p values (the 𝛽̂'s); the MLR algorithm does this automatically by minimizing the sum of squared residuals on the training data
# 3. Apply the estimated 𝛽0, ..., 𝛽p values to the validation set to get predicted y values (ŷ)
# 4. Compare the predicted ŷ values to the actual y values to get error values and predictive accuracy
# 5. Compare models of different predictive accuracies to determine the best model
# ## MLR Example
# Let’s walk through an example of predicting numerical outcomes.
#
# You work at a Toyota dealership which buys back used cars from customers purchasing new cars. To make a profit, the dealership needs to predict the price that the used car will sell for.
# | Variable | Description |
# |------------|---------------------------------------|
# | Price | Offer price in Euros |
# | Age | Age in months as of August 2004 |
# | Kilometers | Accumulated Kilometers on odometer |
# | Fuel type | Fuel type (Petrol, Diesel, CNG) |
# | HP | Horsepower |
# | Metallic | Metallic color (Yes = 1, No= 0) |
# | Automatic | Automatic (Yes = 1, No = 0) |
# | CC | Cylinder volume in cubic centimeters |
# | Doors | Number of doors |
# | QuartTax | Quarterly road tax in Euros |
# | Weight | Weight in Kilograms |
# Example:
#
# In this example, a relationship between several variables and price is already given. Later in this chapter, we’ll explore how to get these coefficients through MLR.
#
# Price = 3 - 0.5 Age + 2 Automatic_binary + 3 PowerWindow_binary + 0.4 WarrantyLeft
#
# We can interpret these variables quantitatively and qualitatively. A quantitative interpretation of this equation states that a 1-unit increase in the age of a car reduces the price by 0.5 units, whereas a qualitative interpretation simply states that an increase in age reduces the price of the car (without specifying numerical values).
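#
# Plugging hypothetical values into the illustrative equation above makes the quantitative reading concrete (the input values below are made up):
# In[ ]:
age, automatic, power_window, warranty_left = 10, 1, 1, 12
price = 3 - 0.5 * age + 2 * automatic + 3 * power_window + 0.4 * warranty_left
print(price)   # 3 - 5 + 2 + 3 + 4.8 = 7.8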
#
# We will use predictors (x1, ..., xp) to predict (instead of explain) outcome (y).
#
# For example:
# - Use house characteristics to predict selling price
# - Use advertising expenditures to predict sales
# - Use product demand to predict valuation
# 
# In[1]:
import pandas as pd
df = pd.read_csv("./data/ToyotaCorolla.csv")
# In[2]:
#Example
from sklearn.model_selection import train_test_split
X = df.drop('Price',axis = 1)
Y = df.Price
#Encoding variables
X = pd.get_dummies(X, drop_first=True)  # drop one dummy level per variable to avoid perfect multicollinearity
#Partition
train_x,test_x, train_y, test_y = train_test_split(X,Y, test_size = 0.4, random_state = 2012)
# In[3]:
from sklearn.linear_model import LinearRegression
car_lm = LinearRegression()
car_lm.fit(train_x, train_y)
print('intercept ', car_lm.intercept_)
print( | pd.DataFrame({'Predictor': X.columns, 'coefficient': car_lm.coef_}) | pandas.DataFrame |
from pathlib import Path
import multiprocessing
import psutil
import logging
from itertools import combinations
import numpy as np
from scipy.stats import pearsonr
import pandas as pd
from skimage import io, exposure
from .util import natural_sort, bboxes_overlap, is_notebook
from .gauss import fit_gaussian_2D, fit_gaussian_1D
# Check for dask
try:
import dask.array as da
from dask_image.imread import imread
_has_dask = True
except ImportError:
    logging.warning("Dask not installed. No support for large (> RAM) stacks.")
_has_dask = False
# Determine whether in notebook environment (for tqdm aesthetics)
if is_notebook():
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
# Get CPU info
N_CORES = multiprocessing.cpu_count()
MEM_TOT = psutil.virtual_memory().total / 1e9
MEM_FREE = psutil.virtual_memory().free / 1e9
__all__ = ['load_stack',
'get_mip',
'get_min_masses',
'get_max_masses',
'detect_overlapping_features',
'detect_edge_features',
'extract_psfs',
'detect_outlier_psfs',
'localize_psf',
'localize_psfs',
'align_psfs',
'crop_psf',
'fit_features_in_stack',
'get_theta']
# TODO: LOGGING
def load_stack(file_pattern):
"""Loads image stack into dask array allowing manipulation
of large datasets.
Parameters
----------
file_pattern : list or str
Either a list of filenames or a string that is either
a) the individual filename of e.g. a tiff stack or
b) a directory from which all images will be loaded into the stack
Returns
-------
stack : dask array-like
Image stack as 32bit float with (0, 1) range in intensity
Examples
--------
* `file_pattern` is a list
>>> file_pattern = ['/path/to/data/image1.tif',
'/path/to/data/image2.tif',
'/path/to/data/image3.tif']
>>> get_stack(file_pattern)
* `file_pattern` is a directory
>>> file_pattern = '/path/to/data/'
>>> get_stack(file_pattern)
    * `file_pattern` is a tiff stack
>>> file_pattern = '/path/to/tiff/stack/multipage.tif'
>>> get_stack(file_pattern)
"""
# If a list of file names is provided
if isinstance(file_pattern, list):
logging.info("Creating stack from list of filenames.")
images = []
for i, fp in tqdm(enumerate(file_pattern),
total=len(file_pattern)):
logging.debug(f"Reading image file ({i+1}/{len(file_pattern)}) : {fp}")
image = io.imread(fp, plugin='pil')
images.append(image)
# Create 3D image stack (Length, Height, Width)
stack = np.stack(images, axis=0)
# If a directory or individual filename
elif isinstance(file_pattern, str):
# Directory
if Path(file_pattern).is_dir():
logging.info("Creating stack from directory.")
# Collect every png/tif/tiff image in directory
filepaths = list(Path(file_pattern).glob('*.png')) + \
list(Path(file_pattern).glob('*.tif')) + \
list(Path(file_pattern).glob('*.tiff'))
# Sort filepaths
filepaths = natural_sort([fp.as_posix() for fp in filepaths])
# Load images
images = []
for i, fp in tqdm(enumerate(filepaths),
total=len(filepaths)):
logging.debug(f"Reading image file ({i+1}/{len(filepaths)}) : {fp}")
image = io.imread(fp, plugin='pil')
images.append(image)
# Create 3D image stack (Length, Height, Width)
stack = np.stack(images, axis=0)
# Tiff stack or gif
elif (Path(file_pattern).suffix == '.tif') or \
(Path(file_pattern).suffix == '.tiff') or \
(Path(file_pattern).suffix == '.gif'):
logging.info("Creating stack from tiff stack")
# Create 3D image stack (Length, Height, Width)
stack = io.imread(file_pattern, plugin='pil')
        # Neither a directory nor a recognized image-stack file
else:
if Path(file_pattern).exists():
raise ValueError(f"Not sure what to do with `{file_pattern}`.")
else:
raise ValueError(f"`{file_pattern}` cannot be located or "
"does not exist.")
else:
raise TypeError("Must provide a directory, list of filenames, or the "
"filename of an image stack as either a <list> or <str>, "
f"not {type(file_pattern)}.")
# Return stack
logging.info(f"{stack.shape} image stack created succesfully.")
stack = exposure.rescale_intensity(stack, out_range=np.float32)
return stack
def get_mip(stack, normalize=True, log=False, clip_pct=0, axis=0):
"""Compute the maximum intensity projection along the given axis.
Parameters
----------
stack : array-like
3D image stack
normalize : bool (optional)
Whether to normalize the projection, also scales by 255
Default : True
log : bool (optional)
Whether to take the natural log
Default : False
clip_pct : scalar (optional)
% by which to clip the intensity
axis : int (optional)
Axis along which to compute the projection
0 --> z, 1 --> y, 2 --> x
Default : 0 (z)
Returns
-------
mip : MxN array
Maximum intensity projection image
"""
# Calculate the maximum projection image of the image stack
mip = np.max(stack, axis=axis)
# Take natural log
if log:
        # Scaling factor (such that log(s * mip.min()) = 0)
        s = 1/mip[mip!=0].min()
        # The `out` + `where` arguments avoid a divide-by-zero warning from log(0)
mip = np.log(s*mip,
out=np.zeros_like(mip),
where=mip!=0)
# Normalize (rescale) the maximum intensity projection
if normalize or log or clip_pct: # automatically rescale if taking
# the log or if `clip_pct` provided
p1, p2 = np.percentile(mip, (clip_pct, 100-clip_pct))
mip = exposure.rescale_intensity(mip, in_range=(p1, p2), out_range=(0, 1))
return mip
def get_min_masses(mip, dx, n=6, b=5):
"""Infer range of candidate minimum masses.
Features returned by `trackpy.locate` are filtered by mass (essentially
a feature's total integrated brightness/intensity). It is important to
choose a reasonable lower bound for mass to filter out spurious bright
features (salt), smaller than the PSF, but it is difficult know what this
bound is a priori. So it is useful to sample a logarithmic range of
candidate lower bounds and choose a proper minimum mass based on visual
inspection.
Parameters
----------
mip : array-like
2D maximum intensity projection
dx : scalar
Expected feature diameter
        A decent estimate is half the emission wavelength divided by the NA
dx ~ λ/(2NA)
n : scalar (optional)
Number of candidate minimum masses to return
Default : 6
b : scalar (optional)
Scaling factor to broaden or shrink the range of masses
Default : 5
Returns
-------
min_masses : array-like
1D array of candidate minimum masses (length n)
Examples
--------
>>> image = generate_image(nx=300, ny=300, N_features=20, seed=37)
>>> get_min_masses(image, dx=9)
array([ 12.21489226, 23.25292776, 44.26552752,
84.26624581, 160.41377073, 305.37230648])
"""
# Estimate peak intensity of a typical *single* PSF
peak = np.percentile(mip, 99.9)
# "Integrate" intensity over a typical PSF
min_mass_0 = np.pi * (dx/2)**2 * peak
# Set logarithmic range of candidate minimum masses
min_masses = np.logspace(np.log10(min_mass_0/b),
np.log10(min_mass_0*b), n)
return min_masses
def get_max_masses(min_mass, n=6, b=5):
"""Infer range of candidate maximum masses.
Follows from `get_min_masses`, but for (surprise!) maximum mass filtering.
Ranges from (min_mass, b*min_mass)
Parameters
----------
mip : array-like
2D maximum intensity projection
min_mass : scalar
Minimum mass
n : scalar (optional)
Number of candidate maximum masses to return
Default : 6
b : scalar (optional)
Scaling factor to broaden or shrink the range of masses
Default : 5
Returns
-------
max_masses : array-like
1D array of candidate maximum masses (length n)
"""
# Set logarithmic range of candidate maximum masses
max_masses = np.logspace(np.log10(min_mass),
np.log10(min_mass*b), n)
return max_masses
def detect_overlapping_features(features, wx, wy=None):
"""Detects overlapping features from feature set.
Parameters
----------
features : `pd.DataFrame`
Feature set returned from `trackpy.locate`
wx, wy : scalar
Dimensions of bounding boxes
Returns
-------
overlapping : array-like
Indices of overlapping features (to be discarded)
Notes
-----
* Utilizes cell listing approach for huge speed increases over brute-force.
"""
# Set wy if not provided
wy = wx if wy is None else wy # (assumes a square box)
# Create a bounding box for each bead
df_bboxes = features.loc[:, ['x', 'y']]
df_bboxes['x_min'] = features['x'] - wx/2
df_bboxes['y_min'] = features['y'] - wy/2
df_bboxes['x_max'] = features['x'] + wx/2
df_bboxes['y_max'] = features['y'] + wy/2
# Keep track of overlapping features
overlapping = []
# Define cell parameters
cw = 2*wx # cell width
# Alias for features
X = features['x'].values
Y = features['y'].values
# Loop through a grid in x, y to create cells
Nx = X.max() + cw
Ny = Y.max() + cw
for x in tqdm(np.arange(0, Nx, cw)):
for y in np.arange(0, Ny, cw):
# Create cell
cell = [x-cw, y-cw, x+2*cw, y+2*cw]
# Get features in cell
in_cell = df_bboxes[((cell[0] < X) & (X < cell[2]) &\
(cell[1] < Y) & (Y < cell[3]))]
# Combinations
pairs = list(combinations(in_cell.reset_index().values, 2))
# Loop through pairs of bboxes
for (bbox_i, bbox_j) in pairs:
if bboxes_overlap(bbox_i[-4:], bbox_j[-4:]):
overlapping.append(bbox_i[0])
overlapping.append(bbox_j[0])
# Deduplicate indices
overlapping = np.unique(overlapping)
return overlapping
def detect_edge_features(features, Dx, Dy, wx, wy=None):
"""Detects edge features from feature set.
Parameters
----------
features : `pd.DataFrame`
Feature set returned from `trackpy.locate`
Dx, Dy : scalar
Dimensions of stack
wx, wy : scalar
Dimensions of bounding boxes
Returns
-------
edges : array-like
Indices of edge features (to be discarded)
"""
# Set wy if not provided
wy = wx if wy is None else wy # (assumes a square box)
# Create a bounding box for each bead
df_bboxes = features.loc[:, ['x', 'y']]
df_bboxes['x_min'] = features['x'] - wx/2
df_bboxes['y_min'] = features['y'] - wy/2
df_bboxes['x_max'] = features['x'] + wx/2
df_bboxes['y_max'] = features['y'] + wy/2
# Check boundaries
edges = features.loc[(df_bboxes['x_min'] < 0) |\
(df_bboxes['y_min'] < 0) |\
(df_bboxes['x_max'] > Dx) |\
(df_bboxes['y_max'] > Dy)].index.values
return edges
def extract_psfs(stack, features, shape, return_indices=False):
"""Extract the PSF (aka subvolume) from each detected feature while
simultaneously filtering out edge features.
Parameters
----------
stack : array-like
3D image stack
features : `pd.DataFrame`
DataFrame of detected features
shape : array-like or 3-tuple
The dimensions of the PSF to be extracted (wz, wy, wx)
    return_indices : bool
        Whether to also return the indices of edge features
Returns
-------
psfs : list
List of all the PSFs as numpy arrays
    edge_features : list (optional)
        Indices of edge features (to be discarded)
Notes
-----
* A feature is considered to be an edge feature if the volume of the
extracted PSF extends outside the image stack in x or y
"""
# Unpack PSF shape
wz, wy, wx = shape
# Round up to nearest odd integer --> results in all extracted PSFs
# having the same shape
wz, wy, wx = np.ceil([wz, wy, wx]).astype(int) // 2 * 2 + 1
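    # (e.g. a target width of 10.2 -> 11, 11 -> 11, 12 -> 13)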
# Iterate through features
psfs = [] # collect PSFs
edge_features = [] # collect indices of edge features
for i, row in features.iterrows():
# Set z indices
if stack.shape[0] < wz: # image stack height < wz
# Take full image stack in z
z1, z2 = 0, stack.shape[0]
else:
# Place the subvolume at halfway in z
z1, z2 = (int(stack.shape[0]/2 - wz/2),
int(stack.shape[0]/2 + wz/2))
# Get x, y position of feature
x, y = row[['x', 'y']]
# Set y indices
if stack.shape[1] < wy: # image stack y width < wy
# Take full image stack in y
y1, y2 = 0, stack.shape[1]
else:
# Center the subvolume in y
y1, y2 = (int(y - wy/2),
int(y + wy/2))
# Set x indices
if stack.shape[2] < wx: # image stack x width < wx
# Take full image stack x
x1, x2 = 0, stack.shape[2]
else:
# Center the subvolume in x
x1, x2 = (int(x - wx/2),
int(x + wx/2))
# Determine if feature is along the edge of the image stack
if (x1 < 0) or (y1 < 0) or (x2 > stack.shape[2]) or (y2 > stack.shape[1]):
edge_features.append(i)
# Extract PSF
else:
psf = stack[z1:z2, y1:y2, x1:x2]
psfs.append(psf)
    # If indices are not requested, just return the PSFs
if not return_indices:
return psfs
# Return edge features
return psfs, edge_features
def detect_outlier_psfs(psfs, pcc_min=0.9, return_pccs=False):
"""Detect outlier PSFs based on the Pearson correlation coefficient (PCC).
Parameters
----------
psfs : list
List of PSFs
pcc_min : scalar
PCC threshold to determine suspicious (potential outlier) PSFs
Returns
-------
outliers : list
Indices of detected outlier PSFs
"""
# Collect PCCs
pccs = []
# Iterate through every (unique) pair of PSFs
ij = list(combinations(range(len(psfs)), 2))
for i, j in tqdm(ij, total=len(ij)):
# Get pairs of PSFs
mip_i = np.max(psfs[i], axis=0)
mip_j = np.max(psfs[j], axis=0)
# Calculate PCC of maximum intensity projections
pcc, _ = pearsonr(mip_i.ravel(),
mip_j.ravel())
pccs.append(pcc)
# Convert to array
pccs = np.array(pccs)
# Get indices of candidate outliers
suspects_i = np.argwhere(pccs < pcc_min)
# If no suspects exist
if suspects_i.size == 0:
outliers = np.array([])
else:
# Convert to indices of PSF pairs
suspects_ij = np.array(ij)[suspects_i[:, 0]]
        # Count how often each PSF index appears among the suspect pairs
i, counts = np.unique(suspects_ij, return_counts=True)
outliers = i[counts > 3*counts.mean()]
if return_pccs:
return outliers, pccs
return outliers
def localize_psf(psf, integrate=False):
"""Localize a given PSF in the stack.
Parameters
----------
psf : array-like
3D array of PSF subvolume
integrate : bool
Whether to integrate the PSF over x and y before doing 1D fit.
Alternative is to take a slice in z at (x0, y0), the position
found from the 2D fit.
Returns
-------
x0, y0, z0 : scalars
Position data from Gaussian fit
sigma_x, sigma_y, sigma_z : scalars
Standard deviations from Gaussian fit
"""
# Take maximum intensity projection
mip = np.max(psf, axis=0)
# 2D Fit
x0, y0, sigma_x, sigma_y, A, B = fit_gaussian_2D(mip)
# 1D Fit
# TODO: seems like slice is better but not totally convinced
if integrate:
# Integrate over x and y
z_sum = psf.sum(axis=(1, 2))
z0, sigma_z, A, B = fit_gaussian_1D(z_sum)
else:
# Slice in through x0, y0
z_slice = psf[:,int(y0), int(x0)]
z0, sigma_z, A, B = fit_gaussian_1D(z_slice)
return (x0, y0, z0, sigma_x, sigma_y, sigma_z)
def localize_psfs(psfs, integrate=False):
"""Localize all PSFs in stack.
Parameters
----------
psfs : list or array-like
List of PSFs
integrate : bool
Whether to integrate the PSF over x and y before doing 1D fit.
Returns
-------
df : `pd.DataFrame`
DataFrame of the fit parameters from each PSF
"""
# Initialize DataFrame
cols = ['x0', 'y0', 'z0', 'sigma_x', 'sigma_y', 'sigma_z']
df = | pd.DataFrame(columns=cols) | pandas.DataFrame |
# encoding:utf-8
__author__ = 'shiliang'
__date__ = '2019/4/9 21:12'
import requests
from lxml import etree
from tqdm import tqdm
import pandas as pd
import xlrd
import time
import re
import multiprocessing
from multiprocessing import Pool
# Global variables
# headers = {
# 'Cookie': 'OCSSID=sfg10a19had6hfavkctd32otf6',
# # 'Cookie': 'v=8ULm8Hr%<k:l5rrfgm?a; Qs_lvt…ts=PUbGvneL,qrbGvneL,L6cGvneL',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
# '(KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
# # 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
# }
oneSheetList = []  # metadata list for an entire sheet
def getURLlist(openFile, sheetNum, colx, start_rowx):
    '''
    Read the data in the given column and row range of a given sheet in an Excel file.
    :param openFile: path to the Excel file
    :param sheetNum: index of the sheet
    :param colx: index of the column to read
    :param start_rowx: first row to read from
    :return urls: a list containing all URLs
    '''
    # Open the workbook
    data = xlrd.open_workbook(openFile)
    # Select the sheet
    table = data.sheets()[sheetNum]
    # Read the specified column; start_rowx=1 means starting from the 2nd row (counting from 0)
    urls = table.col_values(colx=colx, start_rowx=start_rowx)
return urls
def cleanStr(string):
    '''
    Clean a string, e.g. by removing HTML tags.
    :param string: the string to clean
    :return clean_str: the cleaned string
    '''
    # 1. Use strip to remove leading/trailing \n and \t from the string ('\t\n' works just as well here)
    clean_str = string.strip('\n\t')
    # 2. Use a regex to remove HTML tags <...> and /* ... */ blocks
    clean_str = re.compile(r'\<.*?\>|\/\*.*?\*\/').sub('', clean_str)
return clean_str
def getOneURLMetaData(number, url):
    '''
    Fetch the metadata of a single URL (run as a worker by the multiprocessing pool).
    :param url: the URL (string)
    :return oneURLList: a list of all metadata for this URL
    '''
resp = requests.get(url)
html = resp.content
    # Parse the response
    xml = etree.HTML(html)
    content = xml.xpath('//*[@id="content"]/table/tr/td/text()')
    # Format the data
    oneURLList = []  # list of all metadata for this URL
    creatorList = []  # authors, affiliations and countries for this URL; missing items default to '0'
    title = ''  # title of this URL
    abstract = ''  # abstract of this URL
    keywords = '0'  # keywords of this URL
for index, text in enumerate(content):
        # '\xa0' is the first and last element of each row, marking the start or end of a row
        if text == '\xa0':
            # 1. Check whether this is the 'Title' row
            if content[index+2] == 'Title':
                title = content[index + 4]  # save Title
                title = cleanStr(title)  # clean the title
                continue
            if content[index+3] == 'Abstract':
                abstract = content[index + 4]  # save Abstract
                continue
            if content[index+3] == 'Keyword(s)':
                # Fill in Keyword(s) if present; otherwise keep the default '0'
                if content[index+4] != '\xa0':
                    keywords = content[index + 4]  # save Keyword(s)
                    keywords = cleanStr(keywords)  # clean the keywords
                continue
            if content[index+2] == 'Creator':
                clean_creator = cleanStr(content[index + 4])
                lst = clean_creator.split('; ')  # split the string on "; " into three parts, returning a list
                for num, info in enumerate(lst):  # the site sometimes records incomplete metadata, so missing items default to '0'
                    # if the site recorded too many items (more than three), break out of the loop
                    if num > 2:
                        break
                    creatorList.append(info)  # author name, affiliation, country
                    # if the site recorded fewer than three items, pad the last element with '0'
                    if len(lst) < 3 and num == 1:
                        creatorList.append('0')  # author name, affiliation, country
continue
oneURLList.append(number)
oneURLList.append(title)
oneURLList.append(abstract)
oneURLList.append(keywords)
oneURLList.append(creatorList)
    # Keep the last element of oneURLList (the creator list)
    creatorList = oneURLList[-1]
    # Drop the last element of oneURLList by slicing [0, -1)
    oneURLList = oneURLList[:-1]
    # Unpack the creator list and append its items to oneURLList one by one
    for info in creatorList:
        oneURLList.append(info)
    print('Finished crawling + parsing + cleaning URL #' + str(number))
return oneURLList
def getMetaData(urls, topCount=None):
'''
    Fetch the metadata for all URLs and collect it into a list.
    :param urls: list of URLs
    :param topCount: number of URLs from the front of the list to process (optional)
'''
urlsList = []
if topCount is None:
urlsList = urls
else:
for i in range(topCount):
urlsList.append(urls[i])
    print('Number of CPU cores in the current environment: %d' % multiprocessing.cpu_count())
    p = Pool(10)  # process pool
    for number, url in enumerate(urlsList):
        oneSheetList.append(p.apply_async(getOneURLMetaData, args=(number+1, url)))
    p.close()
    p.join()  # wait for all worker processes to finish before running the rest of the program
for index, result in enumerate(oneSheetList):
oneSheetList[index] = result.get()
def list2excel(saveFile, oneSheetList, startrow, startcol=2, sheet_name='Sheet1'):
'''
    Write a list to the specified rows and columns of an Excel sheet.
    :param saveFile: path of the Excel file to save to
    :param oneSheetList: a list holding the metadata of one sheet
    :param startrow: row of the Excel sheet at which this URL's data is written
    :param startcol: starting column to write to in the Excel sheet
    :param sheet_name: name of the sheet to write to
    :return:
'''
df = | pd.DataFrame(oneSheetList) | pandas.DataFrame |
# import sys
# sys.path.append('JEMIPYC')
# from array_check_function_global import df,dfn,dfv,dfx,dfnx,dfvx
import pandas as pd
import numpy as np
tab = '__'
# no-extension , number of parameters is not limited, 2 or 3, whatever you want.
# ex) df(A,B,C,D,...,Z...)
# of course you just put one parameter.
def df(*x):
pd.reset_option('display.max_columns')
pd.reset_option('display.max_rows')
leng = len(x)
df_concat = []
for i in range(leng):
row=len(x[0])
blank = ['']*row
blank = pd.DataFrame(blank,columns=[tab])
xx = pd.DataFrame(x[i])
if(i==0):
df_concat = xx
else:
df_concat = pd.concat([df_concat,blank,xx], axis=1)
df_concat.replace(np.nan, '', inplace=True)
display(df_concat)
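# Example usage in a notebook (hypothetical arrays):
#   A = np.ones((3, 2)); B = np.zeros((3, 4))
#   df(A, B)  # displays A and B side by side, separated by a blank spacer column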
def dfn(*x):
pd.reset_option('display.max_columns')
pd.reset_option('display.max_rows')
leng = len(x)
df_concat = []
for i in range(leng):
row=len(x[0])
blank = ['']*row
tabn = '{'+str(i+1)+'}'
blank = pd.DataFrame(blank,columns=[tabn])
xx = | pd.DataFrame(x[i]) | pandas.DataFrame |
import anndata as ad
import scanpy as sc
import torch
import random
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from evaluation.eval import evaluate
from sklearn.preprocessing import MinMaxScaler, StandardScaler
def set_seed(seed):
torch.random.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def scores_mean_std(all_scores):
mean_scores = pd.DataFrame(all_scores).mean()
mean_scores = pd.Series(mean_scores.values, index=mean_scores.index.map(lambda x: x + "_mean"))
std_scores = pd.DataFrame(all_scores).std()
std_scores = pd.Series(std_scores.values, index=std_scores.index.map(lambda x: x + "_std"))
all_scores = | pd.concat((mean_scores, std_scores), axis=0) | pandas.concat |
import os
import re
import sys
import time
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.gridspec as gridspec
from itertools import permutations, product
import numpy as np
import pandas as pd
from palettable.colorbrewer.qualitative import Paired_12
import seaborn as sns
from scipy.optimize import curve_fit
import utils
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}']
DATA_DIR = '/home/johnmcbride/projects/Scales/Data_compare/Data_for_figs/'
FIGS_DIR = '/home/johnmcbride/projects/Scales/Data_compare/Figs/'
REAL_DIR = '/home/johnmcbride/projects/Scales/Data_compare/Processed/Real'
BIASES = ['none', 'S#1_n1', 'S#1_n2',#'',
'distI_n1', 'distI_n2', 'distI_n3', 'distW',#'',
'distI_n1_S#1_n1', 'distI_n1_S#1_n2', 'distI_n2_S#1_n1', 'distI_n2_S#1_n2',
'distW_S#1_n1', 'distW_S#1_n2', 'distW_S#2_n2', 'distW_S#2_n3',
'hs_n1_w05', 'hs_n1_w10', 'hs_n1_w15', 'hs_n1_w20',
'hs_n2_w05', 'hs_n2_w10', 'hs_n2_w15', 'hs_n2_w20',
'hs_n3_w05', 'hs_n3_w10', 'hs_n3_w15', 'hs_n3_w20',
'hs_r3_w05', 'hs_r3_w10', 'hs_r3_w15', 'hs_r3_w20'] + \
[f"im5_r{r:3.1f}_w{w:02d}" for r in [0, 0.5, 1, 2] for w in [5,10,15,20]] + \
[f"Nim5_r0.0_w{w:02d}" for w in [5,10,15,20]] + \
[f"Nhs_n1_w{w:02d}" for w in [5,10,15,20]] + \
[f"Nhs_n2_w{w:02d}" for w in [10,20]] + \
[f"Nhs_n3_w{w:02d}" for w in [10,20]]
BIAS_GROUPS = ['none', 'S#1', 'HS',
'distW', 'distW_S#1', 'distW_S#2',
'distI', 'distI_S#1']
BIAS_GROUPS = ['none', 'HS',
'S#1', 'distW',
'distW_S#1', 'distW_S#2',
'distI', 'distI_S#1', 'im5', 'AHS']
groups = ['none'] + ['S#1']*2 + ['distI']*3 + ['distW'] + ['distI_S#1']*4 + \
['distW_S#1']*2 + ['distW_S#2']*2 + ['HS']*12 + ['im5']*24 + ['HS']*8
BIAS_KEY = {BIASES[i]:groups[i] for i in range(len(BIASES))}
def plot_MC_dist(fName, X='pair_ints', out=False, f=False, cum=False):
df = pd.read_feather(fName)
if f:
sns.distplot(df[X], bins=100)
else:
if cum:
sns.distplot(utils.extract_floats_from_string(df[X]), bins=100, hist_kws=dict(cumulative=True), kde_kws=dict(cumulative=True))
else:
sns.distplot(utils.extract_floats_from_string(df[X]), bins=100)
if out:
return df
def plot_MC_kde(fName, X='pair_ints', out=False, f=False, ax='None'):
df = pd.read_feather(fName)
if f:
sns.kdeplot(df[X])
else:
sns.kdeplot(utils.extract_floats_from_string(df[X]))
if out:
return df
def rename_biases(df):
df.loc[df.bias=='distI_1_0', 'bias'] = 'distI_n1'
df.loc[df.bias=='distI_2_0', 'bias'] = 'distI_n2'
df.loc[df.bias=='distI_3_0', 'bias'] = 'distI_n3'
df.loc[df.bias=='distI_0_1', 'bias'] = 'S#1_n1'
df.loc[df.bias=='distI_0_2', 'bias'] = 'S#1_n2'
df.loc[df.bias=='distI_1_1', 'bias'] = 'distI_n1_S#1_n1'
df.loc[df.bias=='distI_1_2', 'bias'] = 'distI_n1_S#1_n2'
df.loc[df.bias=='distI_2_1', 'bias'] = 'distI_n2_S#1_n1'
df.loc[df.bias=='distI_2_2', 'bias'] = 'distI_n2_S#1_n2'
df.loc[df.bias=='opt_c', 'bias'] = 'distW'
df.loc[df.bias=='opt_c_I1', 'bias'] = 'distW_S#1_n1'
df.loc[df.bias=='opt_c_I2', 'bias'] = 'distW_S#1_n2'
df.loc[df.bias=='opt_c_s2', 'bias'] = 'distW_S#2_n2'
df.loc[df.bias=='opt_c_s3', 'bias'] = 'distW_S#2_n3'
return df
def rename_bias_groups(df):
df.loc[df.bias_group=='distI+small', 'bias_group'] = 'distI_S#1'
df.loc[df.bias_group=='distW+I', 'bias_group'] = 'distW_S#1'
df.loc[df.bias_group=='distW+S', 'bias_group'] = 'distW_S#2'
df.loc[df.bias_group=='small', 'bias_group'] = 'S#1'
df.loc[df.bias_group=='hs', 'bias_group'] = 'HS'
return df
def plot_violin(df, cat='pair_ints', X='bias_group', Y='JSD', kind='violin'):
df = rename_bias_groups(df)
violin_order = get_violin_order(df, X, Y)
sns.catplot(x=X, y=Y, data=df.loc[df.cat==cat], kind=kind, order=violin_order)
# sns.catplot(x=X, y=Y, data=df.loc[df.cat==cat], kind='violin', order=[0.0, 50.0, 60., 70., 80., 90., 100.])
# sns.catplot(x=X, y=Y, data=df.loc[df.cat==cat], kind='boxen', order=[0.0, 50.0, 60., 70., 80., 90., 100.])
# sns.catplot(x=X, y=Y, data=df.loc[df.cat==cat], kind='boxen', order=[400., 450., 500., 550., 1200.])
def get_violin_order(df, X, Y):
groups = np.array(df[X].unique())
min_J = [df.loc[(df[X]==g)&(df.cat=='pair_ints'),Y].min() for g in groups]
if 'fr' in Y:
violin_order = groups[np.argsort(min_J)[::-1]]
else:
violin_order = groups[np.argsort(min_J)]
return violin_order
def df_distplot_with_constraints(df, bias, MI, MA, q, cat='pair_ints', ret=0):
if 'hs' in bias:
cut = df.loc[(df.min_int>MI)&(df.max_int<MA), bias].quantile(1.-q)
print(cut)
tmp_df = df.loc[(df[bias]>cut)&(df.min_int>MI)&(df.max_int<MA)]
sns.distplot(utils.extract_floats_from_string(tmp_df.loc[:,cat]), bins=100, label=bias)
else:
cut = df.loc[(df.min_int>MI)&(df.max_int<MA), bias].quantile(q)
tmp_df = df.loc[(df[bias]<cut)&(df.min_int>MI)&(df.max_int<MA)]
sns.distplot(utils.extract_floats_from_string(tmp_df.loc[:,cat]), bins=100, label=bias)
plt.legend(loc='best')
if ret:
return tmp_df
def get_files_and_labels_from_idx(df, idx, kde=True, hist=False):
fNames = []
labels = []
for i in idx:
if kde:
fNames.append(df.loc[i, 'kde_path'])
labels.append("kde: int=[{0[0]}-{0[1]}]; beta={0[2]}".format(df.loc[i, ['min_int', 'max_int', 'beta']]))
if hist:
fNames.append(df.loc[i, 'hist_path'])
labels.append("hist: int=[{0[0]}-{0[1]}]; beta={0[2]}".format(df.loc[i, ['min_int', 'max_int', 'beta']]))
return fNames, labels
def plot_harmonic_similarity_distributions(df_grid, df_real, cat='Continent', leg=True, n=5, m=1):
fig, ax = plt.subplots(4,1)
ax = ax.reshape(ax.size)
# fig, ax = plt.subplots(1,1)
# ax = [ax]
# plt.subplots_adjust(hspace=0.8)
for i, lbl in enumerate([f'hs_n{m}_w{x:02d}' for x in range(5,25,5)]):
# for i, lbl in enumerate([f'hs_n{m}_w{x:02d}' for x in range(10,15,5)]):
sns.distplot(df_grid[lbl], label='no_constraint', ax=ax[i], color='k')
for c in df_real[cat].unique():
sns.kdeplot(df_real.loc[(df_real[cat]==c)&(df_real.n_notes==n), lbl], ax=ax[i], label=c)
if leg and not i:
# ax[i].legend(loc='best', frameon=False)
# ax[i].legend(bbox_to_anchor=(0.39, 1.3), frameon=False, ncol=7)
handles, labels = ax[i].get_legend_handles_labels()
ax[i].get_legend().set_visible(False)
else:
ax[i].get_legend().set_visible(False)
ax[i].set_ylabel('Prob')
if leg:
fig.legend(handles, labels, loc='upper center', frameon=False, ncol=4)
fig.savefig(f"{FIGS_DIR}/harm_sim_dist_notes{n}_ver{m}.png")
fig.savefig(f"{FIGS_DIR}/harm_sim_dist_notes{n}_ver{m}.pdf")
def plot_dists_by_npy_file(files, labels, real=True, kde=True, hist=False, n=7):
fig, ax = plt.subplots()
if hist or sum([1 for f in files if 'hist' in f]):
ax2 = ax.twinx()
if real:
if kde:
data = np.load(os.path.join(REAL_DIR, f"n_{n}_kde.npy"))
ax.plot(data[:,0], data[:,1], label='real_kde')
if hist:
data = np.load(os.path.join(REAL_DIR, f"n_{n}_hist.npy"))
ax2.plot(data[:,0], data[:,1], label='real_hist')
for i, f in enumerate(files):
data = np.load(f)
if 'hist' in f:
ax2.plot(data[:,0], data[:,1], label=labels[i])
else:
ax.plot(data[:,0], data[:,1], label=labels[i])
ax.legend(loc='best')
plt.show()
def set_xticks(ax, xMaj, xMin, xForm):
ax.xaxis.set_major_locator(MultipleLocator(xMaj))
ax.xaxis.set_major_formatter(FormatStrFormatter(xForm))
ax.xaxis.set_minor_locator(MultipleLocator(xMin))
def plot_dist_by_cat(df, X='scale', cat='Continent', lim=(-5,1250), bins=120):
uni_cat = np.array(sorted(df.loc[:,cat].unique()))
if cat=='n_notes':
uni_cat = np.array([4,5,6,7,8,9])
n_cat = uni_cat.size
if n_cat <=6:
fig, ax = plt.subplots(3,2, sharex=True)
elif n_cat <=12:
fig, ax = plt.subplots(4,3, sharex=True)
else:
print(n_cat, ' too large')
fig2, ax2 = plt.subplots()
ax = ax.reshape(ax.size)
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
if not isinstance(df.loc[idx[0],X], str):#
Xarr = df.loc[idx,X]
Xarr2 = [a for a in df.loc[idx,X] if 0<a<1200]
else:
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
Xarr2 = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a if 0<b<1200]
sns.distplot(Xarr, bins=bins, label=str(uni), ax=ax[i])
sns.kdeplot(Xarr2, label=str(uni), ax=ax2)
ax[i].legend(loc='best')
ax2.legend(loc='best')
ticks = np.arange(0, (int(lim[1]/100)+1)*100, 100)
ax[-1].set_xlim(lim)
# ax[-1].xaxis.set_ticks(np.arange(0,1300,100))
ax2.set_xlim(lim)
# ax2.xaxis.set_ticks(np.arange(0,1300,100))
plt.show()
def plot_score_histograms(df):
fig, ax = plt.subplots()
uni_cat = np.array([4,5,6,7,8,9])
for n in uni_cat:
sns.distplot(df.loc[df.n_notes==n, 'harm_sim'], label=str(n), kde=True, bins=40, ax=ax)
ax.legend(loc='best')
plt.show()
# This was used for creating a figure for my CSLM seminar
def plot_similar_cultures(df, X='scale', cat='Continent', lim=(-5,1250)):
groups = [ ['Western', 'East Asia', 'South Asia'],
['Western', 'Oceania'],
['Western', 'South America'],
['South East Asia', 'Africa'],
['Western', 'Middle East']]
fig, ax = plt.subplots(3,2, sharex=True)
plt.subplots_adjust(wspace=0.3, hspace=0.2)
ax = ax.reshape(ax.size)
extra_ax = []
for i, group in enumerate(groups):
# idx = df.loc[df.loc[:,cat].apply(lambda x: x in uni),:].index
for j, uni in enumerate(group):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
sns.distplot(Xarr, bins=120, ax=ax[i], label=str(uni), kde=False, norm_hist=True)
ax[i].legend(loc='best', frameon=False)
ax[0].set_ylabel('Probability')
ax[2].set_ylabel('Probability')
ax[4].set_ylabel('Probability')
ax[4].set_xlabel('Notes in scale (cents)')
ax[5].set_xlabel('Notes in scale (cents)')
# ticks = np.arange(0, (int(lim[1]/100)+1)*100, 400)
# ax[-1].xaxis.set_ticks(ticks)
ax[-1].set_xlim(lim)
set_xticks(ax[-1], 200, 100, '%d')
# plt.savefig('Figs/culture_scale_comparison.png')
plt.show()
# This was used for creating a figure for my paper
def plot_similar_cultures_2(df, X='scale', cat='Continent', lim=(-5,1250)):
groups = [ [], ['Western', 'East Asia', 'South Asia', 'Middle East'],
['Oceania', 'South America', 'South East Asia', 'Africa']]
fig, ax = plt.subplots(3,1, sharex=True)
fig2, ax2 = plt.subplots(8,1, sharex=True)
plt.subplots_adjust(wspace=0.3, hspace=0.2)
ax = ax.reshape(ax.size)
ax2 = ax2.reshape(ax2.size)
extra_ax = []
lbls = ['All', 'Theory', 'Instrument']
cols = sns.color_palette('colorblind')
for i, group in enumerate(groups):
# idx = df.loc[df.loc[:,cat].apply(lambda x: x in uni),:].index
if i:
for j, uni in enumerate(group):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
sns.distplot(Xarr, bins=120, ax=ax2[j+(i-1)*4], label=f"{str(uni):15s} N={len(idx)}", kde=False, norm_hist=True, color=cols[j+(i-1)*4])
sns.kdeplot(Xarr, ax=ax[i], label=f"{str(uni):15s} N={len(idx)}", clip=(5, 1150), color=cols[j+(i-1)*4])
ax2[j+(i-1)*4].legend(loc='upper right', frameon=False)
else:
for j, g in enumerate(groups[:]):
if j:
idx = df.loc[df.loc[:,cat].apply(lambda x: x in g),:].index
else:
idx = df.index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
# sns.distplot(Xarr, bins=120, ax=ax[i], label=lbls[j], kde=False, norm_hist=True)
sns.kdeplot(Xarr, ax=ax[i], label=f"{lbls[j]:15s} N={len(idx)}", clip=(5, 1150))
ax[i].legend(loc='best', frameon=False)
ax[0].set_ylabel('Probability')
ax[1].set_ylabel('Probability')
ax[2].set_ylabel('Probability')
ax[2].set_xlabel('Intervals size (cents)')
# ax[5].set_xlabel('Notes in scale (cents)')
# ticks = np.arange(0, (int(lim[1]/100)+1)*100, 400)
# ax[-1].xaxis.set_ticks(ticks)
ax[-1].set_xlim(lim)
set_xticks(ax[-1], 200, 100, '%d')
# plt.savefig('Figs/culture_scale_comparison.png')
fig.savefig(os.path.join(FIGS_DIR, 'database_intervals_kde.png'))
fig.savefig(os.path.join(FIGS_DIR, 'database_intervals_kde.pdf'))
fig2.savefig(os.path.join(FIGS_DIR, 'database_intervals_hist.png'))
fig2.savefig(os.path.join(FIGS_DIR, 'database_intervals_hist.pdf'))
plt.show()
# This was used for creating a figure for my CSLM seminar
def plot_comparison_ints_by_n(df, X='pair_ints', cat='n_notes', lim=(-5, 605)):
uni_cat = np.array([4,5,6,7,8,9])
fig2, ax2 = plt.subplots(3,2, sharex=True)
plt.subplots_adjust(wspace=0.4, hspace=0.3)
ax2 = ax2.reshape(ax2.size)[[0,2,4,1,3,5]]
ticks = np.arange(0, (int(lim[1]/100)+1)*100, 100)
ax2[-1].xaxis.set_ticks(ticks)
ax2[-1].set_xlim(lim)
ax2[0].set_ylabel('Probability')
ax2[1].set_ylabel('Probability')
ax2[2].set_ylabel('Probability')
ax2[2].set_xlabel('Interval size (cents)')
ax2[5].set_xlabel('Interval size (cents)')
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
sns.distplot(Xarr, bins=120, label="my dataset", ax=ax2[i])
ax2[i].set_title("N = {0}".format(uni))
ax2[5].legend(loc='best', frameon=False)
plt.savefig('Figs/data_set_intervals.png')
plt.show()
fig, ax = plt.subplots(3,2, sharex=True)
ax = ax.reshape(ax.size)[[0,2,4,1,3,5]]
plt.subplots_adjust(wspace=0.4, hspace=0.3)
ax[-1].set_xlim(lim)
ax[-1].xaxis.set_ticks(ticks)
ax[0].set_ylabel('Probability')
ax[1].set_ylabel('Probability')
ax[2].set_ylabel('Probability')
ax[2].set_xlabel('Interval size (cents)')
ax[5].set_xlabel('Interval size (cents)')
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
# sns.kdeplot(Xarr, label="my dataset", ax=ax[i])
sns.kdeplot(Xarr, ax=ax[i])
ax[i].set_title("N = {0}".format(uni))
# ax[5].legend(loc='upper right', frameon=False)
for i, n in enumerate(uni_cat):
data = np.load("Data_for_figs/unrestricted_ints_hist_n{0}.npy".format(n))
ax[i].plot(data[:,0], data[:,1], '-', label="any intervals")
ax[5].legend(loc='best', frameon=False)
plt.savefig('Figs/data_model_comparison_1.png')
for i, n in enumerate(uni_cat):
data = np.load("Data_for_figs/restricted_ints_hist_n{0}.npy".format(n))
ax[i].plot(data[:,0], data[:,1], '-', label="constrained")
ax[5].legend(loc='best', frameon=False)
plt.savefig('Figs/data_model_comparison_2.png')
plt.show()
# This was used for creating a figure for my CSLM seminar
def plot_comparison_ints_by_n_bias(df, X='pair_ints', cat='n_notes', lim=(-5, 605)):
uni_cat = np.array([4,5,6,7,8,9])
fig, ax = plt.subplots(3,2, sharex=True, sharey=True)
ax = ax.reshape(ax.size)[[0,2,4,1,3,5]]
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
sns.distplot(Xarr, bins=120, label="my dataset", ax=ax[i])
ax[i].set_title("N = {0}".format(uni))
ax[5].legend(loc='best', frameon=False)
for i, n in enumerate(uni_cat):
data = np.load("Data_for_figs/biased_ints_hist_n{0}.npy".format(n))
ax[i].plot(data[:,0], data[:,1], '-', label="bias model")
ax[5].legend(loc='best', frameon=False)
ticks = np.arange(0, (int(lim[1]/100)+1)*100, 100)
ax[0].set_ylabel('Probability')
ax[1].set_ylabel('Probability')
ax[2].set_ylabel('Probability')
ax[2].set_xlabel('Interval size (cents)')
ax[5].set_xlabel('Interval size (cents)')
ax[-1].set_xlim(lim)
ax[-1].set_ylim(0,0.015)
# ax[-1].xaxis.set_ticks(np.arange(0,1300,100))
plt.savefig('Figs/data_model_comparison_3.png')
plt.show()
def plot_comparison_scale_by_n(df, X='scale', cat='n_notes', lim=(-5,1250)):
uni_cat = np.array([4,5,6,7,8,9])
fig2, ax2 = plt.subplots(3,2, sharex=True)
plt.subplots_adjust(wspace=0.4, hspace=0.3)
ax2 = ax2.reshape(ax2.size)[[0,2,4,1,3,5]]
ticks = np.arange(0, (int(lim[1]/100)+1)*100, 100)
ax2[-1].xaxis.set_ticks(ticks)
ax2[-1].set_xlim(lim)
ax2[0].set_ylabel('Probability')
ax2[1].set_ylabel('Probability')
ax2[2].set_ylabel('Probability')
ax2[2].set_xlabel('Notes in scale (cents)')
ax2[5].set_xlabel('Notes in scale (cents)')
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
sns.distplot(Xarr, bins=120, label="my dataset", ax=ax2[i])
ax2[i].set_title("N = {0}".format(uni))
ax2[5].legend(loc='best', frameon=False)
plt.savefig('Figs/data_set_intervals.png')
plt.show()
fig, ax = plt.subplots(3,2, sharex=True, sharey=True)
ax = ax.reshape(ax.size)[[0,2,4,1,3,5]]
plt.subplots_adjust(wspace=0.4, hspace=0.3)
ax[-1].set_xlim(lim)
ax[-1].xaxis.set_ticks(ticks)
ax[-1].set_ylim(0,0.005)
ax[0].set_ylabel('Probability')
ax[1].set_ylabel('Probability')
ax[2].set_ylabel('Probability')
ax[2].set_xlabel('Notes in scale (cents)')
ax[5].set_xlabel('Notes in scale (cents)')
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
# sns.kdeplot(Xarr, label="my dataset", ax=ax[i])
sns.distplot(Xarr, ax=ax[i], bins=120)
ax[i].set_title("N = {0}".format(uni))
# ax[5].legend(loc='upper right', frameon=False)
for i, n in enumerate(uni_cat):
data = np.load("Data_for_figs/unrestricted_scale_hist_n{0}.npy".format(n))
ax[i].plot(data[:,0], data[:,1], '-', label="any intervals")
ax[5].legend(loc='best', frameon=False)
plt.savefig('Figs/data_model_scale_comparison_1.png')
for i, n in enumerate(uni_cat):
data = np.load("Data_for_figs/restricted_scale_hist_n{0}.npy".format(n))
ax[i].plot(data[:,0], data[:,1], '-', label="constrained")
ax[5].legend(loc='best', frameon=False)
plt.savefig('Figs/data_model_scale_comparison_2.png')
plt.show()
def plot_comparison_scale_by_n_bias(df, X='scale', cat='n_notes', lim=(-5,1250)):
uni_cat = np.array([4,5,6,7,8,9])
fig, ax = plt.subplots(3,2, sharex=True, sharey=True)
ax = ax.reshape(ax.size)[[0,2,4,1,3,5]]
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
sns.distplot(Xarr, bins=120, label="my dataset", ax=ax[i])
ax[i].set_title("N = {0}".format(uni))
ax[5].legend(loc='best', frameon=False)
for i, n in enumerate(uni_cat):
data = np.load("Data_for_figs/biased_scale_hist_n{0}.npy".format(n))
ax[i].plot(data[:,0], data[:,1], '-', label="bias model")
ax[5].legend(loc='best', frameon=False)
ticks = np.arange(0, (int(lim[1]/100)+1)*100, 100)
ax[0].set_ylabel('Probability')
ax[1].set_ylabel('Probability')
ax[2].set_ylabel('Probability')
ax[2].set_xlabel('Notes in scale (cents)')
ax[5].set_xlabel('Notes in scale (cents)')
ax[-1].set_xlim(lim)
ax[-1].set_ylim(0,0.005)
# ax[-1].xaxis.set_ticks(np.arange(0,1300,100))
plt.savefig('Figs/data_model_scale_comparison_3.png')
plt.show()
def subplot_x_y(n):
if n == 1:
return [1]*2
elif n**0.5 == int(n**0.5):
return [int(n**0.5)]*2
else:
x = int(n**0.5)
y = x + 1
switch = 0
while n > x*y:
if switch:
x += 1
switch = 0
else:
y += 1
switch = 1
return y, x
def plot_best_pair_dist(df_best, df_m, df_real, X='pair_ints', n=7):
sub_y, sub_x = subplot_x_y(len(df_best))
fig, ax = plt.subplots(sub_y, sub_x, sharex=True, sharey=True)
try:
ax = ax.reshape(ax.size)
except:
ax = [ax]
for i in range(len(ax)):
sns.distplot(utils.extract_floats_from_string(df_real.loc[df_real.n_notes==n, X]), bins=100, ax=ax[i])
df = pd.read_feather(df_m.loc[df_best.loc[i, f"idx_{n}"], 'fName'] )
sns.distplot(utils.extract_floats_from_string(df[X]), bins=100, ax=ax[i])
# ax[i].set_title(df_best.loc[i, 'bias'])
def simple_fit(X, Y, fit_fn='None'):
min_idx = np.argmin(X)
max_idx = np.argmax(X)
dX = X[max_idx] - X[min_idx]
dY = Y[max_idx] - Y[min_idx]
if fit_fn == 'None':
fit_fn = lambda x, m, a: m*x + a
popt, pcov = curve_fit(fit_fn, X, Y, p0=[dY/dX, Y[max_idx]])
else:
popt, pcov = curve_fit(fit_fn, X, Y, p0=[dY**2/dX**2, dY/dX, Y[max_idx]])
xnew = np.linspace(X[min_idx], X[max_idx], 10)
ynew = fit_fn(xnew, *popt)
return xnew, ynew, popt
def plot_JSD_vs_scales(df, X='JSD', Y='fr_20', bias_group='HS', n=5, fit=False):
df = rename_bias_groups(df)
df = rename_biases(df)
biases = [b for b in BIASES if BIAS_KEY[b]==bias_group]
sub_y, sub_x = subplot_x_y(len(biases))
sub_y, sub_x = 6,4
fig, ax = plt.subplots(sub_y, sub_x, sharex=True, sharey=True)
try:
ax = ax.reshape(ax.size)
except:
ax = [ax]
for i, bias in enumerate(biases):
if not len(bias):
continue
if n:
sns.scatterplot(x=X, y=Y, data=df.loc[(df.n_notes==n)&(df.bias_group==bias_group)], ax=ax[i], alpha=0.5)
sns.scatterplot(x=X, y=Y, data=df.loc[(df.n_notes==n)&(df.bias==bias)], ax=ax[i])
if fit:
x_fit, y_fit, popt = simple_fit(df.loc[(df.n_notes==n)&(df.bias==bias), X], df.loc[(df.n_notes==n)&(df.bias==bias), Y])
ax[i].plot(x_fit, y_fit)
ax[i].text(0.2, .20, f"m={popt[0]:7.5f}", transform=ax[i].transAxes)
else:
sns.scatterplot(x=X, y=Y, data=df, ax=ax[i], alpha=0.5)
sns.scatterplot(x=X, y=Y, data=df.loc[(df.bias==bias)], ax=ax[i])
print(bias)
ax[i].set_title(''.join(bias.split('_')))
def plot_JSD_vs_scales_bias_group(df, X='JSD', Y='fr_20', save=False, n=5):
df = rename_bias_groups(df)
df = rename_biases(df)
fig, ax = plt.subplots(4,3, sharex=True, sharey=True, figsize=(10,24))
plt.subplots_adjust(hspace=0.30) #wspace=0.3, hspace=0.2)
ax = ax.reshape(ax.size)
if 'cat' in df.columns:
df = df.loc[df.cat=='pair_ints']
for i, bias in enumerate(BIAS_GROUPS):
if n:
sns.scatterplot(x=X, y=Y, data=df.loc[df.n_notes==n], ax=ax[i], alpha=0.5)
sns.scatterplot(x=X, y=Y, data=df.loc[(df.n_notes==n)&(df.bias_group==bias)], ax=ax[i])
else:
sns.scatterplot(x=X, y=Y, data=df, ax=ax[i], alpha=0.5)
sns.scatterplot(x=X, y=Y, data=df.loc[(df.bias_group==bias)], ax=ax[i])
ax[i].set_title(bias)
# if i%2 == 0:
# ax[i].set_ylabel(r'$f_{real}$')
# plt.tight_layout()
# ax[0].set_xlim(df[X].min()*0.8, df[X].max()*1.2)
# ax[0].set_ylim(df[Y].min()*0.8, df[Y].max()*1.2)
if save:
plt.savefig(FIGS_DIR + 'model_comparison.pdf', bbox_inches='tight', pad_inches=0)
plt.savefig(FIGS_DIR + 'model_comparison.png')
def plot_scale_histograms(df, df_real, i, nbin=100, X='scale', neg=1.0):
fig, ax = plt.subplots()
n = df.loc[i, 'n_notes']
bins = np.linspace(0, 1200, num=nbin+1)
df_model = pd.read_feather(df.loc[i, 'fName'])
histM, bins = np.histogram(utils.extract_floats_from_string(df_model.loc[:,X]), bins=bins, normed=True)
histR, bins = np.histogram(utils.extract_floats_from_string(df_real.loc[df_real.n_notes==n,X]), bins=bins, normed=True)
xxx = bins[:-1] + 0.5 * (bins[1] - bins[0])
df_hist = pd.DataFrame(data={'bin':xxx, 'real':histR, 'model':histM*neg})
sns.lineplot(x='bin', y='real', data=df_hist)
sns.lineplot(x='bin', y='model', data=df_hist)
def plot_scale_histograms_compare(df, df_real, i, j, nbin=100, X='scale', neg=1.0, partabase='none', mix=[0,0]):
fig, ax = plt.subplots()
n = df.loc[i, 'n_notes']
bins = np.linspace(0, 1200, num=nbin+1)
if partabase=='none':
histR, bins = np.histogram(utils.extract_floats_from_string(df_real.loc[df_real.n_notes==n,X]), bins=bins, normed=True)
elif partabase=='theory':
histR, bins = np.histogram(utils.extract_floats_from_string(df_real.loc[(df_real.n_notes==n)&(df_real.Theory=='Y'),X]), bins=bins, normed=True)
elif partabase=='instrument':
histR, bins = np.histogram(utils.extract_floats_from_string(df_real.loc[(df_real.n_notes==n)&(df_real.Theory=='N'),X]), bins=bins, normed=True)
xxx = bins[:-1] + 0.5 * (bins[1] - bins[0])
df_hist = pd.DataFrame(data={'bin':xxx, 'real':histR})
sns.lineplot(x='bin', y='real', data=df_hist)
for count, k in enumerate([i, j]):
df_model = pd.read_feather(df.loc[k, 'fName'])
if mix[count]:
X2 = {'pair_ints':'mix_ints', 'scale':'mix_scale'}[X]
histM, bins = np.histogram(utils.extract_floats_from_string(df_model.loc[:,X2]), bins=bins, normed=True)
else:
histM, bins = np.histogram(utils.extract_floats_from_string(df_model.loc[:,X]), bins=bins, normed=True)
df_hist = pd.DataFrame(data={'bin':xxx, 'real':histR, f'model_{count+1}':histM*neg})
sns.lineplot(x='bin', y=f'model_{count+1}', data=df_hist, label=f'model_{count+1}')
ax.legend()
def plot_JSD_fr_against_input_variables(df, var, cat='pair_ints', w=20):
varArr = sorted(df[var].unique())
fig, ax = plt.subplots(2,1)
sns.set_style('darkgrid')
# for bias in df.bias.unique():
# JSD = [df.loc[(df.cat==cat)&(df[var]==v)&(df.bias==bias),'JSD'].mean() for v in varArr]
# JSDerr = [df.loc[(df.cat==cat)&(df[var]==v)&(df.bias==bias),'JSD'].std() for v in varArr]
# FR = [df.loc[(df.cat==cat)&(df[var]==v)&(df.bias==bias),f"fr_{w}"].mean() for v in varArr]
# FRerr = [df.loc[(df.cat==cat)&(df[var]==v)&(df.bias==bias),f"fr_{w}"].std() for v in varArr]
# ax[0].plot(varArr, JSD)# min(JSD))
# ax[1].plot(varArr, FR)# min(FR))
# JSDmin = [df.loc[(df.cat==cat)&(df[var]==v)&(df.bias==bias),'JSD'].min() for v in varArr]
# FRmax = [df.loc[(df.cat==cat)&(df[var]==v)&(df.bias==bias),f"fr_{w}"].max() for v in varArr]
# ax[0].plot(varArr, JSDmin, label='min')
# ax[1].plot(varArr, FRmax, label='max')
ax[0].set_ylabel('JSD')
ax[1].set_ylabel('frac_scales')
ax[1].set_xlabel(var)
# JSDmin = [df.loc[(df.cat==cat)&(df[var]==v),'JSD'].min() for v in varArr]
# FRmax = [df.loc[(df.cat==cat)&(df[var]==v),f"fr_{w}"].max() for v in varArr]
JSDmean = [df.loc[(df.cat==cat)&(df[var]==v),'JSD'].mean() for v in varArr]
FRmean = [df.loc[(df.cat==cat)&(df[var]==v),f"fr_{w}"].mean() for v in varArr]
ax[0].plot(varArr, JSDmean, label='mean')
ax[1].plot(varArr, FRmean, label='mean')
# ax[0].plot(varArr, JSDmin, label='min')
# ax[1].plot(varArr, FRmax, label='max')
# plt.
plt.show()
def plot_best_variables_cumulative(df, var, cat='pair_ints', w=20):
df_J = df.loc[df.cat==cat].sort_values(by='JSD')
df_f = df.loc[df.cat==cat].sort_values(by=f"fr_{w:02d}", ascending=False)
X = range(10, len(df_J))
Y_J = [df_J.loc[:,var].iloc[:i].mean() for i in X]
Y_f = [df_f.loc[:,var].iloc[:i].mean() for i in X]
fig, ax = plt.subplots(2,1)
sns.set_style('darkgrid')
ax[0].plot(X, Y_J, label='JSD')
ax[1].plot(X, Y_f, label='frac_scales')
ax[0].set_ylabel('JSD')
ax[1].set_ylabel('frac_scales')
ax[1].set_xlabel('averaged over top scoring N models')
# ax[0].set_xscale('log')
# ax[1].set_xscale('log')
plt.show()
def plot_best_constraints_cumulative(df, var, cat='pair_ints', w=20, n=10):
if var == 'min_int':
var_arr = np.arange(0., 110., 10.)
dV = 10
elif var == 'max_int':
var_arr = np.arange(400., 1300., 50.)
dV = 50
df_J = df.loc[df.cat==cat].sort_values(by='JSD').reset_index(drop=True)
df_f = df.loc[df.cat==cat].sort_values(by=f"fr_{w:02d}", ascending=False).reset_index(drop=True)
X = np.arange(0, len(df_J), n)
# df_J['N'] = np.array(df_J.index / n, dtype=int)
# df_f['N'] = np.array(df_J.index / n, dtype=int)
xi, yi = np.meshgrid(X[1:], var_arr)
distJ = np.zeros((X.size-1, var_arr.size), dtype=int)
distF = np.zeros((X.size-1, var_arr.size), dtype=int)
for i, x, in enumerate(X[1:]):
for y in df_J.loc[X[i]:x-1,var]:
distJ[i, int(y/dV - var_arr[0]/dV)] += 1
for y in df_f.loc[X[i]:x-1,var]:
distF[i, int(y/dV - var_arr[0]/dV)] += 1
fig, ax = plt.subplots(2,1)
sns.set_style('darkgrid')
cs1 = ax[0].contourf(xi, yi, distJ.T)
cs2 = ax[1].contourf(xi, yi, distF.T)
ax[0].set_ylabel(f'{var}')
ax[1].set_ylabel(f'{var}')
ax[0].set_xlabel(f'{var} histogram, sorted by JSD')
ax[1].set_xlabel(f'{var} histogram, sorted by frac_scales')
# ax[0].set_xscale('log')
# ax[1].set_xscale('log')
fig.colorbar(cs1, ax=ax[0])
fig.colorbar(cs2, ax=ax[1])
plt.show()
def plot_bias_performance_ranked(df, cat='pair_ints', w=20, n=10):
var = 'bias_group'
df = rename_bias_groups(df)
biases = np.array(['none', 'HS', 'S#1', 'distW', 'distI', 'distI_S#1', 'distW_S#1', 'distW_S#2'])
bias_dict = {biases[i]:i for i in range(len(biases))}
df_J = df.loc[df.cat==cat].sort_values(by='JSD').reset_index(drop=True)
df_f = df.loc[df.cat==cat].sort_values(by=f"fr_{w:02d}", ascending=False).reset_index(drop=True)
X = np.arange(0, len(df_J), n)
Y = np.arange(biases.size)
xi, yi = np.meshgrid(X[1:], Y)
distJ = np.zeros((X.size-1, biases.size), dtype=int)
distF = np.zeros((X.size-1, biases.size), dtype=int)
for i, x, in enumerate(X[1:]):
for y in df_J.loc[X[i]:x-1,var]:
distJ[i, bias_dict[y]] += 1
for y in df_f.loc[X[i]:x-1,var]:
distF[i, bias_dict[y]] += 1
fig, ax = plt.subplots(2,1)
sns.set_style('darkgrid')
ax[0].contourf(xi, yi, distJ.T)
ax[1].contourf(xi, yi, distF.T)
ax[0].set_yticklabels(biases)
ax[1].set_yticklabels(biases)
ax[0].set_ylabel(f'{var}')
ax[1].set_ylabel(f'{var}')
ax[0].set_xlabel(f'{var} histogram, sorted by JSD')
ax[1].set_xlabel(f'{var} histogram, sorted by frac_scales')
# ax[0].set_xscale('log')
# ax[1].set_xscale('log')
plt.show()
def instructional_diagram():
### Set up figure and styles
fig = plt.figure(figsize=(10,20))
gs = gridspec.GridSpec(3,1, height_ratios=[2.0, 1.5, 1.0])
# gs.update(wspace=0.25) #,hspace=0.20)
ax1 = fig.add_subplot( gs[0,0] )
ax2 = fig.add_subplot( gs[1,0] )
ax3 = fig.add_subplot( gs[2,0] )
sns.set_style('darkgrid')
font1 = 20
### Interval distribution major key
pair_ints = '200;200;100;200;200;200;100'
all_ints = '200;400;500;700;900;1100;1200;200;300;500;700;900;1000;100;300;500;700;800;200;400;600;700;200;400;500;200;300;100'
bins = np.linspace(0, 1200, num=13)
xxx = np.array(bins[1:], dtype=int)
hist, edges = np.histogram([int(x) for x in pair_ints.split(';')], bins=bins)
hist2, edges = np.histogram([int(x) for x in all_ints.split(';')], bins=bins)
out_dict = {'Interval':list(xxx)*2, 'Probability':list(hist*13)+list(hist2*13), 'interval set':['adjacent']*12 + ['all']*12}
sns.barplot(x='Interval', y='Probability', data=pd.DataFrame(data=out_dict), hue='interval set', ax=ax1)
ax1.set_ylabel('Probability', fontsize=font1)
ax1.set_xlabel('', fontsize=font1)
ax1.set_xticklabels([str(int(x)) if (x/100)%2 == 0 else '' for x in np.arange(0, 1222, 100)])
ax1.legend(frameon=False)
### Harmonic series attractors
data_base = np.load(os.path.join(DATA_DIR, 'attractors_base.npy'))
data_w40 = np.load(os.path.join(DATA_DIR, 'attractors_w40.npy'))
ax2.plot(data_base[:,0], data_base[:,1])
ax2.plot(data_w40[:,0], data_w40[:,1], 'o', fillstyle='none')
ax2.set_ylabel('harmonic\nsimilarity score', fontsize=font1)
# ax2.set_xlabel('Interval size', fontsize=font1)
### Smaller is better
ax3.plot(np.linspace(0,1200, num=100), np.exp(-np.linspace(0, 500, num=100)**2 / 1200 * 0.001))
ax3.plot(np.linspace(0,1200, num=100), np.exp(-np.linspace(0, 500, num=100)**2 / 1200 * 0.01))
ax3.plot(np.linspace(0,1200, num=100), np.exp(-np.linspace(0, 500, num=100)**2 / 1200 * 0.1))
ax3.set_ylabel('Probability', fontsize=font1)
ax3.set_xlabel('Interval size', fontsize=font1)
plt.tight_layout()
plt.savefig(FIGS_DIR + 'instructional_diagram.pdf')
plt.savefig(FIGS_DIR + 'instructional_diagram.png')
plt.show()
def plot_model_performance(df_real, df, mi, ma, bias, q, X='pair_ints'):
fig, ax = plt.subplots(2)
for i, n in enumerate([5,7]):
sns.distplot(utils.extract_floats_from_string(df_real.loc[df_real.n_notes==n, X]), bins=100, ax=ax[i])
        df1 = pd.read_feather(df.loc[(df.n_notes==n)&(df.min_int==mi)&(df.max_int==ma)&(df.bias=='none'), 'fName'].values[0])
# coding: utf-8
# # DGA DETECTION MODEL
# In[1]:
# IMPORT MODULES
import pandas as pd
import numpy as np
get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.model_selection import cross_val_score
# # Data Ingestion
# In[2]:
#Read Clean Domains
data_clean = pd.read_csv('Alexa_CleanDomains.csv',header=None)
data_clean['domains']=data_clean[0]
# In[3]:
# Strip out xn--
#data_clean['domains'] = data_clean['domains'].map(lambda x: str(x).lstrip('xn--'))
# In[4]:
# READ DGA Domains
data_dga = pd.read_csv('DGADomains.csv',header=None)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 10:15:25 2021
@author: lenakilian
"""
import pandas as pd
import copy as cp
import geopandas as gpd
wd = r'/Users/lenakilian/Documents/Ausbildung/UoLeeds/PhD/Analysis/'
years = list(range(2007, 2018, 2))
geog = 'MSOA'
dict_cat = 'category_8'
lookup = pd.read_csv(wd + 'data/raw/Geography/Conversion_Lookups/UK_full_lookup_2001_to_2011.csv')
#!/usr/bin/python
# This script populates the Company table with
# simulated data. Some data is obtained from
# public sources available on the Internet.
import os
import requests
import pandas as pd
import random
from datetime import datetime
from dotenv import load_dotenv
import database
import util
class Company:
def __init__(self):
random.seed(5) # For repeated experiments
load_dotenv()
def businessTypes(self):
"""
This method defines a set of business types.
A list of them is returned.
"""
business_types = [
'Sole proprietor',
'Partnership',
'Company',
'Franchise',
'Limited liability'
]
return business_types
def countryNames(self):
"""
This method returns a list of countries
"""
countries = pd.read_csv('third_parties/countries/data.csv')
return list(countries.Name)
def businessDir(self):
"""
Getting data from company dataset examples
"""
        business_dir = pd.read_csv('third_parties/port_moody_companies/data.csv')
import math
import string
from typing import Optional, Sequence, Tuple
import hypothesis.strategies as st
import numpy as np
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
from hypothesis import example, given, settings
import fletcher as fr
from fletcher.testing import examples
try:
# Only available in pandas 1.2+
# When this class is defined, we can also use `.str` on fletcher columns.
from pandas.core.strings.object_array import ObjectStringArrayMixin # noqa F401
_str_accessors = ["str", "fr_str"]
except ImportError:
_str_accessors = ["fr_str"]
@pytest.fixture(params=_str_accessors, scope="module")
def str_accessor(request):
return request.param
@st.composite
def string_patterns_st(draw, max_len=50) -> Tuple[Sequence[Optional[str]], str, int]:
ab_charset_st = st.sampled_from("ab")
ascii_charset_st = st.sampled_from(string.ascii_letters)
charset_st = st.sampled_from((ab_charset_st, ascii_charset_st))
charset = draw(charset_st)
fixed_pattern_st = st.sampled_from(["a", "aab", "aabaa"])
generated_pattern_st = st.text(alphabet=charset, max_size=max_len)
pattern_st = st.one_of(fixed_pattern_st, generated_pattern_st)
pattern = draw(pattern_st)
min_str_size = 0 if len(pattern) > 0 else 1
raw_str_st = st.one_of(
st.none(), st.lists(charset, min_size=min_str_size, max_size=max_len)
)
raw_seq_st = st.lists(raw_str_st, max_size=max_len)
raw_seq = draw(raw_seq_st)
for s in raw_seq:
if s is None:
continue
"""
There seems to be a bug in pandas for this edge case
>>> pd.Series(['']).str.replace('', 'abc', n=1)
0
dtype: object
But
>>> pd.Series(['']).str.replace('', 'abc')
0 abc
dtype: object
I believe the second result is the correct one and this is what the
fletcher implementation returns.
"""
max_ind = len(s) - len(pattern)
if max_ind < 0:
continue
repl_ind_st = st.integers(min_value=0, max_value=max_ind)
repl_ind_list_st = st.lists(repl_ind_st, max_size=math.ceil(max_len / 10))
repl_ind_list = draw(repl_ind_list_st)
for j in repl_ind_list:
s[j : j + len(pattern)] = pattern
seq = ["".join(s) if s is not None else None for s in raw_seq]
offset = draw(st.integers(min_value=0, max_value=len(seq)))
return (seq, pattern, offset)
string_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", "bb", None], "a"),
(["aa", "ab", "ba", "bb", None], "A"),
(["aa", "ab", "bA", "bB", None], "a"),
(["aa", "AB", "ba", "BB", None], "A"),
],
)
strip_examples = examples(
example_list=[
[],
[""],
[None],
[" "],
["\u2000"],
[" a"],
["a "],
[" a "],
# https://github.com/xhochy/fletcher/issues/174
["\xa0"],
["\u2000a\u2000"],
["\u2000\u200C\u2000"],
["\n\u200C\r"],
["\u2000\x80\u2000"],
["\t\x80\x0b"],
["\u2000\u10FFFF\u2000"],
[" \u10FFFF "],
]
+ [
[c]
for c in " \t\r\n\x1f\x1e\x1d\x1c\x0c\x0b"
"\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2000\u2009\u200A\u200B\u2028\u2029\u202F\u205F"
]
+ [[chr(c)] for c in range(0x32)]
+ [[chr(c)] for c in range(0x80, 0x85)]
+ [[chr(c)] for c in range(0x200C, 0x2030)]
+ [[chr(c)] for c in range(0x2060, 0x2070)]
+ [[chr(c)] for c in range(0x10FFFE, 0x110000)],
example_kword="data",
)
def _fr_series_from_data(data, fletcher_variant, dtype=pa.string(), index=None):
arrow_data = pa.array(data, type=dtype)
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
return pd.Series(fr_array, index=index)
def _check_series_equal(result_fr, result_pd):
result_fr = result_fr.astype(result_pd.dtype)
tm.assert_series_equal(result_fr, result_pd)
def _check_str_to_t(
t, func, data, str_accessor, fletcher_variant, test_offset=0, *args, **kwargs
):
"""Check a .str. function that returns a series with type t."""
tail_len = len(data) - test_offset
error = None
try:
ser_pd = pd.Series(data, dtype=str).tail(tail_len)
result_pd = getattr(ser_pd.str, func)(*args, **kwargs)
except Exception as e:
error = e
ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
if error:
# If pandas raises an exception, fletcher should do so, too.
with pytest.raises(type(error)):
result_fr = getattr(getattr(ser_fr, str_accessor), func)(*args, **kwargs)
else:
result_fr = getattr(getattr(ser_fr, str_accessor), func)(*args, **kwargs)
_check_series_equal(result_fr, result_pd)
def _check_str_to_str(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(str, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def _check_str_to_bool(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(bool, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def _check_str_to_int(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(int, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def test_fr_str_accessor(fletcher_array):
data = ["a", "b"]
ser_pd = pd.Series(data)
# object series is returned
s = ser_pd.fr_str.encode("utf8")
assert s.dtype == np.dtype("O")
# test fletcher functionality and fallback to pandas
arrow_data = pa.array(data, type=pa.string())
fr_array = fletcher_array(arrow_data)
ser_fr = pd.Series(fr_array)
# pandas strings only method
s = ser_fr.fr_str.encode("utf8")
assert isinstance(s.values, fr.FletcherBaseArray)
def test_fr_str_accessor_fail(fletcher_variant):
data = [1, 2]
ser_pd = pd.Series(data)
with pytest.raises(Exception):
ser_pd.fr_str.startswith("a")
@settings(deadline=None)
@given(char=st.characters(blacklist_categories=("Cs",)))
def test_utf8_size(char):
char_bytes = char.encode("utf-8")
expected = len(char_bytes)
computed = fr.algorithms.string.get_utf8_size(char_bytes[0])
assert computed == expected
#####################################################
## String accessor methods (sorted alphabetically) ##
#####################################################
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_capitalize(data, str_accessor, fletcher_variant):
_check_str_to_str("capitalize", data, str_accessor, fletcher_variant)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_casefold(data, str_accessor, fletcher_variant):
_check_str_to_str("casefold", data, str_accessor, fletcher_variant)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_cat(data, str_accessor, fletcher_variant, fletcher_variant_2):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = pd.Series(data, dtype=str)
ser_fr = _fr_series_from_data(data, fletcher_variant)
ser_fr_other = _fr_series_from_data(data, fletcher_variant_2)
result_pd = ser_pd.str.cat(ser_pd)
result_fr = getattr(ser_fr, str_accessor).cat(ser_fr_other)
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in cat, keep this in line
result_fr[result_fr.isna()] = np.nan
tm.assert_series_equal(result_fr, result_pd)
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
width=st.integers(min_value=0, max_value=50),
)
def test_center(data, width, str_accessor, fletcher_variant):
_check_str_to_str("center", data, str_accessor, fletcher_variant, width=width)
@string_patterns
def test_contains_no_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains", data, str_accessor, fletcher_variant, pat=pat, regex=False
)
@pytest.mark.parametrize(
"data, pat, expected",
[
([], "", []),
(["a", "b"], "", [True, True]),
(["aa", "Ab", "ba", "bb", None], "a", [True, False, True, False, None]),
],
)
def test_contains_no_regex_ascii(data, pat, expected, str_accessor, fletcher_variant):
if str_accessor == "str":
pytest.skip(
"return types not stable yet, might sometimes return null instead of bool"
)
return
fr_series = _fr_series_from_data(data, fletcher_variant)
fr_expected = _fr_series_from_data(expected, fletcher_variant, pa.bool_())
# Run over slices to check offset handling code
for i in range(len(data)):
ser = fr_series.tail(len(data) - i)
expected = fr_expected.tail(len(data) - i)
result = getattr(ser, str_accessor).contains(pat, regex=False)
tm.assert_series_equal(result, expected)
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
def test_contains_no_regex_case_sensitive(data_tuple, str_accessor, fletcher_variant):
data, pat, test_offset = data_tuple
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
test_offset=test_offset,
pat=pat,
case=True,
regex=False,
)
@string_patterns
def test_contains_no_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
pat=pat,
regex=False,
case=False,
)
regex_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", None], "a"),
(["aa", "ab", "ba", None], "a$"),
(["aa", "ab", "ba", None], "^a"),
(["Aa", "ab", "ba", None], "A"),
(["aa", "AB", "ba", None], "A$"),
(["aa", "AB", "ba", None], "^A"),
],
)
@regex_patterns
def test_contains_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains", data, str_accessor, fletcher_variant, pat=pat, regex=True
)
@regex_patterns
def test_contains_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
pat=pat,
regex=True,
case=False,
)
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
@example(data_tuple=(["a"], "", 0), fletcher_variant="chunked")
def test_count_no_regex(data_tuple, str_accessor, fletcher_variant):
"""Check a .str. function that returns a series with type t."""
data, pat, test_offset = data_tuple
tail_len = len(data) - test_offset
ser_pd = pd.Series(data, dtype=str).tail(tail_len)
result_pd = getattr(ser_pd.str, "count")(pat=pat)
ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
kwargs = {}
if str_accessor.startswith("fr_"):
kwargs["regex"] = False
result_fr = getattr(ser_fr, str_accessor).count(pat=pat, **kwargs)
_check_series_equal(result_fr, result_pd)
@regex_patterns
def test_count_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_int("count", data, str_accessor, fletcher_variant, pat=pat)
@string_patterns
def test_text_endswith(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool("endswith", data, str_accessor, fletcher_variant, pat=pat)
def _check_extract(func, str_accessor, fletcher_variant, data, regex):
if str_accessor == "str":
pytest.skip(f"{func} is not yet dispatched to the ExtensionArray")
return
index = pd.Index(range(1, len(data) + 1))
ser_fr = _fr_series_from_data(data, fletcher_variant, index=index)
result_fr = getattr(getattr(ser_fr, str_accessor), func)(regex)
assert isinstance(result_fr[0].dtype, fr.FletcherBaseDtype)
ser_pd = pd.Series(data, index=index)
result_pd = getattr(ser_pd.str, func)(regex)
tm.assert_frame_equal(result_pd, result_fr.astype(object))
@pytest.mark.parametrize("regex", ["([0-9]+)", "([0-9]+)\\+([a-z]+)*"])
@pytest.mark.parametrize(
"data", [["123+"], ["123+a"], ["123+a", "123+"], ["123+", "123+a"]]
)
def test_extract(str_accessor, fletcher_variant, data, regex):
_check_extract("extract", str_accessor, fletcher_variant, data, regex)
@pytest.mark.parametrize("regex", ["([0-9]+)", "([0-9]+)\\+([a-z]+)*"])
@pytest.mark.parametrize(
"data", [["123+"], ["123+a"], ["123+a", "123+"], ["123+", "123+a"]]
)
def test_extractall(str_accessor, fletcher_variant, data, regex):
_check_extract("extractall", str_accessor, fletcher_variant, data, regex)
@string_patterns
def test_find(data, pat, str_accessor, fletcher_variant):
_check_str_to_int("find", data, str_accessor, fletcher_variant, sub=pat)
@string_patterns
def test_findall(data, pat, str_accessor, fletcher_variant):
_check_str_to_int("findall", data, str_accessor, fletcher_variant, pat=pat)
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
n=st.integers(min_value=0, max_value=10),
)
def test_get(data, n, str_accessor, fletcher_variant):
_check_str_to_str("get", data, str_accessor, fletcher_variant, i=n)
@string_patterns
def test_index(data, pat, str_accessor, fletcher_variant):
_check_str_to_int("index", data, str_accessor, fletcher_variant, sub=pat)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_len(data, str_accessor, fletcher_variant):
_check_str_to_int("len", data, str_accessor, fletcher_variant)
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
n=st.integers(min_value=0, max_value=50),
)
def test_ljust(data, n, str_accessor, fletcher_variant):
_check_str_to_str("ljust", data, str_accessor, fletcher_variant, width=n)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_lower(data, str_accessor, fletcher_variant):
_check_str_to_str("lower", data, str_accessor, fletcher_variant)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@strip_examples
def test_lstrip(str_accessor, fletcher_variant, data):
_do_test_text_strip(str_accessor, fletcher_variant, 1, data, strip_method="lstrip")
@pytest.mark.parametrize("case", [True, False])
@pytest.mark.parametrize("pat", ["([0-9]+)", "([0-9]+)\\+([a-z]+)*"])
@pytest.mark.parametrize(
"data", [["123+"], ["123+a"], ["123+a", "123+"], ["123+", "123+a"]]
)
def test_match(data, pat, case, str_accessor, fletcher_variant):
_check_str_to_bool(
"match", data, str_accessor, fletcher_variant, pat=pat, case=case
)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@pytest.mark.parametrize("form", ["NFC", "NFKC", "NFD", "NFKD"])
def test_normalize(data, form, str_accessor, fletcher_variant):
_check_str_to_str("normalize", data, str_accessor, fletcher_variant, form=form)
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
n=st.integers(min_value=0, max_value=50),
)
@pytest.mark.parametrize("side", ["left", "right", "both"])
def test_pad(data, n, side, str_accessor, fletcher_variant):
_check_str_to_str("pad", data, str_accessor, fletcher_variant, width=n, side=side)
@pytest.mark.parametrize("data", [["123"], ["123+"], ["123+a+", "123+"]])
@pytest.mark.parametrize("expand", [True, False])
def test_partition(str_accessor, fletcher_variant, data, expand):
if not expand:
pytest.xfail(
"partition(expand=False) not supported as pyarrow cannot deal with tuples"
)
if str_accessor == "str":
pytest.xfail("string.parititon always returns a tuple")
_do_test_split(
str_accessor, fletcher_variant, data, expand, split_method="partition"
)
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
n=st.integers(min_value=0, max_value=10),
)
def test_repeat(data, n, str_accessor, fletcher_variant):
_check_str_to_str("repeat", data, str_accessor, fletcher_variant, repeats=n)
@settings(deadline=None)
@given(
data_tuple=string_patterns_st(),
n=st.integers(min_value=0, max_value=10),
repl=st.sampled_from(["len4", "", "z"]),
)
@example(
data_tuple=(["aababaa"], "aabaa", 0),
repl="len4",
n=1,
fletcher_variant="continuous",
)
@example(data_tuple=(["aaa"], "a", 0), repl="len4", n=1, fletcher_variant="continuous")
def test_replace_no_regex_case_sensitive(
data_tuple, repl, n, str_accessor, fletcher_variant
):
data, pat, test_offset = data_tuple
_check_str_to_str(
"replace",
data,
str_accessor,
fletcher_variant,
test_offset=test_offset,
pat=pat,
repl=repl,
n=n,
case=True,
regex=False,
)
@string_patterns
def test_rfind(data, pat, str_accessor, fletcher_variant):
_check_str_to_int("rfind", data, str_accessor, fletcher_variant, sub=pat)
@string_patterns
def test_rindex(data, pat, str_accessor, fletcher_variant):
_check_str_to_int("index", data, str_accessor, fletcher_variant, sub=pat)
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
n=st.integers(min_value=0, max_value=50),
)
def test_rjust(data, n, str_accessor, fletcher_variant):
_check_str_to_str("rjust", data, str_accessor, fletcher_variant, width=n)
@pytest.mark.parametrize("data", [["123"], ["123+"], ["123+a+", "123+"]])
@pytest.mark.parametrize("expand", [True, False])
def test_rpartition(str_accessor, fletcher_variant, data, expand):
if not expand:
pytest.xfail(
"partition(expand=False) not supported as pyarrow cannot deal with tuples"
)
if str_accessor == "str":
pytest.xfail("string.parititon always returns a tuple")
_do_test_split(
str_accessor, fletcher_variant, data, expand, split_method="rpartition"
)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@strip_examples
def test_rstrip(str_accessor, fletcher_variant, data):
_do_test_text_strip(str_accessor, fletcher_variant, 1, data, strip_method="rstrip")
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
slice_=st.tuples(st.integers(-20, 20), st.integers(-20, 20), st.integers(-20, 20)),
)
def test_slice(data, slice_, str_accessor, fletcher_variant):
if slice_[2] == 0:
pytest.raises(ValueError)
return
if data == [None] or data == [""]:
return
ser_fr = _fr_series_from_data(data, fletcher_variant)
result_fr = getattr(ser_fr, str_accessor).slice(*slice_)
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in cat, keep this in line
result_fr[result_fr.isna()] = np.nan
ser_pd = pd.Series(data, dtype=object)
result_pd = ser_pd.str.slice(*slice_)
tm.assert_series_equal(result_fr, result_pd)
def test_slice_replace(str_accessor, fletcher_variant):
ser = _fr_series_from_data(["a", "ab", "abc", "abdc", "abcde"], fletcher_variant)
# Using test cases from the pandas documentation
result = getattr(ser, str_accessor).slice_replace(1, repl="X")
expected = _fr_series_from_data(["aX", "aX", "aX", "aX", "aX"], fletcher_variant)
tm.assert_series_equal(result, expected)
result = getattr(ser, str_accessor).slice_replace(stop=2, repl="X")
expected = _fr_series_from_data(["X", "X", "Xc", "Xdc", "Xcde"], fletcher_variant)
tm.assert_series_equal(result, expected)
result = getattr(ser, str_accessor).slice_replace(start=1, stop=3, repl="X")
expected = _fr_series_from_data(["aX", "aX", "aX", "aXc", "aXde"], fletcher_variant)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("data", [["123"], ["123+"], ["123+a+", "123+"]])
@pytest.mark.parametrize("expand", [True, False])
def test_split(str_accessor, fletcher_variant, data, expand):
_do_test_split(str_accessor, fletcher_variant, data, expand, split_method="split")
@pytest.mark.parametrize("data", [["123"], ["123+"], ["123+a+", "123+"]])
@pytest.mark.parametrize("expand", [True, False])
def test_rsplit(str_accessor, fletcher_variant, data, expand):
_do_test_split(str_accessor, fletcher_variant, data, expand, split_method="rsplit")
def _do_test_split(str_accessor, fletcher_variant, data, expand, split_method):
len_data = len(data)
idx_a = list(range(1, len_data + 1))
idx_b = list(range(2, len_data + 2))
index = pd.MultiIndex.from_tuples(
list(zip(idx_a, idx_b)), names=["first", "second"]
)
ser_fr = _fr_series_from_data(data, fletcher_variant, index=index)
result_fr = getattr(getattr(ser_fr, str_accessor), split_method)("+", expand=expand)
ser_pd = pd.Series(data, index=index)
result_pd = getattr(ser_pd.str, split_method)("+", expand=expand)
if expand:
tm.assert_frame_equal(result_pd, result_fr.astype(object))
else:
tm.assert_series_equal(result_pd, result_fr.astype(object))
@string_patterns
def test_startswith(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool("startswith", data, str_accessor, fletcher_variant, pat=pat)
@settings(deadline=None, max_examples=3)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@examples(
example_list=[
[
" 000000000000000000000000000000000000000000İࠀࠀࠀࠀ𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐤱000000000000𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀"
],
["\x80 "],
[],
],
example_kword="data",
)
def test_strip_offset(str_accessor, fletcher_variant, fletcher_slice_offset, data):
_do_test_text_strip(str_accessor, fletcher_variant, fletcher_slice_offset, data)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@strip_examples
def test_strip(str_accessor, fletcher_variant, data):
_do_test_text_strip(str_accessor, fletcher_variant, 1, data)
def _do_test_text_strip(
str_accessor, fletcher_variant, fletcher_slice_offset, data, strip_method="strip"
):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = pd.Series(data, dtype=str)
arrow_data = pa.array(
[None for _ in range(fletcher_slice_offset)] + data, type=pa.string()
)
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
    ser_fr = pd.Series(fr_array[fletcher_slice_offset:])
from __future__ import print_function, absolute_import
import os, sys, subprocess, shlex, tempfile, time, sklearn.base, math
import numpy as np
import pandas as pd
from pandas_extensions import *
from ExeEstimator import *
class LibFFMClassifier(ExeEstimator, sklearn.base.ClassifierMixin):
'''
options:
-l <lambda>: set regularization parameter (default 0)
-k <factor>: set number of latent factors (default 4)
-t <iteration>: set number of iterations (default 15)
-r <eta>: set learning rate (default 0.1)
-s <nr_threads>: set number of threads (default 1)
-p <path>: set path to the validation set
    --quiet: quiet mode (no output)
--norm: do instance-wise normalization
--no-rand: disable random update
`--norm' helps you to do instance-wise normalization. When it is enabled,
you can simply assign `1' to `value' in the data.
'''
def __init__(self, columns, lambda_v=0, factor=4, iteration=15, eta=0.1,
nr_threads=1, quiet=False, normalize=None, no_rand=None):
ExeEstimator.__init__(self)
self.columns = columns.tolist() if hasattr(columns, 'tolist') else columns
self.lambda_v = lambda_v
self.factor = factor
self.iteration = iteration
self.eta = eta
self.nr_threads = nr_threads
self.quiet = quiet
self.normalize = normalize
self.no_rand = no_rand
def fit(self, X, y=None):
if type(X) is str: train_file = X
else:
            if not hasattr(X, 'values'): X = pd.DataFrame(X, columns=self.columns)
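# Illustrative construction only (the column names below are placeholders, not from the
# original project); the hyper-parameters mirror the libffm options documented above.
def _make_example_libffm():
    return LibFFMClassifier(columns=['field_a', 'field_b'], lambda_v=0.0002,
                            factor=4, iteration=15, eta=0.1, nr_threads=4,
                            quiet=True, normalize=True)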
from .io import read_annotations, save_annotations
import warnings
import glob
import os
import os.path as path
import numpy as np
import pandas as pd
class AnnotationFormat:
"""
Class containing useful data for accessing and manipulating annotations.
I've tried to extract as many "magic constants" out of the actual methods as
possible so that they can be grouped here and changed easily in the future.
"""
# Column Names
LEFT_COL = "Begin Time (s)"
RIGHT_COL = "End Time (s)"
TOP_COL = "High Freq (Hz)"
BOT_COL = "Low Freq (Hz)"
CLASS_COL = "Species"
CLASS_CONF_COL = "Species Confidence"
CALL_UNCERTAINTY_COL = "Call Uncertainty"
# Column which cannot be left as NA or NaN
REQUIRED_COLS = [
LEFT_COL,
RIGHT_COL,
TOP_COL,
BOT_COL,
CLASS_COL,
CLASS_CONF_COL,
CALL_UNCERTAINTY_COL
]
# Dictionary mapping annotator's noisy labels to a constant class name
CLASS_LABEL_MAP = {
"humpback whale": "hb",
"hb whale": "hb",
"hb?": "hb",
"hhb": "hb",
"hb": "hb",
"jn": "hb",
"sea lion": "sl",
"sl": "sl",
"rockfish": "rf",
"rf": "rf",
"killer whale": "kw",
"kw": "kw",
"?": "?",
"mech": "?",
"mechanical": "?"
}
# Boxes need to span at least 1ms and 1/100 Hz
# If a box is dropped for this reason, it was likely created by mistake.
BOX_MIN_DURATION = 1e-3
BOX_MIN_FREQ_RANGE = 1e-2
# Useful glob patterns for finding annotation files and mispellings
PATTERN = "*.*-*.txt"
BAD_PATTERNS = ["*.*_*.txt"]
_format = AnnotationFormat()
def get_all_classes(annotation_paths, verbose=False):
"""
Returns a list of all classes seen in the annotation files.
Parameters
annotation_paths : list of str
paths to the .txt annotation files (eg: ['/foo/bar/annots.txt'])
verbose : bool, optional (default: False)
flag to control whether debug information is printed
Returns
classes : list of str
List containing all unique classes
"""
classes = set()
for annot_fname in annotation_paths:
classes.update(list(read_annotations(annot_fname)[_format.CLASS_COL].unique()))
classes = sorted([s for s in list(classes)])
if verbose:
print("Classes found: ", classes)
return classes
def get_area(annotation):
"""
Calculates the area of a single annotation box.
Parameters
annotation : pandas Series
a single annotation
Returns
area : float
Area of the bounding box (Hz*Seconds)
"""
return ((annotation[_format.RIGHT_COL] - annotation[_format.LEFT_COL])
* (annotation[_format.TOP_COL] - annotation[_format.BOT_COL]))
def get_all_annotations_in_directory(directory, check_misnomers=True):
"""
Uses glob to construct a list of paths to each file in the provided
directory which matches the correct formatting of an annotation file name.
Parameters
directory : str
path to the directory of interest
check_misnomers : bool, optional (default: True)
flag to control whether to warn about potential filename mistakes
Returns
good_results : List of str
Paths found in the given directory which match the filename pattern
"""
good_results = glob.glob(path.join(directory, _format.PATTERN))
if check_misnomers:
# Check if there are any incorrectly named files that may be overlooked
bad_results = []
for bad_pattern in _format.BAD_PATTERNS:
bad_results.extend(glob.glob(path.join(directory, bad_pattern)))
if len(bad_results) > 0:
warnings.warn(
"({}) Some files in {} may be incorrectly named: " \
"[\n {}\n]".format(
"get_all_annotations_in_directory",
directory,
",\n ".join(bad_results)
)
)
return good_results
def levenshteinDistanceDP(token1, token2):
"""
Efficiently calculates the Levenshtein distance (edit distance) between two
strings. Useful for determining if a column name has been misspelled.
The cost of insertions, deletions, and substitutions are all set to 1.
Parameters
token1 : str
first token
token2 : str
second token
Returns
distance : int
the number of single-character edits required to turn token1 into token2
"""
distances = np.zeros((len(token1) + 1, len(token2) + 1))
for t1 in range(len(token1) + 1):
distances[t1][0] = t1
for t2 in range(len(token2) + 1):
distances[0][t2] = t2
a, b, c = 0, 0, 0
for t1 in range(1, len(token1) + 1):
for t2 in range(1, len(token2) + 1):
if (token1[t1-1] == token2[t2-1]):
distances[t1][t2] = distances[t1 - 1][t2 - 1]
else:
a = distances[t1][t2 - 1]
b = distances[t1 - 1][t2]
c = distances[t1 - 1][t2 - 1]
if (a <= b and a <= c):
distances[t1][t2] = a + 1
elif (b <= a and b <= c):
distances[t1][t2] = b + 1
else:
distances[t1][t2] = c + 1
return distances[len(token1)][len(token2)]
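# Quick sanity check (illustrative, not part of the original module): a required column
# name and a one-character misspelling are distance 1 apart, which is the threshold
# clean_annotations uses below to treat a header as a match.
def _levenshtein_sanity_check():
    assert levenshteinDistanceDP("Begin Time (s)", "Begin time (s)") == 1
    assert levenshteinDistanceDP("Species", "Species") == 0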
def _print_n_rejected(n_rejected, reason):
if n_rejected > 0:
print("Rejecting {} annotation(s) for {}".format(
n_rejected,
reason
))
def clean_annotations(annotations, verbose=False):
"""
Cleans a single DataFrame of annotations by identifying invalid annotations
and separating them from the valid annotations.
Additionally checks for other formatting issues such as misnamed columns.
Parameters
annotations : DataFrame
a set of annotations from a single recording
verbose : bool, optional (default: False)
flag to control whether debug information is printed
Returns
valid_annotations : DataFrame
the annotations that passed every filter
invalid_annotations : DataFrame
the annotations that failed at least one filter
"""
annotations = annotations.copy()
original_size = len(annotations)
# Check for misnamed columns
column_map = {}
for req_col in _format.REQUIRED_COLS:
# For each required column, find the column with a dist <= 1.
matches = []
for col in annotations.columns:
dist = levenshteinDistanceDP(col, req_col)
if dist <= 1:
matches.append(col)
if dist > 0:
column_map[col] = req_col
if len(matches) > 1:
warnings.warn(
"({}) Required Column '{}' matches multiple " \
"columns: [{}]".format(
"clean_annotations",
req_col,
", ".join(matches)
)
)
# This required column is ambiguous. Stop and reject all.
# TODO: Write logic to combine ambiguous columns automatically
return pd.DataFrame(columns=annotations.columns), annotations
if len(matches) == 0:
warnings.warn(
"({}) Required Column '{}' does not match any existing " \
"columns: [{}]".format(
"clean_annotations",
req_col,
", ".join(list(annotations.columns))
)
)
# This required column was not found. Stop and reject all.
            return pd.DataFrame(columns=annotations.columns), annotations
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script is used for measuring some coefficients of the molecules."""
import numpy as np
import pandas as pd
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem, Crippen, Descriptors as desc, Lipinski, MolSurf
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler as Scaler
from tqdm import tqdm
from drugex.core import util
def converage(fnames):
"""This method parsed log files of reinforcement learning process
Arguments:
fnames (list): List of log file paths
Returns:
output (DataFrame): Table contains two columns, one is the index of
file name in the list, the other is the average reward of batch SMILES
given by the environment.
"""
xy = []
for i, fname in enumerate(fnames):
lines = open(fname).readlines()
for line in lines:
if not line.startswith('Epoch'): continue
# The average reward at current step in ith log file
score = float(line.split(' ')[3])
xy.append([i, score])
output = pd.DataFrame(xy, columns=['LABEL', 'SCORE'])
return output
def training_process(fname):
"""This method parsed log files of RNN training process
Arguments:
fname (str): log file paths of RNN training
Returns:
valid (ndarray): The validation rate at each epoch during the training process.
loss (ndarray): The value of loss function at each epoch during the training process.
"""
log = open(fname)
valid = []
loss = []
for line in log:
if not line.startswith('Epoch:'): continue
data = line.split(' ')
valid.append(float(data[-1]))
loss.append(float(data[-3]))
valid, loss = np.array(valid), np.array(loss)
return valid, loss
def logP_mw(fnames, is_active=False):
""" logP and molecular weight calculation for logP ~ MW chemical space visualization
Arguments:
fnames (list): List of file paths that contains CANONICAL_SMILES (, LOGP and MWT
if it contains the logP and molecular weight for each molecule).
is_active (bool, optional): selecting only active ligands (True) or all of the molecules (False)
if it is true, the molecule with PCHEMBL_VALUE >= 6.5 or SCORE > 0.5 will be selected.
(Default: False)
Returns:
df (DataFrame): The table contains three columns;
molecular weight, logP and index of file name in the fnames
"""
df = pd.DataFrame()
for i, fname in enumerate(fnames):
print(fname)
sub = pd.read_table(fname)
sub['LABEL'] = i
if 'PCHEMBL_VALUE' in sub.columns:
sub = sub[sub.PCHEMBL_VALUE >= (6.5 if is_active else 0)]
elif 'SCORE' in sub.columns:
sub = sub[sub.SCORE > (0.5 if is_active else 0)]
sub = sub.drop_duplicates(subset='CANONICAL_SMILES')
if not ('LOGP' in sub.columns and 'MWT' in sub.columns):
            # If the table does not contain LOGP and MWT
# it will calculate these coefficients with RDKit.
logp, mwt = [], []
for i, row in sub.iterrows():
try:
mol = Chem.MolFromSmiles(row.CANONICAL_SMILES)
x, y = desc.MolWt(mol), Crippen.MolLogP(mol)
logp.append(y)
mwt.append(x)
except:
sub = sub.drop(i)
print(row.CANONICAL_SMILES)
sub['LOGP'], sub['MWT'] = logp, mwt
df = df.append(sub[['MWT', 'LOGP', 'LABEL']])
return df
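# Single-molecule check of the two descriptors computed above (the SMILES string is an
# arbitrary example, not taken from the original datasets):
def _logp_mw_single(smiles='CC(=O)Oc1ccccc1C(=O)O'):
    mol = Chem.MolFromSmiles(smiles)
    return desc.MolWt(mol), Crippen.MolLogP(mol)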
def dimension(fnames, fp='ECFP', is_active=False, alg='PCA', maximum=int(1e5)):
""" Dimension reduction analysis it contains two algorithms: PCA and t-SNE,
and two different descriptors: ECFP6 and PhysChem
Arguments:
fnames (list): List of file paths that contains CANONICAL_SMILES and SCORE (or PCHEMBL_VALUE).
fp (str, optional): The descriptors for each molecule, either ECFP6 or PhysChem (Default: 'ECFP')
is_active (bool, optional): selecting only active ligands (True) or all of the molecules (False)
if it is true, the molecule with PCHEMBL_VALUE >= 6.5 or SCORE > 0.5 will be selected.
(Default: False)
alg (str, optional): Dimension reduction algorithms, either 'PCA' or 't-SNE' (Default: 'PCA')
        maximum (int, optional): Dimension reduction on a large dataset is extremely
            time- and resource-consuming; if the size of the dataset in one file is larger
            than this threshold, at most this many samples will be randomly selected (Default: 100,000)
Returns:
df (DataFrame): the table contains two columns, component 1 and 2.
"""
df = pd.DataFrame()
for i, fname in enumerate(fnames):
sub = pd.read_table(fname)
if maximum is not None and len(sub) > maximum:
sub = sub.sample(maximum)
if 'PCHEMBL_VALUE' in sub.columns:
sub = sub[sub.PCHEMBL_VALUE >= (6.5 if is_active else 0)]
sub['SCORE'] = sub.PCHEMBL_VALUE
elif 'SCORE' in sub.columns:
sub = sub[sub.SCORE > (0.5 if is_active else 0)]
sub = sub.drop_duplicates(subset='CANONICAL_SMILES')
print(len(sub))
sub['LABEL'] = i
df = df.append(sub)
fp_alg = util.Environment.ECFP_from_SMILES if fp == 'ECFP' else PhyChem
fps = fp_alg(df.CANONICAL_SMILES)
pca = PCA(n_components=2) if alg == 'PCA' else TSNE(n_components=2)
xy = pca.fit_transform(fps)
df['X'], df['Y'] = xy[:, 0], xy[:, 1]
if alg == 'PCA':
ratio = pca.explained_variance_ratio_[:2]
return df, ratio
else:
return df
def substructure(fname, sub, is_active=False):
""" Calculating the percentage of molecules that contains the given substructure
in the given dataset.
Arguments:
sub (str): molecular substructure with SMARTS representation.
is_active (bool, optional): selecting only active ligands (True) or all of the molecules (False)
if it is true, the molecule with PCHEMBL_VALUE >= 6.5 or SCORE > 0.5 will be selected.
(Default: False)
Returns:
percentage (float): percentage of molecules (xx.xx%) that contains the given substructure
"""
sub = Chem.MolFromSmarts(sub)
df = pd.read_table(fname).drop_duplicates(subset='CANONICAL_SMILES')
if 'SCORE' in df.columns:
df = df[df.SCORE > (0.5 if is_active else 0.0)]
elif 'PCHEMBL_VALUE' in df.columns:
df = df[df.PCHEMBL_VALUE >= (6.5 if is_active else 0.0)]
num = 0
for smile in df.CANONICAL_SMILES:
mol = Chem.MolFromSmiles(smile)
if mol.HasSubstructMatch(sub):
num += 1
# print(smile)
percentage = num * 100 / len(df)
return percentage
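# Illustrative call (the file name and SMARTS pattern are placeholders): the percentage
# of active generated molecules that contain a benzene ring.
def _substructure_example():
    return substructure('generated_molecules.txt', 'c1ccccc1', is_active=True)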
def diversity(fake_path, real_path=None, is_active=False):
""" Molecular diversity measurement based on Tanimoto-distance on ECFP6 fingerprints,
including, intra-diversity and inter-diversity.
Arguments:
fake_path (str): the file path of molecules that need to measuring diversity
real_path (str, optional): the file path of molecules as the reference, if it
is provided, the inter-diversity will be calculated; otherwise, the intra-diversity
will be calculated.
is_active (bool, optional): selecting only active ligands (True) or all of the molecules (False)
if it is true, the molecule with PCHEMBL_VALUE >= 6.5 or SCORE > 0.5 will be selected.
(Default: False)
Returns:
df (DataFrame): the table that contains columns of CANONICAL_SMILES
and diversity value for each molecules
"""
fake = pd.read_table(fake_path)
fake = fake[fake.SCORE > (0.5 if is_active else 0)]
fake = fake.drop_duplicates(subset='CANONICAL_SMILES')
fake_fps, real_fps = [], []
for i, row in fake.iterrows():
mol = Chem.MolFromSmiles(row.CANONICAL_SMILES)
fake_fps.append(AllChem.GetMorganFingerprint(mol, 3))
if real_path:
real = pd.read_table(real_path)
real = real[real.PCHEMBL_VALUE >= (6.5 if is_active else 0)]
for i, row in real.iterrows():
mol = Chem.MolFromSmiles(row.CANONICAL_SMILES)
real_fps.append(AllChem.GetMorganFingerprint(mol, 3))
else:
real_fps = fake_fps
method = np.min if real_path else np.mean
dist = 1 - np.array([method(DataStructs.BulkTanimotoSimilarity(f, real_fps)) for f in fake_fps])
fake['DIST'] = dist
return fake
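# Sketch of the pairwise distance underlying the diversity measure above
# (the SMILES strings are arbitrary examples):
def _tanimoto_distance(smiles_a='CCO', smiles_b='CCN'):
    fp_a = AllChem.GetMorganFingerprint(Chem.MolFromSmiles(smiles_a), 3)
    fp_b = AllChem.GetMorganFingerprint(Chem.MolFromSmiles(smiles_b), 3)
    return 1 - DataStructs.TanimotoSimilarity(fp_a, fp_b)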
def properties(fnames, labels, is_active=False):
""" Five structural properties calculation for each molecule in each given file.
These properties contains No. of Hydrogen Bond Acceptor/Donor, Rotatable Bond,
Aliphatic Ring, Aromatic Ring and Heterocycle.
Arguments:
fnames (list): the file path of molecules.
labels (list): the label for each file in the fnames.
is_active (bool, optional): selecting only active ligands (True) or all of the molecules (False)
if it is true, the molecule with PCHEMBL_VALUE >= 6.5 or SCORE > 0.5 will be selected.
(Default: False)
Returns:
df (DataFrame): the table contains three columns; 'Set' is the label
of fname the molecule belongs to, 'Property' is the name of one
of five properties, 'Number' is the property value.
"""
props = []
for i, fname in enumerate(fnames):
        df = pd.read_table(fname)
import pandas as pd
class TripleBarrier:
def __init__(self, price, vol_span=50, barrier_horizon=5, factors=None, label=0):
"""
Labels the Data with the Triple Barrier Method
:param price: closing price
        :param vol_span: look back to determine volatility increment threshold
        :param barrier_horizon: represents vertical length (days) for barrier
        :param factors: represents scalar for barrier height
        :param label: 0 represents label for classification [-1, 0, 1], 1 represents label for regression -1 <= x <= 1
"""
self.label = label
if factors is None:
factors = [2, 2]
daily_vol = self.get_daily_vol(prices=price, lookback=vol_span)
vertical_barriers = self.add_vertical_barrier(
close=price, num_days=barrier_horizon
)
triple_barrier_events = self.get_events(
close=price,
factor=factors,
target=daily_vol,
vertical_barrier=vertical_barriers,
)
self.labels = self.get_labels(triple_barrier_events, price)
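    # Usage sketch (illustrative; assumes `close` is a pandas Series of closing prices
    # indexed by timestamp; the variable names are not from the original code):
    #   tb = TripleBarrier(close, vol_span=50, barrier_horizon=5, factors=[2, 2], label=0)
    #   labels = tb.labels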
@staticmethod
def get_daily_vol(prices, lookback=50):
"""
Daily Volatility Estimates
Computes the daily volatility at intraday estimation points, applying a span of lookback days to an
exponentially weighted moving standard deviation.
This function is used to compute dynamic thresholds for profit taking and stop loss limits
"""
# find the timestamps at [t-1]
        df = prices.index.searchsorted(prices.index - pd.Timedelta(days=1))
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
import random
from datetime import datetime, timedelta
import time
import re
import requests
from unit import *
# from rule import *
base_path_1 = "../dataset/"
base_path_2 = "../dataset/tmp/"
base_path_3 = "../output/"
def ensemble(file1, file2, file3, a):
df1 = pd.read_csv(file1, sep=',')
df1.index = df1['test_id']
df1 = df1.drop(["test_id"], axis=1)
df2 = pd.read_csv(file2, sep=',')
df2.index = df2['test_id']
df2 = df2.drop(["test_id"], axis=1)
# df2 = df2.sort_index()
df3 = df1 * a + df2 * (1 - a)
df3.to_csv(file3, index=True, sep=',')
def ensemble_three_file(file1, file2, file3, file4, a, b, c):
df1 = pd.read_csv(file1, sep=',')
df1.index = df1['test_id']
df1 = df1.drop(["test_id"], axis=1)
df2 = pd.read_csv(file2, sep=',')
df2.index = df2['test_id']
df2 = df2.drop(["test_id"], axis=1)
df3 = pd.read_csv(file3, sep=',')
df3.index = df3['test_id']
df3 = df3.drop(["test_id"], axis=1)
df4 = df1 * a + df2 * b + df3 * c
df4.to_csv(file4, index=True, sep=',')
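# Usage sketch (the file names are placeholders, not from the original pipeline): blend
# two submissions 70/30, or three with explicit weights that sum to 1.
def _ensemble_usage_example():
    ensemble('model_a.csv', 'model_b.csv', 'blend_ab.csv', a=0.7)
    ensemble_three_file('model_a.csv', 'model_b.csv', 'model_c.csv',
                        'blend_abc.csv', a=0.5, b=0.3, c=0.2)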
#from model import test
'''
type_="0301-0531_0801-0410" , feature_first=True
type_="0301-0531_0801-0410" , feature_first=False
type_="2017_0101-2018_0410_less" , feature_first=False
ans_file = base_path_3 + "test/" + end_day + "-xgboost_weather" + type + "_" + str(feature_first) + ".csv"
'''
def cal_ensemble_best_xgboost():
score = np.zeros(6)
num = 0.0
for j in range(11, 28):
end_day = "2018-04-" + str(j)
file1 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
True) + ".csv"
file2 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file3 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file6 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file4 = base_path_3 + end_day + "xgboost_mean_ensemble.csv"
file5 = base_path_3 + end_day + "xgboost_median_ensemble.csv"
ensemble_median(file1, file2, file3, file6, file4, file5)
# ensemble_mean_3(file1, file2, file3, file4, file5)
score1, _, _ = test(end_day, file1)
score2, _, _ = test(end_day, file2)
score3, _, _ = test(end_day, file3)
score6, _, _ = test(end_day, file6)
score4, _, _ = test(end_day, file4)
score5, _, _ = test(end_day, file5)
score_now = np.array([score1, score2, score3, score6, score4, score5])
score += score_now
print ("score: ", score_now)
num += 1.0
avg_score = score / num
print ("avg_score: ", avg_score)
# a = 0.3
# total_score = 0.0
# num = 0.0
# for j in range(11, 28):
# end_day = "2018-04-" + str(j)
# file1 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
# True) + ".csv"
# file2 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file3 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# file4 = base_path_3 + "xgboost_ensemble.csv"
# ensemble(file2, file3, file4, a)
# score, _, _ = test(end_day, file4)
# print "score: ", score
# total_score += score
# num += 1.0
# avg_score = total_score / num
# print "avg_score: ", avg_score
# best_params = [0.0, 0.0, 0.0]
# best_score = 2.0
# for i in range(11):
# a = i * 1.0 / 10.0
# for k in range(11):
# b = k * 1.0 / 10.0
# if a + b > 1.0:
# continue
# c = 1 - a - b
# total_score = 0.0
# num = 0.0
# for j in range(11, 28):
# end_day = "2018-04-" + str(j)
# file1 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
# True) + ".csv"
# file2 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file3 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# file4 = base_path_3 + "xgboost_ensemble.csv"
# ensemble_three_file(file1, file2, file3, file4=file4, a=a, b=b, c=c)
# score, _, _ = test(end_day, file4)
# print "score: ", score
# total_score += score
# num += 1.0
# avg_score = total_score / num
# print "avg_score: ", avg_score
# if avg_score < best_score:
# best_params = [a, b, c]
# best_score = avg_score
# print best_params
'''
get_test(type="0301-0531_0801-0410", feature_first=False)
get_test(type="0301-0531_0801-0410", feature_first=True)
get_test(type="2017_0101-2018_0410", feature_first=True)
get_test(type="2017_0101-2018_0410_less", feature_first=False)
ans_file = base_path_3 + "test/" + end_day + "-ext_with_weather_" + type + "_" + str(feature_first) + ".csv"
'''
def cal_ensemble_best_ext_with_weather():
# for i in range(11):
a = 1.0
b = 0.0
c = 0.0
total_score = 0.0
num = 0.0
for j in range(11, 28):
end_day = "2018-04-" + str(j)
file1 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file2 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
True) + ".csv"
file3 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file4 = base_path_3 + "ext_with_weather_ensemble.csv"
ensemble_three_file(file1, file2, file3, file4=file4, a=a, b=b, c=c)
score, _, _ = test(end_day, file4)
print ("score: ", score)
total_score += score
num += 1.0
avg_score = total_score / num
print ("avg_score: ", avg_score)
# best_params = [0.0, 0.0, 0.0]
# best_score = 2.0
# for i in range(11):
# a = i * 1.0 / 10.0
# for k in range(11):
# b = k*1.0/10.0
# if a + b > 1.0:
# continue
# c = 1.0 - a -b
# total_score = 0.0
# num = 0.0
# for j in range(11, 28):
# end_day = "2018-04-" + str(j)
# file1 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(False) + ".csv"
# file2 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(True) + ".csv"
# file3 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "2017_0101-2018_0410_less" + "_" + str(False) + ".csv"
# file4 = base_path_3 + "ext_with_weather_ensemble.csv"
# ensemble_three_file(file1, file2, file3, file4=file4, a=a, b=b, c=c)
# score, _, _ = test(end_day, file4)
# print "score: ", score
# total_score += score
# num += 1.0
# avg_score = total_score / num
# print "avg_score: ", avg_score
# if avg_score < best_score:
# best_params = [a, b, b]
# best_score = avg_score
# print best_params
'''
get_test(type="0301-0531_0801-0410")
get_test(type="2017_0101-2018_0410_less")
get_test(type="2017_0101-2018_0410_test")
ans_file = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + type + ".csv"
'''
def cal_ensemble_best_ext_with_weather_three_metric():
# for i in range(11):
a = 1.0
b = 0.0
c = 0.0
total_score = 0.0
num = 0.0
for j in range(11, 28):
end_day = "2018-04-" + str(j)
file1 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "0301-0531_0801-0410" + ".csv"
file2 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_less" + ".csv"
file3 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_test" + ".csv"
file4 = base_path_3 + "ext_with_weather_three_metric_ensemble.csv"
ensemble_three_file(file1, file2, file3, file4=file4, a=a, b=b, c=c)
score, _, _ = test(end_day, file4)
print ("score: ", score)
total_score += score
num += 1.0
avg_score = total_score / num
print ("avg_score: ", avg_score)
# best_params = [0.0, 0.0, 0.0]
# best_score = 2.0
# for i in range(11):
# a = i * 1.0 / 10.0
# for k in range(11):
# b = k*1.0/10.0
# if a + b > 1.0:
# continue
# c = 1.0 - a -b
# total_score = 0.0
# num = 0.0
# for j in range(11, 28):
# end_day = "2018-04-" + str(j)
# file1 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "0301-0531_0801-0410" + ".csv"
# file2 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_less" + ".csv"
# file3 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_test" + ".csv"
# file4 = base_path_3 + "ext_with_weather_three_metric_ensemble.csv"
# ensemble_three_file(file1, file2, file3, file4=file4, a=a, b=b, c=c)
# score, _, _ = test(end_day, file4)
# print "score: ", score
# total_score += score
# num += 1.0
# avg_score = total_score / num
# print "avg_score: ", avg_score
# if avg_score < best_score:
# best_params = [a, b, b]
# best_score = avg_score
# print best_params
def cal_ensemble_all():
a = 0.6
total_score = 0.0
total_score1 = 0.0
total_score2 = 0.0
num = 0.0
for j in range(11, 28):
end_day = "2018-04-" + str(j)
file1 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file2 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
True) + ".csv"
file3 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file4 = base_path_3 + "ext_with_weather_ensemble.csv"
# ensemble_three_file(file1, file2, file3, file4=file4, a=1.0, b=0.0, c=0.0)
file5 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file6 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file7 = base_path_3 + "xgboost_ensemble.csv"
ensemble(file5, file6, file7, a=0.3)
file8 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "0301-0531_0801-0410" + ".csv"
file9 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_less" + ".csv"
file10 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_test" + ".csv"
file11 = base_path_3 + "ext_with_weather_three_metric_ensemble.csv"
# ensemble_three_file(file8, file9, file10, file4=file11, a=1.0, b=0.0, c=0.0)
file12 = base_path_3 + "ensemble_all_1.csv"
ensemble(file1, file7, file12, a=0.6)
score, _, _ = test(end_day, file12)
print ("score_1: ", score)
total_score1 += score
print ("after rule:", score)
file5 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
True) + ".csv"
file6 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file7 = base_path_3 + "xgboost_ensemble.csv"
ensemble(file5, file6, file7, a=0.2)
file13 = base_path_3 + "ensemble_all_2.csv"
ensemble(file1, file7, file13, a=0.7)
score, _, _ = test(end_day, file13)
total_score2 += score
print ("score_2: ", score)
file14 = base_path_3 + end_day + "_ensemble_zhoujie.csv"
ensemble(file12, file13, file14, a=0.6)
score, _, _ = test(end_day, file14)
total_score += score
print ("score: ", score)
num += 1.0
avg_score1 = total_score1 / num
avg_score2 = total_score2 / num
avg_score = total_score / num
print ("avg_score: ", avg_score1, avg_score2, avg_score)
# best_params = [0.0]
# best_score = 2.0
# for i in range(11):
# a = i * 1.0 / 10.0
# total_score = 0.0
# num = 0.0
# for j in range(11, 28):
# end_day = "2018-04-" + str(j)
# file1 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# file2 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
# True) + ".csv"
# file3 = base_path_3 + "test/" + end_day + "-ext_with_weather_" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file4 = base_path_3 + "ext_with_weather_ensemble.csv"
# # ensemble_three_file(file1, file2, file3, file4=file4, a=1.0, b=0.0, c=0.0)
#
# # file5 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
# # True) + ".csv"
# file5 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file6 = base_path_3 + "test/" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# file7 = base_path_3 + "xgboost_ensemble.csv"
# ensemble(file5, file6, file7, a=0.3)
#
# file8 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "0301-0531_0801-0410" + ".csv"
# file9 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_less" + ".csv"
# file10 = base_path_3 + "test/" + end_day + "_ext_with_weather_three_metric_" + "2017_0101-2018_0410_test" + ".csv"
# file11 = base_path_3 + "ext_with_weather_three_metric_ensemble.csv"
# # ensemble_three_file(file8, file9, file10, file4=file11, a=1.0, b=0.0, c=0.0)
#
# file12 = base_path_3 + "ensemble_all.csv"
# # ensemble_three_file(file4, file4, file11, file4=file12, a=a, b=b, c=c)
# # ensemble_three_file(file1, file7, file8, file4=file12, a=a, b=b, c=c)
# ensemble(file1, file7, file12, a=a)
# score, _, _ = test(end_day, file12)
# print "score: ", score
# total_score += score
# num += 1.0
# avg_score = total_score / num
# print "avg_score: ", avg_score
# if avg_score < best_score:
# best_params = [a]
# best_score = avg_score
# print best_params
def get_ans_1(end_day, caiyun=False):
if caiyun == False:
file1 = base_path_3 + "" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file5 = base_path_3 + "" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
True) + ".csv"
file6 = base_path_3 + "" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file7 = base_path_3 + "xgboost_ensemble.csv"
else:
file1 = base_path_3 + "" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file5 = base_path_3 + "" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
True) + "_caiyun.csv"
file6 = base_path_3 + "" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file7 = base_path_3 + "xgboost_ensemble_caiyun.csv"
ensemble(file5, file6, file7, a=0.2)
if caiyun == False:
file12 = base_path_3 + end_day + "_ensemble_all_1.csv"
else:
file12 = base_path_3 + end_day + "_ensemble_all_1_caiyun.csv"
ensemble(file1, file7, file12, a=0.7)
def get_ans_2(end_day, caiyun=False):
if caiyun == False:
file1 = base_path_3 + "" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file5 = base_path_3 + "" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file6 = base_path_3 + "" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file7 = base_path_3 + end_day + "_xgboost_ensemble.csv"
else:
file1 = base_path_3 + "" + end_day + "-ext_with_weather_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file5 = base_path_3 + "" + end_day + "-xgboost_weather" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file6 = base_path_3 + "" + end_day + "-xgboost_weather" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file7 = base_path_3 + end_day + "_xgboost_ensemble_caiyun.csv"
ensemble(file5, file6, file7, a=0.3)
if caiyun == False:
file12 = base_path_3 + end_day + "_ensemble_all_2.csv"
else:
file12 = base_path_3 + end_day + "_ensemble_all_2_caiyun.csv"
ensemble(file1, file7, file12, a=0.6)
def get_ans(end_day="2018-05-08", caiyun=False):
# get_ans_1(end_day, caiyun=caiyun)
# get_ans_2(end_day, caiyun=caiyun)
# if caiyun == False:
# file13 = base_path_3 + end_day + "_ensemble_all_1.csv"
# file14 = base_path_3 + end_day + "_ensemble_all_2.csv"
# file15 = base_path_3 + end_day + "_ensemble_all_zhoujie.csv"
# else:
# file13 = base_path_3 + end_day + "_ensemble_all_1_caiyun.csv"
# file14 = base_path_3 + end_day + "_ensemble_all_2_caiyun.csv"
# file15 = base_path_3 + end_day + "_ensemble_all_zhoujie_caiyun.csv"
# ensemble(file13, file14, file15, a=0.4)
if caiyun == False:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file5 = base_path_3 + end_day + "lightgbm_mean_ensemble.csv"
file6 = base_path_3 + end_day + "lightgbm_median_ensemble.csv"
else:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file5 = base_path_3 + end_day + "lightgbm_mean_ensemble_caiyun.csv"
file6 = base_path_3 + end_day + "lightgbm_median_ensemble_caiyun.csv"
ensemble_median(file1, file2, file3, file4, file5, file6)
if caiyun == False:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + ".csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + ".csv"
file5 = base_path_3 + end_day + "lightgbm_mean_ensemble_0429.csv"
file6 = base_path_3 + end_day + "lightgbm_median_ensemble_0429.csv"
else:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + "_caiyun.csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + "_caiyun.csv"
file5 = base_path_3 + end_day + "lightgbm_mean_ensemble_0429_caiyun.csv"
file6 = base_path_3 + end_day + "lightgbm_median_ensemble_0429_caiyun.csv"
ensemble_median(file1, file2, file3, file4, file5, file6)
if caiyun == False:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + ".csv"
file5 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file6 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file7 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file8 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + ".csv"
file9 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + ".csv"
file10 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + ".csv"
file11 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + ".csv"
file12 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + ".csv"
file13 = base_path_3 + end_day + "lightgbm_mean_ensemble.csv"
file14 = base_path_3 + end_day + "lightgbm_median_ensemble.csv"
else:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "0301-0531_0801-0410" + "_" + str(
False) + "_caiyun.csv"
file5 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file6 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file7 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file8 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "2017_0101-2018_0410_less" + "_" + str(
False) + "_caiyun.csv"
file9 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + "_caiyun.csv"
file10 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + "_caiyun.csv"
file11 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + "_caiyun.csv"
file12 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "2017_0101-2018_0429_less" + "_" + str(
False) + "_caiyun.csv"
file13 = base_path_3 + end_day + "lightgbm_mean_ensemble_caiyun.csv"
file14 = base_path_3 + end_day + "lightgbm_median_ensemble_caiyun.csv"
ensemble_medians([file1, file2, file5, file6], file13, file14)
if caiyun == False:
file24 = base_path_3 + "" + end_day + "_weight_mean_0410.csv"
file25 = base_path_3 + "" + end_day + "_weight_mean_0429.csv"
file26 = base_path_3 + "" + end_day + "_weight_mean_0410_0429.csv"
else:
file24 = base_path_3 + "" + end_day + "_weight_mean_0410_caiyun.csv"
file25 = base_path_3 + "" + end_day + "_weight_mean_0429_caiyun.csv"
file26 = base_path_3 + "" + end_day + "_weight_mean_0410_0429_caiyun.csv"
ensemble_medians_with_weight([file5, file7, file8], [0.6, 0.4, 0.0], file24)
ensemble_medians_with_weight([file9, file11, file12], [0.5, 0.3, 0.2], file25)
ensemble_medians_with_weight([file13, file14], [0.4, 0.6], file26)
if caiyun == False:
file27 = base_path_3 + "" + end_day + "_mean_0410.csv"
file28 = base_path_3 + "" + end_day + "_median_0410.csv"
else:
file27 = base_path_3 + "" + end_day + "_mean_0410_caiyun.csv"
file28 = base_path_3 + "" + end_day + "_median_0410_caiyun.csv"
ensemble_medians([file5, file7, file8], file27, file28)
if caiyun == False:
file15 = base_path_3 + end_day + "lightgbm_mean_ensemble_6.csv"
file16 = base_path_3 + end_day + "lightgbm_median_ensemble_6.csv"
else:
file15 = base_path_3 + end_day + "lightgbm_mean_ensemble_6_caiyun.csv"
file16 = base_path_3 + end_day + "lightgbm_median_ensemble_6_caiyun.csv"
ensemble_medians([file1, file2, file3, file4, file5, file6, file7, file8], file15, file16)
if caiyun == False:
file17 = base_path_3 + end_day + "lightgbm_mean_ensemble_29_6.csv"
file18 = base_path_3 + end_day + "lightgbm_median_ensemble_29_6.csv"
else:
file17 = base_path_3 + end_day + "lightgbm_mean_ensemble_29_6_caiyun.csv"
file18 = base_path_3 + end_day + "lightgbm_median_ensemble_29_6_caiyun.csv"
ensemble_medians([file1, file2, file3, file4, file9, file10, file11, file12], file17, file18)
if caiyun == False:
file19 = base_path_3 + end_day + "lightgbm_ensemble_mean_4.csv"
file20 = base_path_3 + end_day + "lightgbm_ensemble_median_4.csv"
else:
file19 = base_path_3 + end_day + "lightgbm_ensemble_mean_4_caiyun.csv"
file20 = base_path_3 + end_day + "lightgbm_ensemble_median_4_caiyun.csv"
ensemble_medians([file5, file7, file9, file11], file19, file20)
if caiyun == False:
file21 = base_path_3 + end_day + "lightgbm_ensemble_mean_2.csv"
file22 = base_path_3 + end_day + "lightgbm_ensemble_median_2.csv"
else:
file21 = base_path_3 + end_day + "lightgbm_ensemble_mean_2_caiyun.csv"
file22 = base_path_3 + end_day + "lightgbm_ensemble_median_2_caiyun.csv"
ensemble_medians([file9, file11], file21, file22)
if caiyun == False:
file23 = base_path_3 + end_day + "lightgbm_ensemble_mean_weight.csv"
else:
file23 = base_path_3 + end_day + "lightgbm_ensemble_mean_weight_caiyun.csv"
ensemble_medians_with_weight([file12, file8], [0.3, 0.7], file23)
if caiyun == False:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + ".csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + ".csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + ".csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + ".csv"
file24 = base_path_3 + end_day + "lightgbm_ensemble_mean__clean_4.csv"
file25 = base_path_3 + end_day + "lightgbm_ensemble_median_clean_4.csv"
else:
file1 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "1" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + "_caiyun.csv"
file2 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "2" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + "_caiyun.csv"
file3 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "3" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + "_caiyun.csv"
file4 = base_path_3 + "" + end_day + "-lightgbm_weather_params_" + "4" + "_" + "2017_0101-2018_0429_clean" + "_" + str(
False) + "_caiyun.csv"
file24 = base_path_3 + end_day + "lightgbm_ensemble_mean__clean_4_caiyun.csv"
file25 = base_path_3 + end_day + "lightgbm_ensemble_median_clean_4_caiyun.csv"
# ensemble_medians([file1, file2, file3, file4], file24, file25)
if caiyun == False:
file26 = base_path_3 + end_day + "lightgbm_ensemble_mean_clean_2.csv"
file27 = base_path_3 + end_day + "lightgbm_ensemble_median_clean_2.csv"
else:
file26 = base_path_3 + end_day + "lightgbm_ensemble_mean_clean_2_caiyun.csv"
file27 = base_path_3 + end_day + "lightgbm_ensemble_median_clean_2_caiyun.csv"
# ensemble_medians([file1, file3], file26, file27)
# file19 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "1" + "_" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# file20 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "2" + "_" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# file21 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "3" + "_" + "0301-0531_0801-0410" + "_" + str(
# False) + ".csv"
# # file22 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "4" + "_" + "0301-0531_0801-0410" + "_" + str(
# # False) + ".csv"
# file23 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "1" + "_" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file24 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "2" + "_" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file25 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "3" + "_" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file26 = base_path_3 + "" + end_day + "-lightgbm_weather_log_params_" + "4" + "_" + "2017_0101-2018_0410_less" + "_" + str(
# False) + ".csv"
# file27 = base_path_3 + "" + end_day + "-lightgbm_log_mean_ensemble.csv"
# file28 = base_path_3 + "" + end_day + "-lightgbm_log_median_ensemble.csv"
# ensemble_medians(
# [file1, file2, file3, file4, file5, file6, file7, file8, file19, file20, file21, file23, file24, file25,
# file26], file27, file28)
def get_ans_latter(end_day="2018-05-11", caiyun=False):
get_ans_1(end_day, caiyun=caiyun)
get_ans_2(end_day, caiyun=caiyun)
if caiyun == False:
file13 = base_path_3 + end_day + "_ensemble_all_1.csv"
file14 = base_path_3 + end_day + "_ensemble_all_2.csv"
file15 = base_path_3 + end_day + "_ensemble_all_zhoujie.csv"
else:
file13 = base_path_3 + end_day + "_ensemble_all_1_caiyun.csv"
file14 = base_path_3 + end_day + "_ensemble_all_2_caiyun.csv"
file15 = base_path_3 + end_day + "_ensemble_all_zhoujie_caiyun.csv"
ensemble(file13, file14, file15, a=0.4)
def ensemble_mean():
data_base = "../output/"
df1 = pd.read_csv(data_base + 'friend/sub20180502_060127.csv')
df2 = pd.read_csv(data_base + '2018-05-01-ext_with_weather_0301-0531_0801-0410_False.csv')
df3 = pd.read_csv(data_base + '2018-05-01-xgboost_weather0301-0531_0801-0410_False.csv')
df4 = pd.read_csv(data_base + 'friend/res2018-05-01.csv')
df1.columns = ['test_id', 'PM2.5_df1', 'PM10_df1', 'O3_df1']
df2.columns = ['test_id', 'PM2.5_df2', 'PM10_df2', 'O3_df2']
df3.columns = ['test_id', 'PM2.5_df3', 'PM10_df3', 'O3_df3']
df4.columns = ['test_id', 'PM2.5_df4', 'PM10_df4', 'O3_df4']
df = df1
df = pd.merge(df, df2, on='test_id', how='left')
df = pd.merge(df, df3, on='test_id', how='left')
df = | pd.merge(df, df4, on='test_id', how='left') | pandas.merge |
"""Build models to compare featurizations.
Non-parametric:
1. ROCS TanimotoCombo
2. ROCS RefTversky
Parametric (use both Tanimoto and RefTversky versions):
3. ROCS shape + ROCS color
4. ROCS shape + color components
5. ROCS shape + color components + color atom overlaps
It would be interesting to see if the color atom overlaps track well with
RefTversky.
TODO: Multiple reference molecules for MUV.
"""
import collections
import cPickle as pickle
import gflags as flags
import gzip
from oe_utils.utils import h5_utils
import logging
import numpy as np
import pandas as pd
from sklearn import cross_validation
from sklearn import ensemble
from sklearn import grid_search
from sklearn import linear_model
from sklearn import metrics
from sklearn import preprocessing
from sklearn import svm
import sys
flags.DEFINE_string('rocs_actives', None,
'ROCS overlays for actives.')
flags.DEFINE_string('rocs_inactives', None,
'ROCS overlays for inactives.')
flags.DEFINE_string('color_components_actives', None,
'Color components for actives.')
flags.DEFINE_string('color_components_inactives', None,
'Color components for inactives.')
flags.DEFINE_string('color_atom_overlaps_actives', None,
'Color atom overlaps for actives.')
flags.DEFINE_string('color_atom_overlaps_inactives', None,
'Color atom overlaps for inactives.')
flags.DEFINE_string('dataset', None, 'Dataset.')
flags.DEFINE_string('dataset_file', None, 'Filename containing datasets.')
flags.DEFINE_string('model', 'logistic', 'Model type.')
flags.DEFINE_string('prefix', None, 'Prefix for output filenames.')
flags.DEFINE_boolean('skip_failures', True, 'Skip failed datasets.')
flags.DEFINE_boolean('cycle', False, 'If True, use cyclic validation.')
flags.DEFINE_integer('n_jobs', 1, 'Number of parallel jobs.')
flags.DEFINE_string('scaling', None, 'Type of feature scaling.')
FLAGS = flags.FLAGS
logging.getLogger().setLevel(logging.INFO)
# These datasets failed OMEGA expansion of their xtal ligand.
FAILED = [
'aa2ar',
'andr',
'aofb',
'bace1',
'braf',
'dyr',
'esr2',
'fkb1a',
'kif11',
'rxra',
'sahh',
'urok',
]
def load_datasets():
datasets = []
if FLAGS.dataset is not None:
datasets.append(FLAGS.dataset)
elif FLAGS.dataset_file is not None:
        with open(FLAGS.dataset_file) as f:
for line in f:
datasets.append(line.strip())
else:
raise ValueError('No dataset(s) specified.')
return datasets
def load_features_and_labels(dataset):
"""Load features from ROCS overlays.
Returns:
features: Dict mapping feature names to numpy arrays.
labels: Numpy array containing labels for molecules.
"""
features = {}
# ROCS.
rocs_actives = h5_utils.load(FLAGS.rocs_actives % dataset)
rocs_inactives = h5_utils.load(FLAGS.rocs_inactives % dataset)
num_actives = len(rocs_actives['shape_tanimoto'])
num_inactives = len(rocs_inactives['shape_tanimoto'])
labels = np.concatenate((np.ones(num_actives, dtype=int),
np.zeros(num_inactives, dtype=int)))
for feature in ['shape_tanimoto', 'color_tanimoto', 'shape_overlap',
'color_overlap', 'ref_self_shape', 'ref_self_color',
'fit_self_shape', 'fit_self_color', 'ref_titles',
'fit_titles']:
features[feature] = np.concatenate((rocs_actives[feature],
rocs_inactives[feature]))
features['combo_tanimoto'] = np.true_divide(
features['shape_tanimoto'] + features['color_tanimoto'], 2)
# Tversky.
features['shape_tversky'] = np.true_divide(
features['shape_overlap'],
0.95 * features['ref_self_shape'] +
0.05 * features['fit_self_shape'])
features['color_tversky'] = np.true_divide(
features['color_overlap'],
0.95 * features['ref_self_color'] +
0.05 * features['fit_self_color'])
features['combo_tversky'] = np.true_divide(
features['shape_tversky'] + features['color_tversky'], 2)
# Color components.
cc_actives = h5_utils.load(FLAGS.color_components_actives % dataset)
cc_inactives = h5_utils.load(FLAGS.color_components_inactives % dataset)
check_mol_titles(features, cc_actives, cc_inactives)
features['color_components'] = np.concatenate((
cc_actives['color_tanimoto'], cc_inactives['color_tanimoto'])).squeeze()
# Tversky.
cc_features = {}
for feature in ['color_overlap', 'ref_self_color', 'fit_self_color']:
cc_features[feature] = np.concatenate((cc_actives[feature],
cc_inactives[feature])).squeeze()
features['color_components_tversky'] = np.true_divide(
cc_features['color_overlap'],
0.95 * cc_features['ref_self_color'] +
0.05 * cc_features['fit_self_color'])
# If both molecules have no color atoms, color components will contain NaNs.
mask = np.logical_and(cc_features['ref_self_color'] == 0,
cc_features['fit_self_color'] == 0)
features['color_components_tversky'][mask] = 0
assert not np.count_nonzero(np.isnan(features['color_components_tversky']))
# Color atom overlaps.
cao_actives = h5_utils.load(FLAGS.color_atom_overlaps_actives % dataset)
cao_inactives = h5_utils.load(FLAGS.color_atom_overlaps_inactives % dataset)
check_mol_titles(features, cao_actives, cao_inactives)
features['color_atom_overlaps'] = np.concatenate((
cao_actives['color_atom_overlaps'],
cao_inactives['color_atom_overlaps'])).squeeze()
features['color_atom_overlaps_mask'] = np.concatenate((
cao_actives['mask'], cao_inactives['mask'])).squeeze()
# Remove titles from features.
for key in ['ref_titles', 'fit_titles']:
del features[key]
# Sanity checks.
for _, value in features.iteritems():
assert value.ndim in [2, 3]
assert value.shape[0] == labels.size
return features, labels
def check_mol_titles(features, actives, inactives):
"""Compare molecule titles to make sure molecule order is consistent."""
for key in ['ref_titles', 'fit_titles']:
titles = np.concatenate((actives[key], inactives[key]))
assert np.array_equal(features[key], titles)
logging.info('Molecule titles match')
def get_cv(labels):
"""Get a cross-validation iterator (NOT generator)."""
cv = cross_validation.StratifiedKFold(labels, n_folds=5, shuffle=True,
random_state=20160416)
return list(cv)
def get_model():
if FLAGS.model == 'logistic':
return linear_model.LogisticRegressionCV(class_weight='balanced',
scoring='roc_auc',
n_jobs=FLAGS.n_jobs,
max_iter=10000, verbose=1)
elif FLAGS.model == 'random_forest':
return ensemble.RandomForestClassifier(n_estimators=100,
n_jobs=FLAGS.n_jobs,
class_weight='balanced',
verbose=1)
elif FLAGS.model == 'svm':
return grid_search.GridSearchCV(
estimator=svm.SVC(kernel='rbf', gamma='auto',
class_weight='balanced'),
param_grid={'C': np.logspace(-4, 4, 10)}, scoring='roc_auc',
n_jobs=FLAGS.n_jobs, verbose=1)
else:
raise ValueError('Unrecognized model %s' % FLAGS.model)
def roc_enrichment(fpr, tpr, target_fpr):
"""Get ROC enrichment."""
assert fpr[0] == 0
assert fpr[-1] == 1
assert np.all(np.diff(fpr) >= 0)
return np.true_divide(np.interp(target_fpr, fpr, tpr), target_fpr)
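# Worked example (illustrative numbers only): with fpr = [0, 0.01, 1] and tpr = [0, 0.3, 1],
# roc_enrichment(fpr, tpr, 0.01) interpolates tpr at fpr = 0.01 (giving 0.3) and divides by 0.01,
# i.e. a 30x enrichment over random ranking at a 1% false positive rate.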
def get_cv_metrics(y_true, y_pred):
"""Get 5-fold mean AUC."""
assert len(y_true) == len(y_pred)
fold_metrics = collections.defaultdict(list)
for yt, yp in zip(y_true, y_pred):
assert len(yt) == len(yp)
fold_metrics['auc'].append(metrics.roc_auc_score(yt, yp))
fpr, tpr, _ = metrics.roc_curve(yt, yp)
for x in [0.005, 0.01, 0.02, 0.05, 0.1, 0.2]:
fold_metrics['e-%g' % x].append(roc_enrichment(fpr, tpr, x))
return fold_metrics
def add_rows(features, scores, rows, dataset, index=None):
"""Record per-fold and averaged cross-validation results."""
for fold in range(len(scores['auc'])):
row = {'dataset': dataset, 'features': features, 'fold': fold}
if index is not None:
row['index'] = index
for key, values in scores.iteritems():
row[key] = values[fold]
rows.append(row)
# Averages
row = {'dataset': dataset, 'features': features, 'fold': 'all'}
if index is not None:
row['index'] = index
for key, values in scores.iteritems():
row[key] = np.mean(values)
rows.append(row)
def scale_features(features, train):
"""Scale features, using test set to learn parameters.
Returns:
Scaled copy of features.
"""
if FLAGS.scaling is None:
return features
logging.info('Scaling features with %s', FLAGS.scaling)
if FLAGS.scaling == 'max_abs':
scaler = preprocessing.MaxAbsScaler()
elif FLAGS.scaling == 'standard':
scaler = preprocessing.StandardScaler()
else:
raise ValueError('Unrecognized scaling %s' % FLAGS.scaling)
scaler.fit(features[train])
return scaler.transform(features)
def build_model(features, labels, cv, name, index=None, rocs=False):
"""Get cross-validation metrics for a single model."""
fold_y_pred = []
fold_y_true = []
assert features.ndim == 2
for fold, (train, test) in enumerate(cv):
scaled_features = scale_features(features, train)
prefix = '%s-%s-fold-%d' % (FLAGS.prefix, name, fold)
if index is not None:
prefix += '-ref-%d' % index
if rocs:
y_pred = scaled_features[test].squeeze()
else:
model = get_model()
model.fit(scaled_features[train], labels[train])
# Save trained models.
with gzip.open('%s-model.pkl.gz' % prefix, 'wb') as f:
pickle.dump(model, f, pickle.HIGHEST_PROTOCOL)
try:
y_pred = model.predict_proba(scaled_features[test])[:, 1]
except AttributeError:
y_pred = model.decision_function(scaled_features[test])
fold_y_pred.append(y_pred)
y_true = labels[test]
fold_y_true.append(y_true)
# Save model output.
assert np.array_equal(y_true.shape, y_pred.shape)
assert y_true.ndim == 1
with gzip.open('%s-output.pkl.gz' % prefix, 'wb') as f:
pickle.dump( | pd.DataFrame({'y_true': y_true, 'y_pred': y_pred}) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
from autumn.settings import PROJECTS_PATH
from autumn.settings import INPUT_DATA_PATH
from autumn.tools.utils.utils import update_timeseries
from autumn.models.covid_19.constants import COVID_BASE_DATETIME
from autumn.tools.utils.utils import create_date_index
from autumn.settings import PASSWORD_ENVAR
from getpass import getpass
from autumn.tools.utils import secrets
COVID_AU_DIRPATH = os.path.join(INPUT_DATA_PATH, "covid_au")
CHRIS_CSV = os.path.join(COVID_AU_DIRPATH, "monitoringreport.secret.csv")
COVID_DHHS_DEATH_CSV = os.path.join(COVID_AU_DIRPATH, "monashmodelextract_deaths.secret.csv")
COVID_DHHS_CASE_CSV = os.path.join(COVID_AU_DIRPATH, "monashmodelextract_cases.secret.csv")
COVID_DHHS_ADMN_CSV = os.path.join(COVID_AU_DIRPATH, "monashmodelextract_admissions.secret.csv")
COVID_DHHS_VAC_CSV = os.path.join(COVID_AU_DIRPATH, "monashmodelextract_vaccination.secret.csv")
COVID_VIDA_VAC_CSV = os.path.join(COVID_AU_DIRPATH, "vida_vac.secret.csv")
COVID_VIDA_POP_CSV = os.path.join(COVID_AU_DIRPATH, "vida_pop.csv")
COVID_VAC_CSV = os.path.join(COVID_AU_DIRPATH, "vac_cov.csv")
COVID_DHHS_POSTCODE_LGA_CSV = os.path.join(COVID_AU_DIRPATH, "postcode lphu concordance.csv")
COVID_VICTORIA_TARGETS_CSV = os.path.join(
PROJECTS_PATH, "covid_19", "victoria", "victoria", "targets.secret.json"
)
# Two different mappings
LGA_TO_CLUSTER = os.path.join(
INPUT_DATA_PATH, "mobility", "LGA to Cluster mapping dictionary with proportions.csv"
)
LGA_TO_HSP = os.path.join(INPUT_DATA_PATH, "covid_au", "LGA_HSP map_v2.csv")
COVID_DHHS_MAPING = LGA_TO_HSP # This is the new mapping
TODAY = (pd.to_datetime("today") - COVID_BASE_DATETIME).days
TARGET_MAP_DHHS = {
"notifications": "cluster_cases",
"hospital_occupancy": "value_hosp",
"icu_occupancy": "value_icu",
"icu_admissions": "admittedtoicu",
"hospital_admissions": "nadmissions",
"infection_deaths": "cluster_deaths",
}
cluster_map_df = pd.read_csv(COVID_DHHS_MAPING)
map_id = cluster_map_df[["cluster_id", "cluster_name"]].drop_duplicates()
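# Normalise cluster names to the upper-case, underscore-separated form used as cluster keys
# elsewhere in this module (e.g. "North & East Metro" -> "NORTH_EAST_METRO"); the example is
# illustrative of the intent of the chained replacements below.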
map_id["cluster_name"] = (
map_id["cluster_name"]
.str.upper()
.str.replace("&", "")
.str.replace(" ", "_")
.str.replace(" ", "_")
)
CLUSTER_MAP = dict(map_id.values)
CLUSTER_MAP[0] = "VICTORIA"
CHRIS_MAP = {
# North east metro
"St Vincents Hospital": "NORTH_EAST_METRO",
"St Vincents Private Hospital Fitzroy": "NORTH_EAST_METRO",
"Austin Hospital": "NORTH_EAST_METRO",
"Northern Hospital, The [Epping]": "NORTH_EAST_METRO",
"Warringal Private Hospital [Heidelberg]": "NORTH_EAST_METRO",
"Maroondah Hospital [East Ringwood]": "NORTH_EAST_METRO",
"Box Hill Hospital": "NORTH_EAST_METRO",
"Angliss Hospital": "NORTH_EAST_METRO",
"Epworth Eastern Hospital": "NORTH_EAST_METRO",
"Knox Private Hospital [Wantirna]": "NORTH_EAST_METRO",
# South east metro
"Bays Hospital, The [Mornington]": "SOUTH_EAST_METRO",
"Frankston Hospital": "SOUTH_EAST_METRO",
"Peninsula Private Hospital [Frankston]": "SOUTH_EAST_METRO",
"Holmesglen Private Hospital ": "SOUTH_EAST_METRO",
"Alfred, The [Prahran]": "SOUTH_EAST_METRO",
"<NAME>": "SOUTH_EAST_METRO",
"Monash Medical Centre [Clayton]": "SOUTH_EAST_METRO",
"Valley Private Hospital, The [Mulgrave]": "SOUTH_EAST_METRO",
"Monash Children's Hospital": "SOUTH_EAST_METRO",
"Dandenong Campus": "SOUTH_EAST_METRO",
"St John of God Berwick Hospital": "SOUTH_EAST_METRO",
"Casey Hospital": "SOUTH_EAST_METRO",
# West metro
"Royal Childrens Hospital [Parkville]": "WEST_METRO",
"Sunshine Hospital": "WEST_METRO",
"Epworth Freemasons": "WEST_METRO",
"Western Hospital [Footscray]": "WEST_METRO",
"Melbourne Private Hospital, The [Parkville]": "WEST_METRO",
"Royal Melbourne Hospital - City Campus": "WEST_METRO",
"Mercy Public Hospitals Inc [Werribee]": "WEST_METRO",
"Epworth Hospital [Richmond]": "WEST_METRO",
"<NAME> - Moreland Private Hospital": "WEST_METRO",
# Grampians
"Ballarat Health Services [Base Campus]": "GRAMPIANS",
"St John of God Ballarat Hospital": "GRAMPIANS",
"Wimmera Base Hospital [Horsham]": "GRAMPIANS",
# Loddon malle
"Bendigo Hospital, The": "LODDON_MALLEE",
"New Mildura Base Hospital": "LODDON_MALLEE",
"St John of God Bendigo Hospital": "LODDON_MALLEE",
"Mildura Base Public Hospital": "LODDON_MALLEE",
# Barwon south west
"St John of God Geelong Hospital": "BARWON_SOUTH_WEST",
"Geelong Hospital": "BARWON_SOUTH_WEST",
"South West Healthcare [Warrnambool]": "BARWON_SOUTH_WEST",
"Epworth Geelong": "BARWON_SOUTH_WEST",
"Hamilton Base Hospital": "BARWON_SOUTH_WEST",
# Hume
"Albury Wodonga Health - Albury": "HUME",
"Goulburn Valley Health [Shepparton]": "HUME",
"Northeast Health Wangaratta": "HUME",
# Gippsland
"Latrobe Regional Hospital [Traralgon]": "GIPPSLAND",
"Central Gippsland Health Service [Sale]": "GIPPSLAND",
}
CHRIS_HOSPITAL = "Confirmed COVID ‘+’ cases admitted to your hospital"
CHRIS_ICU = "Confirmed COVID ‘+’ cases in your ICU/HDU(s)"
fix_lga = {
"Unknown": 0,
"Kingston (C) (Vic.)": "Kingston (C)",
"Interstate": 0,
"Overseas": 0,
"Melton (C)": "Melton (S)",
"Latrobe (C) (Vic.)": "Latrobe (C)",
"Wodonga (C)": "Wodonga (RC)",
"Unincorporated Vic": 0,
}
def main():
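    # Pipeline (as implemented below): unpack the raw extracts, build the case series, merge
    # CHRIS hospital/ICU occupancy, admissions and deaths into one frame, write encrypted
    # per-cluster and state-wide target files, then process the vaccination data (actual
    # coverage plus the VIDA vaccination model, which is also encrypted).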
process_zip_files()
cases = preprocess_cases()
cases = load_cases(cases)
chris_icu = load_chris_df(CHRIS_ICU)
chris_hosp = load_chris_df(CHRIS_HOSPITAL)
chris_df = chris_hosp.merge(
chris_icu, on=["date_index", "cluster_id"], how="outer", suffixes=("_hosp", "_icu")
)
chris_df = chris_df.groupby(["date_index", "cluster_id"]).sum().reset_index()
admissions = preprocess_admissions()
admissions = load_admissions(admissions)
deaths = preprocess_deaths()
deaths = load_deaths(deaths)
cases = cases.merge(chris_df, on=["date_index", "cluster_id"], how="outer")
cases = cases.merge(admissions, on=["date_index", "cluster_id"], how="outer")
cases = cases.merge(deaths, on=["date_index", "cluster_id"], how="outer")
cases = cases[cases["date_index"] < TODAY]
password = os.environ.get(PASSWORD_ENVAR, "")
if not password:
password = getpass(prompt="Enter the encryption password:")
for cluster in CLUSTER_MAP.values():
if cluster == "VICTORIA":
continue
cluster_secrets_file = os.path.join(
PROJECTS_PATH, "covid_19", "victoria", cluster.lower(), "targets.secret.json"
)
cluster_df = cases.loc[cases.cluster_id == cluster]
update_timeseries(TARGET_MAP_DHHS, cluster_df, cluster_secrets_file, password)
vic_df = cases.groupby("date_index").sum(skipna=True).reset_index()
update_timeseries(TARGET_MAP_DHHS, vic_df, COVID_VICTORIA_TARGETS_CSV, password)
# True vaccination numbers
df = preprocess_vac()
df = create_vac_coverage(df)
df.to_csv(COVID_VAC_CSV, index=False)
# Vida's vaccination model
df = fetch_vac_model()
update_vida_pop(df)
df = preprocess_vac_model(df)
df.to_csv(COVID_VIDA_VAC_CSV, index=False)
secrets.write(COVID_VIDA_VAC_CSV, password)
def merge_with_mapping_df(df, left_col_name):
df = df.merge(cluster_map_df, left_on=[left_col_name], right_on=["lga_name"], how="left")
df.loc[df.cluster_id.isna(), ["cluster_id", "cluster_name", "proportion"]] = [0, "VIC", 1]
df.cluster_id.replace(CLUSTER_MAP, inplace=True)
return df
def preprocess_csv(csv_file, col_name):
df = | pd.read_csv(csv_file) | pandas.read_csv |
#! /usr/bin/env python
"""
Script to observe sentence and token counts by language
"""
import os
import json
from argparse import ArgumentParser
from collections import Counter, defaultdict
from multiprocessing import Pool
from typing import Generator
import pandas as pd
MINIMUM_SENTENCE_THRESHOLD = 10
MINIMUM_TOKEN_THRESHOLD = 10
def count_sentences_tokens(path: str) -> tuple[str, int, int]:
"""Retrieves language, number of sentences, and number of tokens for each file"""
print(f"Extracting data from {path}")
with open(path) as file:
data = json.load(file)
language = data.get("site_language")
sentences = data.get("sentences")
n_sentences = 0
if sentences:
n_sentences = len(sentences)
tokens = data.get("tokens")
n_tokens = 0
if tokens:
n_tokens = sum([len(sentence_tokens) for sentence in tokens for sentence_tokens in sentence])
return language, n_sentences, n_tokens
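# Assumed input document shape, inferred from the accesses above (not a documented schema):
#   {"site_language": "en",
#    "sentences": ["First sentence.", ...],
#    "tokens": [[["First", "sentence", "."]], ...]}   # per sentence, a list of token lists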
class SentenceTokenCounter:
def __init__(self) -> None:
# Distribution of document counts by language and content type
self.doc_sentence_token_distribution: defaultdict[str, Counter[str]] = defaultdict(
Counter
)
    def count(self, language: str, n_sentences: int, n_tokens: int) -> None:
"""Counts total sentences and tokens by language"""
print(f"Counting {language}")
self.doc_sentence_token_distribution[language]['Document'] += 1
self.doc_sentence_token_distribution[language]['Sentence'] += n_sentences
self.doc_sentence_token_distribution[language]['Token'] += n_tokens
def histogram(self) -> None:
"""Creates histogram from sentence and token count distributions by language"""
df = | pd.DataFrame.from_dict(self.doc_sentence_token_distribution, orient="index") | pandas.DataFrame.from_dict |
import time
import pandas as pd
import os
from hungarian import *
from collections import deque
import multiprocessing as mp
import numpy as np
import sys
sys.setrecursionlimit(1000000)
sys.getrecursionlimit()
class TreeNode:  # Node of the branch-and-bound search tree
def __init__(self, builidng_id, location_id, tree_vec, parent):
# self.tree_node_id = None
self.building_id = int(builidng_id)
self.location_id = int(location_id)
self.parent = parent
self.lower_value = None
self.upper_value = None
        self.tree_vec = tree_vec  # records, for each location, the building assigned to it at this node (-1 = unassigned)
self.assignment_mat_1 = None
self.assignment_mat_2 = None
def GLB_(assignment_mat):
location_ind, building_ind = linear_sum_assignment(assignment_mat)
value = assignment_mat[location_ind, building_ind].sum() # + M # As this is the symmetric case, the i,k of each branch must be chosen in its corresponding l,j
return {'building_ind': building_ind, 'location_ind': location_ind, 'value': value}
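# Example of the assignment call used above (assuming `hungarian` re-exports
# scipy.optimize.linear_sum_assignment or an equivalent):
#   cost = np.array([[4., 1.], [2., 3.]])
#   rows, cols = linear_sum_assignment(cost)  # rows=[0, 1], cols=[1, 0]; minimum cost = 1 + 2 = 3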
branch_list = []
GLB_cost_mat = np.zeros((1,1))
def init(branch_list_, GLB_cost_mat_):
global branch_list
global GLB_cost_mat
branch_list = branch_list_
GLB_cost_mat = GLB_cost_mat_
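# Branch_update applies the current branching decisions to every cached branch subproblem:
# multiplier_mat carries a big-M reward on each (location, building) pair fixed by the tree,
# each branch's reduced cost matrix is re-solved with the Hungarian method, and the resulting
# per-branch lower bounds are written back into GLB_cost_mat.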
def Branch_update(multiplier_mat, tmp_value):
for branch in branch_list:
branch.assignment_mat = branch.init_assignment_mat - multiplier_mat
solution = GLB_(branch.assignment_mat)
branch.location_ind = solution['location_ind']
branch.building_ind = solution['building_ind']
branch.lower_value = solution['value'] + tmp_value
GLB_cost_mat[branch.i_ind - 1][branch.k_ind - 1] = branch.lower_value
class BAB:
def __init__(self, instance, glbsolver, args, cwd):
self.instance = instance
self.LB, self.UB = glbsolver.LB, glbsolver.UB
self.args = args
self.bf_lower_bound_list = [self.LB]
self.bf_upper_bound_list = [self.UB]
self.lb_lower_bound_list = [self.LB]
self.lb_upper_bound_list = [self.UB]
self.tree_node_list=[0]
self.current_layer_nodes = []
self.branch_iter = 0
self.best_solution_1 = None
self.best_solution_2 = None
self.random_i1_list = []
self.random_i2_list = []
self.nb_local = 0
# for quick access
self.target_relative_gap = args['target_relative_gap']
self.max_branch_iters = args['max_branch_iters']
self.M = args['M']
self.time_limit = args['time_limit']
self.start_time_breadth = 0.0
self.valid_time_breadth = 0.0
self.start_time_lb = 0.0
self.valid_time_lb = 0.0
self.nb_of_orig_building = instance.nb_of_orig_building
self.nb_of_orig_location = instance.nb_of_orig_location
self.nb_of_dest_building = instance.nb_of_dest_building
self.nb_of_dest_location = instance.nb_of_dest_location
self.flow_mat = instance.flow_mat
self.trans_cost_mat = instance.trans_cost_mat
self.build_cost_orig_mat = instance.build_cost_orig_mat
self.build_cost_dest_mat = instance.build_cost_dest_mat
self.pathfile=cwd
def local_search(self, tree_node):
assignment_mat_1, assignment_mat_2 = tree_node.assignment_mat_1, tree_node.assignment_mat_2
UpperBound = np.sum(self.flow_mat * np.matmul(np.matmul(assignment_mat_1, self.trans_cost_mat), assignment_mat_2)) + \
np.sum(self.build_cost_orig_mat * assignment_mat_1) + np.sum(self.build_cost_dest_mat * assignment_mat_2)
tree_node.upper_value = UpperBound
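        # NOTE: the early return below skips the swap-based local search that follows, so the
        # upper bound is taken directly from the Hungarian assignment; remove it to re-enable the search.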
return
local_search_list = deque()
local_search_list.append(assignment_mat_1)
Flag_Swap = 1
while (len(local_search_list) != 0 and Flag_Swap <= 10000):
temp_assign_mat = local_search_list[0]
assignment_mat_tmp = local_search_list[0]
for i in range(self.nb_local):
temp_assign_mat = local_search_list[0]
if self.random_i1_list[i] != self.random_i2_list[i]:
temp_assign_mat[[self.random_i1_list[i], self.random_i2_list[i]], :] = temp_assign_mat[[self.random_i2_list[i], self.random_i1_list[i]],:]
tmp_UB = np.sum(self.flow_mat * np.matmul(np.matmul(temp_assign_mat, self.trans_cost_mat), assignment_mat_2)) + \
np.sum(self.build_cost_orig_mat * temp_assign_mat) + np.sum(self.build_cost_dest_mat * assignment_mat_2)
if tmp_UB < UpperBound:
# print(UpperBound)
UpperBound = tmp_UB
assignment_mat_tmp = temp_assign_mat
local_search_list.append(assignment_mat_tmp)
local_search_list.popleft()
Flag_Swap = Flag_Swap + 1
assignment_mat_1 = assignment_mat_tmp
local_search_list = deque()
local_search_list.append(assignment_mat_2)
while (len(local_search_list) != 0 and Flag_Swap <= 20000):
temp_assign_mat = local_search_list[0]
assignment_mat_tmp = local_search_list[0]
for i in range(self.nb_local):
temp_assign_mat = local_search_list[0]
if self.random_i1_list[i] != self.random_i2_list[i]:
temp_assign_mat[[self.random_i1_list[i], self.random_i2_list[i]], :] = temp_assign_mat[[self.random_i2_list[i], self.random_i1_list[i]],:]
tmp_UB = np.sum(self.flow_mat * np.matmul(np.matmul(assignment_mat_1, self.trans_cost_mat),temp_assign_mat.T)) + \
np.sum(self.build_cost_orig_mat * assignment_mat_1) + np.sum(self.build_cost_dest_mat * temp_assign_mat)
if tmp_UB < UpperBound:
UpperBound = tmp_UB
assignment_mat_tmp = temp_assign_mat
local_search_list.append(assignment_mat_tmp)
local_search_list.popleft()
Flag_Swap += 1
assignment_mat_2 = assignment_mat_tmp
tree_node.upper_value = UpperBound
tree_node.assignment_mat_1, tree_node.assignment_mat_2 = assignment_mat_1, assignment_mat_2
def solveNode(self, live_node):
tree_nodes = []
live_building_id = int(live_node.building_id + 1)
for i in range(self.nb_of_dest_location):
            tmp_tree_vec = live_node.tree_vec.copy()  # copy so the parent's tree_vec is not mutated (avoid sharing the same list object)
if tmp_tree_vec[i] == -1: # and tmp_tree_vec.count(-1) > 1 # todo: change tree_vec to dict
tmp_tree_vec[i] = live_building_id
tree_node = TreeNode(live_building_id, i, tmp_tree_vec, live_node)
multiplier_mat = np.zeros([self.nb_of_dest_location, self.nb_of_dest_building])
tmp_value = 0
for k in range(self.nb_of_dest_building):
if tree_node.tree_vec[k] != -1:
l_ind = k
j_ind = tree_node.tree_vec[k]
multiplier_mat[l_ind, j_ind] = self.M
tmp_value += self.M
Branch_update(multiplier_mat, tmp_value)
lower_solution_1 = Hungarian_1(GLB_cost_mat + self.build_cost_orig_mat)
assignment_mat_1 = np.zeros([self.nb_of_orig_building, self.nb_of_orig_location])
assignment_mat_1[lower_solution_1['building_ind'], lower_solution_1['location_ind']] = 1
lower_solution_2 = Hungarian_2(self.build_cost_dest_mat - multiplier_mat)
assignment_mat_2 = np.zeros([self.nb_of_dest_location, self.nb_of_dest_building])
assignment_mat_2[lower_solution_2['location_ind'], lower_solution_2['building_ind']] = 1
tree_node.lower_value = lower_solution_1['value'] + lower_solution_2['value'] + tmp_value
tree_node.assignment_mat_1, tree_node.assignment_mat_2 = assignment_mat_1, assignment_mat_2
self.local_search(tree_node)
tree_nodes.append(tree_node)
return tree_nodes
def solveNodes(self, nodes):
child_node_list = []
lb, ub = np.inf, self.UB
best_node = None
for live_node in nodes:
if time.time() > self.valid_time_breadth: break
tree_nodes = self.solveNode(live_node)
for tree_node in tree_nodes:
if tree_node.upper_value < ub:
ub = tree_node.upper_value
best_node = tree_node
                # at least two locations are still unassigned, so this node's value is only a lower bound
if tree_node.tree_vec.count(-1) > 1:
if tree_node.lower_value <= ub:
if tree_node.lower_value < lb: lb = tree_node.lower_value
child_node_list.append(tree_node)
return child_node_list, lb, ub, best_node
def createRoot(self):
tree_vec = [-1] * self.nb_of_dest_building
root = TreeNode(-1, -1, tree_vec, -1) # generate the root tree_node
root.lower_value = self.LB
root.upper_value = self.UB
return root
def checkStopCondition(self):
GAP = (self.UB - self.LB) / self.UB
print(f'**BNB-BF iter {self.branch_iter}: Best Lower bound = ', self.LB)
print(f'**BNB-BF iter {self.branch_iter}: Best Upper bound = ', self.UB)
print(f'**BNB-BF iter {self.branch_iter}: GAP = ', GAP)
self.bf_lower_bound_list.append(self.LB)
self.bf_upper_bound_list.append(self.UB)
if GAP <= self.target_relative_gap:
print('**BNB-BF target relative gap reached')
return True
if self.branch_iter >= self.max_branch_iters:
print('**BNB-BF max branch iters reached')
return True
if time.time() >= self.valid_time_breadth:
print('**BNB-BF time limit reached')
return True
def createRandomList(self):
for i in range(self.nb_of_orig_building):
for j in range(self.nb_of_orig_building):
if i != j:
self.random_i1_list.append(i)
self.random_i2_list.append(j)
self.nb_local = len(self.random_i1_list)
def solve_breadth(self, solver_status):
self.createRandomList()
if self.args['threads'] == -1:
cores = mp.cpu_count()
else:
cores = self.args['threads']
p = mp.Pool(processes=cores, initializer=init, initargs=(self.instance.branch_list,self.instance.GLB_cost_mat))
self.start_time_breadth = time.time()
self.valid_time_breadth = self.start_time_breadth + self.time_limit
root = self.createRoot()
task_list = [[root]] + [[] for _ in range(cores-1)]
number_of_nodes = 1
while True:
# new iter
self.branch_iter += 1
print(f'**BNB-BF iter {self.branch_iter}: nodes {number_of_nodes}')
self.tree_node_list.append(number_of_nodes)
# solve nodes
result_list = p.map(self.solveNodes, task_list)
# update lb and ub
result_with_new_lb = min(result_list, key=lambda x: x[1])
new_lb = result_with_new_lb[1]
if self.LB < new_lb < np.inf:
self.LB = new_lb
result_with_new_ub = min(result_list, key=lambda x: x[2])
new_ub = result_with_new_ub[2]
if new_ub < self.UB:
self.UB = new_ub
self.best_solution_1 = result_with_new_ub[3].assignment_mat_1
self.best_solution_2 = result_with_new_ub[3].assignment_mat_2
stop_flag = self.checkStopCondition()
if stop_flag: break
# update task_list
all_node_list = []
for result in result_list:
for node in result[0]:
if node.lower_value < self.UB:
all_node_list.append(node)
number_of_nodes = len(all_node_list)
if number_of_nodes == 0:
print('**BNB-BF branch and bound complete')
solver_status.value = 1
break
ave_load = int(np.ceil(number_of_nodes / cores))
            task_list = []
            for i in range(cores - 1):
                task_list.append(all_node_list[i * ave_load:(i + 1) * ave_load])
            # The last worker takes the remainder; indexing with cores - 1 avoids a NameError when cores == 1.
            task_list.append(all_node_list[(cores - 1) * ave_load:])
# time
t1 = time.time()
print(f'**BNB-BF iter {self.branch_iter}: elapsed time {t1 - self.start_time_breadth}')
print(f'**BNB-BF best solution1 {self.best_solution_1}, best solution2 {self.best_solution_2}')
solution_1_df=pd.DataFrame(self.best_solution_1)
solution_2_df=pd.DataFrame(self.best_solution_2)
lb_ub_dict={'LB':self.bf_lower_bound_list,'UB':self.bf_upper_bound_list,'tree_node': self.tree_node_list}
lb_ub_df=pd.DataFrame(lb_ub_dict)
solution_1_df.to_csv(os.path.join(self.pathfile,'breadth_first_assignment_1.csv'))
solution_2_df.to_csv(os.path.join(self.pathfile,'breadth_first_assignment_2.csv'))
lb_ub_df.to_csv(os.path.join(self.pathfile,'breadth_first_lb_ub_iter.csv'))
def solve_lb(self, solver_status):
from queue import PriorityQueue
import copy
global branch_list
global GLB_cost_mat
branch_list = copy.deepcopy(self.instance.branch_list)
GLB_cost_mat = copy.deepcopy(self.instance.GLB_cost_mat)
self.start_time_lb = time.time()
self.valid_time_lb = self.start_time_lb + self.time_limit
lb, ub = self.LB, self.UB
best_solution_1, best_solution_2 = None, None
pq = PriorityQueue()
root = self.createRoot()
node_no = 0
pq.put((root.lower_value, node_no, root))
node_no += 1
while (pq.queue):
if solver_status.value == 1:
print('--BNB-LB stopped as BNB-BF has completed')
break
if time.time() > self.valid_time_lb:
print('--BNB-LB time limit reached')
break
lower_value, _, live_node = pq.get()
if lower_value > ub:
print('--BNB-LB branch and bound complete')
break
lb = lower_value
tree_nodes = self.solveNode(live_node)
for tree_node in tree_nodes:
if tree_node.upper_value < ub:
ub = tree_node.upper_value
best_solution_1 = tree_node.assignment_mat_1
best_solution_2 = tree_node.assignment_mat_2
                # at least two locations are still unassigned, so this node's value is only a lower bound
if tree_node.tree_vec.count(-1) > 1:
if tree_node.lower_value <= ub:
pq.put((tree_node.lower_value, node_no, tree_node))
node_no += 1
self.lb_lower_bound_list.append(lb)
self.lb_upper_bound_list.append(ub)
gap = (ub - lb) / ub
print(f'--BNB-LB: lb = {lb}, ub = {ub}, gap = {gap}')
print(f'--BNB-LB best solution1 {best_solution_1}, best solution2 {best_solution_2}')
best_solution_1_df=pd.DataFrame(best_solution_1)
best_solution_2_df=pd.DataFrame(best_solution_2)
lb_ub_dict={'LB':self.lb_lower_bound_list,'UB':self.lb_upper_bound_list}
lb_ub_df= | pd.DataFrame(lb_ub_dict) | pandas.DataFrame |
import requests
import re
import pandas as pd
import time
airbnb = pd.read_csv('../mergeScore/airbnb.csv')
school = {'Longitude':-2.233771, 'Latitude':53.46679}
result = []
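# getFloat pulls the kilometre figure out of the Chinese response text returned by the
# yovisun long/lat distance tool queried below; only the "<number>公里" fragment is relied
# upon, and the surrounding response wording is an assumption.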
def getFloat(text):
    """Extract the distance in kilometres from the response text; return -1 if no match is found."""
    num = re.findall(r'\d*\.\d*公里', text)
    try:
        return num[0][:-2]
    except IndexError:
        return -1
for index, row in airbnb.iterrows():
kw = {'long1': school['Longitude'],
'lat1': school['Latitude'],
'long2': row['Longitude'],
'lat2': row['Latitude']}
response = requests.get('http://tool.yovisun.com/longlat/index.php?', params=kw)
result.append({'distance': getFloat(response.text)})
time.sleep(0.05)
df = | pd.DataFrame(result) | pandas.DataFrame |
"""
econ_platform_core - Glue code for a unified work environment.
The center-piece of the package is *fetch()* a function which dynamically loads a pandas time series (Series object)
from any supported provider or database interface. The fetch command will determine whether the series exists in the
local database, or whether to query the external provider. If it already exists, the platform will decide whether it
is time to seek a refresh. (NOTE: That update protocol is not implemented at the time of writing.)
For a user, this is all done "under the hood" within a single line of code. The same plotting routines will always
work, even if the computer is cut off from external providers and the user has to grab locally archived data.
Normally, users will import econ_platform and call init_econ_platform(), which will
initialise this package.
Importing this file alone is supposed to have minimal side effects. However, configuration information is not
loaded, and so most functions will crash. This means that end uses will almost always want to call the initialisation
function (or import econ_platform.start, which is a script that initialises the platform).
This package is supposed to only depend on standard Python libraries and *pandas*. Anything else (including
things like matplotlib) are pushed into econ_platform, where code is meant to be loaded as extensions. If an extension
cannot be loaded (missing API packages, for example), econ_platform will still load up, it will just report that
an extension load failed.
Since sqlite3 is in the standard Python libraries, base SQL functionality will be implemented here.
(The design question is whether the code in here should migrate to other files. The issue is avoiding circular imports.
It might be possible, but I might need to redesign some classes, and create more "do nothing" base classes that just
offer an interface to users. Not a huge priority, and I should make sure I have more comprehensive test coverage
before trying.)
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas
import traceback
import webbrowser
import warnings
import datetime
# As a convenience, use the "logging.info" function as log.
from logging import info as log, debug as log_debug, warning as log_warning, error as log_error
import econ_platform_core.configuration
from econ_platform_core.entity_and_errors import PlatformEntity, PlatformError, TickerNotFoundError
from econ_platform_core import utils as utils
import econ_platform_core.extensions
from econ_platform_core.tickers import TickerFull, TickerDataType, TickerFetch, TickerLocal, TickerProviderCode
from econ_platform_core.series_metadata import SeriesMetadata
import econ_platform_core.tickers
# Get the logging information. Users can either programmatically change the LogInfo.LogDirectory or
# use a config file before calling start_log()
from econ_platform_core.update_protocols import UpdateProtocolManager, NoUpdateProtocol
LogInfo = utils.PlatformLogger()
def start_log(fname=None): # pragma: nocover
"""
Call this function if you want a log. By default, the log name is based on the base Python script name
(sys.argv[0]), and goes into the default directory (LonInfo.LogDirectory).
:param fname: str
:return:
"""
global LogInfo
LogInfo.StartLog(fname)
PlatformConfiguration = econ_platform_core.configuration.ConfigParserWrapper()
ExtensionList = econ_platform_core.extensions.ExtensionManager()
class DatabaseManager(PlatformEntity):
"""
This is the base class for Database Managers.
Note: Only support full series replacement for now.
    SetsLastUpdateAutomatically: Does the database update the last_update/last_refresh fields automatically on
    a write? If False, the UpdateProtocol is expected to call SetLastUpdate() after a Write().
"""
def __init__(self, name='Virtual Object'):
super().__init__()
self.Name = name
# This is overridden by the AdvancedDatabase constructor.
# By extension, everything derived from this base class (like the TEXT database is "not advanced."
self.IsAdvanced = False
self.Code = ''
self.ReplaceOnly = True
self.SetsLastUpdateAutomatically = False
def Find(self, ticker):
"""
Can we find the ticker on the database? Default behaviour is generally adequate.
:param ticker: str
:return: SeriesMetadata
"""
warnings.warn('Find is being replaced by GetMeta()', DeprecationWarning)
return self.GetMeta(ticker)
# ticker_obj = econ_platform_core.tickers.map_string_to_ticker(ticker)
# if type(ticker_obj) is TickerLocal:
# return self._FindLocal(ticker_obj)
# if type(ticker_obj) is TickerDataType:
# return self._FindDataType(ticker_obj)
# meta = SeriesMetadata()
# meta.ticker_local = None
# meta.ticker_full = ticker_obj
# meta.series_provider_code, meta.ticker_query = ticker_obj.SplitTicker()
# meta.Exists = self.Exists(meta)
# # Provider-specific meta data data not supported yet.
# return meta
def _FindLocal(self, local_ticker): # pragma: nocover
"""
Databases that support local tickers should override this method.
:param local_ticker: TickerLocal
:return:
"""
raise NotImplementedError('This database does not support local tickers')
def _FindDataType(self, datatype_ticker): # pragma: nocover
"""
:param datatype_ticker: TickerDataType
:return:
"""
raise NotImplementedError('This database does not support data type tickers')
def Exists(self, ticker): # pragma: nocover
"""
This method is only really needed by the non-complex databases.
:param ticker: str
:return: bool
"""
raise NotImplementedError()
def Retrieve(self, series_meta): # pragma: nocover
"""
:param series_meta: SeriesMetadata
:return: pandas.Series
"""
raise NotImplementedError()
def GetMeta(self, ticker_str):
ticker_obj = econ_platform_core.tickers.map_string_to_ticker(ticker_str)
if type(ticker_obj) is TickerLocal:
return self._FindLocal(ticker_obj)
if type(ticker_obj) is TickerDataType:
return self._FindDataType(ticker_obj)
meta = SeriesMetadata()
meta.ticker_local = None
meta.ticker_full = ticker_obj
meta.series_provider_code, meta.ticker_query = ticker_obj.SplitTicker()
meta.Exists = self.Exists(meta)
# Provider-specific meta data data not supported yet.
return meta
def GetLastRefresh(self, ticker_full):
"""
Get the last refresh datetime for a ticker.
Subclasses must implement this method, as it is the minimal information needed for an update
strategy.
:param ticker_full: TickerFull
:return:
"""
raise NotImplementedError()
def SetLastRefresh(self, ticker_full, time_stamp=None):
"""
Set the timestamp of the last refresh. Note that this will be called if an external provider is polled
and there is no new data.
If time_stamp is None, the manager should set to the current time.
This needs to be implemented by all database managers. The simplest manager (TEXT) will just touch the file
to reset the time stamp.
If the database manager is told to write the series, it is up to the database manager to set the LastRefresh
during the write operation. It is left to the manager, as it is likely that it will be more efficient to set the
status during the write operation.
:param ticker_full: TickerFull
        :param time_stamp: datetime.datetime
:return:
"""
raise NotImplementedError()
def SetLastUpdate(self, ticker_full, time_stamp=None):
"""
Sets the last_update *and* last_refresh fields. If time_stamp is None, uses current time.
Called after a Write() by the UpdateProtocol, unless self.SetsLastUpdateAutomatically is True.
:param ticker_full: TickerFull
        :param time_stamp: datetime.date
:return:
"""
raise NotImplementedError()
def RetrieveWithMeta(self, full_ticker):
"""
Retrieve both the meta data and the series. Have a single method in case there is
an optimisation for the database to do both queries at once.
        Since we normally do not need the metadata at the same time, the usual workflow is to call
        the Retrieve() interface directly.
:param full_ticker: str
:return: list
"""
meta = self.GetMeta(full_ticker)
meta.AssertValid()
ser = self.Retrieve(meta)
return ser, meta
def Delete(self, series_meta): # pragma: nocover
"""
Delete a series.
:param series_meta: SeriesMetadata
:return:
"""
raise NotImplementedError()
def Write(self, ser, series_meta, overwrite=True): # pragma: nocover
"""
:param ser: pandas.Series
:param series_meta: SeriesMetadata
:param overwrite: bool
:return:
"""
raise NotImplementedError()
class DatabaseList(PlatformEntity):
"""
List of all Database managers. Developers can push their own DatabaseManagers into the global object.
"""
def __init__(self):
super().__init__()
self.DatabaseDict = {}
def Initialise(self):
pass
def AddDatabase(self, wrapper, code=None):
"""
Add a database. If the code is not supplied, uses the code based on the PlatformConfiguration
setting (normal use). Only need to supply the code for special cases, like having extra SQLite
database files for testing.
:param wrapper: DatabaseManager
:param code: str
:return:
"""
if code is None:
code = PlatformConfiguration['DatabaseList'][wrapper.Name]
wrapper.Code = code
self.DatabaseDict[wrapper.Code] = wrapper
def __getitem__(self, item):
"""
Access method.
:param item: str
:return: DatabaseManager
"""
if item.upper() == 'DEFAULT':
item = PlatformConfiguration['Database']['Default']
if item == 'SQL':
# If the DEFAULT is SQL, will be re-mapped twice!
item = PlatformConfiguration['Database']['SQL']
return self.DatabaseDict[item]
def TransferSeries(self, full_ticker, source, dest):
"""
Transfer a series from one database to another.
Useful for migrations and testing.
:param full_ticker: str
:param source: str
:param dest: str
:return:
"""
source_manager = self[source]
dest_manager = self[dest]
ser, meta = source_manager.RetrieveWithMeta(full_ticker)
# The meta information should be the same, except the Exists flag...
        meta.Exists = dest_manager.Exists(meta)
dest_manager.Write(ser, meta)
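    # Illustrative usage (the ticker string and database codes are hypothetical; the codes
    # actually available depend on the local configuration):
    #   Databases.TransferSeries(some_full_ticker_string, source='TEXT', dest='SQL')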
Databases = DatabaseList()
class ProviderWrapper(PlatformEntity):
"""
Data provider class. Note that we call them "providers" and not "sources" since the source is the
agency in the real world that calculates the data. The provider and the source can be the same - for example,
if we get Eurostat data from Eurostat itself. However, we can get Eurostat data from DB.nomics.
"""
Name: str
def __init__(self, name='VirtualObject', default_code=None):
super().__init__()
self.Name = name
self.ProviderCode = ''
self.IsExternal = True
# Are these data only pushed to the database? If so, never attempt to update from within a fetch.
self.PushOnly = False
# Sometimes we fetch an entire table as a side effect of fetching a series.
# A provider can mark this possibility by setting TableWasFetched to True, and
# loading up TableSeries, TableMeta with series/meta-data. The fetch() function will
# store the data.
self.TableWasFetched = False
self.TableSeries = {}
self.TableMeta = {}
self.WebPage = ''
if not name == 'VirtualObject':
try:
self.ProviderCode = PlatformConfiguration['ProviderList'][name]
except KeyError:
if default_code is None:
raise PlatformError(
'Must set the provider code in the config file under [ProviderList] for provider {0}'.format(name))
else:
self.ProviderCode = default_code
def fetch(self, series_meta): # pragma: nocover
"""
Fetch a series from a provider.
:param series_meta: SeriesMetadata
:return: pandas.Series
"""
raise NotImplementedError
def GetSeriesURL(self, series_meta):
"""
Get the URL for a series, if possible. Otherwise, returns the provider webpage.
:param series_meta: SeriesMetadata
:return: str
"""
try:
return self._GetSeriesUrlImplementation(series_meta)
except NotImplementedError:
return self.WebPage
def _GetSeriesUrlImplementation(self, series_meta):
"""
        Implements the provider-specific URL lookup. If a NotImplementedError is thrown, the object will return the
        Provider.WebPage.
"""
raise NotImplementedError()
class ProviderList(PlatformEntity):
"""
    List of all provider wrappers. Developers can push their own ProviderWrappers into the global object.
Keep the "User" provider also saved as a data member, since it will be accessed by code hooking into it.
"""
def __init__(self):
super().__init__()
self.ProviderDict = {}
self.EchoAccess = False
self.UserProvider = None
self.PushOnlyProvider = None
def Initialise(self):
self.EchoAccess = PlatformConfiguration['ProviderOptions'].getboolean('echo_access')
def AddProvider(self, obj):
"""
Add a provider
:param obj: ProviderWrapper
:return:
"""
self.ProviderDict[obj.ProviderCode] = obj
if obj.Name == 'User':
# Tuck the "User" provider into a known location.
self.UserProvider = obj
if obj.Name == 'PushOnly':
self.PushOnlyProvider = obj
def __getitem__(self, item):
"""
Access method
:param item: str
:return: ProviderWrapper
"""
# Need to convert to string since we are likely passed a ticker.
return self.ProviderDict[str(item)]
Providers = ProviderList()
UpdateProtocolList = UpdateProtocolManager()
def fetch(ticker, database='Default', dropna=True):
"""
Fetch a series from database; may create series and/or update as needed.
(May create a "fetchmany()" for fancier fetches that take a slice of the database.)
:param ticker: str
:param database: str
:param dropna: bool
:return: pandas.Series
"""
    # Default handling is inside the database manager...
# if database.lower() == 'default':
# database = PlatformConfiguration["Database"]["Default"]
database_manager: DatabaseManager = Databases[database]
series_meta = database_manager.GetMeta(ticker)
series_meta.AssertValid()
provider_code = series_meta.series_provider_code
# noinspection PyPep8
try:
provider_manager: ProviderWrapper = Providers[provider_code]
except:
raise KeyError('Unknown provider_code: ' + str(provider_code)) from None
if series_meta.Exists:
# Return what is on the database.
global UpdateProtocolList
# TODO: Allow for choice of protocol.
return UpdateProtocolList["DEFAULT"].Update(ticker, series_meta, provider_manager, database_manager)
else:
return UpdateProtocolList["DEFAULT"].FetchAndWrite(ticker, series_meta, provider_manager, database_manager)
# if provider_manager.IsExternal:
# _hook_fetch_external(provider_manager, ticker)
# if provider_manager.PushOnly:
# raise PlatformError(
# 'Series {0} does not exist on {1}. Its ticker indicates that it is push-only series.'.format(
# ticker, database)) from None
# log_debug('Fetching %s', ticker)
# # Force this to False, so that ProviderManager extension writers do not need to
# # remember to do so.
# provider_manager.TableWasFetched = False
# if Providers.EchoAccess:
# print('Going to {0} to fetch {1}'.format(provider_manager.Name, ticker))
# try:
# out = provider_manager.fetch(series_meta)
# except TickerNotFoundError:
# # If the table was fetched, write the table, even if the specific series was not there...
# if provider_manager.TableWasFetched:
# for k in provider_manager.TableSeries:
# t_ser = provider_manager.TableSeries[k]
# t_meta = provider_manager.TableMeta[k]
# # Don't write the single series again..
# if str(t_meta.ticker_full) == str(series_meta.ticker_full):
# continue
# database_manager.Write(t_ser, t_meta)
# if not database_manager.SetsLastUpdateAutomatically:
# database_manager.SetLastUpdate(t_meta.ticker_full)
# raise
# if type(out) is not tuple:
# ser = out
# else:
# ser, series_meta = out
# if dropna:
# ser = ser.dropna()
# log('Writing %s', ticker)
# database_manager.Write(ser, series_meta)
# # Having this logic repeated three times is silly, but I want to force subclasses to
# # implement SetLastUpdate(), as otherwise update protocols will break.
# if not database_manager.SetsLastUpdateAutomatically:
# database_manager.SetLastUpdate(series_meta.ticker_full)
# if provider_manager.TableWasFetched:
# for k in provider_manager.TableSeries:
# t_ser = provider_manager.TableSeries[k]
# t_meta = provider_manager.TableMeta[k]
# # Don't write the single series again..
# if str(t_meta.ticker_full) == str(series_meta.ticker_full):
# continue
# database_manager.Write(t_ser, t_meta)
# if not database_manager.SetsLastUpdateAutomatically:
# database_manager.SetLastUpdate(t_meta.ticker_full)
#
# return ser
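# Illustrative usage of fetch() (the ticker value is hypothetical; it must follow the
# platform's provider-prefixed ticker convention):
#   ser = fetch(some_ticker_string)                                    # returns a pandas.Series
#   ser = fetch(some_ticker_string, database='Default', dropna=False)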
def fetch_df(ticker, database='Default', dropna=True):
"""
Return a DataFrame. Used by R.
As a convenience for R users, dumps the last error to the log file.
:param ticker: str
:param database: str
:param dropna: bool
:return: pandas.DataFrame
"""
# noinspection PyPep8
try:
ser = fetch(ticker, database, dropna)
df = pandas.DataFrame({'series_dates': ser.index, 'series_values': ser.values})
return df
except:
log_last_error()
raise
def reset_update_time(ticker, database='Default', time_stamp=None):
"""
Convenience function to set the update/refresh time back in the past, forcing an update of the series.
If time_stamp is left as None, set to datetime.datetime(1980, 1, 1).
:param ticker: str
:param database: str
    :param time_stamp: datetime.datetime
    :return: None
"""
database_manager: DatabaseManager = Databases[database]
series_meta = database_manager.GetMeta(ticker)
series_meta.AssertValid()
if time_stamp is None:
time_stamp=datetime.datetime(1980, 1, 1)
database_manager.SetLastRefresh(ticker_full=series_meta.ticker_full, time_stamp=time_stamp)
def log_extension_status(): # pragma: nocover
"""
    After-the-fact logging of what extensions were loaded. Useful for R.
:return:
"""
log_debug('Successful Extension Initialisation')
for e in ExtensionList.LoadedExtensions:
log_debug(e)
if len(ExtensionList.DecoratedFailedExtensions) == 0:
log_debug('No extension loads failed.')
return
log_warning('Failed Extension Initialisation')
for f, warn in ExtensionList.DecoratedFailedExtensions:
log_warning('Extension_File\t{0}\tMessage:\t{1}'.format(f, warn))
# noinspection PyUnusedLocal
def _hook_fetch_external(provider_manager, ticker):
"""
Hook for customisation when external reference is hit.
:param provider_manager: ProviderManager
:param ticker: str
:return: None
"""
pass
def log_last_error(just_info=False): # pragma: nocover
"""
Convenience function to log the last error.
If just_info, just logs as "INFO", so we do not get an error...
:type just_info: bool
:return:
"""
msg = traceback.format_exc()
if just_info:
log(msg)
else:
log_error(msg)
def get_provider_url(provider_code, open_browser=True):
"""
Get the URL of the provider's data website, if it exists (specified).
Returns None if not defined.
Will open a browser if asked.
:param provider_code: str
    :param open_browser: bool
    :return: str
"""
provider_code = str(provider_code)
try:
url = Providers[provider_code].WebPage
except KeyError:
raise PlatformError('Provider code not defined: {0}'.format(provider_code))
if url is None or len(url) == 0:
return None
if open_browser:
webbrowser.open(url, new=2)
return url
def get_series_URL(ticker, database='Default', open_browser=True):
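    """
    Get the URL for a specific series from its provider, if available; optionally open it in a browser.
    :param ticker: str
    :param database: str
    :param open_browser: bool
    :return: str
    """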
database_manager: DatabaseManager = Databases[database]
series_meta = database_manager.Find(ticker)
provider_code = str(series_meta.series_provider_code)
try:
url = Providers[provider_code].GetSeriesURL(series_meta)
except KeyError:
raise PlatformError('Provider code not defined: {0}'.format(provider_code)) from None
if url is None or len(url) == 0:
return None
if open_browser:
webbrowser.open(url, new=2)
return url
def fetch_metadata(ticker_str, database='SQL'):
"""
Given a ticker string, find the series metadata, returned as a pandas.DataFrame.
TODO: If does not exist, try to go to the provider.
:param ticker_str: str
:param database: str
    :return: pandas.DataFrame
"""
db_manager: DatabaseManager = Databases[database]
meta = db_manager.Find(ticker_str)
if meta.Exists:
try:
meta = db_manager.GetMeta(meta.ticker_full)
except NotImplementedError:
pass
df = meta.to_DF()
return df
def init_package():
"""
Call to initialise the package, other than configuration file (and logging set up).
:return:
"""
global PlatformConfiguration
if not PlatformConfiguration.LoadedAny:
# May switch over to "silent" loading, but not knowing which config files were loaded can
# cause a lot of errors...
PlatformConfiguration = econ_platform_core.configuration.load_platform_configuration(display_steps=True)
# By default, go into the "logs" directory below this file.
if len(LogInfo.LogDirectory) == 0:
# If it has not been set manually, use the config information.
LogInfo.LogDirectory = utils.parse_config_path(PlatformConfiguration['Logging']['LogDirectory'])
Databases.Initialise()
Providers.Initialise()
UpdateProtocolList.Initialise()
global ExtensionList
ExtensionList.LoadedExtensions, ExtensionList.FailedExtensions, ExtensionList.DecoratedFailedExtensions = \
econ_platform_core.extensions.load_extensions()
def get_platform_information(return_instead_of_print=False):
"""
If return_instead_of_print is True, returns a DataFrame with information,
otherwise just prints to console (expected usage).
Format will change, so this is just for users who want to refresh their memory of provider codes, see
what extensions exist, etc.
:return: pandas.DataFrame
"""
out = pandas.DataFrame(columns=['Type', 'Name', 'Info'])
# Create a little utility to append rows to a DataFrame
def appender(df, row):
return df.append(pandas.DataFrame([row], columns=df.columns))
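        # Note: DataFrame.append is deprecated and removed in pandas 2.0; pandas.concat would be
        # the forward-compatible replacement if this helper needs updating.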
# First: extensions
for ext in ExtensionList.LoadedExtensions:
out = appender(out, ['Extension', ext, 'Loaded'])
    out = out.sort_values('Name')
failed = | pandas.DataFrame(columns=['Type', 'Name', 'Info']) | pandas.DataFrame |
import os
import uuid
from datetime import datetime
from time import sleep
import fsspec
import pandas as pd
import pytest
import v3iofs
from storey import EmitEveryEvent
import mlrun
import mlrun.feature_store as fs
from mlrun import store_manager
from mlrun.datastore.sources import CSVSource, ParquetSource
from mlrun.datastore.targets import CSVTarget, NoSqlTarget, ParquetTarget
from mlrun.features import Entity
from tests.system.base import TestMLRunSystem
@TestMLRunSystem.skip_test_if_env_not_configured
# Marked as enterprise because of v3io mount and remote spark
@pytest.mark.enterprise
class TestFeatureStoreSparkEngine(TestMLRunSystem):
project_name = "fs-system-spark-engine"
spark_service = ""
pq_source = "testdata.parquet"
csv_source = "testdata.csv"
spark_image_deployed = (
False # Set to True if you want to avoid the image building phase
)
test_branch = "" # For testing specific branch. e.g.: "https://github.com/mlrun/mlrun.git@development"
@classmethod
def _init_env_from_file(cls):
env = cls._get_env_from_file()
cls.spark_service = env["MLRUN_SYSTEM_TESTS_DEFAULT_SPARK_SERVICE"]
def get_local_pq_source_path(self):
return os.path.relpath(str(self.assets_path / self.pq_source))
def get_remote_pq_source_path(self, without_prefix=False):
path = "v3io://"
if without_prefix:
path = ""
path += "/bigdata/" + self.pq_source
return path
def get_local_csv_source_path(self):
return os.path.relpath(str(self.assets_path / self.csv_source))
def get_remote_csv_source_path(self, without_prefix=False):
path = "v3io://"
if without_prefix:
path = ""
path += "/bigdata/" + self.csv_source
return path
def custom_setup(self):
from mlrun import get_run_db
from mlrun.run import new_function
from mlrun.runtimes import RemoteSparkRuntime
self._init_env_from_file()
if not self.spark_image_deployed:
store, _ = store_manager.get_or_create_store(
self.get_remote_pq_source_path()
)
store.upload(
self.get_remote_pq_source_path(without_prefix=True),
self.get_local_pq_source_path(),
)
store, _ = store_manager.get_or_create_store(
self.get_remote_csv_source_path()
)
store.upload(
self.get_remote_csv_source_path(without_prefix=True),
self.get_local_csv_source_path(),
)
if not self.test_branch:
RemoteSparkRuntime.deploy_default_image()
else:
sj = new_function(
kind="remote-spark", name="remote-spark-default-image-deploy-temp"
)
sj.spec.build.image = RemoteSparkRuntime.default_image
sj.with_spark_service(spark_service="dummy-spark")
sj.spec.build.commands = ["pip install git+" + self.test_branch]
sj.deploy(with_mlrun=False)
get_run_db().delete_function(name=sj.metadata.name)
self.spark_image_deployed = True
def test_basic_remote_spark_ingest(self):
key = "patient_id"
measurements = fs.FeatureSet(
"measurements",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
engine="spark",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
fs.ingest(
measurements,
source,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
assert measurements.status.targets[0].run_id is not None
def test_basic_remote_spark_ingest_csv(self):
key = "patient_id"
name = "measurements"
measurements = fs.FeatureSet(
name,
entities=[fs.Entity(key)],
engine="spark",
)
source = CSVSource(
"mycsv", path=self.get_remote_csv_source_path(), time_field="timestamp"
)
fs.ingest(
measurements,
source,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
features = [f"{name}.*"]
vec = fs.FeatureVector("test-vec", features)
resp = fs.get_offline_features(vec)
df = resp.to_dataframe()
assert type(df["timestamp"][0]).__name__ == "Timestamp"
def test_error_flow(self):
df = pd.DataFrame(
{
"name": ["Jean", "Jacques", "Pierre"],
"last_name": ["Dubois", "Dupont", "Lavigne"],
}
)
measurements = fs.FeatureSet(
"measurements",
entities=[fs.Entity("name")],
engine="spark",
)
with pytest.raises(mlrun.errors.MLRunInvalidArgumentError):
fs.ingest(
measurements,
df,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
def test_ingest_to_csv(self):
key = "patient_id"
csv_path_spark = "v3io:///bigdata/test_ingest_to_csv_spark"
csv_path_storey = "v3io:///bigdata/test_ingest_to_csv_storey.csv"
measurements = fs.FeatureSet(
"measurements_spark",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
engine="spark",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
targets = [CSVTarget(name="csv", path=csv_path_spark)]
fs.ingest(
measurements,
source,
targets,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
csv_path_spark = measurements.get_target_path(name="csv")
measurements = fs.FeatureSet(
"measurements_storey",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
targets = [CSVTarget(name="csv", path=csv_path_storey)]
fs.ingest(
measurements,
source,
targets,
)
csv_path_storey = measurements.get_target_path(name="csv")
read_back_df_spark = None
file_system = fsspec.filesystem("v3io")
for file_entry in file_system.ls(csv_path_spark):
filepath = file_entry["name"]
if not filepath.endswith("/_SUCCESS"):
read_back_df_spark = pd.read_csv(f"v3io://{filepath}")
break
assert read_back_df_spark is not None
read_back_df_storey = None
for file_entry in file_system.ls(csv_path_storey):
filepath = file_entry["name"]
read_back_df_storey = pd.read_csv(f"v3io://{filepath}")
break
assert read_back_df_storey is not None
assert read_back_df_spark.sort_index(axis=1).equals(
read_back_df_storey.sort_index(axis=1)
)
@pytest.mark.parametrize("partitioned", [True, False])
def test_schedule_on_filtered_by_time(self, partitioned):
name = f"sched-time-{str(partitioned)}"
now = datetime.now()
path = "v3io:///bigdata/bla.parquet"
fsys = fsspec.filesystem(v3iofs.fs.V3ioFS.protocol)
pd.DataFrame(
{
"time": [
pd.Timestamp("2021-01-10 10:00:00"),
| pd.Timestamp("2021-01-10 11:00:00") | pandas.Timestamp |
"""Test functions in owid.datautils.dataframes module.
"""
import numpy as np
import pandas as pd
from pytest import warns
from typing import Any, Dict
from owid.datautils import dataframes
class TestCompareDataFrames:
def test_with_large_absolute_tolerance_all_equal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2= | pd.DataFrame({"col_01": [2, 3]}) | pandas.DataFrame |
import pandas as pd
import plotly.graph_objs as go
####### STUDIES TRACES ######
def tech_indicator_plot(df, study, fig):
return eval(study)(df, fig)
def tech_indicator_subplot(df, study):
return eval(study)(df)
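# Note: `study` is expected to be the name of one of the trace functions defined below, and the
# DataFrame must carry the "Date"/"High"/"Low"/"Close" columns those functions use.
# Illustrative call: tech_indicator_subplot(df, "mom_trace")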
# Moving average
def moving_average_trace(df, fig):
MA = df["Close"].rolling(window=5).mean()
trace = go.Scatter(
x=df["Date"], y=MA, mode="lines", showlegend=False, name="MA",
line=dict(width=1)
)
fig.append_trace(trace, 1, 1) # plot in first row
return fig
# Exponential moving average
def e_moving_average_trace(df, fig):
    EMA = df["Close"].ewm(span=20, adjust=False).mean()
trace = go.Scatter(
x=df["Date"], y=EMA, mode="lines", showlegend=False, name="EMA",
line=dict(width=1)
)
fig.append_trace(trace, 1, 1) # plot in first row
return fig
# Bollinger Bands
def bollinger_trace(df, fig, window_size=10, num_of_std=5):
price = df["Close"]
rolling_mean = price.rolling(window=window_size).mean()
rolling_std = price.rolling(window=window_size).std()
upper_band = rolling_mean + (rolling_std * num_of_std)
Lower_band = rolling_mean - (rolling_std * num_of_std)
trace = go.Scatter(
x=df["Date"], y=upper_band, mode="lines", showlegend=False, name="BB_upper",
line=dict(width=1)
)
trace2 = go.Scatter(
x=df["Date"], y=rolling_mean, mode="lines", showlegend=False, name="BB_mean",
line=dict(width=1)
)
trace3 = go.Scatter(
x=df["Date"], y=Lower_band, mode="lines", showlegend=False, name="BB_Lower",
line=dict(width=1)
)
fig.append_trace(trace, 1, 1) # plot in first row
fig.append_trace(trace2, 1, 1) # plot in first row
fig.append_trace(trace3, 1, 1) # plot in first row
return fig
# Accumulation Distribution
def accumulation_trace(df):
df["Volume"] = ((df["Close"] - df["Low"]) - (df["High"] - df["Close"])) / (
df["High"] - df["Low"]
)
trace = go.Scatter(
x=df["Date"], y=df["Volume"], mode="lines", showlegend=False, name="Accumulation",
line=dict(width=1)
)
return trace
# Commodity Channel Index
def cci_trace(df, ndays=5):
TP = (df["High"] + df["Low"] + df["Close"]) / 3
CCI = pd.Series(
(TP - TP.rolling(window=10, center=False).mean())
/ (0.015 * TP.rolling(window=10, center=False).std()),
name="cci",
)
trace = go.Scatter(
x=df["Date"], y=CCI, mode="lines", showlegend=False, name="CCI",
line=dict(width=1)
)
return trace
# Price Rate of Change
def roc_trace(df, ndays=5):
N = df["Close"].diff(ndays)
D = df["Close"].shift(ndays)
ROC = pd.Series(N / D, name="roc")
trace = go.Scatter(
x=df["Date"], y=ROC, mode="lines", showlegend=False, name="ROC",
line=dict(width=1)
)
return trace
# Stochastic oscillator %K
def stoc_trace(df):
SOk = pd.Series((df["Close"] - df["Low"]) / (df["High"] - df["Low"]), name="SO%k")
trace = go.Scatter(
x=df["Date"], y=SOk, mode="lines", showlegend=False, name="SO%k",
line=dict(width=1)
)
return trace
# Momentum
def mom_trace(df, n=5):
M = pd.Series(df["Close"].diff(n), name="Momentum_" + str(n))
trace = go.Scatter(
x=df["Date"], y=M, mode="lines", showlegend=False, name="MOM",
line=dict(width=1)
)
return trace
# Pivot points
def pp_trace(df, fig):
PP = pd.Series((df["High"] + df["Low"] + df["Close"]) / 3)
R1 = pd.Series(2 * PP - df["Low"])
S1 = pd.Series(2 * PP - df["High"])
R2 = pd.Series(PP + df["High"] - df["Low"])
S2 = pd.Series(PP - df["High"] + df["Low"])
R3 = pd.Series(df["High"] + 2 * (PP - df["Low"]))
S3 = | pd.Series(df["Low"] - 2 * (df["High"] - PP)) | pandas.Series |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
## Problem
### We have no data
### We do not know which features affect whether a person takes a job, or how they should be weighted
### Whether a person ultimately interviews and starts a job is affected by many factors, e.g. [a better offer elsewhere, unexpected events]. Those cases should not be counted as [match failure] data. So what should count as failure data? A successful onboarding certainly counts as a successful match, but the non-matching data still has to be derived by logic
## Approach
### Feature selection
### Based on real-world conditions, pick a set of salient features; each feature is converted into a score, and a total score above 80 is treated as a successful onboarding (features are expressed as enumerations to make random generation easy)
### Features may influence each other, e.g. family background affects the salary weight, marital status affects the commute-time weight (not considered for now)
### Weight calculation to be added later [probably should be based on amounts converted into salary terms]
### Data source
### Without enough external data, generate a random data source from the logic below
### Once the data source is large enough, the AI can learn the logic summarized by humans
### At that point the output should stay consistent with the existing logic-based matching
### Future integration of business data
### Business data from actual onboardings can be mixed directly with the generated random data; after training, inspect the results. If the fit stays high, the features and weights we summarized match reality; if the fit is low, analyze why
### Keep swapping features and iterating to find a better model
### Focus on the low-scoring records among actual onboardings to uncover hidden features
# %%
# Generate data with the following logic as the base data set, expressing the logic-based matching/recommendation algorithm
# Baseline logic rules
dict_raws_base = {
    # Gender
    # 0 job has a hard requirement, candidate does not meet it
    # 1 job has a preferred requirement, candidate does not meet it
    # 2 job has a preferred requirement, candidate meets it
    # 3 job has no requirement
    # 4 job has a hard requirement, candidate meets it
'gender': {
'high': 5,
'score': [-10000, 0, 100, 100, 120],
},
    # Time match rate (note: this is the overlap between the candidate's availability matrix and the job's required time matrix; it is complex and needs its own algorithm, not expanded here)
    # 1 / 7 is about 14.3%
    # 0 <15%
    # 1 >=15% <60%
    # 2 >=60% <90%
    # 3 >90%
'time_match_rate': {
'high': 4,
'score': [-10000, 0, 60, 100],
},
    # Job category
    # 0 very dissatisfied
    # 1 dissatisfied
    # 2 satisfied
'job_categories': {
'high': 3,
'score': [0, 60, 100],
},
    # Social insurance contributions
    # 0 company does not pay + user cares
    # 1 company pays + user does not care
    # 2 company pays + user cares
    # 3 company does not pay + user does not care
'insurance': {
'high': 4,
'score': [0, 60, 100, 100],
},
    # Salary + benefits (room and board, etc.)
    # 0 salary below expectation + no benefits
    # 1 salary below expectation + benefits
    # 2 salary meets expectation + no benefits
    # 3 salary meets expectation + benefits
'benefits': {
'high': 4,
'score': [0, 20, 80, 100],
},
    # Commute time
    # 0 more than 2 hours
    # 1 more than 1 hour
    # 2 within 40 minutes
    # 3 within 20 minutes
'go_work_time': {
'high': 4,
'score': [0, 20, 80, 100],
},
}
# Assume the features are the same; only the specific rules differ
columns = dict_raws_base.keys()
# Scoring model
def get_score(d, score_raws):
count = 0
for key in columns:
score = score_raws[key]['score'][int(d[key])]
        # no weights for now; treat all features as equally weighted: weight = score_raws[key]['weight']
count += (score / len(columns))
return count
def get_score_rate(d, score_key):
    # score 0: total < 60, not considered
    # score 1: total 60-80, worth considering
    # score 2: total > 80, strong likelihood of onboarding
rate = 0
count = d[score_key]
if count < 60:
rate = 0
if count >= 60 and count < 80:
rate = 1
if count >= 80:
rate = 2
return rate
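# Illustrative check of the scoring model (the record values are made up):
#   record = {'gender': 4, 'time_match_rate': 3, 'job_categories': 2,
#             'insurance': 2, 'benefits': 3, 'go_work_time': 2}
#   get_score(record, dict_raws_base)  # -> (120+100+100+100+100+80)/6 = 100, i.e. rate 2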
# %%
import numpy as np
import pandas as pd
def get_df(dict_raws, size):
df_raws = {}
for key in dict_raws:
df_raws[key] = np.random.randint(dict_raws[key]['high'], size=(size))
df= | pd.DataFrame(df_raws, columns=columns) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
from joblib import dump, load
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.utils import compute_class_weight
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, roc_curve, mean_squared_error
import statsmodels.api as sm
import statsmodels.formula.api as smf
LENGTH_OF_PAM = 3
rev = {'A':'T', 'T':'A', 'C':'G', 'G':'C', 'a':'t', 't':'a', 'c':'g', 'g':'c'}
nucleotides = ['A', 'T', 'C', 'G']
dinucleotides = [i + j for i in nucleotides for j in nucleotides]
allnucleotides = nucleotides + dinucleotides
input_file = 'data/Gaetan.featureTable.xlsx'
model_file = 'cune_classifier.joblib'
RANDOM_STATE = 3767
N_TREES = 1000
RANDOM_STATE = 353563
FOLD_COUNT = 5
ARM_LENGTH = 65
def read_data(file_name):
df = pd.read_excel(file_name, index_col='Id')
df.WU_CRISPR.fillna(0, inplace=True)
df = df[pd.notnull(df['ss_oligo'])]
df.shape
return df
def reverse_complement(sequence):
return ''.join([rev[n] for n in sequence][::-1])
def gc_content(seq):
    d = len([s for s in seq if s in 'CcGg']) / len(seq) * 100
return round(d, 2)
def get_oligo_components(guide, oligo, distance_from_pam, mutation_size, truncation=0):
pam_mutation_index = range(20 - distance_from_pam - mutation_size, 20 - distance_from_pam)
guide_regex = ''.join([nucleotide if i not in pam_mutation_index else '[ATCG]' for i, nucleotide in enumerate(guide)])
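    # guide_regex is the guide sequence with the mutation window (mutation_size bases,
    # distance_from_pam bases upstream of the PAM) replaced by [ATCG] wildcards, so the
    # target site can still be located in the oligo even when it carries the edit.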
match = re.search(guide_regex, oligo, flags=re.IGNORECASE)
if match:
dist = match.span()[1] - distance_from_pam - LENGTH_OF_PAM
else:
match = re.search(guide_regex, reverse_complement(oligo), flags=re.IGNORECASE)
dist = len(oligo) - match.span()[1] + distance_from_pam + LENGTH_OF_PAM + 1
mutation = oligo[dist-1: dist].upper()
arms = [oligo[: dist-1].upper(), oligo[dist:].upper()]
t_fix = [truncation] * 2 if truncation else [len(arm) for arm in arms]
return pd.Series({
'oligo_arm1': arms[0][-t_fix[0]:-20],
'oligo_arm2': arms[1][20: t_fix[1]],
'oligo_arm1_length': len(arms[0]),
'oligo_arm2_length': len(arms[1]),
'oligo_trimmed': arms[0][-t_fix[0]:] + mutation + arms[1][: t_fix[1]]
})
def merge_duplicates(df, merge_key):
tmp_df = df.reset_index().groupby(merge_key, group_keys=False).agg(
{'Id': 'first',
'point_mutations':sum,
'indels': sum,
'trials': sum}).set_index('Id')
cols_to_use = df.columns.difference(tmp_df.columns)
merged_df = pd.concat([df[cols_to_use], tmp_df], axis=1, join='inner')
return merged_df
def remove_unwanted(df, vaa):
#df3 = df[ ~(df.point_mutations / df.trials).between(range_from, range_to, True)]
return df[ (df.indels / df.trials) > vaa]
def remove_dodgy(df):
#df = df[df.trials > 1]
return df[df.dodgy == 0]
def process_labels(df):
labels_df = df.loc[:, ['indels', 'point_mutations', 'trials', 'New']]
labels_df.loc[:, 'cleavage_sample_ratio'] = (labels_df.indels / labels_df.trials)
for i in [0.4, 0.5, 0.6]:
labels_df.loc[:, 'cleavage_sample_{}'.format(i)] = labels_df.cleavage_sample_ratio > i
labels_df.loc[:, 'hdr_sample_ratio'] = (labels_df.point_mutations / labels_df.trials)
for i in [0.1, 0.18, 0.2, 0.3, 0.4, 0.5, 0.6]:
labels_df.loc[:, 'hdr_sample_{}'.format(i)] = labels_df.hdr_sample_ratio > i
labels_df.loc[:, 'hdr_cleavage_ratio'] = (labels_df.point_mutations / labels_df.indels)
for i in [0.1, 0.18, 0.2, 0.3, 0.4, 0.5, 0.6]:
labels_df.loc[:, 'hdr_cleavage_{}'.format(i)] = labels_df.hdr_cleavage_ratio > i
#labels_df.loc[:, 'hdr'] = (labels_df.point_mutations / labels_df.trials) > ((range_to + range_from) / 2)
#labels_df.loc[:, 'hdr'] = (labels_df.point_mutations > 0)
labels_df=labels_df.reset_index().set_index(['New','Id']).sort_index()
return labels_df
def tokenize_sequence_global(seq, name):
data = {n: int(sum(1 for i in range(len(seq)) if seq.startswith(n, i))/(len(seq)-len(n)+1)*100) for n in allnucleotides}
s = pd.Series(data)
s = s.reindex(allnucleotides, axis=1)
s.index = ['{}_{}'.format(name, x) for x in s.index]
return s
def tokenize_sequence_local(seq, name):
nmers = [1, 2]
data = {'{}_{}_{:02d}'.format(name, n, i + 1): seq[i: i + n] for n in nmers for i in range(0, len(seq) - n + 1)}
return pd.Series(data)
def tokenize_sequence_local_onehot(seq, name):
tokens = nucleotides + dinucleotides
data = {f'loc{len(n)}_{i}_{n}': 1 if seq.startswith(n, i) else 0 for i in range(len(seq)) for n in tokens}
s = | pd.Series(data) | pandas.Series |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from math import ceil, floor
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
train_data = pd.read_csv('train.csv')
train_labels = train_data['ACTION']
del train_data['ACTION']
train_data.shape
# role_title and role_code give the same information
# check this
len(set(train_data['ROLE_CODE'].values))
len(set(train_data['ROLE_TITLE'].values))
# check that there is a unique title for each code
for code in set(train_data['ROLE_CODE']):
code_ind = np.equal(train_data['ROLE_CODE'], code)
titles = set(train_data[code_ind]['ROLE_TITLE'])
if len(titles) > 1:
print(f'Mismatch for code {code}')
break
print(f'Code {code} = title {titles}')
del train_data['ROLE_CODE']
train_data.shape
# if there are less than 20 members in a factor level, change the level to a value of zero
for col in range(0, train_data.shape[1]):
levels = train_data.iloc[:, col].value_counts().index.values
vals_to_change = [level for level in levels if train_data.iloc[:, col].value_counts()[level] <= 20]
n_rows = (train_data.iloc[:, col]).shape[0]
for row in range(n_rows):
print(f'Column {col}, row {row} of {n_rows}')
if train_data.iat[row, col] in vals_to_change:
train_data.iat[row, col] = 0
# encode features as categorical variables
train_categorical = pd.get_dummies(train_data, columns=train_data.columns)
train_categorical.shape
'''
# comress the data with factor analysis
from sklearn.decomposition import FactorAnalysis
compressor = FactorAnalysis(n_components=200)
train_compressed = compressor.fit_transform(train_categorical, y_train)
'''
# check class distribution
train_labels.value_counts() # data is unbalanced – don't optimize accuracy directly
# test/train split
X_train, X_test, y_train, y_test = train_test_split(train_categorical, train_labels.values, test_size=0.3)
# X_train, X_test, y_train, y_test = train_test_split(train_data, train_labels, test_size=0.3)
# duplicate data points from the minority class
# y_train.value_counts()
n_maj, n_min = list(pd.DataFrame(y_train).value_counts())
# number of times to replicate each minority class memeber
n_repeat = floor(n_maj / n_min)
# minority class indices (indices with 0)
min_ind = np.equal(y_train, 0)
X_train_aug = X_train
for _ in range(n_repeat):
X_train_aug = np.concatenate((X_train_aug, X_train[min_ind]), axis=0)
X_train_aug.shape
y_train_aug = y_train
for _ in range(n_repeat):
y_train_aug = np.concatenate((y_train_aug, y_train[min_ind]), axis=0)
# shuffle the indices
perm = np.random.permutation(range(X_train_aug.shape[0]))
X_train_aug = X_train_aug[perm, :]
y_train_aug = y_train_aug[perm]
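# Shuffling matters for the Keras model below: validation_split takes the last fraction of the
# arrays as given, so without shuffling the validation split would be dominated by the
# appended minority-class copies.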
#####################################
# fit a random forest classifier
#####################################
rf = RandomForestClassifier(verbose=1, n_jobs=4, n_estimators=300)
rf.fit(X_train_aug, y_train_aug)
# make predictions using fitted model
predictions = rf.predict(X_test)
# class balance of predictions
pd.DataFrame(predictions).value_counts()
# calculate the cost-adjusted accuracy
print(metrics.classification_report(y_test, predictions))
print(metrics.confusion_matrix(y_test, predictions))
print(metrics.roc_auc_score(y_test, predictions))
#####################################
# build a neural network to fit
#####################################
import tensorflow as tf
# X_train_aug is already a NumPy array; reshape the labels to a column vector for Keras
y_train_aug = y_train_aug.reshape((y_train_aug.shape[0], 1))
'''
X_train_nn = X_train
y_train_nn = y_train.reshape((y_train.shape[0], 1))
'''
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(100, input_shape=(X_train_aug.shape[1],), activation='relu'),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(1000, activation='relu'),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(1, activation='sigmoid')
])
'''
# logistic regression
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(1, input_shape=(X_train_aug.shape[1], ), activation='sigmoid'),
])
'''
model.summary()
model.compile(optimizer='Adam', loss='binary_crossentropy', weighted_metrics=['acc'])
history = model.fit(X_train_aug, y_train_aug, batch_size=256, epochs=100, verbose=1, validation_split=0.2,
class_weight={1: n_repeat / (n_repeat + 1), 0: 1 / n_repeat})
# make predictions
predictions_nn = [1 if y > 0.5 else 0 for y in model.predict(X_test)]
# check class balance
| pd.DataFrame(predictions_nn) | pandas.DataFrame |
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import transition
from ...utils import testing as ust
@pytest.fixture
def basic_df():
return pd.DataFrame(
{'x': range(5),
'y': range(5, 10)},
index=range(100, 105))
@pytest.fixture
def year():
return 2112
@pytest.fixture
def totals_col():
return 'total'
@pytest.fixture
def rates_col():
return 'growth_rate'
@pytest.fixture
def grow_targets(year, totals_col):
return pd.DataFrame({totals_col: [7]}, index=[year])
@pytest.fixture
def grow_targets_filters(year, totals_col):
return pd.DataFrame({'x_min': [0, 2, np.nan],
'y_max': [7, 9, np.nan],
'x': [np.nan, np.nan, 4],
totals_col: [1, 4, 10]},
index=[year, year, year])
@pytest.fixture(scope='function')
def random_df(request):
"""
Seed the numpy prng and return a data frame w/ predictable test inputs
so that the tests will have consistent results across builds.
"""
old_state = np.random.get_state()
def fin():
# tear down: reset the prng after the test to the pre-test state
np.random.set_state(old_state)
request.addfinalizer(fin)
np.random.seed(1)
return pd.DataFrame(
{'some_count': np.random.randint(1, 8, 20)},
index=range(0, 20))
@pytest.fixture
def growth_rates(rates_col, totals_col, grow_targets):
del grow_targets[totals_col]
grow_targets[rates_col] = [0.4]
return grow_targets
@pytest.fixture
def growth_rates_filters(rates_col, totals_col, grow_targets_filters):
del grow_targets_filters[totals_col]
grow_targets_filters[rates_col] = [0.5, -0.5, 0]
return grow_targets_filters
def assert_empty_index(index):
pdt.assert_index_equal(index, pd.Index([]))
def assert_for_add(new, added):
assert len(new) == 7
pdt.assert_index_equal(added, pd.Index([105, 106]))
def assert_for_remove(new, added):
assert len(new) == 3
assert_empty_index(added)
def test_add_rows(basic_df):
nrows = 2
new, added, copied = transition.add_rows(basic_df, nrows)
assert_for_add(new, added)
assert len(copied) == nrows
assert copied.isin(basic_df.index).all()
def test_add_rows_starting_index(basic_df):
nrows = 2
starting_index = 1000
new, added, copied = transition.add_rows(basic_df, nrows, starting_index)
assert len(new) == len(basic_df) + nrows
pdt.assert_index_equal(added, pd.Index([1000, 1001]))
assert len(copied) == nrows
assert copied.isin(basic_df.index).all()
def test_add_rows_zero(basic_df):
nrows = 0
new, added, copied = transition.add_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
def test_add_rows_with_accounting(random_df):
control = 10
new, added, copied = transition.add_rows(
random_df, control, accounting_column='some_count')
assert control == new.loc[copied]['some_count'].sum()
assert copied.isin(random_df.index).all()
def test_remove_rows(basic_df):
nrows = 2
new, removed_indexes = transition.remove_rows(basic_df, nrows)
assert_for_remove(new, transition._empty_index())
assert len(removed_indexes) == nrows
assert removed_indexes.isin(basic_df.index).all()
def test_remove_rows_zero(basic_df):
nrows = 0
new, removed = transition.remove_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(removed)
def test_remove_rows_all(basic_df):
nrows = len(basic_df)
new, removed = transition.remove_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df.loc[[]])
ust.assert_index_equal(removed, basic_df.index)
def test_remove_rows_with_accounting(random_df):
control = 10
new, removed = transition.remove_rows(
random_df, control, accounting_column='some_count')
assert control == random_df.loc[removed]['some_count'].sum()
assert removed.isin(random_df.index).all()
def test_remove_rows_raises(basic_df):
# should raise ValueError if asked to remove more rows than
# are in the table
nrows = 25
with pytest.raises(ValueError):
transition.remove_rows(basic_df, nrows)
def test_add_or_remove_rows_add(basic_df):
nrows = 2
new, added, copied, removed = \
transition.add_or_remove_rows(basic_df, nrows)
assert_for_add(new, added)
assert len(copied) == abs(nrows)
assert copied.isin(basic_df.index).all()
assert_empty_index(removed)
def test_add_or_remove_rows_remove(basic_df):
nrows = -2
new, added, copied, removed = \
transition.add_or_remove_rows(basic_df, nrows)
assert_for_remove(new, added)
assert len(removed) == abs(nrows)
assert removed.isin(basic_df.index).all()
assert_empty_index(copied)
def test_add_or_remove_rows_zero(basic_df):
nrows = 0
new, added, copied, removed = \
transition.add_or_remove_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
assert_empty_index(removed)
def test_grtransition_add(basic_df):
growth_rate = 0.4
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
assert_for_add(new, added)
assert len(copied) == 2
assert copied.isin(basic_df.index).all()
assert_empty_index(removed)
def test_grtransition_add_with_accounting(random_df):
growth_rate = .1
year = 2012
orig_total = random_df['some_count'].sum()
growth = int(round(orig_total * growth_rate))
target = orig_total + growth
grt = transition.GrowthRateTransition(growth_rate, 'some_count')
new, added, copied, removed = grt(random_df, year)
assert growth == new.loc[copied]['some_count'].sum()
assert target == new['some_count'].sum()
assert copied.isin(random_df.index).all()
assert_empty_index(removed)
def test_grtransition_remove(basic_df):
growth_rate = -0.4
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
assert_for_remove(new, added)
assert_empty_index(copied)
assert len(removed) == 2
assert removed.isin(basic_df.index).all()
def test_grtransition_remove_with_accounting(random_df):
growth_rate = -.1
year = 2012
orig_total = random_df['some_count'].sum()
change = -1 * int(round(orig_total * growth_rate))
target = orig_total - change
grt = transition.GrowthRateTransition(growth_rate, 'some_count')
new, added, copied, removed = grt(random_df, year)
assert change == random_df.loc[removed]['some_count'].sum()
assert target == new['some_count'].sum()
assert removed.isin(random_df.index).all()
assert_empty_index(added)
assert_empty_index(copied)
def test_grtransition_remove_all(basic_df):
growth_rate = -1
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df.loc[[]])
assert_empty_index(added)
assert_empty_index(copied)
ust.assert_index_equal(removed, basic_df.index)
def test_grtransition_zero(basic_df):
growth_rate = 0
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
assert_empty_index(removed)
def test_tgrtransition_add(basic_df, growth_rates, year, rates_col):
tgrt = transition.TabularGrowthRateTransition(growth_rates, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
assert len(new) == 7
bdf_imax = basic_df.index.values.max()
assert pd.Series([bdf_imax + 1, bdf_imax + 2]).isin(new.index).all()
assert len(copied) == 2
assert_empty_index(removed)
def test_tgrtransition_remove(basic_df, growth_rates, year, rates_col):
growth_rates[rates_col] = -0.4
tgrt = transition.TabularGrowthRateTransition(growth_rates, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
assert len(new) == 3
assert_empty_index(added)
assert_empty_index(copied)
assert len(removed) == 2
def test_tgrtransition_with_accounting(random_df):
"""
    Test segmented growth rate transitions (with an accounting
    column) using one test with mixed growth rate trends:
    declining, growing and no growth.
"""
grp1 = random_df.copy()
grp1['segment'] = 'a'
grp2 = random_df.copy()
grp2['segment'] = 'b'
grp3 = random_df.copy()
grp3['segment'] = 'c'
test_df = pd.concat([grp1, grp2, grp3], axis=0, ignore_index=True)
orig_total = random_df['some_count'].sum()
year = 2012
growth_rates = pd.DataFrame(
{
'grow_rate': [-0.1, 0.25, 0],
'segment': ['a', 'b', 'c']
},
index=[year, year, year])
tgrt = transition.TabularGrowthRateTransition(
growth_rates, 'grow_rate', 'some_count')
new, added, copied, removed = tgrt.transition(test_df, year)
added_rows = new.loc[copied]
removed_rows = test_df.loc[removed]
# test a declining segment
a_added_rows = added_rows[added_rows['segment'] == 'a']
a_removed_rows = removed_rows[removed_rows['segment'] == 'a']
a_change = int(round(orig_total * -0.1))
a_target = orig_total + a_change
assert a_change * -1 == a_removed_rows['some_count'].sum()
assert a_target == new[new['segment'] == 'a']['some_count'].sum()
assert_empty_index(a_added_rows.index)
# test a growing segment
b_added_rows = added_rows[added_rows['segment'] == 'b']
b_removed_rows = removed_rows[removed_rows['segment'] == 'b']
b_change = int(round(orig_total * 0.25))
b_target = orig_total + b_change
assert b_change == b_added_rows['some_count'].sum()
assert b_target == new[new['segment'] == 'b']['some_count'].sum()
assert_empty_index(b_removed_rows.index)
# test a no change segment
c_added_rows = added_rows[added_rows['segment'] == 'c']
c_removed_rows = removed_rows[removed_rows['segment'] == 'c']
assert orig_total == new[new['segment'] == 'c']['some_count'].sum()
assert_empty_index(c_added_rows.index)
assert_empty_index(c_removed_rows.index)
def test_tgrtransition_remove_all(basic_df, growth_rates, year, rates_col):
growth_rates[rates_col] = -1
tgrt = transition.TabularGrowthRateTransition(growth_rates, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df.loc[[]])
assert_empty_index(added)
assert_empty_index(copied)
ust.assert_index_equal(removed, basic_df.index)
def test_tgrtransition_zero(basic_df, growth_rates, year, rates_col):
growth_rates[rates_col] = 0
tgrt = transition.TabularGrowthRateTransition(growth_rates, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
assert_empty_index(removed)
def test_tgrtransition_filters(
basic_df, growth_rates_filters, year, rates_col):
tgrt = transition.TabularGrowthRateTransition(
growth_rates_filters, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
assert len(new) == 5
assert basic_df.index.values.max() + 1 in new.index
assert len(copied) == 1
assert len(removed) == 1
def test_tabular_transition_add(basic_df, grow_targets, totals_col, year):
tran = transition.TabularTotalsTransition(grow_targets, totals_col)
new, added, copied, removed = tran.transition(basic_df, year)
assert_for_add(new, added)
bdf_imax = basic_df.index.values.max()
assert | pd.Series([bdf_imax + 1, bdf_imax + 2]) | pandas.Series |
import pandas as pd
from tqdm import tqdm
import yaml
import time
import numpy as np
from collections import defaultdict
import pickle
tqdm.pandas()
class DataManager(object):
"""Data Manager class"""
def __init__(self, clickstream_data_path, article_vectors_data_path):
self.data = pd.read_csv(clickstream_data_path)
self.article_vectors = pd.read_csv(article_vectors_data_path)
self.train_data = None
self.test_data = None
def merge_article_vectors(self):
"""adds columns of new heading and content text GloVe vectors"""
def get_vector(article_id, article_vectors, vector_columns):
article_vector = article_vectors.loc[
article_vectors.loc[:, "article_id"] == article_id, vector_columns
]
return article_vector.values.reshape(1, -1)
vector_columns = [
*[f"heading_{i}" for i in range(100)],
*[f"content_{i}" for i in range(100)],
]
article_vectors_ = self.data.loc[:, "article_id"].progress_apply(
lambda article_id: get_vector(
article_id=article_id,
article_vectors=self.article_vectors,
vector_columns=vector_columns,
)
)
article_vectors_ = np.concatenate(article_vectors_)
article_vectors_ = pd.DataFrame(data=article_vectors_, columns=vector_columns)
self.data = | pd.concat(objs=[self.data, article_vectors_], axis=1) | pandas.concat |
"""
Created on June 6, 2016
@author: <NAME> (<EMAIL>)
Updated Nov 21, 2017 by <NAME> (github.com/Spenca)
"""
import csv
import os, sys, io
import re
import pandas as pd
import numpy as np
import requests
import yaml
from string import Template
from collections import OrderedDict
from datetime import date, datetime, timedelta
#===============
# Django imports
#---------------
from django.db.models import Count, Q, F
from django.http import HttpResponse
from sisyphus.models import DlpAnalysisInformation, Project
from tenx.models import TenxPool
from .models import Sample, SublibraryInformation, ChipRegion, ChipRegionMetadata, MetadataField, DoubletInformation
from dlp.models import (DlpLane, DlpSequencing, DlpLibrary)
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.core.exceptions import ValidationError
#============================
# Pipeline Status
#----------------------------
def get_sequence_date(analysis, library=None):
try:
# Is it for Library?
if library:
sequencing_set = analysis.dlpsequencing_set.all()
        # Does the Analysis have lanes? If so, retrieve the latest lane_requested_date from the sequencings related to those lanes
elif analysis.lanes.all():
sequencing_set = set(l.sequencing for l in analysis.lanes.all())
        # Else, does the Analysis have sequencings? Retrieve the latest lane_requested_date from sequencings attached directly to the analysis
elif analysis.sequencings.all():
sequencing_set = analysis.sequencings.all()
        # Else, fall back to the sequencings of the Analysis's Library
else:
sequencing_set = analysis.library.dlpsequencing_set.all()
return max([sequencing.lane_requested_date for sequencing in sequencing_set])
except:
return None
def analysis_info_dict(analysis):
lanes = analysis.lanes.count()
goal = sum(s.number_of_lanes_requested for s in analysis.sequencings.all())
submission_date = get_sequence_date(analysis)
return {
"jira": analysis.analysis_jira_ticket,
"lanes": "{}/{}".format(lanes, goal),
"version": analysis.version.version,
"run_status": analysis.analysis_run.run_status,
"aligner": "bwa-aln" if analysis.aligner is "A" else "bwa-mem",
"submission": str(submission_date) if submission_date else None,
"last_updated": str(analysis.analysis_run.last_updated.date()) if analysis.analysis_run.last_updated else None
}
def fetch_montage():
r = requests.get('https://52.235.35.201/_cat/indices', verify=False, auth=("guest", "sh<PASSWORD>!Montage")).text
return [j.replace("sc", "SC") for j in re.findall('sc-\d{4}', r)]
def analysis_to_row(analysis, basic_dict=None, incomplete=None):
if not basic_dict:
basic_dict = {"name": analysis.library.sample.sample_id, "library": analysis.library.pool_id}
return {**basic_dict, **analysis_info_dict(analysis)}
# | Validate whether a given analysis is IMPORTED or not
# | Input: Analysis
# | Ouput: Boolean
# {True if imported}
def validate_imported(analysis):
# Retrieve all lanes attached to Analysis and create a set of seqeuncings based on it
related_sequencings = set(l.sequencing for l in analysis.lanes.all())
# Check if count(lanes attached to analysis) is smaller or equal to count(lanes attached to related_sequencings)
return analysis.lanes.count() <= sum(s.dlplane_set.count() for s in related_sequencings)
# | (INCOMPLETE) Fetch Row Information related to incomplete Analyses
# | Input: None
# | Ouput: List of Objects to populate Status Page Rows
def fetch_rows_from_incomplete_analyses():
object_list = []
analyses = DlpAnalysisInformation.objects.exclude(
analysis_run__run_status__in=['complete', 'align_complete', 'hmmcopy_complete'])
for a in analyses.all():
object_list.append(analysis_to_row(a, incomplete=True))
return object_list
# | (PROJECTS) Fetch Row Information related to given a set of dlp libraries
# | Input: Set of Dlp Libraries
# | Ouput: List of Objects to populate Status Page Rows
def fetch_rows_from_libraries(libraries, wetlab=None, no_analysis=None):
object_list = []
for library in libraries:
basic_dict = {"name": library.sample.sample_id, "library": library.pool_id}
        # For each library, retrieve all attached analyses
analyses = library.dlpanalysisinformation_set.all()
if analyses and not no_analysis:
for analysis in analyses:
                # Skip analyses in the wetlab view (recently completed ones are added separately)
if not wetlab:
object_list.append((analysis_to_row(analysis, basic_dict)))
# If Library does not have any analysis, fill in NA information
else:
# if Wetlab display Sequencing lane info instead of Analysis lane info
if wetlab or no_analysis:
sequencings = library.dlpsequencing_set.all()
if sequencings:
goal = sum(l.number_of_lanes_requested for l in sequencings)
lane = sum(l.dlplane_set.count() for l in sequencings)
basic_dict = {**basic_dict, "lanes": "{}/{}".format(lane, goal) if sequencings else None}
object_list.append({**basic_dict, "submission": str(get_sequence_date(library, True))})
return object_list
# | Fetch Row Information related to given a set of sequencings
# | Input: Set of Sequencings
# | Ouput: List of Objects to populate Status Page Rows
def fetch_rows_from_sequencings(sequencings, wetlab=False):
object_list = []
for sequencing in sequencings:
object_list += fetch_rows_from_libraries([sequencing.library], wetlab=wetlab)
return object_list
# | (NO ANALYSIS) Fetch Row Information related to libraries with no analyses but correct lane numbers
# | Input: None
# | Ouput: List of Objects to populate Status Page Rows
def fetch_rows_from_no_analysis_libraries():
libraries = DlpLibrary.objects\
.annotate(lane_count=Count('dlpsequencing__dlplane'),lane_goal=Count('dlpsequencing__number_of_lanes_requested'))\
.filter(Q(dlpanalysisinformation=None)&Q(lane_count=F('lane_goal'))).all()
return fetch_rows_from_libraries(libraries, no_analysis=True)
# | (WETLAB) Fetch Row Information from sequencings with certain conditions:
# | 1. (OR) Mismatching lane count
# | 2. (AND) Lane requested within 2 months
# | 3. Additionally, hide completed analyses
# | 4. Recently COMPLETED
# | Input: None
# | Ouput: List of Objects to populate Status Page Rows
def fetch_rows_for_wetlab():
threshold = datetime.now() - timedelta(days=60)
# Unimported
sequencings = DlpSequencing.objects\
.annotate(lane_count=Count('dlplane'))\
.filter((Q(lane_count=0)|Q(lane_count__lt=F('number_of_lanes_requested')))&Q(lane_requested_date__gte=threshold))
# Recently Finished or Updated
threshold = datetime.now() - timedelta(days=14)
analyses = DlpAnalysisInformation.objects\
.filter(Q(analysis_run__run_status__in=['complete','align_complete','hmmcopy_complete'])&Q(analysis_run__last_updated__gte=threshold))
analyses_list = [{
**{
"name": a.library.sample.sample_id,
"library": a.library.pool_id
},
**analysis_info_dict(a)
} for a in analyses]
return fetch_rows_from_sequencings(sequencings, wetlab=True) + analyses_list
# | List of Status Page Row Objects
# |
# | WETLAB:
# | Populate row from all sequencings with lane !== goal && recently submitted (2 months)
# |
# | NO ANALYSIS:
# | Populate row from all libraries with sum(sequencing's requested_lane_number) == sum(sequencing's lane count),
# | but no Analysis attached.
# |
# | INCOMPLETE:
# | Populate row from all analyses with run_status not set as either one of ['complete','align_complete','hmmcopy_complete']
# |
# | PROJECTS:
# | Populate rows from set of DlpLibraries of selected Project
def fetch_row_objects(type, key=None):
type = type.strip()
if type == "PROJECTS":
return fetch_rows_from_libraries(Project.objects.get(name=key).dlplibrary_set.all())
elif type == "INCOMPLETE":
return fetch_rows_from_incomplete_analyses()
elif type == "NO ANALYSIS":
return fetch_rows_from_no_analysis_libraries()
elif type == "WETLAB":
return fetch_rows_for_wetlab()
else:
return
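# Illustrative usage (the project name is hypothetical):
#   rows = fetch_row_objects("INCOMPLETE")
#   rows = fetch_row_objects("PROJECTS", key="Some Project Name")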
#==================================================
# Upload, parse and populate Sublibrary Information
#--------------------------------------------------
def read_excel_sheets(filename, sheetnames):
""" Read the excel sheet.
"""
try:
data = pd.read_excel(filename, sheet_name=None)
except IOError:
raise ValueError('unable to find file', filename)
for sheetname in sheetnames:
if sheetname not in data:
raise ValueError('unable to read sheet(s)', sheetname)
yield data[sheetname]
def check_smartchip_row(index, smartchip_row):
row_sum = sum(smartchip_row)
single_matrix = np.identity(3)
doublet_matrix = np.identity(3) * 2
# Row does not have cells
if smartchip_row == [0, 0, 0]:
cell = None
# TODO: Clean up code; use identity matrices
# Row is singlet
elif row_sum == 1:
for row in range(len(smartchip_row)):
if np.array_equal(smartchip_row, single_matrix[row]):
cell = [row, 0]
# Row is doublet and is strictly live/dead/other
elif row_sum == 2 and len(np.where(np.array(smartchip_row) == 0)[0]) == 2:
for row in range(len(smartchip_row)):
if np.array_equal(smartchip_row, doublet_matrix[row]):
cell = [row, 1]
# Row is doublet but mixed
elif row_sum == 2 and len(np.where(np.array(smartchip_row) == 0)[0]) != 2:
cell = [2, 1]
# Greater than doublet row and row is multiple of unit vector
elif row_sum > 2 and row_sum in smartchip_row:
non_zero_index = np.where(smartchip_row != 0)
cell = [non_zero_index[0][0], 2]
else:
cell = [2, 2]
return cell
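# Illustrative classifications returned by check_smartchip_row (cell = [state, multiplicity]):
#   [1, 0, 0] -> [0, 0] (live singlet); [1, 1, 0] -> [2, 1] (mixed doublet);
#   [3, 0, 0] -> [0, 2] (live, more than doublet); [0, 0, 0] -> None (empty spot).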
def generate_doublet_info(filename):
""" Read SmartChipApp results and record doublet info
"""
col_names = ["live", "dead", "other"]
row_names = ["single", "doublet", "more_than_doublet"]
data = np.zeros((3, 3))
doublet_table = pd.DataFrame(data, columns=col_names, index=row_names, dtype=int)
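    # doublet_table rows are spot multiplicity (single/doublet/more_than_doublet) and columns
    # are cell state (live/dead/other); check_smartchip_row returns [column_index, row_index].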
results = pd.read_excel(filename, sheet_name="Summary")
results = results[results["Condition"] != "~"]
for index, row in results.iterrows():
smartchip_row = [row["Num_Live"], row["Num_Dead"], row["Num_Other"]]
override_row = [row["Rev_Live"], row["Rev_Dead"], row["Rev_Other"]]
if np.array_equal(override_row, [-1, -1, -1]):
cell = check_smartchip_row(index, smartchip_row)
else:
cell = check_smartchip_row(index, override_row)
if cell is not None:
doublet_table[col_names[cell[0]]][row_names[cell[1]]] += 1
return doublet_table
def parse_smartchipapp_results_file(filename):
""" Parse the result file of SmartChipApp.
"""
results, region_metadata = read_excel_sheets(filename, ['Summary', 'Region_Meta_Data'])
    # keep only the cells whose Spot_Well value is not NaN
results = results[~results['Spot_Well'].isnull()]
results = results.sort_values(by='Sample')
# change the column names to match the filed names of the model
results.columns = [c.lower() for c in results.columns]
region_metadata.columns = [c.lower() for c in region_metadata.columns]
# Lower case metadata field names and check if column exists in metadata fields
# region_metadata.columns = [c.lower() for c in region_metadata.columns]
for c in region_metadata.columns:
if c not in MetadataField.objects.all().values_list('field', flat=True) and c != "region":
raise ValueError('invalid metadata column: {col_name}'.format(col_name=c))
region_metadata.columns.name = 'metadata_field'
region_metadata.rename(columns={'region': 'region_code'}, inplace=True)
region_metadata = region_metadata.set_index('region_code').stack().rename('metadata_value').reset_index()
return results, region_metadata
def create_sublibrary_models(library, sublib_results, region_metadata):
""" Create sublibrary models from SmartChipApp Tables
"""
# Populate the ChipRegion and ChipRegionMetadata from the SmartChipApp results
chip_spot_region_id = {}
chip_spot_sample_id = {}
for code, metadata in region_metadata.groupby('region_code'):
chip_region = ChipRegion(region_code=code)
chip_region.library_id = library.pk
chip_region.save()
sample_id = None
for idx, row in metadata.iterrows():
row['metadata_field'] = row['metadata_field'].lower()
chip_region_metadata = ChipRegionMetadata(
metadata_field=MetadataField.objects.get(field=row['metadata_field']),
metadata_value=row['metadata_value'])
chip_region_metadata.chip_region_id = chip_region.id
chip_region_metadata.save()
if row['metadata_field'] == 'sample_id':
sample_id = row['metadata_value']
if sample_id is None:
raise ValueError('No sample id for region {}'.format(code))
try:
#Need to encode as ascii and ignore special characters, otherwise we get sample IDs like 'SA1151\xa0' instead of 'SA1151'
sample = Sample.objects.get(sample_id=sample_id.encode('ascii', 'ignore'))
except Sample.DoesNotExist:
raise ValueError('Unrecognized sample {}'.format(sample_id))
for idx, row in sublib_results[sublib_results['condition'] == code].iterrows():
chip_spot_region_id[(row['row'], row['column'])] = chip_region.id
chip_spot_sample_id[(row['row'], row['column'])] = sample
# Populate the Sublibrary from the SmartChipApp input and results
for idx, row in sublib_results.iterrows():
row = row.drop('rev_class')
sublib = SublibraryInformation(**row.to_dict())
sublib.library_id = library.pk
try:
sublib.chip_region_id = chip_spot_region_id[(row['row'], row['column'])]
sublib.sample_id = chip_spot_sample_id[(row['row'], row['column'])]
sublib.save()
except KeyError:
raise ValueError('Undefined condition in metadata at row, column: {}, {}'.format(row['row'], row['column']))
library.num_sublibraries = len(sublib_results.index)
library.save()
def create_doublet_info_model(library, doublet_info_matrix):
try:
doublet_info = DoubletInformation.objects.get(library=library)
    except DoubletInformation.DoesNotExist:
doublet_info = DoubletInformation.objects.create(library=library)
doublet_info.save()
doublet_info.live_single = doublet_info_matrix["live"]["single"]
doublet_info.dead_single = doublet_info_matrix["dead"]["single"]
doublet_info.other_single = doublet_info_matrix["other"]["single"]
doublet_info.live_doublet = doublet_info_matrix["live"]["doublet"]
doublet_info.dead_doublet = doublet_info_matrix["dead"]["doublet"]
doublet_info.other_doublet = doublet_info_matrix["other"]["doublet"]
doublet_info.live_gt_doublet = doublet_info_matrix["live"]["more_than_doublet"]
doublet_info.dead_gt_doublet = doublet_info_matrix["dead"]["more_than_doublet"]
doublet_info.other_gt_doublet = doublet_info_matrix["other"]["more_than_doublet"]
doublet_info.save()
#=================
# History manager
#-----------------
class HistoryManager(object):
"""
An api for simple_history app.
"""
@staticmethod
def print_history(object, history_type=None):
print('=' * 100)
print("Object\tID\tDate\tAction\tUser")
print('=' * 100)
if history_type is None:
histories = object.history.all()
else:
histories = object.history.filter(history_type=history_type)
for h in histories:
print("\t".join([
str(h.instance),
str(h.instance.id),
str(h.history_date),
h.get_history_type_display(),
str(h.history_user),
]))
print('-' * 100)
def generate_tenx_pool_sample_csv(id):
buffer = io.StringIO()
pool = TenxPool.objects.get(id=id)
list_of_dict = []
for library in pool.libraries.all():
index = library.tenxlibraryconstructioninformation.index_used
list_of_dict.append({"lane": "*", "sample": library.name, "index": index.split(",")[0] if index else "None"})
wr = csv.DictWriter(buffer, fieldnames=["lane", "sample", "index"])
wr.writeheader()
wr.writerows(list_of_dict)
buffer.seek(0)
response = HttpResponse(buffer, content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename={}_tenxpool_sample.csv'.format(pool.id)
return response
#======================
# Generate sample sheet
#----------------------
def generate_samplesheet(pk, wdir=None):
"""generate samplesheet for the given Sequencing."""
samplesheet = SampleSheet(pk)
sheet_name = samplesheet.sheet_name
if wdir:
ofilename = os.path.join(wdir, sheet_name)
else:
ofilename = os.path.join(settings.MEDIA_ROOT, sheet_name)
samplesheet.write_header(ofilename)
samplesheet.write_data(ofilename)
return sheet_name, os.path.abspath(ofilename)
class SampleSheet(object):
"""
Sequencing SampleSheet.
"""
def __init__(self, pk):
self._lane = get_object_or_404(DlpLane, pk=pk)
self._si = self._lane.sequencing.sequencing_instrument
self._header = os.path.join(settings.BASE_DIR, "templates/template_samplesheet_header.html")
self._colnames = [
'Sample_ID', 'Sample_Name', 'Sample_Plate', 'Sample_Well', 'I7_Index_ID', 'index', 'I5_Index_ID', 'index2',
'Sample_Project', 'Description'
]
self._rev_comp_i7 = False
self._rev_comp_i5 = False
        # All sequencing instruments other than "O" (other) need the i7 index reverse complemented
        if self._si != "O":
            self._rev_comp_i7 = True
        # Only the NextSeq (N550) and HX require the i5 index to be reverse complemented
        if self._si == "N550" or self._si == "HX":
            self._rev_comp_i5 = True
rev_comp_override = self._lane.sequencing.rev_comp_override
if rev_comp_override is not None:
self._rev_comp_i7 = ('rev(i7)' in rev_comp_override)
self._rev_comp_i5 = ('rev(i5)' in rev_comp_override)
@property
def sequencing(self):
        return self._lane.sequencing
@property
def sheet_name(self):
fc_id = self._lane.flow_cell_id
sheet_name = 'SampleSheet_%s.csv' % fc_id
return sheet_name
def write_header(self, ofilename):
"""write the header section of the sequencing SampleSheet."""
with open(self._header, 'r') as tempstr:
s = Template(tempstr.read())
d = {
'sequencing_instrument': self._lane.sequencing.get_sequencing_instrument_display(),
'submission_date': self._lane.sequencing.submission_date,
'pool_id': self._lane.sequencing.library.pool_id,
'read1_length': self._lane.sequencing.read1_length,
'read2_length': self._lane.sequencing.read2_length,
}
# Sequencing may have no SequencingDetail
try:
d['flow_cell_id'] = self._lane.flow_cell_id
        except Exception:
d['flow_cell_id'] = None
s = s.safe_substitute(**d)
        with open(ofilename, 'w') as ofile:
            ofile.write(s)
def write_data(self, ofilename):
"""write the data section of the sequencing SampleSheet."""
data_table = self._mk_data_table()
# reorder the columns
        if len(data_table.columns) != 0:
            data_table = data_table[self._colnames]
            data_table.to_csv(ofilename, mode='a', index=False)
        else:
            with open(ofilename, 'w') as ofile:
                ofile.write("ERROR")
                ofile.write("\nNo sublibrary data, cannot generate samplesheet\n")
def _mk_data_table(self):
"""make data table for data section of the samplesheet template."""
def _map_to_template(s):
d = s.to_dict()
# This is the relation between columns in the template samplesheet
# and the actual columns in df from LIMS.
# for leading 0s in samplesheet
row = str(d['row']) if d['row'] > 9 else '0' + str(d['row'])
col = str(d['column']) if d['column'] > 9 else '0' + str(d['column'])
index = d['primer_i7']
if self._rev_comp_i7:
index = _rc(index)
index2 = d['primer_i5']
if self._rev_comp_i5:
index2 = _rc(index2)
res = {
'Sample_ID':
'-'.join([
str(self._lane.sequencing.library.sample),
str(self._lane.sequencing.library.pool_id), 'R' + row, 'C' + col
]),
'Sample_Name':
'',
'Sample_Plate':
'R' + str(d['row']) + '_C' + str(d['column']),
'Sample_Well':
'R' + str(d['row']) + '_C' + str(d['img_col']),
'I7_Index_ID':
d['index_i7'],
'index':
index,
'I5_Index_ID':
d['index_i5'],
'index2':
index2,
'Description':
'CC=' + d['pick_met'] + ';' + 'EC=' + d['condition'],
}
return res
sample_project = '' #','.join(sequencing.library.projects.names())
newl = []
oldl = list(self._lane.sequencing.library.sublibraryinformation_set.values())
df = pd.DataFrame(oldl)
for d in df.apply(_map_to_template, axis=1):
d['Sample_Project'] = sample_project
newl.append(d)
return pd.DataFrame(newl)
def _rc(primer):
"reverse complement given primer string."
d = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
try:
res = ''.join([d[p] for p in primer])
    except KeyError:
raise Exception("invalid index: %s" % primer)
return res[::-1]
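# --- Hedged usage sketch (not part of the original module) ---
# _rc reverse-complements an index/primer string; characters outside A/C/G/T
# raise an exception.
def _demo_rc():
    assert _rc('AAGT') == 'ACTT'
    assert _rc('CGAT') == 'ATCG'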
#=============================
# Generate GSC submission form
#-----------------------------
def generate_gsc_form(pk, metadata):
"""generate the GSC submission form for the given library."""
gsc_form = GSCForm(pk, metadata["library_method"])
pool_df = gsc_form.meta_df
sample_df = gsc_form.data_df
header1 = gsc_form.meta_header
header2 = gsc_form.data_header
form_name = gsc_form.get_form_name(metadata["sow"])
buffer = io.BytesIO()
workbook = Submission(pool_df, sample_df, buffer)
workbook.set_column_width()
workbook.write_address_box(metadata)
workbook.write_pool_header(header1)
workbook.write_sample_header(header2)
workbook.close()
return form_name, buffer
class GSCForm(object):
"""
GSC sequencing submission form.
"""
    def __init__(self, pk, library_method):
        self._sequencing = get_object_or_404(DlpSequencing, pk=pk)
        self._library = self._sequencing.library
        self._library_method = library_method
self._libconst = self._library.dlplibraryconstructioninformation
self._libquant = self._library.dlplibraryquantificationandstorage
self._sample = self._library.sample
self._sample_addinfo = self._sample.additionalsampleinformation
self._meta_header = os.path.join(settings.BASE_DIR, "templates/template_gsc_meta_header.html")
self._data_header = os.path.join(settings.BASE_DIR, "templates/template_gsc_data_header.html")
self._meta_colnames = [
'POOL ID',
'Tube Label',
'Taxonomy ID',
'DNA Volume (uL)',
'DNA Concentration (nM)',
'Storage Medium',
'Quantification Method',
'Library Type',
            'NON Chromium / Single Cell',
'Chromium / Single Cell',
'Size Range (bp)',
'Average Size (bp)',
'Number of libraries in pool',
'Read Type',
'Read Length',
'Sequencer',
'Format for data dissemination',
'Reference genome for alignment',
'Additional comments',
]
self._data_colnames = [
'Sub-Library ID',
'Tube Label',
'Taxonomy ID',
'Anonymous Patient ID',
'Strain',
'Disease Condition/Health Status',
'Sex',
'Sample Collection Date',
'Anatomic Site',
'Anatomic Sub-Site',
'Developmental Stage',
'Tissue Type',
'Cell Type (if sorted)',
'Cell Line ID',
'Pathology/Disease Name (for diseased sample only)',
'Additional Pathology Information',
'Grade',
'Stage',
'Tumor content (%)',
'Pathology Occurrence',
'Treatment Status',
'Family Information',
'DNA Volume (uL)',
'DNA Concentration (nM)',
'Storage Medium',
'Quantification Method',
'Library Type',
'Library Construction Method',
'Size Range (bp)',
'Average Size (bp)',
'Chromium Sample Index Name', # Replaced from "Indexed? If the libraries are indexed, provide the index sequence from 5' to 3'"
'Index Read Type (select from drop down list)',
'Index Sequence', # Renamed from 'Dual Indices for LIMS Upload',
'No. of cells/IP',
'Crosslinking Method',
'Crosslinking Time',
'Sonication Time',
'Antibody Used',
'Antibody catalogue number',
'Antibody Lot number',
'Antibody Vendor',
'Amount of Antibody Used(ug)',
'Amount of Bead Used(ul)',
'Bead Type',
'Amount of Chromatin Used(ug)',
]
self._meta_df = self._get_meta_df()
self._data_df = self._get_data_df()
@property
def sequencing(self):
return self._sequencing
@property
    def meta_header(self):
        with open(self._meta_header) as f:
            return yaml.load(f, Loader=YODLoader)
@property
    def data_header(self):
        with open(self._data_header) as f:
            return yaml.load(f, Loader=YODLoader)
@property
def meta_df(self):
return self._meta_df
@property
def data_df(self):
return self._data_df
def get_form_name(self, statement_of_work):
"create the proper name for the form."
form_name = '_'.join([
'Aparicio',
statement_of_work,
'Constructed_Library-Submission',
date.today().strftime("%d%B%Y"),
self._sample.sample_id,
self._library.pool_id,
]) + '.xlsx'
return form_name
def _get_meta_df(self):
"""return a dataframe of metadata information for self._sequencing."""
# it's ordered based on the self._meta_colnames.
values = [
'_'.join([self._library.sample.sample_id, self._library.pool_id]),
' '.join([self._library.pool_id, self._library.sample.sample_id]),
self._library.sample.taxonomy_id,
self._libquant.dna_volume,
self._libquant.dna_concentration_nm,
self._libquant.storage_medium,
self._libquant.quantification_method,
self._libconst.library_type,
" ",
self._library_method,
self._libquant.size_range,
self._libquant.average_size,
self._library.num_sublibraries,
self._sequencing.get_read_type_display(),
self._sequencing.read1_length,
self._sequencing.get_sequencing_instrument_display(),
self._sequencing.format_for_data_submission,
"N/A",
"",
]
# to avoid the "ValueError: If using all scalar values,
        # you must pass an index".
data = {k: [v] for k, v in zip(self._meta_colnames, values)}
df = pd.DataFrame(data)
# reorder the columns
df = df[self._meta_colnames]
return df
def _get_data_df(self):
"""return a dataframe of sublibrary information for the given library.
NOTE: MUST use the same key values as seen in _data_colnames. """
sample_columns = {
'Taxonomy ID': self._sample.taxonomy_id,
'Anonymous Patient ID': self._sample.anonymous_patient_id,
'Strain': self._sample.strain,
'Disease Condition/Health Status': self._sample_addinfo.disease_condition_health_status,
'Sex': self._sample_addinfo.get_sex_display(),
'Anatomic Site': self._sample_addinfo.anatomic_site,
'Anatomic Sub-Site': self._sample_addinfo.anatomic_sub_site,
'Developmental Stage': self._sample_addinfo.developmental_stage,
'Tissue Type': self._sample_addinfo.get_tissue_type_display(),
'Cell Type (if sorted)': self._sample_addinfo.cell_type,
'Cell Line ID': self._sample.cell_line_id,
'Pathology/Disease Name (for diseased sample only)': self._sample_addinfo.pathology_disease_name,
'Additional Pathology Information': None,
'Grade': self._sample_addinfo.grade,
'Stage': self._sample_addinfo.stage,
'Tumor content (%)': self._sample_addinfo.tumour_content,
'Pathology Occurrence': self._sample_addinfo.get_pathology_occurrence_display(),
'Treatment Status': self._sample_addinfo.get_treatment_status_display(),
'Family Information': self._sample_addinfo.family_information,
}
library_columns = {
'Tube Label': 'NA', #self.library.library_tube_label,
'Sample Collection Date': self._library.dlplibrarysampledetail.sample_spot_date,
'DNA Volume (uL)': "", #self._libquant.dna_volume,
'DNA Concentration (nM)': "", #self._libquant.dna_concentration_nm,
'Storage Medium': "", #self._libquant.storage_medium,
'Quantification Method': "", #self._libquant.quantification_method,
'Library Type': self._libconst.library_type,
'Library Construction Method': self._library_method,
'Size Range (bp)': self._libquant.size_range,
'Average Size (bp)': self._libquant.average_size,
'Chromium Sample Index Name': "",
}
sequencing_columns = {
'Index Read Type (select from drop down list)': self._sequencing.index_read_type,
}
other_columns = {
'No. of cells/IP': None,
'Crosslinking Method': None,
'Crosslinking Time': None,
'Sonication Time': None,
'Antibody Used': None,
'Antibody catalogue number': None,
'Antibody Lot number': None,
'Antibody Vendor': None,
'Amount of Antibody Used(ug)': None,
'Amount of Bead Used(ul)': None,
'Bead Type': None,
'Amount of Chromatin Used(ug)': None,
}
res = []
sublib_set = self._library.sublibraryinformation_set.all()
dual_index = lambda sl: _rc(sl.primer_i7) + '-' + sl.primer_i5
for sl in sublib_set:
d = {
'Sub-Library ID': sl.get_sublibrary_id(),
'Index Sequence': dual_index(sl),
}
d.update(sample_columns)
d.update(library_columns)
d.update(sequencing_columns)
d.update(other_columns)
res.append(d)
        df = pd.DataFrame(res)
        # reorder the columns to match _data_colnames
        df = df[self._data_colnames]
        return df
import numpy as np
import pandas as pd
import pytest
from estimagic import second_derivative
from estimagic.parameters.block_trees import block_tree_to_hessian
from estimagic.parameters.block_trees import block_tree_to_matrix
from estimagic.parameters.block_trees import hessian_to_block_tree
from estimagic.parameters.block_trees import matrix_to_block_tree
from estimagic.parameters.tree_registry import get_registry
from numpy.testing import assert_array_equal
from pybaum import tree_equal
from pybaum import tree_just_flatten as tree_leaves
def test_matrix_to_block_tree_array_and_scalar():
t = {"a": 1.0, "b": np.arange(2)}
calculated = matrix_to_block_tree(np.arange(9).reshape(3, 3), t, t)
expected = {
"a": {"a": np.array(0), "b": np.array([1, 2])},
"b": {"a": np.array([3, 6]), "b": np.array([[4, 5], [7, 8]])},
}
assert _tree_equal_up_to_dtype(calculated, expected)
def test_matrix_to_block_tree_only_params_dfs():
tree = {
"a": pd.DataFrame(index=["a", "b"]).assign(value=[1, 2]),
"b": pd.DataFrame(index=["j", "k", "l"]).assign(value=[3, 4, 5]),
}
calculated = matrix_to_block_tree(np.arange(25).reshape(5, 5), tree, tree)
expected = {
"a": {
"a": pd.DataFrame([[0, 1], [5, 6]], columns=["a", "b"], index=["a", "b"]),
"b": pd.DataFrame(
[[2, 3, 4], [7, 8, 9]], columns=["j", "k", "l"], index=["a", "b"]
),
},
"b": {
"a": pd.DataFrame(
[[10, 11], [15, 16], [20, 21]],
index=["j", "k", "l"],
columns=["a", "b"],
),
"b": pd.DataFrame(
[[12, 13, 14], [17, 18, 19], [22, 23, 24]],
index=["j", "k", "l"],
columns=["j", "k", "l"],
),
},
}
assert _tree_equal_up_to_dtype(calculated, expected)
def test_matrix_to_block_tree_single_element():
tree1 = {"a": 0}
tree2 = {"b": 1, "c": 2}
block_tree = {"a": {"b": 0, "c": 1}}
matrix = np.array([[0, 1]])
calculated = matrix_to_block_tree(matrix, tree1, tree2)
assert tree_equal(block_tree, calculated)
# one params df (make sure we don't get a list back)
# dataframe and scalar
# tests against jax
def test_block_tree_to_matrix_array_and_scalar():
t1 = {"c": np.arange(3), "d": (2.0, 1)}
t2 = {"a": 1.0, "b": np.arange(2)}
expected = np.arange(15).reshape(5, 3)
block_tree = {
"c": {"a": np.array([0, 3, 6]), "b": np.array([[1, 2], [4, 5], [7, 8]])},
"d": (
{"a": np.array(9), "b": np.array([10, 11])},
{"a": np.array(12), "b": np.array([13, 14])},
),
}
calculated = block_tree_to_matrix(block_tree, t1, t2)
assert_array_equal(expected, calculated)
def test_block_tree_to_matrix_only_params_dfs():
expected = np.arange(25).reshape(5, 5)
tree = {
"a": pd.DataFrame(index=["a", "b"]).assign(value=[1, 2]),
"b": pd.DataFrame(index=["j", "k", "l"]).assign(value=[3, 4, 5]),
}
block_tree = {
"a": {
"a": pd.DataFrame([[0, 1], [5, 6]], columns=["a", "b"], index=["a", "b"]),
"b": pd.DataFrame(
[[2, 3, 4], [7, 8, 9]], columns=["j", "k", "l"], index=["a", "b"]
),
},
"b": {
"a": pd.DataFrame(
[[10, 11], [15, 16], [20, 21]],
index=["j", "k", "l"],
columns=["a", "b"],
),
"b": pd.DataFrame(
[[12, 13, 14], [17, 18, 19], [22, 23, 24]],
index=["j", "k", "l"],
columns=["j", "k", "l"],
),
},
}
calculated = block_tree_to_matrix(block_tree, tree, tree)
assert_array_equal(expected, calculated)
def test_block_tree_to_hessian_bijection():
params = {"a": np.arange(4), "b": [{"c": (1, 2), "d": np.array([5, 6])}]}
f_tree = {"e": np.arange(3), "f": (5, 6, [7, 8, {"g": 1.0}])}
registry = get_registry(extended=True)
n_p = len(tree_leaves(params, registry=registry))
n_f = len(tree_leaves(f_tree, registry=registry))
expected = np.arange(n_f * n_p**2).reshape(n_f, n_p, n_p)
block_hessian = hessian_to_block_tree(expected, f_tree, params)
got = block_tree_to_hessian(block_hessian, f_tree, params)
assert_array_equal(expected, got)
def test_hessian_to_block_tree_bijection():
params = {"a": np.arange(4), "b": [{"c": (1, 2), "d": np.array([5, 6])}]}
def func(params):
return {"e": params["a"] ** 3, "f": (params["b"][0]["c"][1] / 0.5)}
expected = second_derivative(func, params)["derivative"]
hessian = block_tree_to_hessian(expected, func(params), params)
got = hessian_to_block_tree(hessian, func(params), params)
    assert _tree_equal_up_to_dtype(expected, got)
def test_block_tree_to_matrix_valueerror():
# test that value error is raised when dimensions don't match
with pytest.raises(ValueError):
block_tree_to_matrix({"a": [1], "b": [2]}, {"a": 1}, (1, 2))
def _tree_equal_up_to_dtype(left, right):
# does not compare dtypes for pandas.DataFrame
return tree_equal(left, right, equality_checkers={pd.DataFrame: _frame_equal})
def _frame_equal(left, right):
    try:
        pd.testing.assert_frame_equal(left, right, check_dtype=False)
        return True
    except AssertionError:
        return False
import numpy as np
import cv2
import csv
import os
import pandas as pd
import time
def calcuNearestPtsDis2(ptList1):
''' Find the nearest point of each point in ptList1 & return the mean min_distance
Parameters
----------
ptList1: numpy array
points' array, shape:(x,2)
Return
----------
mean_Dis: float
the mean value of the minimum distances
'''
if len(ptList1)<=1:
print('error!')
return 'error'
minDis_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
ptList2 = np.delete(ptList1,i,axis=0)
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1).astype(np.float32) )
minDis = disMat.min()
minDis_list.append(minDis)
minDisArr = np.array(minDis_list)
mean_Dis = np.mean(minDisArr)
return mean_Dis
def calcuNearestPtsDis(ptList1, ptList2):
''' Find the nearest point of each point in ptList1 from ptList2
& return the mean min_distance
Parameters
----------
ptList1: numpy array
points' array, shape:(x,2)
ptList2: numpy array
points' array, shape:(x,2)
Return
----------
mean_Dis: float
the mean value of the minimum distances
'''
if (not len(ptList2)) or (not len(ptList1)):
print('error!')
return 'error'
minDis_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1).astype(np.float32) )
minDis = disMat.min()
minDis_list.append(minDis)
minDisArr = np.array(minDis_list)
mean_Dis = np.mean(minDisArr)
return mean_Dis
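# --- Hedged usage sketch (not part of the original script) ---
# Tiny synthetic point sets whose nearest-neighbour distances are easy to
# verify by hand (3-4-5 style coordinates).
def _demo_nearest_distances():
    pts_a = np.array([[0, 0], [3, 0], [0, 4]])
    pts_b = np.array([[1, 0], [0, 6]])
    # within pts_a the minimum distances are 3, 3 and 4 -> mean 10/3
    assert np.isclose(calcuNearestPtsDis2(pts_a), (3 + 3 + 4) / 3)
    # from each point of pts_a to pts_b the minima are 1, 2 and 2 -> mean 5/3
    assert np.isclose(calcuNearestPtsDis(pts_a, pts_b), (1 + 2 + 2) / 3)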
def calcuNearestPts(csvName1, csvName2):
ptList1_csv = pd.read_csv(csvName1,usecols=['x_cord', 'y_cord'])
ptList2_csv = pd.read_csv(csvName2,usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
ptList2 = ptList2_csv.values[:,:2]
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
    ptList1_csv = pd.concat([ptList1_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
ptList1_csv.to_csv(csvName1,index=False)
return minDisInd
def drawDisPic(picInd):
picName = 'patients_dataset/image/'+ picInd +'.png'
img = cv2.imread(picName)
csvName1='patients_dataset/data_csv/'+picInd+'other_tumour_pts.csv'
csvName2='patients_dataset/data_csv/'+picInd+'other_lymph_pts.csv'
ptList1_csv = pd.read_csv(csvName1)
ptList2_csv = pd.read_csv(csvName2)
ptList1 = ptList1_csv.values
ptList2 = ptList2_csv.values
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 3 , (0, 0, 255), -1 )
img = cv2.line(img, tuple(ptList1[i,:2]) , tuple(ptList2[ ptList1[i,2] ,:2]), (0,255,0), 1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 3 , (255, 0, 0), -1 )
cv2.imwrite( picInd+'_dis.png',img)
def drawDistancePic(disName1, disName2, picID):
''' Draw & save the distance pics
Parameters
----------
disName1,disName2: str
such as 'positive_lymph', 'all_tumour'
picID: str
the patient's ID
'''
cellName_color = {'other_lymph': (255, 0, 0), 'positive_lymph': (255, 255, 0),
'other_tumour': (0, 0, 255), 'positive_tumour': (0, 255, 0)}
ptline_color = {'positive_lymph': (0,0,255), 'positive_tumour': (0,0,255),
'ptumour_plymph': (51, 97, 235), 'other_tumour': (0, 255, 0)}
if (disName1 == 'all_tumour' and disName2 == 'all_lymph') or (disName1 == 'all_tumour' and disName2 == 'positive_lymph'):
line_color = (0,255,255)
elif disName1 == 'positive_tumour' and disName2 == 'positive_lymph':
line_color = (51, 97, 235)
else:
line_color = ptline_color[disName1]
csv_dir = '/data/Datasets/MediImgExp/data_csv'
img_dir = '/data/Datasets/MediImgExp/image'
if disName1 == 'all_tumour' and disName2 == 'positive_lymph':
dis1_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + 'other_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis3_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_lymph' + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
ptList2 = dis2_csv.values[:,:2]
ptList3 = dis3_csv.values[:,:2]
# positive tumour: find the nearest lymph cell
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList3)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
# other tumour: find the nearest lymph cell
minDisInd_list = []
for i in range(len(ptList2)):
currentPt = ptList2[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList3)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
        dis2_csv = pd.concat([dis2_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)