prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90)
---|---|---
# Copyright 2020 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Utilities for fetching and munging public data and forecasts."""
import numpy as np
import pandas as pd
import scipy
import xarray as xr
from metis import util
# By default, these will be populated from covid19-open-data. Overwrite these
# module variables if you want to use your own.
DEMOGRAPHICS = None
EPIDEMIOLOGY = None
INDEX = None
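# Example (illustrative; the module name `data` and the file paths are assumptions):
# to use locally cached copies instead of fetching from covid19-open-data, overwrite
# these module variables before calling the loaders below, e.g.
#   import data
#   data.DEMOGRAPHICS = pd.read_csv('my_demographics.csv', index_col=0)
#   data.INDEX = pd.read_csv('my_index.csv', index_col=0)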
def demographics():
global DEMOGRAPHICS
if DEMOGRAPHICS is None:
DEMOGRAPHICS = pd.read_csv(
'https://storage.googleapis.com/covid19-open-data/v2/demographics.csv',
index_col=0)
return DEMOGRAPHICS
def index():
global INDEX
if INDEX is None:
INDEX = pd.read_csv(
'https://storage.googleapis.com/covid19-open-data/v2/index.csv',
index_col=0)
return INDEX
def epidemiology():
global EPIDEMIOLOGY
if EPIDEMIOLOGY is None:
EPIDEMIOLOGY = pd.read_csv(
'https://storage.googleapis.com/covid19-open-data/v2/epidemiology.csv')
EPIDEMIOLOGY.rename(columns=dict(key='location', date='time'), inplace=True)
EPIDEMIOLOGY['time'] = pd.to_datetime(EPIDEMIOLOGY['time'])
return EPIDEMIOLOGY
def us_county_data():
"""An opencovid_key-indexed dataframe of county data."""
keys = demographics().index.values.astype(str)
us_keys = keys[np.char.startswith(keys, 'US_')]
key_fips = []
for key in us_keys:
us_state_fips = key.split('_')
if len(us_state_fips) == 3:
key_fips.append((key, us_state_fips[2]))
keys, fips = zip(*key_fips)
us_counties = pd.Index(keys, name='location')
county_df = pd.DataFrame(index=us_counties)
county_df['fips'] = fips
county_df['population'] = demographics().loc[us_counties, 'population']
county_df['subregion1_name'] = index().loc[us_counties, 'subregion1_name']
county_df['subregion2_name'] = index().loc[us_counties, 'subregion2_name']
return county_df
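# Note: the returned frame is indexed by opencovid keys of the form
# 'US_<state>_<fips>'; fetch_cdc_forecast below uses it to map CDC forecast FIPS
# codes back to opencovid keys and to pull county populations.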
def quantiles_to_samples(pred, num_samples):
sample = np.linspace(0, 1, num_samples + 2)[1:-1]
sample = xr.DataArray(
sample, dims=('sample',), coords=(np.arange(len(sample)),))
weights = util.quantile_conversion_weights(pred['quantile'].values,
sample.values)
weights = xr.DataArray(weights, coords=(pred['quantile'], sample['sample']))
# Bound forecast incidence below by 0.
return np.maximum(0.0, np.exp(np.log(pred + 1).dot(weights)) - 1)
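# Illustrative usage (the dimension names besides "quantile" are assumptions):
# given a forecast DataArray `pred` with dims ('quantile', 'location', 'time'),
#   samples = quantiles_to_samples(pred, num_samples=100)
# replaces the "quantile" dimension with a "sample" dimension of 100 uniformly
# spaced quantiles, interpolating in log(1 + x) space and clipping below at 0.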
def fetch_cdc_forecast(model, date_stamp, end_date=None, num_samples=None):
"""Returns forecast incidence of the given model from the given date.
Forecasts are converted from the quantiles given to num_samples uniformly
spaced quantiles. New case forecasts are converted to incidence forecasts
using population data from opencovid. Forecasts are constant-extrapolated
through end_date.
Args:
model: string, which model forecast to fetch (e.g. 'COVIDhub-ensemble')
date_stamp: string, which date-stamped forecast to use (e.g. '2020-10-12')
end_date: how far to constant-extrapolate. If omitted, no extrapolation is
done. Must be castable to np.datetime64.
num_samples: number of uniformly-spaced quantiles to return in the sample
dimension. If omitted, the original quantiles are returned with a
"quantile" dimension instead of a "sample" dimension.
Returns:
An xr.DataArray of shape [sample, location, time], the forecast incidence
from the model.
"""
url = (f'https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/'
f'master/data-processed/{model}/{date_stamp}-{model}.csv')
df = pd.read_csv(url)
county_df = us_county_data()
# Restrict to counties
df = df[df.location.isin(county_df.fips)]
fips_to_key = county_df.reset_index().set_index('fips')['location']
df.loc[:, 'location'] = fips_to_key.loc[df.location].values
# Restrict to the quantiles, discarding point predictions.
df = df.query('type == "quantile"')
df = df.rename(columns=dict(target_end_date='time'))
df.loc[:, 'time'] = | pd.to_datetime(df.time) | pandas.to_datetime |
""""""
__author__ = "<NAME>"
__copyright__ = "WeatherBrain"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import pandas
def load_temperature_raw():
"""This methid loads the raw temperature data from
text files and recompiled the data into a dataframe.
:return: Dataframe with temperature data.
"""
# with open(r'Data\\Raw Data\\Temperature 1756-1858.txt') as f:
# data = f.readlines()
# with open(r'Data\\Raw Data\\Temperature 1859-1960.txt') as f:
# data += f.readlines()
# with open(r'Data\\Raw Data\\Temperature 1961-2012.txt') as f:
# data += f.readlines()
# with open(r'Data\\Raw Data\\Temperature 2013-2017.txt') as f:
# data += f.readlines()
with open(r'..\\Data\\Raw Data\\Non-homogenized SLP series in hPa.txt') as f:
data = f.readlines()
with open(r'..\\Data\\Raw Data\\2013-2017, hPa, automatic station.txt') as f:
data += f.readlines()
data_length = len(data)
result = []
for data_index, row in enumerate(data):
row = row.replace('\n', '').split(' ')
row = [x for x in row if x != '']
date = pandas.to_datetime(row[0] + '-' + row[1] + '-' + row[2])
row[2] = date
row = row[2:]
for row_index in range(1, len(row)):
row[row_index] = float(row[row_index])
while len(row) < 4:
row.append(None)
data = [[row[0], row[1], row[2], row[3]]]
result.append(
| pandas.DataFrame(data, columns=['date', 'hPa_one', 'hPa_two', 'hPa_three']) | pandas.DataFrame |
'''
This file includes all the locally differentially private mechanisms we designed for the SIGMOD work.
I am aware that this code could be cleaned up a bit and that there is some redundancy, but the redundancy keeps the code plug-and-play.
I can simply copy a class and use it in a different context.
http://dimacs.rutgers.edu/~graham/pubs/papers/sigmod18.pdf
'''
import numpy as np
import itertools
from scipy.linalg import hadamard
import pandas as pd
import xxhash
import sys
import random
#np.seterr(all='raise')
BIG_PRIME = 9223372036854775783
def rr2 (bit,bern):
if bern:
return bit
return -bit
def pop_probmat(prob,sz):
probmat =np.zeros((sz,sz))
d = np.log2(sz)
for i in range(0,sz):
for j in range(0,sz):
perturbed = count_1(np.bitwise_xor(i,j))
#print i,bin(i),j,bin(j) ,bin(np.bitwise_xor(i,j)),perturbed
probmat[i][j] = np.power(1.0-prob,perturbed) * np.power(prob,d-perturbed)
return probmat
def mps (num,bern,rnum):
if bern:
return num
return rnum
def L1(a,b):
a = np.abs(a)
b= np.abs(b)
return round(np.abs(a-b).sum(),4)
def count_1(num):
cnt =0
while num !=0:
num = np.bitwise_and(num,num-1)
cnt+=1
return cnt
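# For example, count_1(0b1011) == 3: the np.bitwise_and(num, num - 1) step clears
# the lowest set bit on each iteration, so the loop runs once per set bit.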
def random_number():
return random.randrange(1, BIG_PRIME - 1)
def compute_marg(misc_vars
,irr_estimate
,ips_estimate
,iht_pert_ns_estimate
,iolh_estimate
,mps_pert_dict
,mrr_pert_dict
,mht_pert_dict
,icms_estimate
,icmsht_estimate
):
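"""Reconstructs every k-way marginal from each mechanism's estimate, compares it
against the corresponding marginal of the true input distribution, and returns
the mean L1 error of each mechanism averaged over all k-way marginals."""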
### These lists store L1 error for each k way marginal.
irr_l1_array = []
iht_l1_array = []
ips_l1_array =[]
iolh_l1_array =[]
icms_l1_array = []
icmsht_l1_array = []
mps_l1_array= []
mrr_l1_array=[]
mht_l1_array = []
s = misc_vars.allsubsetsint.shape[0]
temp_array2= np.zeros(s)
input_dist_margs = np.zeros(np.power(2,misc_vars.d))
marg_from_irr = np.zeros(np.power(2,misc_vars.d))
marg_from_iht = np.zeros(s)
marg_from_ips = np.zeros(np.power(2,misc_vars.d))
marg_from_iolh = np.zeros(np.power(2,misc_vars.d))
marg_from_icms = np.zeros(np.power(2,misc_vars.d))
marg_from_icmsht = np.zeros(np.power(2,misc_vars.d))
all_cords = np.array(range(0, np.power(2,misc_vars.d)))
temp_array = np.zeros(np.power(2, misc_vars.d))
### We now evaluate each marginal using the method described in Barak et al's paper.
for beta in misc_vars.allsubsetsint:
if count_1(beta) != misc_vars.k:
continue
alphas=misc_vars.alphas_cache[beta]["alphas"]
gammas = alphas
marg_from_irr.fill(0.0)
marg_from_ips.fill(0.0)
marg_from_iht.fill(0.0)
marg_from_iolh.fill(0.0)
marg_from_icms.fill(0.0)
marg_from_icmsht.fill(0.0)
input_dist_margs.fill(0.0)
real_indices = []
for alpha in alphas:
temp_array.fill(0.0)
temp_array2.fill(0.0)
try:
f_alpha = misc_vars.f[alpha]
except:
f_alpha = np.zeros(np.power(2,misc_vars.d))
for i in all_cords:
f_alpha[i] = np.power(-1.0, count_1(np.bitwise_and(alpha, i)))
misc_vars.f[alpha] = f_alpha
for gamma in gammas:
temp_array[gamma]+=misc_vars.f[alpha][gamma]
temp_array2[misc_vars.coef_dict[gamma]] +=np.power(-1.0,count_1(np.bitwise_and(gamma,alpha)))
try:
input_dist_margs += (temp_array * misc_vars.f[alpha].dot(misc_vars.input_dist))
marg_from_irr += (temp_array * misc_vars.f[alpha].dot(irr_estimate))
marg_from_ips += (temp_array * misc_vars.f[alpha].dot(ips_estimate))
marg_from_icms += (temp_array * misc_vars.f[alpha].dot(icms_estimate))
marg_from_icmsht += (temp_array * misc_vars.f[alpha].dot(icmsht_estimate))
marg_from_iolh += (temp_array * misc_vars.f[alpha].dot(iolh_estimate))
except:
print ("Unexpected error:", sys.exc_info())
marg_from_iht += (temp_array2 * iht_pert_ns_estimate[misc_vars.coef_dict[alpha]])
real_indices.append(misc_vars.coef_dict[alpha])
### input######
m_inp = np.abs(np.take(input_dist_margs,gammas)) ## Extracting counts from marginal indices specified by "gammas".
m_inp/=m_inp.sum()
#### INPUT_HT #############
m_inp_ht = np.abs(np.take(marg_from_iht,real_indices)) ## Extracting counts from marginal indices specified by "gammas".
m_inp_ht/=m_inp_ht.sum()
iht_l1_array.append(L1(m_inp_ht,m_inp))
######## INPUT_PS ###########
ips_marg = np.abs(np.take(marg_from_ips,gammas)) ## Extracting counts from marginal indices specified by "gammas".
ips_marg/=ips_marg.sum()
ips_l1_array.append(L1(ips_marg,m_inp))
######## INPUT_RR ##########
m_irr = np.abs(np.take(marg_from_irr, gammas)) ## Extracting counts from marginal indices specified by "gammas".
m_irr /= m_irr.sum()
irr_l1_array.append(L1(m_irr,m_inp))
######### INPUT_OLH ##########
try:
m_iolh = np.abs(np.take(marg_from_iolh,gammas)) ## Extracting counts from marginal indices specified by "gammas".
m_iolh/=m_iolh.sum()
iolh_l1_array.append(L1(m_iolh,m_inp))
except:
## in case we drop INPUT_OLH from execution.
#print ("Unexpected error:", sys.exc_info())
iolh_l1_array.append(0.0)
try:
icms_marg = np.abs(np.take(marg_from_icms,gammas)) ## Extracting counts from marginal indices specified by "gammas".
icms_marg/=icms_marg.sum()
icms_l1_array.append(L1(icms_marg,m_inp))
except:
# in case we drop INPUT_CMS from execution.
#print ("Unexpected error:", sys.exc_info())
icms_l1_array.append(0.0)
try:
icmsht_marg = np.abs(np.take(marg_from_icmsht,gammas)) ## Extracting counts from marginal indices specified by "gammas".
icmsht_marg/=icmsht_marg.sum()
icmsht_l1_array.append(L1(icmsht_marg,m_inp))
except:
# in case we drop INPUT_HTCMS from execution.
#print (icms_marg)
#print ("Unexpected error:", sys.exc_info())
icmsht_l1_array.append(0.0)
######### MARG_RR ###############
mrr_l1_array.append(L1(m_inp,mrr_pert_dict[np.binary_repr(beta,width=misc_vars.d)[::-1]]))
#print (m_inp)
######### MARG_HT #####################
mht_l1_array.append(L1(mht_pert_dict[np.binary_repr(beta,width=misc_vars.d)[::-1]],m_inp))
########## MARG_PS #####################
mps_l1_array.append(L1(mps_pert_dict[np.binary_repr(beta, width=misc_vars.d)[::-1]], m_inp))
irr_l1 = np.array(irr_l1_array).mean(axis=0)
ips_l1 = np.array(ips_l1_array).mean(axis=0)
iht_l1 = np.array(iht_l1_array).mean(axis=0)
iolh_l1 = np.array(iolh_l1_array).mean(axis=0)
icms_l1 = np.array(icms_l1_array).mean(axis=0)
icmsht_l1 = np.array(icmsht_l1_array).mean(axis=0)
mrr_l1 = np.array(mrr_l1_array).mean(axis=0)
mps_l1 = np.array(mps_l1_array).mean(axis=0)
mht_l1 = np.array(mht_l1_array).mean(axis=0)
#print (irr_l1_array,mrr_l1,iht_l1_array,mht_l1,ips_l1,mps_l1,iolh_l1_array,icms_l1_array,icmsht_l1_array)
return (irr_l1,mrr_l1,iht_l1, mht_l1, ips_l1, mps_l1, iolh_l1, icms_l1, icmsht_l1)
class INPUT_RR(object):
def perturb2(self):
return
def perturb(self,index_of_1,p):
i = 0
while i < self.sz:
item = 0.0
if i == index_of_1:
item = 1.0
if self.bern_irr[p][i]:
self.irr[i] += item
else:
self.irr[i] += (1.0 - item)
i += 1
## It is possible to simulate InputRR using Binomial distributions. We
## use this simulation so that the experiments complete quickly.
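## (Sketch of the equivalence: for cell i with true count n_i out of N users, the
## number of users whose randomized bit for cell i equals 1 is the sum of two
## Binomial draws -- one over the n_i users holding a 1 and one over the remaining
## N - n_i users -- which is what correction2/correction3 draw directly instead of
## flipping a coin per user per cell.)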
def correction2(self,miscvar):
i=0
irr2 = np.zeros(self.sz)
while i < self.sz:
irr2[i] = np.random.binomial(miscvar.input_dist[i],0.5,size=1)[0] +\
np.random.binomial(self.population- miscvar.input_dist[i],1.0-self.prob,size=1)[0]
irr2[i]/=self.population
irr2[i] = (self.irr[i] + self.prob - 1.0) / (2.0 * self.prob - 1.0)
i+=1
np.copyto(self.irr,irr2)
#print (irr2)
## just repeat reconstruction of each index to reduce variance.
def correction3(self,miscvar):
i=0
while i <self.sz:
j=0
while j<5:
self.irr[i] += (np.random.binomial(miscvar.input_dist[i],0.5,size=1)[0] +\
np.random.binomial(self.population- miscvar.input_dist[i],self.prob,size=1)[0])
j+=1
self.irr[i]/=(5.0*self.population)
self.irr[i] = (self.irr[i]-self.prob) / (0.5 -self.prob);
#self.irr[i] = (self.irr[i] + self.prob - 1.0) / (2.0 * self.prob - 1.0)
i+=1
#print (self.irr)
def correction(self):
self.irr/=self.population
#print (self.irr)
for i in range(0,self.sz):
self.irr[i] = (self.irr[i]+self.prob-1.0)/(2.0*self.prob-1.0)
#self.irr/=self.irr.sum()
#print (self.irr.round(4))
def __init__(self,e_eps,d,population):
self.population=population
self.d = d
self.sz = np.power(2, self.d)
self.eps = np.log(e_eps)
self.e_eps = np.power(np.e,(self.eps/2.0))
self.prob = self.e_eps/(1.0+self.e_eps)
#print (self.prob,"input-RR")
self.problist = [self.prob,1.0-self.prob]
#self.bern_irr = np.random.choice([True,False], size=self.sz * self.population, p=self.problist).reshape(self.population, self.sz)
#self.sample_index = np.random.choice(range(0, self.sz), size=self.population)
self.irr = np.zeros(np.power(2,self.d))
class MARG_RR(object):
def perturb(self,index_of_1,p,rand_quests):
i = 0
if not rand_quests in self.marg_dict:
self.marg_dict[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] = 0.0
self.marg_freq[rand_quests] += 1.0
while i < self.sz:
item = 0.0
if i == index_of_1:
item = 1.0
if self.bern[p][i]:
self.marg_dict[rand_quests][i] += item
else:
self.marg_dict[rand_quests][i] += (1.0 - item)
i += 1
def perturb2(self,index_of_1,p,rand_quests):
i = 0
if not rand_quests in self.marg_dict:
self.marg_dict[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] = 0.0
self.marg_freq[rand_quests] += 1.0
while i < self.sz:
item = 0.0
b = self.bern_q
if i == index_of_1:
item = 1.0
b = self.bern_p
if b[p][i]:
self.marg_dict[rand_quests][i] += item
else:
self.marg_dict[rand_quests][i] += (1.0 - item)
i += 1
def perturb3(self,index_of_1,p,rand_quests):
try:
self.marg_freq[rand_quests] += 1.0
self.true_marg[rand_quests][index_of_1]+= 1.0
except:
self.marg_dict[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] = 0.0
self.true_marg[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] += 1.0
self.true_marg[rand_quests][index_of_1]+= 1.0
def correction(self):
#print ("--------------------------------")
for marg in self.marg_dict:
self.marg_dict[marg] /= self.marg_freq[marg]
for i in range(0,self.sz):
self.marg_dict[marg][i] = (self.marg_dict[marg][i]+self.prob-1.0)/(2.0*self.prob-1.0)
self.marg_dict[marg]/=self.marg_dict[marg].sum()
def correction2(self):
for marg in self.marg_dict:
#print ("--------------------------------")
self.marg_dict[marg] /= self.marg_freq[marg]
for i in range(0,self.sz):
#self.marg_dict[marg][i] = (self.marg_dict[marg][i]+self.prob-1.0)/(2.0*self.prob-1.0)
self.marg_dict[marg][i] = (self.marg_dict[marg][i]-(self.prob)) / (0.5 -(self.prob))
self.marg_dict[marg]/=self.marg_dict[marg].sum()
def correction3(self):
for marg in self.marg_dict:
#self.marg_dict[marg] /= self.marg_freq[marg]
i=0
#print (self.marg_dict[marg])
total = self.marg_freq[marg]
while i <self.sz:
j=0
while j <5:
self.marg_dict[marg][i] += (np.random.binomial(self.true_marg[marg][i],0.5,size=1)[0] +\
np.random.binomial(self.marg_freq[marg]- self.true_marg[marg][i],self.prob,size=1)[0])
j+=1
self.marg_dict[marg][i] /= (5.0*total)
#self.marg_dict[marg][i] = (self.marg_dict[marg][i]+self.prob-1.0)/(2.0*self.prob-1.0)
self.marg_dict[marg][i] = (self.marg_dict[marg][i]-(self.prob)) / (0.5 -(self.prob))
i+=1
self.marg_dict[marg]/=self.marg_dict[marg].sum()
def __init__(self,d,k,e_eps,population,k_way):
self.d = d
self.k = k
self.population= population
self.k_way = k_way
self.sz = np.power(2,self.k)
self.eps = np.log(e_eps)
self.e_eps = np.power(np.e,self.eps/2.0)
self.prob = self.e_eps / (1.0+self.e_eps)
#print (self.prob,"marg-RR")
self.problist = [self.prob,1.0-self.prob]
self.k_way_marg_ps = np.random.choice(self.k_way,size=self.population)
self.bern = np.random.choice([True, False], size=self.sz * self.population, p=self.problist).reshape(self.population, self.sz)
self.bern_p = np.random.choice([True, False], size=self.sz * self.population).reshape(self.population, self.sz)
self.bern_q = np.random.choice([True, False], size=self.sz * self.population, p=self.problist[::-1]).reshape(self.population, self.sz)
self.marg_dict = {}
self.marg_freq={}
self.true_marg={}
class MARG_HT(object):
def perturb(self,index_of_1,p,rand_quests):
if not rand_quests in self.marg_dict:
self.marg_dict[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] = np.zeros(self.sz)
cf =self.rand_coef[p]
self.marg_freq[rand_quests][cf] += 1.0
htc = self.f[index_of_1][cf]
if self.bern[p]:
self.marg_dict[rand_quests][cf] += htc
else:
self.marg_dict[rand_quests][cf] += -htc
def correction(self):
for rm in self.marg_dict:
self.marg_freq[rm][self.marg_freq[rm] == 0.0] = 1.0
self.marg_dict[rm]/=self.marg_freq[rm]
self.marg_dict[rm]/=(2.0*self.prob-1.0)
self.marg_dict[rm][0]=1.0
#print ("-------------------")
#print (self.marg_dict[rm])
self.marg_dict[rm]= np.abs(self.marg_dict[rm].dot(self.f))
self.marg_dict[rm]/=self.marg_dict[rm].sum()
#print (self.marg_dict[rm].round(4))
def pop_probmat(self):
probmat =np.zeros((self.sz,self.sz))
for i in range(0,self.sz):
for j in range(0,self.sz):
if i ==j:
probmat[i][j]= self.prob
else:
probmat[i][j]= (1.0-self.prob)/(self.sz-1.0)
return probmat
def compute_all_marginals(self):
for marg_int in self.k_way:
self.correct_noise_mps(marg_int)
def __init__(self,d,k,e_eps,population,k_way,cls):
self.d = d
self.k = k
self.population= population
self.sz = np.power(2,self.k)
self.e_eps = e_eps
self.f = hadamard(self.sz).astype("float64")
self.prob = (self.e_eps/(1.0+self.e_eps))
self.problist = [self.prob,1.0-self.prob]
self.coef_dist = np.zeros(cls)
self.k_way = k_way
self.k_way_marg_ps = np.random.choice(self.k_way,size=self.population)
self.rand_coef= np.random.choice(range(0,self.sz),size=population)
self.bern = np.random.choice([True, False], size= self.population, p=self.problist)#.reshape(self.population, self.sz)
self.marg_freq = {}
self.marg_dict = {}
self.marg_noisy = np.zeros(self.sz)
class MARG_PS(object):
def perturb(self,index_of_1,p,rand_quests):
try:
freq = self.rand_cache[index_of_1]["freq"]
except:
i = 0
while i < self.sz:
options = list(range(0, self.sz))
options.remove(i)
self.rand_cache[i] = {"rnum": np.random.choice(np.array(options), size=10000), "freq": 0}
i += 1
freq = self.rand_cache[index_of_1]["freq"]
if freq > 9990:
options = list(range(0, self.sz))
options.remove(index_of_1)
self.rand_cache[index_of_1]["rnum"] = np.random.choice(np.array(options), size=10000)
self.rand_cache[index_of_1]["freq"] = 0
rnum = self.rand_cache[index_of_1]["rnum"][freq]
try:
self.marg_ps_pert_aggr[rand_quests].append(mps(index_of_1, self.bern[p], rnum))
except:
self.marg_ps_pert_aggr[rand_quests] = [mps(index_of_1, self.bern[p], rnum)]
self.rand_cache[index_of_1]["freq"] += 1
def correct_noise_mps(self,marg_int):
self.marg_int=marg_int
self.marg_ps_noisy.fill(0.0)
if type(self.marg_ps_pert_aggr[marg_int]) != "numpy.ndarray":
for rm in self.marg_ps_pert_aggr:
self.marg_ps_pert_aggr[rm] = np.array(self.marg_ps_pert_aggr[rm])
#print (self.marg_ps_pert_aggr.keys())
for index in self.marg_ps_pert_aggr[marg_int]:
self.marg_ps_noisy[index]+=1.0
self.marg_ps_noisy/=self.marg_ps_noisy.sum()
#marg_ps_recon = np.copy(marg_noisy)
self.marg_ps_recon = self.mat_inv.dot(self.marg_ps_noisy)
self.marg_ps_recon/=self.marg_ps_recon.sum()
#print (self.marg_ps_recon.round(4))
return self.marg_ps_recon
def pop_probmat(self):
probmat =np.zeros((self.sz,self.sz))
for i in range(0,self.sz):
for j in range(0,self.sz):
if i ==j:
probmat[i][j]= self.prob
else:
probmat[i][j]= (1.0-self.prob)/(self.sz-1.0)
return probmat
def compute_all_marginals(self):
for marg_int in self.k_way:
self.marg_dict[marg_int]=self.correct_noise_mps(marg_int)
def __init__(self,d,k,e_eps,population,k_way):
self.d = d
self.k = k
self.population= population
self.k_way = k_way
self.sz = np.power(2,self.k)
#self.data = data
self.e_eps = e_eps
self.prob = (self.e_eps/(self.e_eps+self.sz-1.0))
#print self.prob,"marg-ps"
self.probmat = self.pop_probmat()
self.problist = [self.prob,1.0-self.prob]
self.mat = self.pop_probmat()
self.mat_inv = np.linalg.inv(self.mat)
self.k_way_marg_ps = np.random.choice(self.k_way,size=self.population)
self.bern = np.random.choice([True, False], p=self.problist, size=self.population)
self.marg_ps_pert_aggr = {}
self.rand_cache = {}
self.marg_int = None
self.marg_ps_noisy = np.zeros(self.sz)
self.marg_dict = {}
## From <NAME> al's USENIX paper.
## https://www.usenix.org/system/files/conference/usenixsecurity17/sec17-wang-tianhao.pdf
## This algorithm does well for high-order marginals but doesn't outperform INPUT_HT
## for small k's, i.e. 2 and 3, the ones that are the most interesting.
## The gain in accuracy is traded for computational cost: the encoding (or decoding) cost is O(dN).
class INPUT_OLH(object):
def __init__(self,e_eps, d, population,g=1):
self.d = d
self.population= population
self.sz = int(np.power(2,self.d))
#self.data = data
self.e_eps = e_eps
if g == 1:
self.g = int(np.ceil(e_eps+1.0))
else:
self.g = g
#print (self.g)
self.prob = (self.e_eps/(self.e_eps+self.g-1.0))
self.problist = [self.prob,1.0-self.prob]
self.bern_ps = np.random.choice([False,True], size=self.population, p=self.problist)
self.uni_dist = np.random.choice(range(self.g),size=self.population).astype("int32")
#self.hash_cache = np.array( map(str,range(self.sz)),dtype="str") ## works with Python2
self.hash_cache = np.array(range(self.sz),dtype="str")
#self.hashed_pdist = np.zeros(self.population)
self.estimate = np.zeros(self.sz)
def perturb(self,x,p):
if self.bern_ps[p]:
#x_hash= (xxhash.xxh32(self.hash_cache[x], seed=p).intdigest()) % self.g
pert_val= (xxhash.xxh32(self.hash_cache[x], seed=p).intdigest()) % self.g
else:
pert_val=self.uni_dist[p]
dom_index = 0
while dom_index<self.sz:
if pert_val == (xxhash.xxh32(self.hash_cache[dom_index], seed=p).intdigest() % self.g):
self.estimate[dom_index]+=1.0
dom_index+=1
def correction(self):
p=0
while p <self.sz:
self.estimate[p]=(self.estimate[p] - (self.population/self.g))/(self.prob -(1.0/self.g))
p+=1
self.estimate/=self.estimate.sum()
#print(self.estimate.round(4))
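## Illustrative usage of INPUT_OLH (a sketch; the parameter values are arbitrary):
##   olh = INPUT_OLH(e_eps=3.0, d=4, population=1000)
##   for p, x in enumerate(np.random.randint(0, 2 ** 4, size=1000)):
##       olh.perturb(x, p)
##   olh.correction()   # olh.estimate now holds the normalized frequency estimates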
class INPUT_HT(object):
def perturb(self,index_of_1,p):
rc = self.rand_coefs[p]
index = self.misc_vars.coef_dict[rc]
self.coef_dist[index] += 1.0
cf = np.power(-1.0, count_1(np.bitwise_and(index_of_1, rc)))
self.iht_pert_ns_estimate[index] += rr2(cf, self.bern_ht[p])
def correction(self):
self.coef_dist[self.coef_dist==0.0]=1.0
self.iht_pert_ns_estimate/=self.coef_dist
self.iht_pert_ns_estimate/=(2.0*self.prob-1.0)
self.iht_pert_ns_estimate[0] = 1.0
self.coef_dist[self.coef_dist<=0.0]=0.0
def __init__(self,d,k,e_eps,population,misc_vars):
self.d = d
self.k = k
self.misc_vars = misc_vars
self.population= population
self.sz = np.power(2,self.k)
self.e_eps = e_eps
self.prob = self.e_eps/(1.0+self.e_eps)
self.problist = [self.prob,1.0-self.prob]
self.bern_ht = np.random.choice([True,False],p=self.problist,size=self.population)
self.rand_coefs = np.random.choice(self.misc_vars.allsubsetsint,size=self.population)
self.iht_pert_ns_estimate = np.zeros(self.misc_vars.allsubsetsint.shape[0])
#iht_pert_ns_estimate.fill(0.0)
self.coef_dist = np.zeros(self.misc_vars.cls)
## From Apple's paper.
## https://machinelearning.apple.com/2017/12/06/learning-with-privacy-at-scale.html
## This algorithm may perform poorly, but we add it for comparison.
class INPUT_CMS:
def __init__(self, w, d,population,e_eps,domain):
'''
if delta <= 0 or delta >= 1:
raise ValueError("delta must be between 0 and 1, exclusive")
if epsilonh <= 0 or epsilonh >= 1:
raise ValueError("epsilon must be between 0 and 1, exclusive")
#self.w = int(np.ceil(np.e / epsilonh))
#self.d = int(np.ceil(np.log(1 / delta)))
'''
self.w=w
self.d =d
self.population=population
self.hash_functions = [self.__generate_hash_function() for i in range(self.d)]
self.M = np.zeros(shape=(self.d, self.w))
#print (self.w,self.d,self.w*self.d,self.M.shape)
self.hash_chooser = np.random.choice(range(self.d),size=self.population)
self.epsilon = np.log(e_eps)
self.flip_prob = 1.0/(1.0+np.power(np.e,self.epsilon/2.0))
problist = [self.flip_prob,1.0-self.flip_prob]
self.bern = np.random.choice([True,False],p=problist,size=self.population*self.w).reshape(self.population,self.w)
self.c_eps = (np.power(np.e,self.epsilon/2.0)+1.0)/(np.power(np.e,self.epsilon/2.0)-1.0)
self.estimate = np.zeros(int(np.power(2,domain)))
def __generate_hash_function(self):
a = random_number()
b= random_number()
return lambda x: (a * x + b) % BIG_PRIME % self.w
def perturb(self, key,p):
hash_choice = self.hash_chooser[p]
hashed_key = self.hash_functions[hash_choice](abs(hash(str(key))))
cnt = 0
while cnt< self.w:
item = -1.0
if cnt == hashed_key:
item = 1.0
if self.bern[p][cnt]:
item = -item
self.M[hash_choice][cnt]+=(self.d * (item*self.c_eps*0.5+0.5))
cnt+=1
def query(self,key):
l =0
avg=0.0
hsh_str= abs(hash(str(key)))
while l < self.d:
hashed_key = self.hash_functions[l](hsh_str)
avg+=self.M[l][hashed_key]
l+=1
avg/=self.d
est = ((1.0*self.w)/(self.w-1.0))* (avg- (1.0*self.population)/self.w)
return est
def correction(self):
cnt=0
while cnt <self.estimate.shape[0]:
self.estimate[cnt]=self.query(cnt)
cnt+=1
self.estimate[self.estimate < 0.0] = 0.0
self.estimate/=self.estimate.sum()
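## Illustrative usage of INPUT_CMS (a sketch; the sketch width/depth values mirror
## those used in driver() below and are otherwise arbitrary):
##   cms = INPUT_CMS(w=256, d=5, population=1000, e_eps=3.0, domain=4)
##   for p, x in enumerate(np.random.randint(0, 2 ** 4, size=1000)):
##       cms.perturb(x, p)
##   cms.correction()   # cms.estimate holds the normalized frequency estimates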
## From Apple's paper.
## https://machinelearning.apple.com/2017/12/06/learning-with-privacy-at-scale.html
## This algorithm may perform poorly, but we add it for comparison.
class INPUT_HTCMS:
#def __init__(self, delta, epsilonh,population,e_eps):
def __init__(self, w, d,population,e_eps,domain):
self.w=int(w)
self.d =int(d)
self.ht = hadamard(self.w, dtype="float32")
self.population=population
self.hash_functions = [self.__generate_hash_function() for i in range(self.d)]
self.M = np.zeros(shape=(self.d, self.w))
#print (self.w,self.d,self.w*self.d,self.M.shape)
self.hash_chooser = np.random.choice(range(self.d),size=self.population).astype("int32")
self.coef_chooser = np.random.choice(range(self.w),size=self.population).astype("int32")
#self.hash_choice_counter = np.zeros(self.d)
self.flip_prob = 1.0/(1.0+e_eps)
problist = [self.flip_prob,1.0-self.flip_prob]
self.bern = np.random.choice([True,False],p=problist,size=self.population)
self.c_eps = (e_eps+1.0)/(e_eps-1.0)
self.estimate = np.zeros(int(np.power(2,domain)))
def __generate_hash_function(self):
a = random_number()
b= random_number()
return lambda x: (a * x + b) % BIG_PRIME % self.w
def perturb(self, key,p):
hash_choice = self.hash_chooser[p]
#self.hash_choice_counter[hash_choice]+=1.0
hashed_key = self.hash_functions[hash_choice](abs(hash(str(key))))
rand_coef = self.coef_chooser[p]
item = self.ht[rand_coef][hashed_key]
if self.bern[p]:
item = -item
self.M[hash_choice][rand_coef]+=(self.d * item*self.c_eps)
def correction(self):
cnt = 0
while cnt < self.d:
#print self.M[cnt]
self.M[cnt] = self.ht.dot(self.M[cnt])
cnt+=1
cnt=0
while cnt <self.estimate.shape[0]:
self.estimate[cnt]=self.query(cnt)
cnt+=1
self.estimate[self.estimate < 0.0] = 0.0
self.estimate/=self.estimate.sum()
def query(self,key):
l =0
avg=0.0
hsh_str= abs(hash(str(key)))
while l < self.d:
hashed_key = self.hash_functions[l](hsh_str)
avg+=self.M[l][hashed_key]
l+=1
avg/=self.d
est = ((1.0*self.w)/(self.w-1.0))* (avg- (1.0*self.population)/self.w)
return est
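## INPUT_HTCMS is used the same way as INPUT_CMS above (same constructor arguments);
## the difference is that each user reports a single randomized Hadamard coefficient
## of their hashed one-hot row rather than a fully perturbed row.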
class INPUT_PS(object):
def perturb2(self,index_of_1,p):
if self.bern_ps[p]:
self.ips_ps_pert_aggr[index_of_1] += 1.0
else:
self.ips_ps_pert_aggr[self.rand_coef_ps[p]] += 1.0
def perturb(self,index_of_1,p):
try:
freq = self.rand_cache[index_of_1]["freq"]
except:
i = 0
while i < self.sz:
options = list(range(0, self.sz))
options.remove(i)
self.rand_cache[i] = {"rnum": np.random.choice(np.array(options), size=10000), "freq": 0}
i += 1
freq = self.rand_cache[index_of_1]["freq"]
if freq > 9990:
options = list(range(0, self.sz))
options.remove(index_of_1)
self.rand_cache[index_of_1]["rnum"] = np.random.choice(np.array(options), size=10000)
self.rand_cache[index_of_1]["freq"] = 0
rnum = self.rand_cache[index_of_1]["rnum"][freq]
ips_output = mps(index_of_1, self.bern[p], rnum)
self.ips_ps_pert_aggr[ips_output] += 1.0
self.rand_cache[index_of_1]["freq"] += 1
def correction2(self):
self.ips_ps_pert_aggr /= self.population
#print self.ips_ps_pert_aggr, "pert",self.ips_ps_pert_aggr.sum()
for i in range(0, self.sz):
self.ips_ps_pert_aggr[i] = (self.ips_ps_pert_aggr[i] * self.sz + self.probps - 1.0) / (self.probps * (self.sz + 1.0) - 1.0)
#print self.ips_ps_pert_aggr.round(4)
def correction(self):
self.ips_ps_pert_aggr /= self.ips_ps_pert_aggr.sum()
for i in range(0,self.sz):
self.ips_ps_pert_aggr[i] = (self.ips_ps_pert_aggr[i]*self.sz+self.prob-1.0)/(self.prob*(self.sz+1.0)-1.0)
#print self.marg_ps_recon.round(4)
'''
self.ips_ps_pert_aggr /= self.ips_ps_pert_aggr.sum()
# marg_ps_recon = np.copy(marg_noisy)
self.ips_ps_pert_aggr = np.abs(self.mat_inv.dot(self.ips_ps_pert_aggr))
self.ips_ps_pert_aggr /= self.ips_ps_pert_aggr.sum()
'''
#return self.ips_ps_pert_aggr
def pop_probmat(self):
probmat =np.zeros((self.sz,self.sz))
for i in range(0,self.sz):
for j in range(0,self.sz):
if i ==j:
probmat[i][j]= self.prob
else:
probmat[i][j]= (1.0-self.prob)/(self.sz-1.0)
return probmat
def __init__(self,d,k,e_eps,population,misc_vars):
self.d = d
self.k = k
self.population= population
self.k_way = misc_vars.k_way
self.sz = np.power(2,self.d)
self.e_eps = e_eps
self.prob = (self.e_eps/(self.e_eps+self.sz-1.0))
#print (self.prob,"input-ps")
self.problist = [self.prob,1.0-self.prob]
self.probps = (self.e_eps - 1.0) / (self.e_eps + self.sz - 1.0)
self.problist2 = [self.probps, 1.0 - self.probps]
self.rand_coef_ps = np.random.choice(np.array(range(0, self.sz)), size=self.population)
self.bern_ps = np.random.choice([True, False], size=self.population, p=[self.probps, 1.0 - self.probps])
#self.mat = self.pop_probmat()
#self.mat_inv = np.linalg.inv(self.mat) n = gc.collect()
self.bern = np.random.choice([True, False], p=self.problist, size=self.population)
self.ips_ps_pert_aggr = np.zeros(self.sz)
self.rand_cache = {}
self.marg_int = None
self.rand_cache = {}
#inp_trans_menthods.loc[l]=np.array([population,d,len(iway),input_ht_pert,iht_pert_ns_estimate,had_coefs,input_ps,input_rr],dtype="object")
def change_mapping(d):
if d:
return "1"
return "0"
def get_real_data(population,d):
data = pd.read_pickle("data/nyc_taxi_bin_sample.pkl").sample(population,replace=True)
data = data.to_numpy()
f = np.vectorize(change_mapping)
i = data.shape[1]
remainder = d % i
ncopies = d // i
copies = []
j = 0
while j < ncopies:
copies.append(data)
j+=1
#print data[:,range(0,remainder)]
copies.append(data[:,range(0,remainder)])
#rand_perm = np.random.choice(range(0,d),replace=False,size=d)
#print rand_perm
data_high = np.concatenate(tuple(copies),axis=1)#[:,rand_perm]
#print (data_high.shape)
#columns= data.columns.tolist()
#print columns
#data = f(data_high)
return f(data_high).astype("str")
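# Illustrative call (assumes data/nyc_taxi_bin_sample.pkl is available):
#   data = get_real_data(population=2 ** 18, d=9)   # (2**18, 9) array of "0"/"1" strings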
class MARGINAL_VARS(object):
#We cache the set of necessary and sufficient indices to evaluate each <= k way marginal.
def compute_downward_closure(self):
all_cords = np.array(range(0, np.power(2, self.d)))
## iterate over all possible <=k way marginals.
for beta in self.allsubsetsint:
marg_str = bin(beta)[2:]
marg_str = "0" * (self.d - len(marg_str)) + marg_str
parity = np.power(2, count_1(beta))
alphas = np.zeros(parity, dtype="int64")
cnt = 0
for alpha in all_cords:
if np.bitwise_and(alpha, beta) == alpha:
alphas[cnt] = alpha
cnt += 1
### We also key the cache by the marginal's string form, in case it is needed.
self.alphas_cache[marg_str] = {"alphas": alphas, "probps": ((self.e_eps - 1.0) / (parity + self.e_eps - 1.0))}
self.alphas_cache[beta] = {"alphas": alphas, "probps": ((self.e_eps - 1.0) / (parity + self.e_eps - 1.0))}
## This method finds the set of <=k way marginal indices i.e. list of all subsets of length <=k from d.
def get_k_way_marginals(self):
j = 0
marginal = np.array(["0"] * self.d)
while j <= self.k:
subsets = list(itertools.combinations(range(0, self.d), j))
subsets = np.array([list(elem) for elem in subsets])
for s in subsets:
marginal.fill("0")
for b in s:
marginal[b] = "1"
self.allsubsetsint.append(int("".join(marginal)[::-1], 2))
if j == self.k:
# k_way.append(int("".join(marginal),2))
self.k_way.append("".join(marginal)[::-1])
self.k_way_bit_pos.append(s)
# print s,marginal,"".join(marginal)
j += 1
self.allsubsetsint = np.array(self.allsubsetsint, dtype="int64")
self.k_way = np.array(self.k_way, dtype="str")
self.k_way_bit_pos = np.array(self.k_way_bit_pos, dtype="int64")
self.allsubsetsint.sort()
#print (self.allsubsetsint)
## We tie marginals indices and corresponding bit positions together.
#print (dict(zip(self.k_way, self.k_way_bit_pos)))
return dict(zip(self.k_way, self.k_way_bit_pos))
def __init__(self,d,k,e_eps):
self.d = d
self.k = k
self.input_dist = np.zeros(np.power(2, self.d))
self.allsubsetsint = []
self.k_way = []
self.k_way_bit_pos = []
self.e_eps = e_eps
#self.f = hadamard(np.power(2,self.d)).astype("float64")
self.f = {}
self.alphas_cache = {}
self.k_way_bit_pos_dict =self.get_k_way_marginals()
self.cls = self.allsubsetsint.shape[0]
self.coef_dict = dict(zip(self.allsubsetsint, np.array(range(0, self.cls), dtype="int64")))
self.compute_downward_closure()
'''
Main driver routine that accepts all parameters and
runs the perturbation simulation.
'''
def driver(d,k,e_eps,population,misc_vars):
width = 256
no_hash = 5
###### Use the NYC Taxi data.
#data = get_real_data(population, d)
####### Use synthetic data if you don't have the taxi data. ########
data = np.random.choice(["1","0"],p=[0.3,0.7],size=d*population).reshape(population,d)
misc_vars.input_dist.fill(0.0)
##### Input Based Algorithms ########
iht_obj = INPUT_HT(d, k, e_eps, population, misc_vars)
ips_obj = INPUT_PS(d, k, e_eps, population, misc_vars)
irr_obj = INPUT_RR(e_eps, d, population)
iolh_obj = INPUT_OLH(e_eps, d, population)
icms_obj = INPUT_CMS(width, no_hash,population,e_eps,d)
icmsht_obj = INPUT_HTCMS(width, no_hash,population,e_eps,d)
############ Marginal Based Algorithms #########
mps_obj = MARG_PS(d, k, e_eps, population, misc_vars.k_way)
mrr_obj = MARG_RR(d, k, e_eps, population, misc_vars.k_way)
mht_obj = MARG_HT(d, k, e_eps, population, misc_vars.k_way, misc_vars.cls)
p = 0
while p < population:
x = data[p]
index_of_1 = int("".join(x), 2)
misc_vars.input_dist[index_of_1] += 1.0
############# input_RR###############
#irr_obj.perturb(index_of_1,p)
#irr_obj.perturb2()
#########################input-PS #################################
ips_obj.perturb2(index_of_1,p)
########################################
iht_obj.perturb(index_of_1, p)
##########################INPUT_OLH ###############################
#INPUT_OLH is a compute-intensive scheme, hence we don't run it for larger d's.
if d < 10:
iolh_obj.perturb(index_of_1,p)
##########################inp_CMS ########################
icms_obj.perturb(index_of_1,p)
##########################inp_HTCMS ########################
icmsht_obj.perturb(index_of_1,p)
########### marg-ps ###########
rand_questions = mps_obj.k_way_marg_ps[p]
responses = misc_vars.k_way_bit_pos_dict[rand_questions]
# print rand_questions,responses
index_of_1 = int("".join(data[p][responses]), 2)
mps_obj.perturb(index_of_1, p, rand_questions)
######################### marg-ht ############################
rand_questions = mht_obj.k_way_marg_ps[p]
responses = misc_vars.k_way_bit_pos_dict[rand_questions]
# print rand_quests,responses
index_of_1 = int("".join(data[p][responses]), 2)
mht_obj.perturb(index_of_1, p, rand_questions)
######################### marg-rs #################################
rand_questions = mrr_obj.k_way_marg_ps[p]
responses = misc_vars.k_way_bit_pos_dict[rand_questions]
index_of_1 = int("".join(data[p][responses]), 2)
mrr_obj.perturb3(index_of_1, p, rand_questions)
p += 1
irr_obj.correction3(misc_vars)
#irr_obj.correction2(misc_vars)
misc_vars.input_dist /= population
#irr_obj.correction()
#print (misc_vars.input_dist.round(4))
ips_obj.correction()
iht_obj.correction()
if d < 10:
iolh_obj.correction()
icms_obj.correction()
icmsht_obj.correction()
#print(icmsht_obj.estimate)
mht_obj.correction()
mrr_obj.correction3()
mps_obj.compute_all_marginals()
return compute_marg(misc_vars
, irr_obj.irr
, ips_obj.ips_ps_pert_aggr
, iht_obj.iht_pert_ns_estimate
, iolh_obj.estimate
, mps_obj.marg_dict
, mrr_obj.marg_dict
, mht_obj.marg_dict
, icms_obj.estimate
, icmsht_obj.estimate
)
'''
Call this method when you want to vary k while keeping d and eps fixed.
eps = 1.1
d = 9
'''
def vary_k():
## number of repetitions.
rpt = 5
e_eps = 3.0
d = 9
counter = 0
## dfmean and dfstd store the results. We use them in our plotting script.
l1 = np.zeros((rpt, 9))
dfmean = pd.DataFrame(columns=["population", "d", "k", "e_eps", "irr_l1", "mrr_l1", "iht_l1", "mht_l1", "ips_l1", "mps_l1","iolh_l1","icms_l1","icmsht_l1"])
dfstd = pd.DataFrame(columns=["irr_l1_std", "mrr_l1_std", "iht_l1_std", "mht_l1_std", "ips_l1_std", "mps_l1_std","iolh_l1_std","icms_l1_std","icmsht_l1_std"])
## parameters of the sketch
width = 256
no_hash = 5
# population variable. We prefer to keep it in the powers of two.
population = np.power(2, 18)
for k in reversed(range(1,d)):
misc_vars = MARGINAL_VARS(d, k, e_eps)
l1.fill(0.0)
print ("------------------")
for itr in (range(rpt)):
irr_l1, mrr_l1, iht_l1, mht_l1, ips_l1, mps_l1, iolh_l1,icms_l1,icmsht_l1 = driver(d,k,e_eps,population,misc_vars)
l1[itr] = np.array([irr_l1, mrr_l1, iht_l1, mht_l1, ips_l1, mps_l1,iolh_l1,icms_l1,icmsht_l1])
print (l1[itr])
conf = [population, d, k, e_eps]
conf.extend(l1.mean(axis=0))
dfmean.loc[counter] = conf
dfstd.loc[counter] = l1.std(axis=0)
#print (conf)
counter += 1
dfstdcols = list(dfstd.columns.values)
for c in dfstdcols:
dfmean[c] = dfstd[c]
#print (dfmean)
dfmean.to_pickle("data/all_mechanisms_vary_"+str(d)+".pkl")
## (irr_l1,mrr_l1,iht_l1, mht_l1, ips_l1, mps_l1, iolh_l1, icms_l1, icmsht_l1)
#dfmean.to_pickle("all_mechanisms_vary_k_fo.pkl")
'''
Call this method when you want to vary d while holding k, eps, and N fixed.
Fixed k, eps, N values:
k = 3
eps = 1.1
N = 2^19
'''
def vary_d():
print ("------------------")
population = int(np.power(2,19))
e_eps = 3.0
rpt =4
l1 = np.zeros((rpt, 9))
## Parameters for sketches
width = 256
no_hash = 5
k=3
dfmean = pd.DataFrame(columns=["population", "d", "k", "e_eps", "irr_l1", "mrr_l1", "iht_l1", "mht_l1", "ips_l1", "mps_l1","iolh_l1","icms_l1","icmsht_l1"])
dfstd = | pd.DataFrame(columns=["irr_l1_std", "mrr_l1_std", "iht_l1_std", "mht_l1_std", "ips_l1_std", "mps_l1_std","iolh_l1_std","icms_l1_std","icmsht_l1_std"]) | pandas.DataFrame |
"""
Tests for the choice_tools.py file.
"""
import unittest
import os
import warnings
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.sparse import csr_matrix, isspmatrix_csr
import pylogit.choice_tools as ct
import pylogit.base_multinomial_cm_v2 as base_cm
class GenericTestCase(unittest.TestCase):
def setUp(self):
"""
Create a fake dataset and specification from which we can initialize a
choice model.
"""
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
self.fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
self.fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
self.fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
self.fake_intercept_ref_pos = 2
# Create the shape parameters to be used during the tests. Note that
# these are the reparameterized shape parameters, thus they will be
# exponentiated in the fit_mle process and various calculations.
self.fake_shapes = np.array([-1, 1])
# Create names for the intercept parameters
self.fake_shape_names = ["Shape 1", "Shape 2"]
# Record the position of the shape parameter that is being constrained
self.fake_shape_ref_pos = 2
# Create an array of all model parameters
self.fake_all_params = np.concatenate((self.fake_shapes,
self.fake_intercepts,
self.fake_betas))
# The mapping between rows and alternatives is given below.
self.fake_rows_to_alts = csr_matrix(np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
self.fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
self.fake_index = self.fake_design.dot(self.fake_betas)
# Create the needed dataframe for the choice model constructor
self.fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": self.fake_design[:, 0],
"intercept": [1 for i in range(5)]})
# Record the various column names
self.alt_id_col = "alt_id"
self.obs_id_col = "obs_id"
self.choice_col = "choice"
# Create the index specification and name dictionary for the model
self.fake_specification = OrderedDict()
self.fake_names = OrderedDict()
self.fake_specification["x"] = [[1, 2, 3]]
self.fake_names["x"] = ["x (generic coefficient)"]
# Create a fake nest specification for the model
self.fake_nest_spec = OrderedDict()
self.fake_nest_spec["Nest 1"] = [1, 3]
self.fake_nest_spec["Nest 2"] = [2]
# Bundle args and kwargs used to construct the choice model.
self.constructor_args = [self.fake_df,
self.alt_id_col,
self.obs_id_col,
self.choice_col,
self.fake_specification]
# Create a variable for the kwargs being passed to the constructor
self.constructor_kwargs = {"intercept_ref_pos":
self.fake_intercept_ref_pos,
"shape_ref_pos": self.fake_shape_ref_pos,
"names": self.fake_names,
"intercept_names":
self.fake_intercept_names,
"shape_names": self.fake_shape_names,
"nest_spec": self.fake_nest_spec}
# Create a generic model object
self.model_obj = base_cm.MNDC_Model(*self.constructor_args,
**self.constructor_kwargs)
class ArgumentValidationTests(GenericTestCase):
def test_get_dataframe_from_data(self):
"""
Ensure that appropriate errors are raised when get_dataframe_from_data
receives incorrect arguments, and that the function returns the
expected results when correct arguments are passed.
"""
# Create a test csv file.
self.fake_df.to_csv("test_csv.csv", index=False)
# Ensure that the dataframe is recovered
func_df = ct.get_dataframe_from_data("test_csv.csv")
self.assertIsInstance(func_df, pd.DataFrame)
self.assertTrue((self.fake_df == func_df).all().all())
# Remove the csv file
os.remove("test_csv.csv")
# Pass the dataframe and ensure that it is returned
func_df = ct.get_dataframe_from_data(self.fake_df)
self.assertIsInstance(func_df, pd.DataFrame)
self.assertTrue((self.fake_df == func_df).all().all())
# Test all the ways that a ValueError should or could be raised
bad_args = [("test_json.json", ValueError),
(None, TypeError),
(77, TypeError)]
for arg, error in bad_args:
self.assertRaises(error, ct.get_dataframe_from_data, arg)
return None
def test_argument_type_check(self):
"""
Ensure that the appropriate errors are raised when arguments of
incorrect type are passed to "check_argument_type".
"""
# Isolate arguments that are correct, and ensure the function being
# tested returns None.
good_args = [self.fake_df, self.fake_specification]
self.assertIsNone(ct.check_argument_type(*good_args))
# Assemble a set of incorrect arguments and ensure the function raises
# a ValueError.
generic_dict = dict()
generic_dict.update(self.fake_specification)
bad_args = [[good_args[0], "foo"],
[good_args[0], generic_dict],
[self.fake_df.values, good_args[1]],
[False, good_args[1]],
[None, None]]
for args in bad_args:
self.assertRaises(TypeError, ct.check_argument_type, *args)
return None
def test_alt_id_col_inclusion_check(self):
""""
Ensure that the function correctly returns None when the
alternative_id_col is in the long format dataframe and that ValueErrors
are raised when the column is not the long format dataframe.
"""
self.assertIsNone(ct.ensure_alt_id_in_long_form(self.alt_id_col,
self.fake_df))
bad_cols = ["foo", 23, None]
for col in bad_cols:
self.assertRaises(ValueError, ct.ensure_alt_id_in_long_form,
col, self.fake_df)
return None
def test_check_type_and_values_of_specification_dict(self):
"""
Ensure that the various type and structure checks for the specification
dictionary are working.
"""
# Ensure that a correct specification dict raises no errors.
test_func = ct.check_type_and_values_of_specification_dict
unique_alternatives = np.arange(1, 4)
good_args = [self.fake_specification, unique_alternatives]
self.assertIsNone(test_func(*good_args))
# Create various bad specification dicts to make sure the function
# raises the correct errors.
bad_spec_1 = deepcopy(self.fake_specification)
bad_spec_1["x"] = "incorrect_string"
# Use a structure that is incorrect (group_items should only be ints
# not lists)
bad_spec_2 = deepcopy(self.fake_specification)
bad_spec_2["x"] = [[1, 2], [[3]]]
# Use an alternative that is not in the universal choice set
bad_spec_3 = deepcopy(self.fake_specification)
bad_spec_3["x"] = [[1, 2, 4]]
bad_spec_4 = deepcopy(self.fake_specification)
bad_spec_4["x"] = [1, 2, 4]
# Use a completely wrong type
bad_spec_5 = deepcopy(self.fake_specification)
bad_spec_5["x"] = set([1, 2, 3])
for bad_spec, error in [(bad_spec_1, ValueError),
(bad_spec_2, ValueError),
(bad_spec_3, ValueError),
(bad_spec_4, ValueError),
(bad_spec_5, TypeError)]:
self.assertRaises(error, test_func, bad_spec, unique_alternatives)
return None
def test_check_keys_and_values_of_name_dictionary(self):
"""
Ensure that the checks of the keys and values of the name dictionary
are working as expected.
"""
# Ensure that a correct name dict raises no errors.
test_func = ct.check_keys_and_values_of_name_dictionary
num_alts = 3
args = [self.fake_names, self.fake_specification, num_alts]
self.assertIsNone(test_func(*args))
# Create various bad specification dicts to make sure the function
# raises the correct errors.
bad_names_1 = deepcopy(self.fake_names)
bad_names_1["y"] = "incorrect_string"
# Use a completely wrong type
bad_names_2 = deepcopy(self.fake_names)
bad_names_2["x"] = set(["generic x"])
# Use an incorrect number of elements
bad_names_3 = deepcopy(self.fake_names)
bad_names_3["x"] = ["generic x1", "generic x2"]
# Use the wrong type for the name
bad_names_4 = deepcopy(self.fake_names)
bad_names_4["x"] = [23]
for bad_names in [bad_names_1,
bad_names_2,
bad_names_3,
bad_names_4]:
args[0] = bad_names
self.assertRaises(ValueError, test_func, *args)
# Use two different specifications to test what could go wrong
new_spec_1 = deepcopy(self.fake_specification)
new_spec_1["x"] = "all_same"
bad_names_5 = deepcopy(self.fake_names)
bad_names_5["x"] = False
new_spec_2 = deepcopy(self.fake_specification)
new_spec_2["x"] = "all_diff"
bad_names_6 = deepcopy(self.fake_names)
bad_names_6["x"] = False
bad_names_7 = deepcopy(self.fake_names)
bad_names_7["x"] = ["foo", "bar"]
for names, spec, error in [(bad_names_5, new_spec_1, TypeError),
(bad_names_6, new_spec_2, ValueError),
(bad_names_7, new_spec_2, ValueError)]:
args[0], args[1] = names, spec
self.assertRaises(error, test_func, *args)
return None
def test_create_design_matrix(self):
"""
Ensure that create_design_matrix returns the correct numpy arrays for
model estimation.
"""
# Create a long format dataframe with variables of all types (generic,
# alternative-specific, and subset specific)
self.fake_df["y"] = np.array([12, 9, 0.90, 16, 4])
self.fake_df["z"] = np.array([2, 6, 9, 10, 1])
self.fake_df["m"] = np.array([2, 2, 2, 6, 6])
# Amend the specification of 'x'
self.fake_specification["x"] = "all_same"
self.fake_names["x"] = "x (generic coefficient)"
# Add the new variables to the specification and name dictionaries
self.fake_specification["y"] = "all_diff"
self.fake_names["y"] = ["y_alt_1", "y_alt_2", "y_alt_3"]
self.fake_specification["z"] = [[1, 2], 3]
self.fake_names["z"] = ["z_alts_1_2", "z_alt_3"]
self.fake_specification["m"] = [1, 2]
self.fake_names["m"] = ["m_alt_1", "m_alt_2"]
# Create the numpy array that should be returned
expected = np.array([[1, 12, 0, 0, 2, 0, 2, 0],
[2, 0, 9, 0, 6, 0, 0, 2],
[3, 0, 0, 0.9, 0, 9, 0, 0],
[1.5, 16, 0, 0, 10, 0, 6, 0],
[3.5, 0, 0, 4, 0, 1, 0, 0]])
expected_names = ([self.fake_names["x"]] +
self.fake_names["y"] +
self.fake_names["z"] +
self.fake_names["m"])
# Compare the expected array with the returned array
func_results = ct.create_design_matrix(self.fake_df,
self.fake_specification,
self.alt_id_col,
self.fake_names)
func_design, func_names = func_results
self.assertIsInstance(func_design, np.ndarray)
self.assertEqual(func_design.shape, (5, 8))
npt.assert_allclose(func_design, expected)
self.assertEqual(expected_names, func_names)
return None
def test_ensure_all_columns_are_used(self):
"""
Ensure appropriate warnings are raised when there are more /less
variables in one's dataframe than are accounted for in one's function.
"""
# Make sure that None is returned when there is no problem.
num_vars_used = self.fake_df.columns.size
self.assertIsNone(ct.ensure_all_columns_are_used(num_vars_used,
self.fake_df))
# Test to ensure that a warning message is raised when using
# a number of columns different from the number in the dataframe.
with warnings.catch_warnings(record=True) as context:
# Use this filter to always trigger the UserWarnings
warnings.simplefilter('always', UserWarning)
for pos, package in enumerate([(-1, "only"), (1, "more")]):
i, msg = package
num_vars_used = self.fake_df.columns.size + i
ct.ensure_all_columns_are_used(num_vars_used, self.fake_df)
# Check that the warning has been created.
self.assertEqual(len(context), pos + 1)
self.assertIsInstance(context[-1].category, type(UserWarning))
self.assertIn(msg, str(context[-1].message))
return None
def test_check_dataframe_for_duplicate_records(self):
"""
Ensure that ValueError is raised only when the passed dataframe has
duplicate observation-id and alternative-id pairs.
"""
# Alias the function that is to be tested
func = ct.check_dataframe_for_duplicate_records
# Ensure that the function returns None when given data that is okay.
good_args = [self.obs_id_col, self.alt_id_col, self.fake_df]
self.assertIsNone(func(*good_args))
# Make sure a ValueError is raised when one has repeat obs-id and
# alt-id pairs.
bad_df = self.fake_df.copy()
bad_df.loc[3, "obs_id"] = 1
bad_args = deepcopy(good_args)
bad_args[2] = bad_df
self.assertRaises(ValueError, func, *bad_args)
return None
def test_ensure_num_chosen_alts_equals_num_obs(self):
"""
Ensure that ValueError is raised only when the passed dataframe's
number of choices does not equal the declared number of observations.
"""
# Alias the function that is to be tested
func = ct.ensure_num_chosen_alts_equals_num_obs
# Ensure that the function returns None when given data that is okay.
args = [self.obs_id_col, self.choice_col, self.fake_df]
self.assertIsNone(func(*args))
# Make sure a ValueError is raised when one has more or less choices
# than observations
# Too many choices
bad_df_1 = self.fake_df.copy()
bad_df_1.loc[0, "choice"] = 1
# Too few choices
bad_df_2 = self.fake_df.copy()
bad_df_2.loc[1, "choice"] = 0
for bad_df in [bad_df_1, bad_df_2]:
args[2] = bad_df
self.assertRaises(ValueError, func, *args)
return None
def test_check_type_and_values_of_alt_name_dict(self):
"""
Ensure that a TypeError is raised when alt_name_dict is not an instance
of a dictionary, and ensure that a ValueError is raised when the keys
of alt_name_dict are not actually in the alternative ID column of the
passed dataframe.
"""
# Alias the function that is to be tested
func = ct.check_type_and_values_of_alt_name_dict
# Ensure that the function returns None when given data that is okay.
alt_name_dict = {1: "alternative 1",
2: "alternative 2",
3: "alternative 3"}
args = [alt_name_dict, self.alt_id_col, self.fake_df]
self.assertIsNone(func(*args))
# Test both ways that the function of interest can raise errors.
# Use a data structure that is not a dictionary.
bad_dict_1 = alt_name_dict.items()
# Use keys in the dictionary that are not valid alternative IDs.
# Our alternative IDs are ints, not strings.
bad_dict_2 = {'1': "alternative 1",
'2': "alternative 2",
'3': "alternative 3"}
for bad_dict, error in [(bad_dict_1, TypeError),
(bad_dict_2, ValueError)]:
args[0] = bad_dict
self.assertRaises(error, func, *args)
return None
def test_convert_long_to_wide(self):
"""
Test the basic functionality of convert_long_to_wide, ensuring correct
outputs when given correct inputs.
"""
# Create a long format dataframe with variables of all types (generic,
# alternative-specific, and subset specific)
# Add the alternative specific variable
self.fake_df["y"] = np.array([12, 9, 0.90, 16, 4])
# Add the subset specific variable (it only exists for a subset of
# alternatives, 1 and 2)
self.fake_df["z"] = np.array([2, 6, 0, 10, 0])
# Add the individual specific variables
self.fake_df["m"] = np.array([2, 2, 2, 6, 6])
# Construct the wide format dataframe by hand
wide_data = OrderedDict()
wide_data["obs_id"] = [1, 2]
wide_data["choice"] = [2, 3]
wide_data["availability_1"] = [1, 1]
wide_data["availability_2"] = [1, 0]
wide_data["availability_3"] = [1, 1]
# Add the individual specific variables
wide_data["m"] = [2, 6]
# Add the alternative specific variables
wide_data["y_1"] = [12.0, 16.0]
wide_data["y_2"] = [9.0, np.nan]
wide_data["y_3"] = [0.9, 4.0]
# Add the subset specific variables
wide_data["z_1"] = [2.0, 10.0]
wide_data["z_2"] = [6, np.nan]
expected = | pd.DataFrame(wide_data) | pandas.DataFrame |
"""Exhastuve grid search for parameters for TSNE and UMAP"""
import argparse
import itertools
import hdbscan
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.spatial.distance import pdist, squareform
from sklearn.manifold import TSNE, MDS
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix, matthews_corrcoef
from sklearn.model_selection import RepeatedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from umap import UMAP
import sys
sys.path.append("../notebooks/scripts/")
from Helpers import get_PCA_feature_matrix, get_euclidean_data_frame
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--distance-matrix", help="csv file with the distance matrix")
parser.add_argument("--alignment", help="FASTA file with the alignment")
parser.add_argument("--node-data", help="csv file with the clade_membership - that MUST be the name of the column.")
parser.add_argument("--n-neighbors", nargs="+", type=int, help="list of values that the search should use")
parser.add_argument("--min-dist", nargs="+", type=float, help="list of values that the search should use")
parser.add_argument("--perplexity", nargs="+", type=float, help="list of values that the search should use")
parser.add_argument("--threshold-information", nargs="+", help="the distance threshold values to be used on HDBSCAN. if not provided, it will run without.")
parser.add_argument("--learning-rate", nargs="+", type=float, help="list of values that the search should use")
parser.add_argument("--n-repeats", type=int, help="the number of times the k fold generator should repeat the k fold")
parser.add_argument("--output", help="the path where the best thresholds will be saved.")
parser.add_argument("--output-hyperparameters", help="the path where the best parameters will be saved. ")
parser.add_argument("--output-metadata", help="the path where the grid search data will be saved.")
parser.add_argument("--output-figure-HDBSCAN", help="PNG with the results displayed graphically for HDBSCAN thresholds")
parser.add_argument("--output-figure-grid-search", help="PNG with the results displayed graphically for grid search")
args = parser.parse_args()
def _get_embedding_columns_by_method(method):
if method in ("pca"):
return list(f"{method}1 {method}2 {method}3 {method}4 {method}5 {method}6 {method}7 {method}8 {method}9 {method}10".split())
if method in ("mds"):
return list(f"{method}1 {method}2".split())
if method in ("t-sne"):
return list("tsne_x tsne_y".split())
else:
return list(f"{method}_x {method}_y".split())
if(args.threshold_information is not None):
#threshold_df = pd.read_csv(args.threshold_information) threshold_df.loc[threshold_df['embedding'] == args.method][args.column_threshold].values.tolist()[0]
distance_thresholds = args.threshold_information
else:
distance_thresholds = np.arange(0,20,2)
default_tuned_values = []
list_of_embedding_strings = ["t-sne", "umap", "mds", "pca"] #["t-SNE","UMAP","MDS", "PCA"]
embedding_class = [TSNE, UMAP, MDS, PCA]
tuned_parameter_values = []
# reading in the distance matrix and node data
distance_matrix = | pd.read_csv(args.distance_matrix, index_col=0) | pandas.read_csv |
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserWarning
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timestamp, concat
import pandas._testing as tm
@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("check_orig", [True, False])
def test_dtype_all_columns(all_parsers, dtype, check_orig):
# see gh-3795, gh-6607
parser = all_parsers
df = DataFrame(
np.random.rand(5, 2).round(4),
columns=list("AB"),
index=["1A", "1B", "1C", "1D", "1E"],
)
with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:
df.to_csv(path)
result = parser.read_csv(path, dtype=dtype, index_col=0)
if check_orig:
expected = df.copy()
result = result.astype(float)
else:
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
def test_dtype_all_columns_empty(all_parsers):
# see gh-12048
parser = all_parsers
result = parser.read_csv(StringIO("A,B"), dtype=str)
expected = DataFrame({"A": [], "B": []}, index=[], dtype=str)
tm.assert_frame_equal(result, expected)
def test_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
expected = DataFrame(
[[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"]
)
expected["one"] = expected["one"].astype(np.float64)
expected["two"] = expected["two"].astype(object)
result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str})
tm.assert_frame_equal(result, expected)
def test_invalid_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"):
parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
@pytest.mark.parametrize(
"dtype",
[
"category",
| CategoricalDtype() | pandas.core.dtypes.dtypes.CategoricalDtype |
import concurrent.futures
import multiprocessing
import scipy
import pandas as pd
import random
import re
import time
import pickle
import os
from .DataManager import DataManager
from .Agent import Agent
from colorama import Fore
from .Utils import get_seconds
from sklearn.feature_extraction.text import TfidfVectorizer
class Coordinator:
current_date = | pd.to_datetime('2020-03-29T00:00:00Z') | pandas.to_datetime |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
from collections.abc import Iterable
from datetime import datetime, timedelta
from unittest import TestCase
import numpy as np
import pandas as pd
import statsmodels
from kats.consts import TimeSeriesData
from kats.detectors.detector_consts import (
AnomalyResponse,
ChangePointInterval,
ConfidenceBand,
PercentageChange,
SingleSpike,
)
statsmodels_ver = float(
re.findall("([0-9]+\\.[0-9]+)\\..*", statsmodels.__version__)[0]
)
class SingleSpikeTest(TestCase):
def test_spike(self) -> None:
spike_time_str = "2020-03-01"
spike_time = datetime.strptime(spike_time_str, "%Y-%m-%d")
spike = SingleSpike(time=spike_time, value=1.0, n_sigma=3.0)
self.assertEqual(spike.time_str, spike_time_str)
class ChangePointIntervalTest(TestCase):
def test_changepoint(self) -> None:
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
previous_seq = [date_start + timedelta(days=x) for x in range(15)]
current_length = 10
current_seq = [
previous_seq[10] + timedelta(days=x) for x in range(current_length)
]
previous_values = np.random.randn(len(previous_seq))
current_values = np.random.randn(len(current_seq))
# add a very large value to detect spikes
current_values[0] = 100.0
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `previous`.
self.previous = TimeSeriesData(
pd.DataFrame({"time": previous_seq, "value": previous_values})
)
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `current`.
self.current = TimeSeriesData(
pd.DataFrame({"time": current_seq, "value": current_values})
)
previous_extend = TimeSeriesData(
pd.DataFrame({"time": previous_seq[9:], "value": previous_values[9:]})
)
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `prev_start`.
self.prev_start = previous_seq[0]
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `prev_end`.
self.prev_end = previous_seq[9]
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `current_start`.
self.current_start = current_seq[0]
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `current_end`.
self.current_end = current_seq[-1] + timedelta(days=1)
previous_int = ChangePointInterval(self.prev_start, self.prev_end)
previous_int.data = self.previous
# tests whether data is clipped properly to start and end dates
np.testing.assert_array_equal(previous_values[0:9], previous_int.data)
# test extending the data
# now the data is extended to include the whole sequence
previous_int.end_time = previous_seq[-1] + timedelta(days=1)
previous_int.extend_data(previous_extend)
self.assertEqual(len(previous_int), len(previous_seq))
current_int = ChangePointInterval(self.current_start, self.current_end)
current_int.data = self.current
current_int.previous_interval = previous_int
# check all the properties
self.assertEqual(current_int.start_time, self.current_start)
self.assertEqual(current_int.end_time, self.current_end)
self.assertEqual(
current_int.start_time_str,
datetime.strftime(self.current_start, "%Y-%m-%d"),
)
self.assertEqual(
current_int.end_time_str, datetime.strftime(self.current_end, "%Y-%m-%d")
)
self.assertEqual(current_int.mean_val, np.mean(current_values))
self.assertEqual(current_int.variance_val, np.var(current_values))
self.assertEqual(len(current_int), current_length)
self.assertEqual(current_int.previous_interval, previous_int)
# check spike detection
spike_list = current_int.spikes
# pyre-fixme[16]: `List` has no attribute `value`.
self.assertEqual(spike_list[0].value, 100.0)
self.assertEqual(
# pyre-fixme[16]: `List` has no attribute `time_str`.
spike_list[0].time_str,
datetime.strftime(self.current_start, "%Y-%m-%d"),
)
def test_multichangepoint(self) -> None:
# test for multivariate time series
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
previous_seq = [date_start + timedelta(days=x) for x in range(15)]
current_length = 10
current_seq = [
previous_seq[10] + timedelta(days=x) for x in range(current_length)
]
num_seq = 5
previous_values = [np.random.randn(len(previous_seq)) for _ in range(num_seq)]
current_values = [np.random.randn(len(current_seq)) for _ in range(num_seq)]
# add a very large value to detect spikes
for i in range(num_seq):
current_values[i][0] = 100 * (i + 1)
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `previous`.
self.previous = TimeSeriesData(
pd.DataFrame(
{
**{"time": previous_seq},
**{f"value_{i}": previous_values[i] for i in range(num_seq)},
}
)
)
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `current`.
self.current = TimeSeriesData(
pd.DataFrame(
{
**{"time": current_seq},
**{f"value_{i}": current_values[i] for i in range(num_seq)},
}
)
)
previous_extend = TimeSeriesData(
pd.DataFrame(
{
**{"time": previous_seq[9:]},
**{f"value_{i}": previous_values[i][9:] for i in range(num_seq)},
}
)
)
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `prev_start`.
self.prev_start = previous_seq[0]
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `prev_end`.
self.prev_end = previous_seq[9]
# `current_start`.
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `current_start`.
self.current_start = current_seq[0]
# pyre-fixme[16]: `ChangePointIntervalTest` has no attribute `current_end`.
self.current_end = current_seq[-1] + timedelta(days=1)
previous_int = ChangePointInterval(self.prev_start, self.prev_end)
previous_int.data = self.previous
# tests whether data is clipped properly to start and end dates
for i in range(num_seq):
self.assertEqual(
# pyre-fixme[16]: Optional type has no attribute `__getitem__`.
previous_int.data[:, i].tolist(),
previous_values[i][0:9].tolist(),
)
# test extending the data
# now the data is extended to include the whole sequence except the last point
previous_int.end_time = previous_seq[-1] # + timedelta(days=1)
previous_int.extend_data(previous_extend)
self.assertEqual(len(previous_int) + 1, len(previous_seq))
# let's repeat this except without truncating the final point
previous_int2 = ChangePointInterval(self.prev_start, self.prev_end)
previous_int2.data = self.previous
previous_int2.end_time = previous_seq[-1] + timedelta(days=1)
previous_int2.extend_data(previous_extend)
self.assertEqual(len(previous_int2), len(previous_seq))
# let's extend the date range so it's longer than the data
# this should not change the results
previous_int3 = ChangePointInterval(self.prev_start, self.prev_end)
previous_int3.data = self.previous
previous_int3.end_time = previous_seq[-1] + timedelta(days=2)
previous_int3.extend_data(previous_extend)
self.assertEqual(len(previous_int3), len(previous_seq))
# let's construct the current ChangePointInterval
current_int = ChangePointInterval(self.current_start, self.current_end)
current_int.data = self.current
current_int.previous_interval = previous_int
# check all the properties
self.assertEqual(current_int.start_time, self.current_start)
self.assertEqual(current_int.end_time, self.current_end)
self.assertEqual(current_int.num_series, num_seq)
self.assertEqual(
current_int.start_time_str,
datetime.strftime(self.current_start, "%Y-%m-%d"),
)
self.assertEqual(
current_int.end_time_str, datetime.strftime(self.current_end, "%Y-%m-%d")
)
self.assertEqual(
# pyre-fixme[16]: `float` has no attribute `tolist`.
current_int.mean_val.tolist(),
[np.mean(current_values[i]) for i in range(num_seq)],
)
self.assertEqual(
current_int.variance_val.tolist(),
[np.var(current_values[i]) for i in range(num_seq)],
)
self.assertEqual(len(current_int), current_length)
self.assertEqual(current_int.previous_interval, previous_int)
# check spike detection
spike_array = current_int.spikes
self.assertEqual(len(spike_array), num_seq)
for i in range(num_seq):
# pyre-fixme[16]: `SingleSpike` has no attribute `__getitem__`.
self.assertEqual(spike_array[i][0].value, 100 * (i + 1))
self.assertEqual(
spike_array[i][0].time_str,
datetime.strftime(self.current_start, "%Y-%m-%d"),
)
class PercentageChangeTest(TestCase):
def test_perc_change(self) -> None:
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
previous_seq = [date_start + timedelta(days=x) for x in range(30)]
current_length = 31
# offset one to make the new interval start one day after the previous one ends
current_seq = [
previous_seq[-1] + timedelta(days=(x + 1)) for x in range(current_length)
]
previous_values = 1.0 + 0.25 * np.random.randn(len(previous_seq))
current_values = 10.0 + 0.25 * np.random.randn(len(current_seq))
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `previous`.
self.previous = TimeSeriesData(
pd.DataFrame({"time": previous_seq, "value": previous_values})
)
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `current`.
self.current = TimeSeriesData(
pd.DataFrame({"time": current_seq, "value": current_values})
)
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `prev_start`.
self.prev_start = previous_seq[0]
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `prev_end`.
self.prev_end = previous_seq[9]
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `current_start`.
self.current_start = current_seq[0]
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `current_end`.
self.current_end = current_seq[-1]
previous_int = ChangePointInterval(
previous_seq[0], (previous_seq[-1] + timedelta(days=1))
)
previous_int.data = self.previous
current_int = ChangePointInterval(
current_seq[0], (current_seq[-1] + timedelta(days=1))
)
current_int.data = self.current
current_int.previous_interval = previous_int
perc_change_1 = PercentageChange(current=current_int, previous=previous_int)
previous_mean = np.mean(previous_values)
current_mean = np.mean(current_values)
# test the ratios
ratio_val = current_mean / previous_mean
self.assertEqual(perc_change_1.ratio_estimate, ratio_val)
ratio_estimate = perc_change_1.ratio_estimate
assert isinstance(ratio_estimate, float)
self.assertAlmostEqual(ratio_estimate, 10.0, 0)
self.assertEqual(perc_change_1.perc_change, (ratio_val - 1) * 100)
self.assertEqual(perc_change_1.direction, "up")
self.assertEqual(perc_change_1.stat_sig, True)
self.assertTrue(perc_change_1.p_value < 0.05)
self.assertTrue(perc_change_1.score > 1.96)
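# (Added note) 1.96 is the two-sided normal critical value at alpha = 0.05, so
# score > 1.96 and p_value < 0.05 are two views of the same significance check.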
# test a detector with false stat sig
second_values = 10.005 + 0.25 * np.random.randn(len(previous_seq))
second = TimeSeriesData(
pd.DataFrame({"time": previous_seq, "value": second_values})
)
second_int = ChangePointInterval(previous_seq[0], previous_seq[-1])
second_int.data = second
perc_change_2 = PercentageChange(current=current_int, previous=second_int)
self.assertEqual(perc_change_2.stat_sig, False)
self.assertFalse(perc_change_2.p_value < 0.05)
self.assertFalse(perc_change_2.score > 1.96)
# test the edge case when one of the intervals
# contains a single data point
current_int_2 = ChangePointInterval(current_seq[0], current_seq[1])
current_int_2.data = self.current
perc_change_3 = PercentageChange(current=current_int_2, previous=previous_int)
self.assertTrue(perc_change_3.score > 1.96)
# TODO delta method tests
def test_multi_perc_change(self) -> None:
# test for multivariate time series
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
previous_seq = [date_start + timedelta(days=x) for x in range(30)]
current_length = 31
# offset one to make the new interval start one day after the previous one ends
current_seq = [
previous_seq[-1] + timedelta(days=(x + 1)) for x in range(current_length)
]
num_seq = 5
previous_values = np.array(
[1.0 + 0.0001 * np.random.randn(len(previous_seq)) for _ in range(num_seq)]
)
current_values = np.array(
[10.0 + 0.0001 * np.random.randn(len(current_seq)) for _ in range(num_seq)]
)
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `previous`.
self.previous = TimeSeriesData(
pd.DataFrame(
{
**{"time": previous_seq},
**{f"value_{i}": previous_values[i] for i in range(num_seq)},
}
)
)
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `current`.
self.current = TimeSeriesData(
pd.DataFrame(
{
**{"time": current_seq},
**{f"value_{i}": current_values[i] for i in range(num_seq)},
}
)
)
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `prev_start`.
self.prev_start = previous_seq[0]
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `prev_end`.
self.prev_end = previous_seq[9]
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `current_start`.
self.current_start = current_seq[0]
# pyre-fixme[16]: `PercentageChangeTest` has no attribute `current_end`.
self.current_end = current_seq[-1]
previous_int = ChangePointInterval(
previous_seq[0], previous_seq[-1] + timedelta(days=1)
)
previous_int.data = self.previous
current_int = ChangePointInterval(
current_seq[0], current_seq[-1] + timedelta(days=1)
)
current_int.data = self.current
current_int.previous_interval = previous_int
perc_change_1 = PercentageChange(current=current_int, previous=previous_int)
previous_mean = np.array([np.mean(previous_values[i]) for i in range(num_seq)])
current_mean = np.array([np.mean(current_values[i]) for i in range(num_seq)])
# test the ratios
ratio_val = current_mean / previous_mean
ratio_estimate = perc_change_1.ratio_estimate
assert isinstance(ratio_estimate, np.ndarray)
self.assertEqual(ratio_estimate.tolist(), ratio_val.tolist())
for r in ratio_estimate:
self.assertAlmostEqual(r, 10.0, 0)
perc_change = perc_change_1.perc_change
assert isinstance(perc_change, np.ndarray)
self.assertEqual(perc_change.tolist(), ((ratio_val - 1) * 100).tolist())
direction = perc_change_1.direction
assert isinstance(direction, np.ndarray)
self.assertEqual(direction.tolist(), ["up"] * num_seq)
stat_sig = perc_change_1.stat_sig
assert isinstance(stat_sig, np.ndarray)
self.assertEqual(stat_sig.tolist(), [True] * num_seq)
p_value_list, score_list = perc_change_1.p_value, perc_change_1.score
assert isinstance(p_value_list, Iterable)
assert isinstance(score_list, Iterable)
for p_value, score in zip(p_value_list, score_list):
self.assertLess(p_value, 0.05)
self.assertLess(1.96, score)
# test a detector with false stat sig
second_values = np.array(
[10.005 + 0.25 * np.random.randn(len(previous_seq)) for _ in range(num_seq)]
)
second = TimeSeriesData(
pd.DataFrame(
{
**{"time": previous_seq},
**{f"value_{i}": second_values[i] for i in range(num_seq)},
}
)
)
second_int = ChangePointInterval(previous_seq[0], previous_seq[-1])
second_int.data = second
perc_change_2 = PercentageChange(current=current_int, previous=second_int)
stat_sig_list, p_value_list, score_list = (
perc_change_2.stat_sig,
perc_change_2.p_value,
perc_change_2.score,
)
assert isinstance(stat_sig_list, Iterable)
assert isinstance(p_value_list, Iterable)
assert isinstance(score_list, Iterable)
for stat_sig, p_value, score in zip(stat_sig_list, p_value_list, score_list):
self.assertFalse(stat_sig)
self.assertLess(0.05, p_value)
self.assertLess(score, 1.96)
# test a detector with a negative spike
third_values = np.array(
[
1000.0 + 0.0001 * np.random.randn(len(previous_seq))
for _ in range(num_seq)
]
)
third = TimeSeriesData(
pd.DataFrame(
{
**{"time": previous_seq},
**{f"value_{i}": third_values[i] for i in range(num_seq)},
}
)
)
third_int = ChangePointInterval(previous_seq[0], previous_seq[-1])
third_int.data = third
perc_change_3 = PercentageChange(current=current_int, previous=third_int)
p_value_list, score_list = perc_change_3.p_value, perc_change_3.score
assert isinstance(p_value_list, Iterable)
assert isinstance(score_list, Iterable)
for p_value, score in zip(p_value_list, score_list):
self.assertLess(p_value, 0.05)
self.assertLess(score, -1.96)
# test the edge case when one of the intervals
# contains a single data point
current_int_single_point = ChangePointInterval(current_seq[0], current_seq[1])
current_int_single_point.data = self.current
perc_change_single_point = PercentageChange(
current=current_int_single_point, previous=previous_int
)
p_value_list, score_list = (
perc_change_single_point.p_value,
perc_change_single_point.score,
)
assert isinstance(p_value_list, Iterable)
assert isinstance(score_list, Iterable)
for p_value, score in zip(p_value_list, score_list):
self.assertLess(p_value, 0.05)
self.assertLess(1.96, score)
class TestAnomalyResponse(TestCase):
def test_response(self) -> None:
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
previous_seq = [date_start + timedelta(days=x) for x in range(30)]
score_ts = TimeSeriesData(
pd.DataFrame(
{"time": previous_seq, "value": np.random.randn(len(previous_seq))}
)
)
upper_values = 1.0 + np.random.randn(len(previous_seq))
upper_ts = TimeSeriesData(
| pd.DataFrame({"time": previous_seq, "value": upper_values}) | pandas.DataFrame |
#################################################
#created the 04/05/2018 09:52 by <NAME>#
#################################################
#-*- coding: utf-8 -*-
'''
'''
'''
Possible improvements:
'''
import warnings
warnings.filterwarnings('ignore')
#################################################
########### Imports #################
#################################################
import sys
import numpy as np
import pandas as pd
import scipy.stats
import plotly
import plotly.graph_objs as go
import plotly.offline as offline
from plotly import tools
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import train_test_split
import lightgbm as lgb
from sklearn.metrics import log_loss
from sklearn.externals import joblib
from ast import literal_eval
#################################################
########### Global variables ####################
#################################################
### LGB modeling
params = {'learning_rate': 0.015,
'subsample': 0.9,
#'subsample_freq': 1,
'colsample_bytree': 0.9,
'colsample_bylevel':0.9,
'reg_alpha': 1,
'reg_lambda': 1,
'max_depth' : 10,
'min_data_in_leaf': 1,
'boosting': 'dart',#'rf','dart','goss','gbdt'
'objective': 'binary',
'metric': 'binary_logloss',
'is_training_metric': True,
'seed': 99,
'silent': True,
'verbose': -1}
params1 = {'learning_rate': 0.015,
'subsample': 0.9,
#'subsample_freq': 1,
'colsample_bytree': 0.9,
'colsample_bylevel':0.9,
'reg_alpha': 1,
'reg_lambda': 1,
'max_depth' : 8,
'num_leaves': 15,
'min_data_in_leaf': 1,
'boosting': 'dart',#'rf','dart','goss','gbdt'
'objective': 'binary',
'metric': 'binary_logloss',
'is_training_metric': True,
'seed': 99,
'silent': True,
'verbose': -1}
MAX_TREES = 5000
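# Note (added): params and params1 differ only in tree complexity (max_depth 10 vs.
# max_depth 8 with num_leaves 15); the Classifier below trains one dart model with each
# parameter set and averages their predicted probabilities.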
######################################################
class Classifier(BaseEstimator):
def __init__(self):
pass
def fit(self, x1, y1,x2,y2):
watchlist = [(lgb.Dataset(x1, label=y1), 'train'), (lgb.Dataset(x2, label=y2), 'valid')]
self.clf2 = lgb.train(params, lgb.Dataset(x1, label=y1), MAX_TREES, lgb.Dataset(x2, label=y2),verbose_eval=200, feval=logloss_lgbm, early_stopping_rounds=300)
self.clf1 = lgb.train(params1, lgb.Dataset(x1, label=y1), MAX_TREES, lgb.Dataset(x2, label=y2),verbose_eval=200, feval=logloss_lgbm, early_stopping_rounds=300)
def predict(self, X):
return self.clf1.predict(X)
def predict_proba(self, X):
res1 = self.clf1.predict(X, num_iteration = self.clf1.best_iteration)
res2 = self.clf2.predict(X,num_iteration = self.clf2.best_iteration)
return np.array([[1-0.5*(a+b),0.5*(a+b)] for a,b in zip(res1,res2)])
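# Minimal usage sketch (added for illustration; variable names are hypothetical):
#
#   clf = Classifier()
#   clf.fit(x_train, y_train, x_valid, y_valid)
#   proba = clf.predict_proba(x_test)   # shape (n_samples, 2): [P(class 0), P(class 1)]
#
# predict_proba averages the probabilities of the two dart boosters (clf1, clf2) and
# returns them in the scikit-learn two-column [p0, p1] format.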
fileX_train ='/home/alexis/Bureau/Stage/Time-series/data/processed/sfrdaily_20180430_0_192_0_cleandata-processed.csv'
fileY_train = '/home/alexis/Bureau/historique/label-30-04.csv'
fileX_valid ='/home/alexis/Bureau/Stage/Time-series/data/processed/sfrdaily_20180507_0_192_0_cleandata-processed.csv'
fileY_valid = '/home/alexis/Bureau/historique/label-07-05.csv'
fileX_test ='/home/alexis/Bureau/Stage/Time-series/data/processed/sfrdaily_20180509_0_192_0_cleandata-processed.csv'
fileY_test = '/home/alexis/Bureau/historique/label-09-05.csv'
#################################################
########### Important functions #################
#################################################
def load(fileX,fileY):
X = pd.DataFrame()
y = | pd.DataFrame() | pandas.DataFrame |
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline: iPython magic functions cannot be used here!!!
import seaborn as sns
from sklearn import preprocessing
import folium
from config.settings import DATA_DIR, TEMPLATES
from config.settings import STATICFILES_DIRS
# Convert the data file into a dataframe
df = | pd.read_excel(DATA_DIR[0] + '/city_pop.xlsx') | pandas.read_excel |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame(
{"A": | date_range("20130101", periods=5, freq="s") | pandas.date_range |
# run_experiment
# Basics
import pandas as pd
import numpy as np
import datetime
import pickle
import typer
import os
# Import paths
from globals import DATA_MODELLING_FOLDER, EVALUATION_RESULTS, full_feat_models, overlapping_feat_models, full_feat_models_rfe
# Import sklearn processing/pipeline
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import RFE
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
# Models
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.dummy import DummyClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
# Metrics
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
class evaluation_manager():
def __init__(self, X_train, y_train, X_test, y_test, x_train_data, y_train_data, x_test_data, y_test_data, eval_name, EVALUATION_RESULTS):
# Establish data, models, and runs for the experiment
self.X_train = X_train
self.y_train = y_train
self.X_test = X_test
self.y_test = y_test
self.X_train_type = x_train_data
self.y_train_type = y_train_data
self.X_test_type = x_test_data
self.y_test_type = y_test_data
self.models = dict()
self.runs = 10
self.results_path = EVALUATION_RESULTS
self.eval_name = eval_name
self.out_path = os.path.join(self.results_path, self.eval_name)
self.model_path = self.out_path + "/models/"
if os.path.isdir(self.out_path):
raise Exception("Name already exists")
else:
os.mkdir(self.out_path + "/")
os.mkdir(self.out_path + "/models/")
def eval_run(self, pipe_model, model_name, run):
"""
Fits the given pipeline model, evaluates it on the train and test splits,
and returns the run metrics and feature importances.
"""
# Fit model
pipe_model.fit(self.X_train, self.y_train.target)
# Collect model predictions
training_predictions = pipe_model.predict(self.X_train)
test_predictions = pipe_model.predict(self.X_test)
# Obtain probabilities for calculation of AUC
y_train_score = pipe_model.predict_proba(self.X_train)[:,1]
y_test_score = pipe_model.predict_proba(self.X_test)[:,1]
# Obtain AUC scores
train_auc = roc_auc_score(self.y_train, y_train_score)
test_auc = roc_auc_score(self.y_test, y_test_score)
# Obtain training sensitivity/recall, specificity, PPV/precision, and NPV
train_tn, train_fp, train_fn, train_tp = confusion_matrix(self.y_train, training_predictions).ravel()
train_spec = train_tn/(train_tn + train_fp)
train_sens = train_tp/(train_tp + train_fn)
train_prec = train_tp/(train_tp + train_fp)
train_npv = train_tn/(train_tn + train_fn)
train_f1 = f1_score(self.y_train, training_predictions)
# Obtain validation sensitivity, specificity, PPV/precision, and NPV
test_tn, test_fp, test_fn, test_tp = confusion_matrix(self.y_test, test_predictions).ravel()
test_spec = test_tn/(test_tn + test_fp)
test_sens = test_tp/(test_tp + test_fn)
test_prec = test_tp/(test_tp + test_fp)
test_npv = test_tn/(test_tn + test_fn)
test_f1 = f1_score(self.y_test, test_predictions)
# Store scores for the current split
train_bal_acc = balanced_accuracy_score(self.y_train, training_predictions)
test_bal_acc = balanced_accuracy_score(self.y_test, test_predictions)
train_acc = accuracy_score(self.y_train, training_predictions)
test_acc = accuracy_score(self.y_test, test_predictions)
# Feature Importances
print(pipe_model.steps[1][1])
if model_name == 'Dummy_Classification' or model_name == "KNearest_Neighbors" or model_name == "Support_Vector_Machine":
FI = np.zeros(len(self.X_train.columns))
elif model_name == 'Logistic_Regression':
FI = pipe_model.steps[1][1].coef_.flatten()
elif model_name == 'Random_Forest' or model_name == 'Gradient Boosting Classifier':
FI = pipe_model.steps[1][1].feature_importances_.flatten()
else: raise Exception("model_name doesn't match options for FI")
# Store model
pickle.dump(pipe_model, open(self.model_path + "/" + model_name + "_" + str(run), 'wb'))
return train_bal_acc, train_acc, train_auc, train_tp, train_tn, train_fp, train_fn, train_sens, train_spec, train_prec, train_npv, train_f1, test_bal_acc, test_acc, test_auc, test_tp, test_tn, test_fp, test_fn, test_sens, test_spec, test_prec, test_npv, test_f1, FI
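# Readability note (added): the tuple above holds 12 training metrics, the 12 matching
# test metrics, and the feature-importance vector, in a fixed order that callers must
# unpack positionally.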
def process_feature_importances(self, FI_array, model_name):
FI_array = FI_array/self.runs
feat_importance_dict = {"Feature": self.X_train.columns, "Weight": FI_array}
self.FI_df = | pd.DataFrame(feat_importance_dict) | pandas.DataFrame |
import logging
import os
import re
import shutil
import subprocess
from builtins import object, range, str, zip
from collections import OrderedDict, defaultdict
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from editdistance import eval as editdist # Alternative library: python-levenshtein
class VisualLinker(object):
def __init__(self, time=False, verbose=False):
self.logger = logging.getLogger(__name__)
self.pdf_file = None
self.verbose = verbose
self.time = time
self.coordinate_map = None
self.pdf_word_list = None
self.html_word_list = None
self.links = None
self.pdf_dim = None
delimiters = (
u"([\(\)\,\?\u2212\u201C\u201D\u2018\u2019\u00B0\*']|(?<!http):|\.$|\.\.\.)"
)
self.separators = re.compile(delimiters)
# Check if poppler-utils is installed AND the version is 0.36.0 or above
if shutil.which("pdfinfo") is None or shutil.which("pdftotext") is None:
raise RuntimeError("poppler-utils is not installed or they are not in PATH")
version = subprocess.check_output(
"pdfinfo -v", shell=True, stderr=subprocess.STDOUT, universal_newlines=True
)
m = re.search(r"\d\.\d{2}\.\d", version)
if int(m.group(0).replace(".", "")) < 360:
raise RuntimeError(
"Installed poppler-utils's version is %s, but should be 0.36.0 or above"
% m.group(0)
)
def parse_visual(self, document_name, sentences, pdf_path):
self.sentences = sentences
self.pdf_file = (
pdf_path if os.path.isfile(pdf_path) else pdf_path + document_name + ".pdf"
)
if not os.path.isfile(self.pdf_file):
self.pdf_file = self.pdf_file[:-3] + "PDF"
try:
self.extract_pdf_words()
except RuntimeError as e:
self.logger.exception(e)
return
self.extract_html_words()
self.link_lists(search_max=200)
for sentence in self.update_coordinates():
yield sentence
def extract_pdf_words(self):
self.logger.debug(
"pdfinfo '{}' | grep -a Pages | sed 's/[^0-9]*//'".format(self.pdf_file)
)
num_pages = subprocess.check_output(
"pdfinfo '{}' | grep -a Pages | sed 's/[^0-9]*//'".format(self.pdf_file),
shell=True,
)
pdf_word_list = []
coordinate_map = {}
for i in range(1, int(num_pages) + 1):
self.logger.debug(
"pdftotext -f {} -l {} -bbox-layout '{}' -".format(
str(i), str(i), self.pdf_file
)
)
html_content = subprocess.check_output(
"pdftotext -f {} -l {} -bbox-layout '{}' -".format(
str(i), str(i), self.pdf_file
),
shell=True,
)
soup = BeautifulSoup(html_content, "html.parser")
pages = soup.find_all("page")
pdf_word_list_i, coordinate_map_i = self._coordinates_from_HTML(pages[0], i)
pdf_word_list += pdf_word_list_i
# update coordinate map
coordinate_map.update(coordinate_map_i)
self.pdf_word_list = pdf_word_list
self.coordinate_map = coordinate_map
if len(self.pdf_word_list) == 0:
raise RuntimeError(
"Words could not be extracted from PDF: %s" % self.pdf_file
)
# take last page dimensions
page_width, page_height = (
int(float(pages[0].get("width"))),
int(float(pages[0].get("height"))),
)
self.pdf_dim = (page_width, page_height)
if self.verbose:
self.logger.info("Extracted {} pdf words".format(len(self.pdf_word_list)))
def _coordinates_from_HTML(self, page, page_num):
pdf_word_list = []
coordinate_map = {}
block_coordinates = {}
blocks = page.find_all("block")
i = 0 # counter for word_id in page_num
for block in blocks:
x_min_block = int(float(block.get("xmin")))
y_min_block = int(float(block.get("ymin")))
lines = block.find_all("line")
for line in lines:
y_min_line = int(float(line.get("ymin")))
y_max_line = int(float(line.get("ymax")))
words = line.find_all("word")
for word in words:
xmin = int(float(word.get("xmin")))
xmax = int(float(word.get("xmax")))
for content in self.separators.split(word.getText()):
if len(content) > 0: # Ignore empty characters
word_id = (page_num, i)
pdf_word_list.append((word_id, content))
coordinate_map[word_id] = (
page_num,
y_min_line,
xmin,
y_max_line,
xmax,
)
block_coordinates[word_id] = (y_min_block, x_min_block)
i += 1
# sort pdf_word_list by page, block top then block left, top, then left
pdf_word_list = sorted(
pdf_word_list,
key=lambda word_id__: block_coordinates[word_id__[0]]
+ coordinate_map[word_id__[0]][1:3],
)
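# Note (added for clarity): the sort key is (block_top, block_left, line_top, word_left),
# so words are grouped by their containing block first and only then ordered by their
# own position, which helps keep multi-column layouts in reading order.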
return pdf_word_list, coordinate_map
def extract_html_words(self):
html_word_list = []
for sentence in self.sentences:
for i, word in enumerate(sentence.words):
html_word_list.append(((sentence.stable_id, i), word))
self.html_word_list = html_word_list
if self.verbose:
self.logger.info("Extracted {} html words".format(len(self.html_word_list)))
def link_lists(self, search_max=100, edit_cost=20, offset_cost=1):
# NOTE: there are probably some inefficiencies here from rehashing words
# multiple times, but we're not going to worry about that for now
def link_exact(l, u):
l, u, L, U = get_anchors(l, u)
html_dict = defaultdict(list)
pdf_dict = defaultdict(list)
for i, (_, word) in enumerate(self.html_word_list[l:u]):
if html_to_pdf[l + i] is None:
html_dict[word].append(l + i)
for j, (_, word) in enumerate(self.pdf_word_list[L:U]):
if pdf_to_html[L + j] is None:
pdf_dict[word].append(L + j)
for word, html_list in list(html_dict.items()):
pdf_list = pdf_dict[word]
if len(html_list) == len(pdf_list):
for k in range(len(html_list)):
html_to_pdf[html_list[k]] = pdf_list[k]
pdf_to_html[pdf_list[k]] = html_list[k]
def link_fuzzy(i):
(_, word) = self.html_word_list[i]
l = u = i
l, u, L, U = get_anchors(l, u)
offset = int(L + float(i - l) / (u - l) * (U - L))
searchIndices = np.clip(offset + search_order, 0, M - 1)
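# Worked example (illustrative numbers): with anchors l=10, u=20 mapped to L=100, U=130
# and i=15, offset = int(100 + 5/10 * 30) = 115, and searchIndices then fans out around
# 115 in the order 0, +1, -1, +2, -2, ... (clipped to [0, M-1]).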
cost = [0] * search_max
for j, k in enumerate(searchIndices):
other = self.pdf_word_list[k][1]
if (
word.startswith(other)
or word.endswith(other)
or other.startswith(word)
or other.endswith(word)
):
html_to_pdf[i] = k
return
else:
cost[j] = int(editdist(word, other)) * edit_cost + j * offset_cost
html_to_pdf[i] = searchIndices[np.argmin(cost)]
return
def get_anchors(l, u):
while l >= 0 and html_to_pdf[l] is None:
l -= 1
while u < N and html_to_pdf[u] is None:
u += 1
if l < 0:
l = 0
L = 0
else:
L = html_to_pdf[l]
if u >= N:
u = N
U = M
else:
U = html_to_pdf[u]
return l, u, L, U
def display_match_counts():
matches = sum(
[
html_to_pdf[i] is not None
and self.html_word_list[i][1]
== self.pdf_word_list[html_to_pdf[i]][1]
for i in range(len(self.html_word_list))
]
)
total = len(self.html_word_list)
self.logger.info(
"({:d}/{:d}) = {:.2f}".format(matches, total, matches / total)
)
return matches
N = len(self.html_word_list)
M = len(self.pdf_word_list)
try:
assert N > 0 and M > 0
except Exception:
self.logger.exception("N = {} and M = {} are invalid values.".format(N, M))
html_to_pdf = [None] * N
pdf_to_html = [None] * M
search_radius = search_max // 2
# first pass: global search for exact matches
link_exact(0, N)
if self.verbose:
self.logger.debug("Global exact matching:")
display_match_counts()
# second pass: local search for exact matches
for i in range(((N + 2) // search_radius) + 1):
link_exact(
max(0, i * search_radius - search_radius),
min(N, i * search_radius + search_radius),
)
if self.verbose:
self.logger.debug("Local exact matching:")
display_match_counts()
# third pass: local search for approximate matches
search_order = np.array(
[(-1) ** (i % 2) * (i // 2) for i in range(1, search_max + 1)]
)
for i in range(len(html_to_pdf)):
if html_to_pdf[i] is None:
link_fuzzy(i)
if self.verbose:
self.logger.debug("Local approximate matching:")
display_match_counts()
# convert list to dict
matches = sum(
[
html_to_pdf[i] is not None
and self.html_word_list[i][1] == self.pdf_word_list[html_to_pdf[i]][1]
for i in range(len(self.html_word_list))
]
)
total = len(self.html_word_list)
if self.verbose:
self.logger.debug(
"Linked {:d}/{:d} ({:.2f}) html words exactly".format(
matches, total, matches / total
)
)
self.links = OrderedDict(
(self.html_word_list[i][0], self.pdf_word_list[html_to_pdf[i]][0])
for i in range(len(self.html_word_list))
)
def _calculate_offset(self, listA, listB, seedSize, maxOffset):
wordsA = list(zip(*listA[:seedSize]))[1]
wordsB = list(zip(*listB[:maxOffset]))[1]
offsets = []
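# (Added note) Each found word contributes its index shift; e.g. shifts of [2, 2, 3, 2]
# yield a median offset of 2, and words of listA missing from listB are simply skipped.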
for i in range(seedSize):
try:
offsets.append(wordsB.index(wordsA[i]) - i)
except Exception as e:
pass
return int(np.median(offsets))
def display_links(self, max_rows=100):
html = []
pdf = []
j = []
for i, l in enumerate(self.links):
html.append(self.html_word_list[i][1])
for k, b in enumerate(self.pdf_word_list):
if b[0] == self.links[self.html_word_list[i][0]]:
pdf.append(b[1])
j.append(k)
break
try:
assert len(pdf) == len(html)
except Exception:
self.logger.exception("PDF and HTML are not the same length")
total = 0
match = 0
for i, word in enumerate(html):
total += 1
if word == pdf[i]:
match += 1
self.logger.info((match, total, match / total))
data = {
# 'i': range(len(self.links)),
"html": html,
"pdf": pdf,
"j": j,
}
pd.set_option("display.max_rows", max_rows)
self.logger.info(pd.DataFrame(data, columns=["html", "pdf", "j"]))
| pd.reset_option("display.max_rows") | pandas.reset_option |
import re, random, os, json
import pandas as pd
import numpy as np
import scipy as sp
import seaborn as sns
from bokeh import mpl
from bokeh.plotting import output_file, show
from sklearn.feature_extraction.text import TfidfVectorizer
from classifier import Classifier, label2domain, manifestolabels
MANIFESTO_FOLDER = "data/wahlprogramme/"
RESULT_FOLDER = "data/resultate/"
if not os.path.isdir(RESULT_FOLDER):
os.mkdir(RESULT_FOLDER)
# Tuples with party names, files and plotting colors
partyFiles = [
('AfD',"afd.md", "blue"),
('CDU/CSU', "cducsu.md", "gray"),
('FDP', "fdp.md", "yellow"),
('SPD', "spd.md", "red"),
('Grüne', "diegruenen.md", "green"),
('<NAME>', "dielinke.md", "purple")
]
# political domains (according to manifestocodes) to be analysed
domains = [
'External Relations',
'Freedom and Democracy',
'Political System',
'Economy',
'Welfare and Quality of Life',
'Fabric of Society'
]
def clean_whitespace(txt):
'''
Replaces multiple whitespaces by blank
'''
return re.sub("\s+"," ",txt)
def read_md(fn, min_len=100):
'''
Reads manifesto from md file;
text segments shorter than min_len are discarded
'''
# uncomment next line for sentence segmentation
# split_symbol = '[\.\!\?\;] '#
# this splits texts per paragraph, marked by one or more '#'
split_symbol = '#+'
md_text = open(fn).read()
len_filter = lambda x: len(x) > min_len
text_segments = re.split(split_symbol,md_text)
texts = filter(len_filter, map(clean_whitespace, text_segments))
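# Note (added): in Python 3 this is a lazy filter object; callers that need a list or
# multiple passes should wrap it, e.g. list(read_md(...)), as classify_br does below.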
return texts
def classify_br(folder, fn, party, clf, max_txts=10000):
'''
Computes predictions for a given party
INPUT:
folder folder where [party].md files are stored
fn filename of [party].md file
party name of party (in case of different spelling than filename)
clf manifestoproject classifier - see classifier.py
max_txts maximal number of texts - subsamples max_txts if there are more
OUTPUT:
predictions pandas DataFrame with predictions, texts and party as columns
'''
content = list(read_md(os.path.join(folder,fn)))
if len(content) > max_txts:
content = random.sample(content, max_txts)
preds = clf.predictBatch(content)
manifesto_codes = list(set(manifestolabels().values()).intersection(set(preds.columns.tolist())))
preds['max_manifesto'] = preds[manifesto_codes].idxmax(axis=1)
preds['max_domain'] = preds[list(label2domain.keys())].idxmax(axis=1)
preds['max_leftright'] = preds[['left', 'right']].idxmax(axis=1)
preds['content'] = content
preds['party'] = party
return preds
def compute_most_distant_statements_per_topic(preds, n_most_distant=5, folder=MANIFESTO_FOLDER):
'''
Computes for each topic and party the text segments that are most distant
to the average text segments of all other parties. Could be interpreted as
'characteristic statements' of a party
INPUT:
preds predictions obtained by classify_br
n_most_distant number of 'characteristic' text segments to choose
folder folder to store results
'''
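# Sketch of the idea (added for clarity): for e.g. domain "Economy" and party "SPD",
# stack the tf-idf vectors of all SPD "Economy" segments, average the tf-idf vectors of
# every other party's "Economy" segments, and keep the SPD segments with the largest
# L1 distance to that average.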
# BoW extraction
tf = TfidfVectorizer().fit(preds.content)
preds['tf_idf'] = preds.content.apply(lambda x: tf.transform([x]))
most_distant_statements = []
for domain in domains:
for party in [x[0] for x in partyFiles]:
# find statements of this party
this_party = (preds.party == party) & (preds.max_domain == domain)
# find statements of other parties
other_parties = (preds.party != party) & (preds.max_domain == domain)
# stack BoW features for this party
partyVecs = sp.sparse.vstack(preds[this_party]['tf_idf'])
partyTexts = preds[this_party]['content']
# stack BoW vectors and take their average
otherVec = sp.sparse.vstack(preds[other_parties]['tf_idf']).mean(axis=0)
# compute L_1 distance between party and other parties
dists = sp.array(abs(partyVecs - otherVec).sum(axis=1)).flatten()
# find and store 'characteristic' text segments
most_distant = [(partyTexts[idx], dists[idx]) for idx in dists.argsort()[-n_most_distant:][-1::-1]]
most_distant_statements.extend([(party, domain, m, d) for m, d in most_distant])
# store results as DataFrame
most_distant_statements_df = | pd.DataFrame(most_distant_statements, columns=['party', 'domain', 'most_distant_to_other_parties', 'distance']) | pandas.DataFrame |
# Import dependencies
def scrapeData():
import urllib.request, json
from bson.json_util import dumps, loads
import os, ssl
import pymongo
import itertools
import pandas as pd
# ### 2021
# In[2]:
if (not os.environ.get('PYTHONHTTPSVERIFY', '') and
getattr(ssl, '_create_unverified_context', None)):
ssl._create_default_https_context = ssl._create_unverified_context
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2021") as url:
inactive_2021 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[3]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2021") as url:
active_2021 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2020
# In[4]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2020") as url:
inactive_2020 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[5]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2020") as url:
active_2020 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2019
# In[6]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2019") as url:
inactive_2019 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[7]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2019") as url:
active_2019 = json.loads(url.read().decode())
# ## 2018
# In[8]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2018") as url:
inactive_2018 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[9]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2018") as url:
active_2018 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2017
# In[10]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2017") as url:
inactive_2017 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[11]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2017") as url:
active_2017 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2016
# In[12]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2016") as url:
inactive_2016 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[13]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2016") as url:
active_2016 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2015
# In[14]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2015") as url:
inactive_2015 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[15]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2015") as url:
active_2015 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2014
# In[16]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2014") as url:
inactive_2014 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[17]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2014") as url:
active_2014 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2013
# In[18]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2013") as url:
inactive_2013 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[19]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2013") as url:
active_2013 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## Concat
# In[20]:
scraped_data = active_2021 + inactive_2021 + active_2020 + inactive_2020 + active_2019 + inactive_2019 + active_2018 + inactive_2018 + active_2017 + inactive_2017 + active_2016 + inactive_2016 + active_2015 + inactive_2015 + active_2014 + inactive_2014 + active_2013 + inactive_2013
len(scraped_data)
# scraped_data
# In[21]:
#delete all fire data with erroneous values for either Latitude or Longitude (values outside the range of possibility)
final_data = []
for item in scraped_data:
if item["Latitude"] < 90 and item["Latitude"] > 0 and item["Longitude"] > -180 and item["Longitude"] < 180:
final_data.append(item)
# final_data
# In[22]:
final_df = | pd.DataFrame(final_data) | pandas.DataFrame |
# ---------------------------------------------------------------------------- #
# World Cup: Stats scanner
# Ver: 0.01
# ---------------------------------------------------------------------------- #
#
# Code by <NAME>
#
# ---------------------------------------------------------------------------- #
import os
import numpy as np
import pandas as pd
import re
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import TimeoutException, NoSuchElementException, WebDriverException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from time import sleep
os.chdir("/mnt/aec0936f-d983-44c1-99f5-0f5b36390285/Dropbox/Python/Predictive Analytics FIFA")
'''
browser = webdriver.Firefox()
browser.get("https://www.whoscored.com/Regions/247/Tournaments/36/Seasons/5967/Stages/15737/Show/International-FIFA-World-Cup-2018")
sleep(3)
base_url = 'https://www.whoscored.com'
def get_countries_links(browser):
return [team.get_attribute('href') for team in browser.find_elements_by_xpath('//table[@id="tournament-fixture"]//td[contains(@class,"team")]//a')]
countries_link = set()
countries_link.update(get_countries_links(browser))
browser.find_elements_by_xpath('//table[@id="tournament-fixture"]//td[contains(@class,"team")]//a')[0].get_attribute('href')
# click next page
browser.find_element_by_xpath('//span[contains(@class, "ui-icon-triangle-1-e")]').click()
sleep(1)
countries_link.update(get_countries_links(browser))
# click next page
browser.find_element_by_xpath('//span[contains(@class, "ui-icon-triangle-1-e")]').click()
sleep(1)
countries_link.update(get_countries_links(browser))
#countries_link
player_link = dict()
for country_link in countries_link:
browser.get(country_link)
sleep(1)
team = browser.find_element_by_xpath('//span[@class="team-header-name"]')
player_link[team.text] = dict()
for player in browser.find_elements_by_xpath('//table[@id="top-player-stats-summary-grid"]//tbody//tr//a'):
player_link[team.text][player.text] = player.get_attribute('href')
np.save("Data/player_link.npy", player_link)
'''
def detect_element(browser, element_id, by_what = By.ID):
# Simplify the detection of an element in the browser
element_present = EC.presence_of_element_located((by_what, element_id))
try:
WebDriverWait(browser, 5, poll_frequency = .1).until(element_present)
return True
except TimeoutException as e:
return False
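# Usage sketch (added; the element id is hypothetical):
#   if detect_element(browser, "statistics-table-summary"):
#       ...  # element appeared within the 5 s WebDriverWait timeout
# Returns True/False instead of raising TimeoutException.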
player_link = np.load("Data/player_link.npy").item()
# will delete nan from already_loaded
already_loaded = rating_dict.copy()
for team in rating_dict.keys():
for player in rating_dict[team]:
if | pd.isnull(rating_dict[team][player]) | pandas.isnull |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 11:30:13 2020
This script calculate damage for the yearly basis
@author: acn980
"""
import os, sys, glob
import pandas as pd
import numpy as np
import warnings
import scipy
import matplotlib.pyplot as plt
import subprocess
warnings.filterwarnings("ignore")
sys.path.insert(0,r'E:\github\seasonality_risk\Functions')
from Functions_HCMC import remove_NaN_skew, detrend_fft, collect_rainfall, calc_avg_max_min_rainfall, thiessen_rain
from Functions_HCMC import damage_surface_coord_z, load_damage, plot_damage_grid
from Functions_HCMC import simulate_rain, simulate_skew, pairs_cooc, pairs_rain, pairs_sl
#%% Setting the files and folder correctly
fn_trunk = 'E:/surfdrive/Documents'
fn_files = 'Paper/Paper5/Hydrodynamic_runs/RISK_maskTelemac'
fn = os.path.join(fn_trunk,fn_files)
#os.chdir(fn)
#%% We plot the drivers - damage curve
damage_grid = load_damage(fn_trunk, fn_files, max_rain=1000, max_sl=3000, thr_rain=50, thr_sl=1030) #max_rain, max_sl, thr_rain, thr_sl
coords, dam = damage_surface_coord_z(damage_grid)
#Plotting damage
f = plt.figure(figsize=(8,4))
ax = f.add_subplot(111, projection='3d', azim=-60, elev=30)
#Plotting damage
plot_damage_grid(damage_grid, ax = ax, rstride=1, ctride=1) # damage_grid.drop(3000,axis=1).drop(1000, axis = 0)
plt.show()
#Plotting damage
f = plt.figure(figsize=(8,4))
ax = f.add_subplot(111, projection='3d', azim=-60, elev=30)
plot_damage_grid(damage_grid.drop([50,1000], axis=0).drop([1030,3000], axis = 1), ax = ax, rstride=1, ctride=1)
plt.show()
xv, yv = np.meshgrid(damage_grid.index.values, damage_grid.columns.values, indexing = 'ij')
Z = damage_grid.to_numpy()
plt.figure()
plt.contour(xv, yv, Z, levels=np.arange(0, 4e9, 5e7), colors='k')  # cmap=plt.cm.Reds
plt.show()
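# A minimal sketch (added for illustration, not part of the original workflow): per-event
# damages could be looked up on the surface above by linear interpolation. It assumes
# `coords` is an (n, 2) array of (rainfall, sea level) points and `dam` the matching
# damage values from damage_surface_coord_z; the 'rain'/'sl' column names are hypothetical.
def interpolate_event_damage(coords, dam, events):
    from scipy.interpolate import griddata
    points = events[['rain', 'sl']].to_numpy()
    return griddata(coords, dam, points, method='linear')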
#%%
varname1 = 'Thiessen'
varname2 = 'skew'
lag_joint = 0
n=50000
dep_type= 'copula' #'copula' #'full corr'
figure_joint = False
cop_sim_R = False
#%% We simulate based on monthly maxima of copula
if cop_sim_R:
output=subprocess.run(["C:/ProgramData/Anaconda3/envs/r_env/Scripts/Rscript.exe", "E:/github/HoChiMinh/YearlyCopFit.R", str(n), str(varname1), str(varname2)],
shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).stderr
#%% We simulate events based on their co-occurrence and dependence
fn_rain = os.path.join(fn_trunk, 'Master2019/Thomas/data/NewRain/TRENDS/MONTH_CORRECTED/yearly_EVA')
param_rain = pd.read_csv(os.path.join(fn_rain,'all_params.csv'), index_col = 'distribution')
best_fit_rain = pd.read_csv(os.path.join(fn_rain,'best_fit_AIC.csv')) #, index_col = 'month')
best_fit_rain.set_index('month', inplace = True)
del fn_rain
#Calculate montly skew surge mean
fn_skew = os.path.join(fn_trunk, 'Master2019\Thomas\data\matlab_csv','skew_WACC_VungTau_Cleaned_Detrended_Strict_sel_const.csv')
date_parser = lambda x: pd.datetime.strptime(x, "%d-%m-%Y %H:%M:%S")
skew = | pd.read_csv(fn_skew, parse_dates = True, date_parser= date_parser, index_col = 'Date') | pandas.read_csv |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import tempfile
import time
from collections import OrderedDict
from datetime import datetime
from string import printable
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
try:
import fastparquet
except ImportError: # pragma: no cover
fastparquet = None
try:
import sqlalchemy
except ImportError: # pragma: no cover
sqlalchemy = None
from .... import tensor as mt
from .... import dataframe as md
from ....config import option_context
from ....tests.core import require_cudf, require_ray
from ....utils import arrow_array_to_objects, lazy_import, pd_release_version
from ..dataframe import from_pandas as from_pandas_df
from ..series import from_pandas as from_pandas_series
from ..index import from_pandas as from_pandas_index, from_tileable
from ..from_tensor import dataframe_from_tensor, dataframe_from_1d_tileables
from ..from_records import from_records
ray = lazy_import("ray")
_date_range_use_inclusive = pd_release_version[:2] >= (1, 4)
def test_from_pandas_dataframe_execution(setup):
# test empty DataFrame
pdf = pd.DataFrame()
df = from_pandas_df(pdf)
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
pdf = pd.DataFrame(columns=list("ab"))
df = from_pandas_df(pdf)
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
pdf = pd.DataFrame(
np.random.rand(20, 30), index=[np.arange(20), np.arange(20, 0, -1)]
)
df = from_pandas_df(pdf, chunk_size=(13, 21))
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
def test_from_pandas_series_execution(setup):
# test empty Series
ps = pd.Series(name="a")
series = from_pandas_series(ps, chunk_size=13)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
series = from_pandas_series(ps)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
ps = pd.Series(
np.random.rand(20), index=[np.arange(20), np.arange(20, 0, -1)], name="a"
)
series = from_pandas_series(ps, chunk_size=13)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
def test_from_pandas_index_execution(setup):
pd_index = pd.timedelta_range("1 days", periods=10)
index = from_pandas_index(pd_index, chunk_size=7)
result = index.execute().fetch()
pd.testing.assert_index_equal(pd_index, result)
def test_index_execution(setup):
rs = np.random.RandomState(0)
pdf = pd.DataFrame(
rs.rand(20, 10),
index=np.arange(20, 0, -1),
columns=["a" + str(i) for i in range(10)],
)
df = from_pandas_df(pdf, chunk_size=13)
# test df.index
result = df.index.execute().fetch()
pd.testing.assert_index_equal(result, pdf.index)
result = df.columns.execute().fetch()
pd.testing.assert_index_equal(result, pdf.columns)
# df has unknown chunk shape on axis 0
df = df[df.a1 < 0.5]
# test df.index
result = df.index.execute().fetch()
pd.testing.assert_index_equal(result, pdf[pdf.a1 < 0.5].index)
s = pd.Series(pdf["a1"], index=pd.RangeIndex(20))
series = from_pandas_series(s, chunk_size=13)
# test series.index which has value
result = series.index.execute().fetch()
pd.testing.assert_index_equal(result, s.index)
s = pdf["a2"]
series = from_pandas_series(s, chunk_size=13)
# test series.index
result = series.index.execute().fetch()
pd.testing.assert_index_equal(result, s.index)
# test tensor
raw = rs.random(20)
t = mt.tensor(raw, chunk_size=13)
result = from_tileable(t).execute().fetch()
pd.testing.assert_index_equal(result, pd.Index(raw))
def test_initializer_execution(setup):
arr = np.random.rand(20, 30)
pdf = pd.DataFrame(arr, index=[np.arange(20), np.arange(20, 0, -1)])
df = md.DataFrame(pdf, chunk_size=(15, 10))
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
df = md.DataFrame(arr, index=md.date_range("2020-1-1", periods=20))
result = df.execute().fetch()
pd.testing.assert_frame_equal(
result, pd.DataFrame(arr, index=pd.date_range("2020-1-1", periods=20))
)
df = md.DataFrame(
{"prices": [100, 101, np.nan, 100, 89, 88]},
index=md.date_range("1/1/2010", periods=6, freq="D"),
)
result = df.execute().fetch()
pd.testing.assert_frame_equal(
result,
pd.DataFrame(
{"prices": [100, 101, np.nan, 100, 89, 88]},
index=pd.date_range("1/1/2010", periods=6, freq="D"),
),
)
s = np.random.rand(20)
ps = pd.Series(s, index=[np.arange(20), np.arange(20, 0, -1)], name="a")
series = md.Series(ps, chunk_size=7)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
series = md.Series(s, index=md.date_range("2020-1-1", periods=20))
result = series.execute().fetch()
pd.testing.assert_series_equal(
result, pd.Series(s, index=pd.date_range("2020-1-1", periods=20))
)
pi = | pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) | pandas.IntervalIndex.from_tuples |
from datetime import time
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.io.excel._base import _BaseExcelReader
from pandas.core.dtypes.missing import isnull
class _XlrdReader(_BaseExcelReader):
def __init__(self, filepath_or_buffer):
"""Reader using xlrd engine.
Parameters
----------
filepath_or_buffer : string, path object or Workbook
Object to be parsed.
"""
err_msg = "Install xlrd >= 1.0.0 for Excel support"
| import_optional_dependency("xlrd", extra=err_msg) | pandas.compat._optional.import_optional_dependency |
import os
import pandas as pd
from collections import defaultdict
import argparse
from pattern.text.en import singularize
# Dictionary used to store subject counts
subject_counts = defaultdict(lambda:0)
# Reads in the data
def read_data(filename):
print("Reading in {}".format(filename))
df = pd.read_csv(filename, skiprows = 1, names = ['doi', 'subjects', 'title'], delimiter="|")
return df
def sort(df, dhlw):
# Used to store our cleaned subject data
cleaned_data = | pd.DataFrame(columns=['doi', 'subjects', 'title']) | pandas.DataFrame |
import gc
import os
import time
import boto3
import dask
import fsspec
import joblib
import numpy as np
import pandas as pd
import rasterio as rio
import rioxarray
import utm
import xarray as xr
import xgboost as xgb
from pyproj import CRS
from rasterio.session import AWSSession
from s3fs import S3FileSystem
import carbonplan_trace.v1.model as m
from carbonplan_trace.v1.glas_allometric_eq import ECO_TO_REALM_MAP
from carbonplan_trace.v1.landsat_preprocess import scene_seasonal_average
from ..v1 import load, utils
# flake8: noqa
fs = S3FileSystem(requester_pays=True)
def write_nodata(ds):
for var in ds.data_vars:
ds[var].rio.write_nodata(np.nan, inplace=True)
def write_crs_dataset(ds, zone=None, overwrite=False):
'''
This function will set a CRS for a dataset (whether or not
one already exists!) so be sure you want to do that!
'''
if zone is None:
zone = '{}{}'.format(ds.utm_zone_number, ds.utm_zone_letter)
crs = CRS.from_dict({'proj': 'utm', 'zone': zone})
ds = ds.rio.set_crs(crs)
return ds
def check_mins_maxes(ds):
lat_lon_crs = CRS.from_epsg(4326)
reprojected = ds.rio.reproject(lat_lon_crs)
min_lat = reprojected.y.min().values
max_lat = reprojected.y.max().values
min_lon = reprojected.x.min().values
max_lon = reprojected.x.max().values
return min_lat, max_lat, min_lon, max_lon
def create_target_grid(min_lat, max_lat, min_lon, max_lon):
tiles = utils.find_tiles_for_bounding_box(min_lat, max_lat, min_lon, max_lon)
if len(tiles) > 0:
full_target_ds = utils.open_and_combine_lat_lon_data(
's3://carbonplan-climatetrace/intermediate/ecoregions_mask/',
tiles=tiles,
lat_lon_box=[min_lat, max_lat, min_lon, max_lon],
consolidated=False,
)
full_target_ds = full_target_ds.rename({'lat': 'y', 'lon': 'x'})
buffer = 0.01
target = full_target_ds.sel(
y=slice(min_lat - buffer, max_lat + buffer), x=slice(min_lon - buffer, max_lon + buffer)
)
target.attrs["crs"] = "EPSG:4326"
del full_target_ds
return target, tiles
else:
return None, []
def reproject_dataset_to_fourthousandth_grid(ds, zone=None):
ds = write_crs_dataset(ds, zone=zone)
min_lat, max_lat, min_lon, max_lon = check_mins_maxes(ds)
if max_lon >= 170 and min_lon <= -170:
data_list, tiles_list, bounding_box_list = [], [], []
target_east, tiles_east = create_target_grid(min_lat, max_lat, max_lon, 180)
if len(tiles_east) > 0:
reprojected_east = ds.rio.reproject_match(target_east).compute()
reprojected_east = reprojected_east.where(reprojected_east < 1e100)
data_list.append(reprojected_east)
tiles_list.append(tiles_east)
bounding_box_list.append([min_lat, max_lat, max_lon, 180])
del target_east
target_west, tiles_west = create_target_grid(min_lat, max_lat, -180, min_lon)
if len(tiles_west) > 0:
reprojected_west = ds.rio.reproject_match(target_west).compute()
reprojected_west = reprojected_west.where(reprojected_west < 1e100)
data_list.append(reprojected_west)
tiles_list.append(tiles_west)
bounding_box_list.append([min_lat, max_lat, -180, min_lon])
del target_west
return data_list, tiles_list, bounding_box_list
else:
target, tiles = create_target_grid(min_lat, max_lat, min_lon, max_lon)
# the numbers aren't too big but if we normalize they might turn into decimals
reprojected = ds.rio.reproject_match(target).compute()
del ds
del target
reprojected = reprojected.where(reprojected < 1e100)
return [reprojected], [tiles], [[min_lat, max_lat, min_lon, max_lon]]
def dataset_to_tabular(ds):
'''
Convert dataset to tabular form for inference
Parameters
----------
ds : xarray dataset
xarray dataset with multiple bands
Returns
-------
df : pandas dataframe
dataframe with columns of bands
'''
df = ds.to_dataframe()
del ds
# drop any nan values so we only carry around pixels we have landsat for
# this will drop both the parts of the dataset that are empty because
# the landsat scenes might be rotated w.r.t. the x/y grid
# but will also drop any cloud-masked regions
# TODO: further investigate %-age of nulls and root cause
df = df.dropna(how='any').reset_index()
return df
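# Hedged, standalone sketch (not part of the pipeline): what `dataset_to_tabular`
# does to a tiny fabricated xarray dataset -- every (x, y) pixel becomes one row
# and the NaN (masked) pixel is dropped.
def _example_dataset_to_tabular():
    ds = xr.Dataset(
        {'red': (('y', 'x'), [[0.1, np.nan], [0.3, 0.4]])},
        coords={'y': [0.0, 1.0], 'x': [10.0, 11.0]},
    )
    return dataset_to_tabular(ds)  # three rows survive the dropna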
def convert_to_lat_lon(df, utm_zone_number, utm_zone_letter):
'''
Given dataframe with x/y coordinates, project
into the correct lat/lon coordinates, based upon UTM zone.
Parameters
----------
df : pandas dataframe
geodataframe whose rows are entries for each row/path scene. Must
include variables 'lon', 'lat'
utm_zone_number : str/int
string or int for the zone number (longitude) appropriate for that
scene as defined by USGS
utm_zone_letter : str
string or int for the zone letter (latitude) appropriate for that
scene as defined by USGS
Returns
-------
df : pandas dataframe
The projected information for each pixel
'''
return utm.to_latlon(df['x'], df['y'], int(utm_zone_number), utm_zone_letter)
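# Hedged single-pixel example of the helper above; UTM zone 33T and the
# easting/northing values are fabricated for illustration only.
def _example_convert_to_lat_lon():
    row = {'x': 500000.0, 'y': 4649776.0}
    return convert_to_lat_lon(row, utm_zone_number=33, utm_zone_letter='T')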
def add_all_variables(data, tiles, year, lat_lon_box=None):
data = load.aster(data, tiles, lat_lon_box=lat_lon_box)
data = load.worldclim(data)
data = load.treecover2000(data, tiles)
data = load.ecoregion(data, tiles, lat_lon_box=lat_lon_box)
return data
def make_inference(input_data, model):
"""
input_data is assumed to be a pandas dataframe, and model uses standard sklearn API with .predict
"""
input_data['NIR_V'] = m.calc_NIR_V(input_data)
input_data = input_data.replace([np.nan, np.inf, -np.inf, None], np.nan)
input_data = input_data.dropna(subset=m.features)
gc.collect()
print(f'predicting on {len(input_data)} records')
t0 = time.time()
with joblib.parallel_backend('threading', n_jobs=8):
model.n_jobs = 8
input_data['biomass'] = model.predict(input_data)
t1 = time.time()
print(f'took {round(t1-t0)} seconds')
return input_data[['x', 'y', 'biomass']]
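# Hedged illustration of the same threaded-prediction pattern on toy data.
# scikit-learn and the toy features are assumptions -- the real `make_inference`
# call needs the project's feature list and a trained biomass model.
def _example_threaded_predict():
    from sklearn.ensemble import RandomForestRegressor
    rng = np.random.default_rng(0)
    X, y = rng.uniform(size=(100, 3)), rng.uniform(size=100)
    toy_model = RandomForestRegressor(n_estimators=10, random_state=0).fit(X, y)
    with joblib.parallel_backend('threading', n_jobs=2):
        toy_model.n_jobs = 2
        return toy_model.predict(X)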
def predict(
model_folder,
path,
row,
year,
access_key_id,
secret_access_key,
output_write_bucket=None,
input_write_bucket=None,
bands_of_interest='all',
):
core_session = boto3.Session(
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
region_name='us-west-2',
)
aws_session = AWSSession(core_session, requester_pays=True)
fs = S3FileSystem(key=access_key_id, secret=secret_access_key, requester_pays=True)
with rio.Env(aws_session):
# create the landsat scene for that year
with dask.config.set(scheduler='single-threaded'):
t0 = time.time()
print('averaging')
landsat_ds = scene_seasonal_average(
path,
row,
year,
access_key_id,
secret_access_key,
aws_session,
core_session,
fs,
write_bucket=None,
bands_of_interest='all',
landsat_generation='landsat-7',
)
t1 = time.time()
print(f'averaging landsat took {round(t1-t0)} seconds')
if landsat_ds:
# reproject from utm to lat/lon
landsat_zone = landsat_ds.utm_zone_number + landsat_ds.utm_zone_letter
# sets null value to np.nan
write_nodata(landsat_ds)
print('reprojecting')
data_list, tiles_list, bounding_box_list = reproject_dataset_to_fourthousandth_grid(
landsat_ds.astype('float32'), zone=landsat_zone
)
del landsat_ds
dfs = []
for data, tiles, bounding_box in zip(data_list, tiles_list, bounding_box_list):
# add in other datasets
data = add_all_variables(data, tiles, year, lat_lon_box=bounding_box).load()
df = dataset_to_tabular(data.drop(['spatial_ref']))
dfs.append(df)
del df
del data
df = pd.concat(dfs)
df = df.loc[df.ecoregion > 0]
df['realm'] = df.ecoregion.apply(ECO_TO_REALM_MAP.__getitem__)
else:
df = pd.DataFrame({})
# apply the correct model for each realm
if len(df) > 0:
# write input
if input_write_bucket is not None:
utils.write_parquet(df, input_write_bucket, access_key_id, secret_access_key)
rf_result = []
for realm, sub in df.groupby('realm'):
rf = m.random_forest_model(
realm=realm,
df_train=None,
df_test=None,
output_folder=model_folder,
validation_year='none',
overwrite=False,
)
rf_result.append(make_inference(sub, rf))
rf_result = pd.concat(rf_result)
del df
else:
rf_result = | pd.DataFrame([[np.nan, np.nan, np.nan]], columns=['x', 'y', 'biomass']) | pandas.DataFrame |
import re
import json
import subprocess
import itertools
from multiprocessing import Pool
import urllib
import pandas as pd
from bs4 import BeautifulSoup
def get_schools(county, year, grade):
"""Get all the schools in a county for a year and grade"""
url = "https://app.azdhs.gov/IDRReportStats/Home/GetSchoolTable?{0}"
query = {
'bRegex': 'false',
'bRegex_0': 'false',
'bRegex_1': 'false',
'bRegex_2': 'false',
'bRegex_3': 'false',
'bRegex_4': 'false',
'bRegex_5': 'false',
'bRegex_6': 'false',
'bRegex_7': 'false',
'bRegex_8': 'false',
'bSearchable_0': 'false',
'bSearchable_1': 'true',
'bSearchable_2': 'false',
'bSearchable_3': 'false',
'bSearchable_4': 'false',
'bSearchable_5': 'false',
'bSearchable_6': 'true',
'bSearchable_7': 'true',
'bSearchable_8': 'false',
'iColumns': '9',
'iDisplayLength': '2000',
'iDisplayStart': '0',
'mDataProp_0': 'SCHOOL_YEAR',
'mDataProp_1': 'SCHOOL_NAME',
'mDataProp_2': 'SCHOOL_TYPE',
'mDataProp_3': 'SCHOOL_GRADE',
'mDataProp_4': 'ENROLLED',
'mDataProp_5': 'ADDRESS',
'mDataProp_6': 'CITY',
'mDataProp_7': 'ZIP',
'mDataProp_8': 'COUNTY',
'sColumns': ',,,,,,,,',
'sEcho': '1',
'selectedCounty': county,
'selectedGrade': grade,
'selectedYear': year,
}
command = ['curl', url.format(urllib.parse.urlencode(query))]
with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:
schools = json.loads(proc.communicate()[0].decode())['aaData']
return schools
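# Hypothetical usage sketch: hits the live AZDHS endpoint through curl, so only
# run it when online; the county/year/grade strings are guesses at the format
# the form expects, not confirmed values.
def _example_get_schools():
    return get_schools(county='MARICOPA', year='2018-2019', grade='Kindergarten')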
def get_data_from_table(table):
"""Put the html table into a dictionary"""
soup = BeautifulSoup(table, 'html5lib')
data = {
'school type': {
'SCHOOL_TYPE': 'N/A'
},
'enrolled': {
'ENROLLED': 'N/A'
},
'medical': {
'PCT_MEDICAL_EXEMPT': 'N/A'
},
'personal': {
'PCT_PBE': 'N/A'
},
'every': {
'PCT_PBE_EXEMPT_ALL': 'N/A'
},
'does': {
'HAS_NURSE': 'N/A'
},
'nurse type': {
'NURSE_TYPE': ''
},
'dtap': {
'PCT_IMMUNE_DTAP': 'N/A',
'PCT_EXEMPT_DTAP': 'N/A',
'PCT_COMPLIANCE_DTAP': 'N/A'
},
'tdap': {
'PCT_IMMUNE_TDAP': 'N/A',
'PCT_EXEMPT_TDAP': 'N/A',
'PCT_COMPLIANCE_TDAP': 'N/A'
},
'mcv': {
'PCT_IMMUNE_MVMVC': 'N/A',
'PCT_EXEMPT_MVMVC': 'N/A',
'PCT_COMPLIANCE_MVMVC': 'N/A'
},
'polio': {
'PCT_IMMUNE_POLIO': 'N/A',
'PCT_EXEMPT_POLIO': 'N/A',
'PCT_COMPLIANCE_POLIO': 'N/A'
},
'mmr': {
'PCT_IMMUNE_MMR': 'N/A',
'PCT_EXEMPT_MMR': 'N/A',
'PCT_COMPLIANCE_MMR': 'N/A'
},
'hep b': {
'PCT_IMMUNE_HEPB': 'N/A',
'PCT_EXEMPT_HEPB': 'N/A',
'PCT_COMPLIANCE_HEPB': 'N/A'
},
'hep a': {
'PCT_IMMUNE_HEPA': 'N/A',
'PCT_EXEMPT_HEPA': 'N/A',
'PCT_COMPLIANCE_HEPA': 'N/A'
},
'hib': {
'PCT_IMMUNE_HIB': 'N/A',
'PCT_EXEMPT_HIB': 'N/A',
'PCT_COMPLIANCE_HIB': 'N/A'
},
'var': {
'PCT_IMMUNE_VAR': 'N/A',
'PCT_EXEMPT_VAR': 'N/A',
'PCT_COMPLIANCE_VAR': 'N/A'
},
}
for row in soup.find_all('div', {'class': 'row'}):
key = None
children = list(row.children)
if len(children) <= 1:
continue
key = children[1].text.lower()
for k in data.keys():
if re.search(k, key):
break
else:
continue
cols = data[k]
col_names = list(cols.keys())
index = 0
for child in children[2:]:
try:
text = child.text.lower()
except Exception:
continue
cols[col_names[index]] = text
index += 1
if index == len(col_names):
break
data[k] = cols
return data
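# Hedged illustration of the markup shape the parser above expects: each div.row
# has the label as its second child and the values after it. The snippet is
# fabricated, not real AZDHS output.
def _example_get_data_from_table():
    html = (
        "<div class='row'><span></span><span>Enrolled</span><span>25</span></div>"
        "<div class='row'><span></span><span>School type</span><span>public</span></div>"
    )
    parsed = get_data_from_table(html)
    return parsed['enrolled'], parsed['school type']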
def get_school_data(school_name, address, grade, year, county, zipcode, city):
"""Get data for a school"""
params = {
'paramSelectedAddress': address,
'paramSelectedCity': city,
'paramSelectedGrade': grade,
'paramSelectedSchool': school_name,
'paramSelectedYear': year,
}
cmnd = [
'curl',
'-d',
"{0}".format(urllib.parse.urlencode(params)),
"https://app.azdhs.gov/IDRReportStats/Home/GetSchoolSpecifications",
]
with subprocess.Popen(cmnd, stdout=subprocess.PIPE) as proc:
table = proc.communicate()[0].decode()
try:
data = {
'School': str(school_name),
'Grade': str(grade),
'Address': str(address),
'School Year': str(year),
'Zipcode': str(zipcode),
'County': str(county),
'City': str(city),
}
table_data = get_data_from_table(table)
for value in table_data.values():
data.update(value)
return data
except Exception:
print(f'Failed: {county}, {year}, {grade}, {school_name}')
raise
def to_csv(vaccines_df):
"""Convert the vaccines dataframe to csv files"""
def create_file_name(n):
return '_'.join(n).replace('-', '_') + '.csv'
columns = {
'Sixth': [
'SCHOOL_NAME',
'SCHOOL_TYPE',
'SCHOOL_ADDRESS_ONE',
'CITY',
'COUNTY',
'ZIP_CODE',
'HAS_NURSE',
'NURSE_TYPE',
'ENROLLED',
'PCT_IMMUNE_DTAP',
'PCT_EXEMPT_DTAP',
'PCT_COMPLIANCE_DTAP',
'PCT_IMMUNE_TDAP',
'PCT_EXEMPT_TDAP',
'PCT_COMPLIANCE_TDAP',
'PCT_IMMUNE_MVMVC',
'PCT_EXEMPT_MVMVC',
'PCT_COMPLIANCE_MVMVC',
'PCT_IMMUNE_POLIO',
'PCT_EXEMPT_POLIO',
'PCT_COMPLIANCE_POLIO',
'PCT_IMMUNE_MMR',
'PCT_EXEMPT_MMR',
'PCT_COMPLIANCE_MMR',
'PCT_IMMUNE_HEPB',
'PCT_EXEMPT_HEPB',
'PCT_COMPLIANCE_HEPB',
'PCT_IMMUNE_VAR',
'PCT_EXEMPT_VAR',
'PCT_COMPLIANCE_VAR',
'PCT_PBE',
'PCT_MEDICAL_EXEMPT',
'PCT_PBE_EXEMPT_ALL',
],
'Childcare': [
'SCHOOL_NAME',
'SCHOOL_TYPE',
'SCHOOL_ADDRESS_ONE',
'CITY',
'COUNTY',
'ZIP_CODE',
'HAS_NURSE',
'NURSE_TYPE',
'ENROLLED',
'PCT_IMMUNE_DTAP',
'PCT_EXEMPT_DTAP',
'PCT_COMPLIANCE_DTAP',
'PCT_IMMUNE_POLIO',
'PCT_EXEMPT_POLIO',
'PCT_COMPLIANCE_POLIO',
'PCT_IMMUNE_MMR',
'PCT_EXEMPT_MMR',
'PCT_COMPLIANCE_MMR',
'PCT_IMMUNE_HIB',
'PCT_EXEMPT_HIB',
'PCT_COMPLIANCE_HIB',
'PCT_IMMUNE_HEPA',
'PCT_EXEMPT_HEPA',
'PCT_COMPLIANCE_HEPA',
'PCT_IMMUNE_HEPB',
'PCT_EXEMPT_HEPB',
'PCT_COMPLIANCE_HEPB',
'PCT_IMMUNE_VAR',
'PCT_EXEMPT_VAR',
'PCT_COMPLIANCE_VAR',
'PCT_PBE',
'PCT_MEDICAL_EXEMPT',
'PCT_PBE_EXEMPT_ALL'
],
'Kindergarten': [
'SCHOOL_NAME',
'SCHOOL_TYPE',
'SCHOOL_ADDRESS_ONE',
'CITY',
'COUNTY',
'ZIP_CODE',
'HAS_NURSE',
'NURSE_TYPE',
'ENROLLED',
'PCT_IMMUNE_DTAP',
'PCT_EXEMPT_DTAP',
'PCT_COMPLIANCE_DTAP',
'PCT_IMMUNE_POLIO',
'PCT_EXEMPT_POLIO',
'PCT_COMPLIANCE_POLIO',
'PCT_IMMUNE_MMR',
'PCT_EXEMPT_MMR',
'PCT_COMPLIANCE_MMR',
'PCT_IMMUNE_HEPB',
'PCT_EXEMPT_HEPB',
'PCT_COMPLIANCE_HEPB',
'PCT_IMMUNE_VAR',
'PCT_EXEMPT_VAR',
'PCT_COMPLIANCE_VAR',
'PCT_PBE',
'PCT_MEDICAL_EXEMPT',
'PCT_PBE_EXEMPT_ALL'
]
}
group_by = ['Grade', 'School Year']
for name, group in vaccines_df.groupby(group_by):
grade, year = name
cols = columns[grade]
df = | pd.DataFrame(group) | pandas.DataFrame |
import logging
import io
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import sandy
__author__ = "<NAME>"
__all__ = [
"Samples",
]
np.random.seed(1)
minimal_testcase = pd.DataFrame(np.random.randn(4, 3))
def cov33csv(func):
def inner(*args, **kwargs):
key = "<KEY>"
kw = kwargs.copy()
if key in kw:
if kw[key]:
print(f"found argument '{key}', ignore oher arguments")
out = func(
*args,
index_col=[0, 1, 2],
)
out.data.index.names = ["MAT", "MT", "E"]
return out
else:
del kw[key]
out = func(*args, **kw)
return out
return inner
class Samples():
"""
Attributes
----------
condition_number
data
Methods
-------
filter_by
from_csv
regression_coefficients
sm_ols
"""
def __init__(self, df):
self.data = pd.DataFrame(df, dtype=float)
def __repr__(self):
return self.data.__repr__()
@property
def data(self):
"""
Dataframe of samples.
Attributes
----------
index : `pandas.Index` or `pandas.MultiIndex`
indices
columns : `pandas.Index`
samples numbering
values : `numpy.array`
sample values as `float`
Returns
-------
`pandas.DataFrame`
tabulated samples
"""
return self._data
@data.setter
def data(self, data):
self._data = data
@property
def condition_number(self):
"""
Return condition number of samples.
Notes
-----
        .. note:: The condition number can help assess multicollinearity.
"""
# The first step is to normalize the independent variables to have
# unit length
X = self.data.T.copy()
norm_x = X.values
for i, name in enumerate(X):
norm_x[:, i] = X[name] / np.linalg.norm(X[name])
norm_xtx = np.dot(norm_x.T, norm_x)
# Then, we take the square root of the ratio of the biggest to the
# smallest eigen values
eigs = np.linalg.eigvals(norm_xtx)
return np.sqrt(eigs.max() / eigs.min())
@property
def mean(self):
return self.data.mean(axis=1).rename("MEAN")
@property
def rstd(self):
return (self.std / self.mean).rename("RSTD")
@property
def std(self):
return self.data.std(axis=1).rename("STD")
def filter_by(self, key, value):
"""
Apply condition to source data and return filtered results.
Parameters
----------
`key` : `str`
any label present in the columns of `data`
`value` : `int` or `float`
value used as filtering condition
Returns
-------
`sandy.Samples`
filtered dataframe of samples
Raises
------
`sandy.Error`
if applied filter returned empty dataframe
Notes
-----
.. note:: The primary function of this method is to make sure that
the filtered dataframe is still returned as a `Samples`
object.
"""
condition = self.data.index.get_level_values(key) == value
out = self.data.copy()[condition]
if out.empty:
raise sandy.Error("applied filter returned empty dataframe")
return self.__class__(out)
def _std_convergence(self):
smp = self.data
rng = range(2, smp.shape[0])
foo = lambda x: smp.loc[:x].std()
return pd.DataFrame(map(foo, rng), index=rng)
def _mean_convergence(self):
smp = self.data
rng = range(1, smp.shape[0])
foo = lambda x: smp.loc[:x].mean()
return pd.DataFrame(map(foo, rng), index=rng)
def _heatmap(self, vmin=-1, vmax=1, cmap="bwr", **kwargs):
corr = np.corrcoef(self.data)
return sns.heatmap(corr, vmin=vmin, vmax=vmax, cmap=cmap, **kwargs)
def sm_ols(self, Y, normalized=False, intercept=False):
X = self.data.T.copy()
NX, MX = X.shape
NY = Y.size
N = min(NX, NY)
if NX != NY:
print(f"X and Y have different size, fit only first {N} samples")
if normalized:
X = X.divide(X.mean()).fillna(0)
Y = Y.divide(Y.mean()).fillna(0)
if intercept:
X = sm.add_constant(X)
model = sm.OLS(Y.iloc[:N].values, X[:N].values)
out = model.fit()
return out
def regression_coefficients(self, Y, **kwargs):
"""
Calculate regression coefficients from OLS model given an output
population.
Parameters
----------
Y : `pandas.Series`
tabulated output population
kwargs : keyword arguments, optional
arguments to pass to method `sm_ols`
Returns
-------
`pandas.DataFrame`
Dataframe with regression coefficients and standard errors.
"""
X = self.data
MX, NX = X.shape
index = X.index
res = self.sm_ols(Y, **kwargs)
params = res.params
bse = res.bse
start_at = 0 if params.size == MX else 1
coeff = pd.DataFrame({
"coeff": params[start_at:],
"stderr": bse[start_at:],
}, index=index)
return coeff
@classmethod
@cov33csv
def from_csv(cls, file, **kwargs):
"""
Read samples from csv file,
Parameters
----------
file : `str`
csv file.
**kwargs : `dict`
keyword options for `pandas.read_csv`.
Returns
-------
`sandy.Samples`
samples into a sandy object.
Examples
--------
>>> csv = minimal_testcase.to_string()
>>> sandy.Samples.from_csv(io.StringIO(csv), sep="\s+")
0 1 2
0 1.62435e+00 -6.11756e-01 -5.28172e-01
1 -1.07297e+00 8.65408e-01 -2.30154e+00
2 1.74481e+00 -7.61207e-01 3.19039e-01
3 -2.49370e-01 1.46211e+00 -2.06014e+00
>>> index = pd.MultiIndex.from_product(
... [[9437], [102], [1e-5, 1e-1, 1e1, 1e6]]
... )
>>> df = minimal_testcase.copy()
>>> df.index = index
>>> csv = df.to_csv()
>>> sandy.Samples.from_csv(io.StringIO(csv), sep="\s+", cov33csv=True)
0 1 2
MAT MT E
9437 102 1.00000e-05 1.62435e+00 -6.11756e-01 -5.28172e-01
1.00000e-01 -1.07297e+00 8.65408e-01 -2.30154e+00
1.00000e+01 1.74481e+00 -7.61207e-01 3.19039e-01
1.00000e+06 -2.49370e-01 1.46211e+00 -2.06014e+00
"""
df = | pd.read_csv(file, **kwargs) | pandas.read_csv |
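# Hedged usage sketch for the `Samples` class above (illustrative only, in the
# spirit of its doctests): build the object from the module-level test case and
# read off the summary statistics.
if __name__ == "__main__":
    _demo = Samples(minimal_testcase)
    print(_demo.mean)              # per-row mean over the three samples
    print(_demo.rstd)              # relative standard deviation
    print(_demo.condition_number)  # multicollinearity diagnostic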
#!/usr/bin/env python
"""
Copyright 2019 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
Evals PLDA LLR
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import sys
import os
import argparse
import time
import logging
import numpy as np
import pandas as pd
from hyperion.hyp_defs import float_cpu, config_logger
from hyperion.utils import SCPList, TrialNdx, TrialScores, ExtSegmentList, RTTM
from hyperion.helpers.tracking_data_reader import TrackingDataReader as TDR
from hyperion.helpers import PLDAFactory as F
from hyperion.transforms import TransformList
def flatten_segment_scores(ndx_seg, scores):
scores = scores.align_with_ndx(ndx_seg)
idx=(ndx_seg.trial_mask.T == True).nonzero()
new_segment_ids = []
segment_ids = []
model_ids = []
flat_scores = np.zeros((len(idx[0]),), dtype=float)
k = 0
for item in zip(idx[0], idx[1]):
model_ids.append(ndx_seg.model_set[item[1]])
segment_ids.append(ndx_seg.seg_set[item[0]])
new_segment_ids.append('%s-%08d' % (ndx_seg.seg_set[item[0]],k))
flat_scores[k] = scores.scores[item[1], item[0]]
k +=1
new_segment_ids = np.array(new_segment_ids)
segment_ids = np.array(segment_ids)
model_ids = np.array(model_ids)
return new_segment_ids, segment_ids, model_ids, flat_scores
def prepare_output_ext_segments(ext_segments_in, new_ext_segment_ids, ext_segment_ids, model_ids, scores):
df_map = pd.DataFrame({'new_ext_segment_id': new_ext_segment_ids, 'ext_segment_id': ext_segment_ids})
new_segments = | pd.merge(ext_segments_in.segments, df_map) | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
# ## Visualize a representation of the spherized LINCS Cell Painting dataset
# In[1]:
import umap
import pathlib
import numpy as np
import pandas as pd
import plotnine as gg
from pycytominer.cyto_utils import infer_cp_features
# In[2]:
np.random.seed(9876)
# In[3]:
profile_path = pathlib.Path("profiles")
batches = ["2016_04_01_a549_48hr_batch1", "2017_12_05_Batch2"]
norm_methods = ["whole_plate", "dmso"]
file_filler = "_dmso_spherized_profiles_with_input_normalized_by_"
output_dir = pathlib.Path("figures")
output_dir = {batch: pathlib.Path(output_dir, batch) for batch in batches}
# In[4]:
# Identify UMAP embeddings for all spherized profiles
embeddings = {batch: {} for batch in batches}
for batch in batches:
for norm_method in norm_methods:
file = pathlib.Path(profile_path, f"{batch}{file_filler}{norm_method}.csv.gz")
print(f"Now obtaining UMAP embeddings for {file}...")
# Load spherized data
spherized_df = | pd.read_csv(file) | pandas.read_csv |
def convert_to_perlodes(TaXon_table_xlsx, operational_taxon_list, path_to_outdirs):
import PySimpleGUI as sg
import pandas as pd
from pandas import DataFrame
import numpy as np
from pathlib import Path
#get the taxonomy from the operational taxon list
operational_taxon_list_df = pd.read_excel(Path(operational_taxon_list), header=2, sheet_name="Operationelle Taxaliste")
taxonomy_list = operational_taxon_list_df["Taxonname\n(Perlodes-Datenbank)"].values.tolist()
taxonomy_list = [x for x in taxonomy_list if str(x) != 'nan']
# get the according IDs from the operational taxon list
IDs_list = operational_taxon_list_df["ID_\nART"].values.tolist()
IDs_list = [x for x in IDs_list if str(x) != 'nan']
# create a dict to store both the ID and the taxonomy
operational_taxon_list_dict = {}
for ID, taxonomy in zip(IDs_list, taxonomy_list):
operational_taxon_list_dict[taxonomy] = int(ID)
# load the taxon table and create a list
TaXon_table_xlsx = Path(TaXon_table_xlsx)
TaXon_table_df = pd.read_excel(TaXon_table_xlsx)
TaXon_table_taxonomy = TaXon_table_df.columns.tolist()[0:7]
samples_list = TaXon_table_df.columns.tolist()[10:]
# store hits and dropped OTUs
hit_list, dropped_list, transversion_list = [], [], []
# loop through the taxon table
for taxonomy in TaXon_table_df[TaXon_table_taxonomy].drop_duplicates().values.tolist():
# collect the OTU name, species, genus and family and convert to perlodes format
OTU = taxonomy[0]
Species = taxonomy[6]
Species_group = str(taxonomy[6]) + "-Gr."
Genus = str(taxonomy[5]) + " sp."
Family = str(taxonomy[4]) + " Gen. sp."
# test if the OTU has a hit at: Species level, Genus level or Family level
if Species in operational_taxon_list_dict.keys():
# add to hit list
hit_list.append([OTU] + [str(operational_taxon_list_dict[Species])] + [Species])
# add to perlodes log file
transversion_list.append(taxonomy + [str(operational_taxon_list_dict[Species])] + [Species])
elif Species_group in operational_taxon_list_dict.keys():
hit_list.append([OTU] + [str(operational_taxon_list_dict[Species_group])] + [Species_group])
transversion_list.append(taxonomy + [str(operational_taxon_list_dict[Species_group])] + [Species_group])
elif Genus in operational_taxon_list_dict.keys():
hit_list.append([OTU] + [str(operational_taxon_list_dict[Genus])] + [Genus])
transversion_list.append(taxonomy + [str(operational_taxon_list_dict[Genus])] + [Genus])
elif Family in operational_taxon_list_dict.keys():
hit_list.append([OTU] + [str(operational_taxon_list_dict[Family])] + [Family])
transversion_list.append(taxonomy + [str(operational_taxon_list_dict[Family])] + [Family])
# otherwise store the hit with an "nan"
else:
hit_list.append([OTU] + ["nan"])
dropped_list.append(OTU)
transversion_list.append(taxonomy + ["", ""])
# create an output list for perlodes
    # make read abundances binary
perlodes_input_list = []
for hit, row in zip(hit_list, TaXon_table_df.values.tolist()):
# skip OTUs that were not in the OPT
if hit[1] != "nan":
reads_list = []
# loop through all the sample of the file
for reads in row[10:]:
# reads > 0 --> 1
if reads > 0:
reads_list.append(1)
# reads == 0 --> 0
else:
reads_list.append(0)
# now append the taxonomy and the presence/absence to the perlodes list
perlodes_input_list.append([hit[1]] + [hit[2]] + reads_list)
# print the number of dropped OTUs
number_of_initial_OTUs = len(TaXon_table_df[TaXon_table_taxonomy].drop_duplicates().values.tolist())
print("Warning: Dropped " + str(len(dropped_list)) + " of " + str(number_of_initial_OTUs) + " OTUs.\n")
# write the perlodes output file
perlodes_df = | pd.DataFrame(perlodes_input_list) | pandas.DataFrame |
"""
This file contains functions that allows running adaptive
selection in parallel.
@author: <NAME>
"""
from typing import List, Any, Optional
import pandas as pd
from sklearn.base import clone
# It can serialize class methods and lambda functions.
import pathos.multiprocessing as mp
def add_partition_key(
df: pd.DataFrame,
series_keys: List[str],
n_partitions: int
) -> pd.DataFrame:
"""
Add to `df` a new column that helps to balance load between
different processes uniformly.
:param df:
data to be transformed in long format
:param series_keys:
columns that are identifiers of unique time series
:param n_partitions:
number of processes that will be used for parallel
execution
:return:
DataFrame with a new column named 'partition_key'
"""
keys_df = df[series_keys].drop_duplicates()
keys_df = (
keys_df
.reset_index()
.rename(columns={'index': 'partition_key'})
)
keys_df['partition_key'] = keys_df['partition_key'].apply(
lambda x: x % n_partitions
)
df = df.merge(keys_df, on=series_keys)
return df
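# Hedged demo of the helper above (made-up column names): three unique series are
# assigned to two partitions; the key comes from each series' first-row index
# modulo `n_partitions`, so workers always receive whole series.
def _example_add_partition_key():
    df = pd.DataFrame({
        'series_id': ['a', 'b', 'c', 'a', 'b', 'c'],
        'value': [1, 2, 3, 4, 5, 6],
    })
    out = add_partition_key(df, series_keys=['series_id'], n_partitions=2)
    return out.groupby('partition_key')['series_id'].nunique()  # 0 -> 2 series, 1 -> 1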
def fit_selector_in_parallel(
selector_instance: Any,
df: pd.DataFrame,
name_of_target: str,
series_keys: List[str],
scoring_keys: Optional[List[str]] = None,
n_processes: int = 1
) -> 'type(selector_instance)':
"""
Create a new selector of specified parameters and fit it with
paralleling based on enumeration of unique time series.
:param selector_instance:
instance that specifies class of resulting selector
and its initial parameters
:param df:
DataFrame in long format that contains time series
:param name_of_target:
name of target column
:param series_keys:
columns that are identifiers of unique time series
:param scoring_keys:
identifiers of groups such that best forecasters are
selected per a group, not per an individual time series,
see more in documentation on `fit` method of selector
:param n_processes:
number of parallel processes, default is 1
:return:
new fitted instance of selector
"""
fit_kwargs = {
'name_of_target': name_of_target,
'series_keys': series_keys,
'scoring_keys': scoring_keys or series_keys
}
try:
df = add_partition_key(df, series_keys, n_processes)
selectors = mp.Pool(n_processes).map(
lambda x: clone(selector_instance).fit(x, **fit_kwargs),
[group for _, group in df.groupby('partition_key', as_index=False)]
) # pragma: no cover (`coverage` has issues with multiprocessing)
results_tables = [
selector.best_scores_ for selector in selectors
]
best_scores = pd.concat(results_tables)
selector = selectors[0] # An arbitrary fitted selector.
selector.best_scores_ = best_scores
return selector
finally:
df.drop('partition_key', axis=1, inplace=True)
def predict_with_selector_in_parallel(
selector: Any,
df: pd.DataFrame,
n_processes: int = 1
) -> pd.DataFrame:
"""
Predict future values of series with paralleling by series keys.
:param selector:
instance that has been fitted before
:param df:
DataFrame in long format that contains time series
:param n_processes:
number of parallel processes, default is 1
:return:
DataFrame in long format with predictions
"""
try:
df = add_partition_key(df, selector.series_keys_, n_processes)
predictions = mp.Pool(n_processes).map(
lambda x: selector.predict(x),
[group for _, group in df.groupby('partition_key', as_index=False)]
) # pragma: no cover (`coverage` has issues with multiprocessing)
result = | pd.concat(predictions) | pandas.concat |
from __future__ import division
import numpy as np
import pandas as pd
from base.uber_model import UberModel, ModelSharedInputs
from .iec_functions import IecFunctions
class IecInputs(ModelSharedInputs):
"""
Input class for IEC.
"""
def __init__(self):
"""Class representing the inputs for IEC"""
super(IecInputs, self).__init__()
self.dose_response = pd.Series([], dtype="float")
self.lc50 = pd.Series([], dtype="float")
self.threshold = pd.Series([], dtype="float")
class IecOutputs(object):
"""
Output class for IEC.
"""
def __init__(self):
"""Class representing the outputs for IEC"""
super(IecOutputs, self).__init__()
self.out_z_score_f = | pd.Series([], dtype="float", name="out_z_score_f") | pandas.Series |
from .microfaune_package.microfaune.detection import RNNDetector
from .microfaune_package.microfaune import audio
import matplotlib.pyplot as plt
import pandas as pd
import scipy.signal as scipy_signal
import numpy as np
import seaborn as sns
from .IsoAutio import *
def local_line_graph(
local_scores,
clip_name,
sample_rate,
samples,
automated_df=None,
premade_annotations_df=None,
premade_annotations_label="Human Labels",
log_scale=False,
save_fig=False,
normalize_local_scores=False):
"""
Function that produces graphs with the local score plot and spectrogram of
an audio clip. Now integrated with Pandas so you can visualize human and
automated annotations.
Args:
local_scores (list of floats)
- Local scores for the clip determined by the RNN.
clip_name (string)
- Directory of the clip.
sample_rate (int)
- Sample rate of the audio clip, usually 44100.
samples (list of ints)
- Each of the samples from the audio clip.
automated_df (Dataframe)
- Dataframe of automated labelling of the clip.
premade_annotations_df (Dataframe)
- Dataframe labels that have been made outside of the scope of this
function.
premade_annotations_label (string)
- Descriptor of premade_annotations_df
log_scale (boolean)
- Whether the axis for local scores should be logarithmically
scaled on the plot.
save_fig (boolean)
- Whether the clip should be saved in a directory as a png file.
Returns:
None
"""
# Calculating the length of the audio clip
duration = samples.shape[0] / sample_rate
# Calculating the number of local scores outputted by Microfaune
num_scores = len(local_scores)
# the case for normalizing the local scores between [0,1]
if normalize_local_scores:
local_scores_max = max(local_scores)
for ndx in range(num_scores):
local_scores[ndx] = local_scores[ndx] / local_scores_max
    # Making sure that the x-axis time stamps line up across the
    # spectrogram and the local score plot
step = duration / num_scores
time_stamps = np.arange(0, duration, step)
if len(time_stamps) > len(local_scores):
time_stamps = time_stamps[:-1]
# general graph features
fig, axs = plt.subplots(2)
fig.set_figwidth(22)
fig.set_figheight(10)
fig.suptitle("Spectrogram and Local Scores for " + clip_name)
# score line plot - top plot
axs[0].plot(time_stamps, local_scores)
axs[0].set_xlim(0, duration)
if log_scale:
axs[0].set_yscale('log')
else:
axs[0].set_ylim(0, 1)
axs[0].grid(which='major', linestyle='-')
# Adding in the optional automated labels from a Pandas DataFrame
# if automated_df is not None:
if not automated_df.empty:
ndx = 0
for row in automated_df.index:
minval = automated_df["OFFSET"][row]
maxval = automated_df["OFFSET"][row] + \
automated_df["DURATION"][row]
axs[0].axvspan(xmin=minval, xmax=maxval, facecolor="yellow",
alpha=0.4, label="_" * ndx + "Automated Labels")
ndx += 1
# Adding in the optional premade annotations from a Pandas DataFrame
if not premade_annotations_df.empty:
ndx = 0
for row in premade_annotations_df.index:
minval = premade_annotations_df["OFFSET"][row]
maxval = premade_annotations_df["OFFSET"][row] + \
premade_annotations_df["DURATION"][row]
axs[0].axvspan(
xmin=minval,
xmax=maxval,
facecolor="red",
alpha=0.4,
label="_" *
ndx +
premade_annotations_label)
ndx += 1
axs[0].legend()
# spectrogram - bottom plot
# Will require the input of a pandas dataframe
Pxx, freqs, bins, im = axs[1].specgram(
samples,
Fs=sample_rate,
NFFT=4096,
noverlap=2048,
window=np.hanning(4096),
cmap="ocean")
axs[1].set_xlim(0, duration)
axs[1].set_ylim(0, 22050)
axs[1].grid(which='major', linestyle='-')
# save graph
if save_fig:
plt.savefig(clip_name + "_Local_Score_Graph.png")
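# Hedged synthetic example of the plotting helper above: one second of noise at
# 44.1 kHz plus a fabricated 100-point score track; empty dataframes skip the
# annotation overlays. Real calls pass Microfaune local scores.
def _example_local_line_graph():
    sr = 44100
    samples = np.random.uniform(-1, 1, sr)
    scores = list(np.random.uniform(0, 1, 100))
    local_line_graph(scores, "synthetic.wav", sr, samples,
                     automated_df=pd.DataFrame(),
                     premade_annotations_df=pd.DataFrame())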
# TODO rework function so that instead of generating the automated labels, it
# takes the automated_df as input same as it does with the manual dataframe.
def local_score_visualization(
clip_path,
weight_path=None,
premade_annotations_df=None,
premade_annotations_label="Human Labels",
automated_df=False,
isolation_parameters=None,
log_scale=False,
save_fig=False,
normalize_local_scores=False):
"""
Wrapper function for the local_line_graph function for ease of use.
Processes clip for local scores to be used for the local_line_graph
function.
Args:
clip_path (string)
- Path to an audio clip.
weight_path (string)
- Weights to be used for RNNDetector.
premade_annotations_df (Dataframe)
- Dataframe of annotations to be displayed that have been created
outside of the function.
premade_annotations_label (string)
- String that serves as the descriptor for the premade_annotations
dataframe.
automated_df (Dataframe)
- Whether the audio clip should be labelled by the isolate function
and subsequently plotted.
log_scale (boolean)
- Whether the axis for local scores should be logarithmically
scaled on the plot.
save_fig (boolean)
- Whether the plots should be saved in a directory as a png file.
Returns:
None
"""
# Loading in the clip with Microfaune's built-in loading function
try:
SAMPLE_RATE, SIGNAL = audio.load_wav(clip_path)
except BaseException:
print("Failure in loading", clip_path)
return
# downsample the audio if the sample rate > 44.1 kHz
# Force everything into the human hearing range.
try:
if SAMPLE_RATE > 44100:
rate_ratio = 44100 / SAMPLE_RATE
SIGNAL = scipy_signal.resample(
SIGNAL, int(len(SIGNAL) * rate_ratio))
SAMPLE_RATE = 44100
except BaseException:
print("Failure in downsampling", clip_path)
return
# Converting to Mono if Necessary
if len(SIGNAL.shape) == 2:
# averaging the two channels together
SIGNAL = SIGNAL.sum(axis=1) / 2
# Initializing the detector to baseline or with retrained weights
if weight_path is None:
# Microfaune RNNDetector class
detector = RNNDetector()
else:
try:
# Initializing Microfaune hybrid CNN-RNN with new weights
detector = RNNDetector(weight_path)
except BaseException:
print("Error in weight path:", weight_path)
return
try:
# Computing Mel Spectrogram of the audio clip
microfaune_features = detector.compute_features([SIGNAL])
# Running the Mel Spectrogram through the RNN
global_score, local_score = detector.predict(microfaune_features)
except BaseException:
print(
"Skipping " +
clip_path +
" due to error in Microfaune Prediction")
# In the case where the user wants to look at automated bird labels
if premade_annotations_df is None:
premade_annotations_df = | pd.DataFrame() | pandas.DataFrame |
import copy
from io import StringIO
import numpy as np
import pandas as pd
from django import forms
from django.core.exceptions import ValidationError
from django.forms.widgets import RadioSelect, Select, Textarea, TextInput
from pandas.errors import ParserError
from core.utils.util import md5_hash
from .models import Label, Project, ProjectPermissions
def clean_data_helper(data, supplied_labels, metadata_fields=[]):
ALLOWED_TYPES = [
"text/csv",
"text/tab-separated-values",
"application/vnd.ms-excel",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"application/vnd.openxmlformats-officedocument.spreadsheetml.template",
"application/vnd.ms-excel.sheet.macroenabled.12",
"application/vnd.ms-excel.template.macroenabled.12",
"application/vnd.ms-excel.addin.macroenabled.12",
"application/vnd.ms-excel.sheet.binary.macroenabled.12",
]
REQUIRED_HEADERS = ["Text", "Label"]
MAX_FILE_SIZE = 500 * 1000 * 1000
if data.size > MAX_FILE_SIZE:
raise ValidationError(
"File is too large. Received {0} but max size is {1}.".format(
data.size, MAX_FILE_SIZE
)
)
try:
if data.content_type == "text/tab-separated-values":
data = pd.read_csv(
StringIO(data.read().decode("utf8", "ignore")),
sep="\t",
dtype=str,
).dropna(axis=0, how="all")
elif data.content_type == "text/csv":
data = pd.read_csv(
StringIO(data.read().decode("utf8", "ignore")),
dtype=str,
).dropna(axis=0, how="all")
elif data.content_type.startswith("application/vnd") and data.name.endswith(
".csv"
):
data = pd.read_csv(
StringIO(data.read().decode("utf8", "ignore")),
dtype=str,
).dropna(axis=0, how="all")
elif data.content_type.startswith("application/vnd") and data.name.endswith(
".xlsx"
):
data = | pd.read_excel(data, dtype=str) | pandas.read_excel |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 16:06:04 2020
@author: ryancrisanti
"""
from .prediction import Prediction
from .account import Account
from .utilities import format_name, save, load
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
class Comparison:
def __init__(self, predictions=[], accounts=[], name=''):
self.name = format_name(name)
self._predictions = list(predictions)
self._accounts = list(accounts)
@property
def predictions(self):
return self._predictions
@property
def named_predictions(self):
return {p.name: p for p in self.predictions}
@property
def accounts(self):
return self._accounts
@property
def named_accounts(self):
return {a.name: a for a in self.accounts}
@property
def acnt_values(self):
if len(self.accounts) == 0:
return pd.DataFrame()
else:
return pd.concat([acc.value for acc in self.accounts], axis=1)
@property
def tot_acnt_value(self):
'''Resolves unmatched dates with forward filling'''
vals = self.acnt_values.fillna(method='ffill').dropna().sum(axis=1)
vals.rename('TotalAccountsValue', inplace=True)
return vals
def add_prediction(self, pred):
if not isinstance(pred, Prediction):
raise TypeError
self._predictions.append(pred)
def add_predictions(self, preds):
for pred in preds:
self.add_prediction(pred)
def add_account(self, acct):
if not isinstance(acct, Account):
raise TypeError
self._accounts.append(acct)
def add_accounts(self, accts):
for acct in accts:
self.add_account(acct)
def pred_diffs(self, date_method='linear', linear_freq='D',
linear_start='account', linear_end='account'):
'''
Parameters
----------
date_method : str, optional
Can be either "linear" or "account".
* linear (by day, week, month, etc.)
* account (use dates in tot_acnt_value index)
The default is 'linear'.
linear_freq : str, optional
If `date_method` is "linear", this determines the frequency.
Otherwise, this is ignored. The default is 'D'.
linear_start : pd.Timestamp or 'account', optional
If `date_method` is "linear", this determines the start date.
Keyword 'account' means to use the first date in tot_acnt_value
index. The default is 'account'.
linear_end : pd.Timestamp or 'account', optional
If `date_method` is "linear", this determines the end date.
Keyword 'account' means to use the last date in tot_acnt_value
index. The default is 'account'.
Raises
------
        ValueError
            If `date_method` is neither "linear" nor "account".
        Returns
        -------
        diffs : pandas.DataFrame
            Differences between the projected prediction values and the
            total account value on the chosen dates.
'''
if date_method == 'linear':
if linear_start == 'account':
linear_start = self.tot_acnt_value.index.min()
if linear_end == 'account':
linear_end = self.tot_acnt_value.index.max()
index = pd.date_range(linear_start, linear_end, freq=linear_freq)
elif date_method == 'account':
index = self.tot_acnt_value.index
else:
raise ValueError
# Simulate all predictions
for pred in self.predictions:
pred.project(dates=index, end_date=None, time_granularity=None)
# Make df with all preds & total acct
df = | pd.DataFrame(self.tot_acnt_value) | pandas.DataFrame |
"""
Backs up ToodleDo
"""
import sys
import os
import requests
import yaml
import pandas as pd
from getpass import getpass
from requests_oauthlib import OAuth2Session
import requests
import urllib
import json
import logging
# TODO modify redirection URI? Localhost is a bit weird, there might be something running there.
# So, just play around with possibilities and see what works.
# TODO create a dummy user account and try to restore info there
# TODO Add writing scope
# TODO Commons with constants? Make sure the script is runnable from anywhere
CUR_FILE_DIR = os.path.dirname(os.path.realpath(__file__))+os.path.sep
API_URL_PREFIX = "http://api.toodledo.com/3/"
GET_URL_POSTFIX = '/get.php'
# Tasks: http://api.toodledo.com/3/tasks/index.php
DEFAULT_TASK_FIELDS = ["id", "title", "modified", "completed"]
OPTIONAL_TASK_FIELDS = ["folder", "context", "goal", "location", "tag", "startdate", "duedate",
"duedatemod", "starttime", "duetime", "remind", "repeat", "status", "star", "priority",
"length", "timer", "added", "note", "parent", "children", "order", "meta", "previous",
"attachment", "shared", "addedby", "via", "attachments"]
DEFAULT_FOLDER_FIELDS = ["id","name","private","archived","ord"]
DEFAULT_CONTEXT_FIELDS = ["id","name","private"]
DEFAULT_GOAL_FIELDS = ["id","name","level","archived","contributes","note"]
DEFAULT_LOCATION_FIELDS = ["id","name","description","lat","lon"]
DEFAULT_NOTES_FIELDS = ["id","title","modified","added","folder","private","text"]
LIST_ROW_DEFAULT_FIELDS=["id","added","modified","version","list","cells"]
LIST_COL_DEFAULT_FIELDS=["id","title","type","sort","width"]
AUTHORIZATION_URL = "https://api.toodledo.com/3/account/authorize.php"
TOKEN_URL = 'https://api.toodledo.com/3/account/token.php'
TOKEN_FILENAME = CUR_FILE_DIR+"token.txt"
CONFIG_FILENAME = CUR_FILE_DIR+"config.yaml"
CLIENT_ID_FIELD = 'CLIENT_ID'
CLIENT_SECRET_FIELD = 'CLIENT_SECRET'
REDIRECT_URL_FIELD = 'REDIRECT_URL'
BACKUP_FOLDER_FIELD = 'BACKUP_FOLDER'
ALL_SCOPES = ["basic","folders", "tasks","notes","outlines","lists"]
def get_token_response(request_body):
access_token, refresh_token = None, None
token_response = requests.post(TOKEN_URL, data = request_body)
if token_response.status_code == 200:
token_dict = json.loads(token_response.text)
if "access_token" in token_dict:
access_token = token_dict["access_token"]
if "refresh_token" in token_dict:
refresh_token = token_dict["refresh_token"]
else:
logging.warning("Failed to refresh. Status: %d. Result:\n%s",
token_response.status_code, str(token_response.text))
return access_token, refresh_token
def get_authorization_response(config, oauth):
authorization_url, state = oauth.authorization_url(AUTHORIZATION_URL)
# Here print is intended. We are working with console.
    print('Please go to this URL and authorize access:')
print(authorization_url)
authorization_response = input('Enter the full callback URL: ')
return authorization_response
def refresh_tokens(config, access_token, refresh_token):
# If failed to refresh, we'll be OK anyway
body = {'client_id': config[CLIENT_ID_FIELD],
'client_secret': config[CLIENT_SECRET_FIELD],
'redirect_uri': config[REDIRECT_URL_FIELD],
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
}
try:
new_access_token, new_refresh_token = get_token_response(body)
if new_access_token is None:
new_access_token = access_token
logging.info("Keeping old access token: %s", new_access_token)
else:
logging.info("New access token: %s", new_access_token)
if new_refresh_token is None:
new_refresh_token = refresh_token
logging.info("Keeping old refresh token: %s", new_refresh_token)
else:
logging.info("New refresh token: %s", new_refresh_token)
except Exception as e:
logging.warning("Failed to refresh. Might still be OK with old token.", str(e))
new_access_token, new_refresh_token = access_token, refresh_token
return new_access_token, new_refresh_token
def get_tokens_from_scratch(config):
oauth = OAuth2Session(config[CLIENT_ID_FIELD],
redirect_uri=config[REDIRECT_URL_FIELD],
scope=ALL_SCOPES)
authorization_response = get_authorization_response(config, oauth)
connection_success=False
first_time = True
while not connection_success:
try:
if not first_time:
logging.info("Trying to reconnect...")
authorization_response = get_authorization_response(config, oauth)
first_time = False
code = urllib.parse.parse_qs(
urllib.parse.urlsplit(authorization_response).query
)["code"][0]
# Just could not get in OAuth. It kept throwing
# "(missing_token) Missing access token parameter"
# Well, let's just get it working manually then.
body = {'client_id': config[CLIENT_ID_FIELD],
'client_secret': config[CLIENT_SECRET_FIELD],
'code': code,
'redirect_uri': config[REDIRECT_URL_FIELD],
'grant_type': 'authorization_code',
'authorization_response': authorization_response,
}
access_token, refresh_token = get_token_response(body)
connection_success = (access_token is not None)and(refresh_token is not None)
except Exception as e:
logging.warning("Token fetch failed: %s", str(e))
# TODO prevent infinite loop here? Prompt after error?
# Limit the number of retries? Parametrize?
return access_token, refresh_token
def save_tokens(access_token, refresh_token):
with open(TOKEN_FILENAME,"wt") as f:
f.write(access_token+"\n"+refresh_token)
logging.info("Saved tokens")
def get_tokens(config):
access_token = None
refresh_token = None
if os.path.isfile(TOKEN_FILENAME):
with open(TOKEN_FILENAME,"rt") as f:
s = f.read().split('\n')
if len(s) == 2:
access_token, refresh_token = s[0], s[1]
logging.info("Access token from file: %s", access_token)
logging.info("Refresh token from file: %s",refresh_token)
access_token, refresh_token = refresh_tokens(config, access_token, refresh_token)
if access_token is None or refresh_token is None:
access_token, refresh_token = get_tokens_from_scratch(config)
logging.info("Obtained tokens successfully")
logging.info("Final access token: %s", access_token)
logging.info("Final refresh token: %s",refresh_token)
return access_token, refresh_token
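# Hedged end-to-end sketch of the token flow above. It assumes config.yaml holds
# the CLIENT_ID/CLIENT_SECRET/REDIRECT_URL fields and that yaml.safe_load is an
# acceptable way to read it; the first run prompts on the console.
def _example_token_flow():
    with open(CONFIG_FILENAME, "rt") as f:
        config = yaml.safe_load(f)
    access_token, refresh_token = get_tokens(config)
    save_tokens(access_token, refresh_token)
    return access_token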
def generic_get_and_backup(access_token: str, parameter_name: str,
default_fields: list, optional_fields: list = [],
filename: str=None, readable_table_name: str=None,
url_additions: dict={}, start_from=0, return_json: bool=False):
    result_df = pd.DataFrame(columns=default_fields + optional_fields)
    result_json_parsed = None  # guards the return_json branch if the request below fails
readable_table_name = \
readable_table_name if readable_table_name is not None else parameter_name
url = API_URL_PREFIX + parameter_name + GET_URL_POSTFIX
try:
# TODO consider parameters: after=1234567890&f=xml
data = {'access_token': access_token}
if len(optional_fields)>0:
data['fields'] = ",".join(optional_fields)
for i in url_additions:
data[i] = url_additions[i]
response = requests.post(url, data = data)
if response.status_code == 200:
result_json_parsed = json.loads(response.text)
if type(result_json_parsed) == list:
if len(result_json_parsed) > start_from:
result_df = pd.DataFrame(result_json_parsed[start_from:]) # 0 is num and total
logging.info("Read %s successfully", readable_table_name)
else:
logging.info("List of %s is empty", readable_table_name)
else:
logging.warning("Failed to read %s. Response body: %s",
readable_table_name, result_json_parsed)
else:
logging.warning(
"Failed to read %s. Response status code: %d.\n Detailed response: %s",
readable_table_name, response.status_code, str(response.text))
except Exception as e:
logging.warning("Failed to list %s: %s", readable_table_name, str(e))
if filename is not None:
try:
result_df.to_csv(filename, index=False)
logging.info("Saved %s successfully", readable_table_name)
except Exception as e:
logging.warning("Failed to backup %s: %s", readable_table_name, str(e))
else:
logging.info("No filename provided. Not saving %s.", readable_table_name)
if return_json:
return result_df, result_json_parsed
return result_df
def get_raw_tasks(access_token):
"""
Raw tasks contain some fields in human-unreadable form. For example, folder or context.
"""
return generic_get_and_backup(access_token=access_token, parameter_name='tasks',
default_fields=DEFAULT_TASK_FIELDS, optional_fields=OPTIONAL_TASK_FIELDS,
readable_table_name="raw tasks", start_from=1)
def get_and_backup_folders(access_token, filename):
return generic_get_and_backup(access_token=access_token, filename=filename,
parameter_name='folders', default_fields=DEFAULT_FOLDER_FIELDS)
def get_and_backup_contexts(access_token, filename):
return generic_get_and_backup(access_token=access_token, filename=filename,
parameter_name='contexts', default_fields=DEFAULT_CONTEXT_FIELDS)
def get_and_backup_goals(access_token, filename):
return generic_get_and_backup(access_token=access_token, filename=filename,
parameter_name='goals', default_fields=DEFAULT_GOAL_FIELDS)
def get_and_backup_locations(access_token, filename):
return generic_get_and_backup(access_token=access_token, filename=filename,
parameter_name='locations', default_fields=DEFAULT_LOCATION_FIELDS)
def get_and_backup_notes(access_token, filename):
return generic_get_and_backup(access_token=access_token, filename=filename,
parameter_name='notes', default_fields=DEFAULT_NOTES_FIELDS)
def backup_list_details(access_token, list_info, lists_path):
list_col_df = pd.DataFrame(list_info["cols"])
try:
list_col_df.to_csv(lists_path+"cols_list_"+str(list_info["id"])+".csv", index=False)
logging.info("Saved list %s columns successfully", list_info["id"])
except Exception as e:
logging.warning("Failed to backup list %s columns: %s", list_info["id"], str(e))
#http://api.toodledo.com/3/rows/get.php?access_token=yourtoken&after=1234567890&list=1234567890
list_row_df, row_json =generic_get_and_backup(
access_token=access_token,
parameter_name='rows',
default_fields=LIST_ROW_DEFAULT_FIELDS,
filename=lists_path+"rows_list_"+str(list_info["id"])+".csv",
url_additions={"list": list_info["id"]},
return_json=True)
row_ids = list()
col_ids = list()
values = list()
if len(list_row_df) > 0:
for i in range(len(row_json)):
for j in range(len(row_json[i]["cells"])):
if ("c"+str(j+1)) in row_json[i]["cells"]:
values.append(row_json[i]["cells"]["c"+str(j+1)])
else:
values.append(None)
col_ids.append(list_info["cols"][j]["id"])
row_ids.append(row_json[i]["id"])
list_cell_df = pd.DataFrame({"value": values, "row_id": row_ids, "column_ids": col_ids})
else:
list_cell_df = pd.DataFrame({"value": [], "row_id": [], "column_ids": []})
list_cell_df["list_id"] = list_info["id"]
list_row_df["list_id"] = list_info["id"]
list_col_df["list_id"] = list_info["id"]
return list_row_df, list_col_df, list_cell_df
def get_and_backup_lists(access_token, backup_path):
result_df = pd.DataFrame(columns=["id","added","modified","title","version","note","keywords","rows"])
url = API_URL_PREFIX + "lists" + GET_URL_POSTFIX
all_list_rows = None
all_list_cols = None
all_list_cells = None
try:
# TODO consider parameters: after=1234567890&f=xml
data = {'access_token': access_token}
response = requests.post(url, data = data)
if response.status_code == 200:
result_json_parsed = json.loads(response.text)
lists_path = backup_path+"Lists"+os.path.sep
if not os.path.isdir(lists_path):
logging.info("Lists directory did not exist. Creating...")
os.mkdir(lists_path)
if type(result_json_parsed) == list:
if len(result_json_parsed) > 0:
for i in result_json_parsed:
cur_list_rows, cur_list_cols, cur_list_cells = backup_list_details(access_token, i, lists_path)
if all_list_rows is None:
all_list_rows = cur_list_rows
else:
all_list_rows = all_list_rows.append(cur_list_rows, ignore_index=True)
if all_list_cols is None:
all_list_cols = cur_list_cols
else:
all_list_cols = all_list_cols.append(cur_list_cols, ignore_index=True)
if all_list_cells is None:
all_list_cells = cur_list_cells
else:
all_list_cells = all_list_cells.append(cur_list_cells, ignore_index=True)
del i["cols"]
result_df = pd.DataFrame(result_json_parsed)
logging.info("Read lists successfully")
else:
logging.info("List of lists is empty")
else:
logging.warning("Failed to read lists. Response body: %s", result_json_parsed)
else:
logging.warning(
"Failed to read lists. Response status code: %d.\n Detailed response: %s",
response.status_code, str(response.text))
except Exception as e:
logging.warning("Failed to lists lists: %s", str(e))
try:
result_df.to_csv(backup_path+'lists.csv', index=False)
logging.info("Saved lists successfully")
except Exception as e:
logging.warning("Failed to backup lists: %s", str(e))
try:
all_list_rows.to_csv(backup_path+'lists_rows.csv', index=False)
logging.info("Saved all list rows successfully")
except Exception as e:
logging.warning("Failed to backup list rows: %s", str(e))
try:
all_list_cols.to_csv(backup_path+'lists_cols.csv', index=False)
logging.info("Saved all list columns successfully")
except Exception as e:
logging.warning("Failed to backup list columns: %s", str(e))
try:
all_list_cells.to_csv(backup_path+'lists_cells.csv', index=False)
logging.info("Saved all list cells successfully")
except Exception as e:
logging.warning("Failed to backup list cells: %s", str(e))
return result_df, all_list_rows, all_list_cols
def get_and_backup_outlines(access_token, backup_path):
all_outline_rows = None
result_df = pd.DataFrame(columns=["id","added","modified","title","hidden","version","note","keywords","count", "updated_at"])
url = API_URL_PREFIX + "outlines" + GET_URL_POSTFIX
try:
# TODO consider parameters: after=1234567890&f=xml
data = {'access_token': access_token}
response = requests.post(url, data = data)
if response.status_code == 200:
result_json_parsed = json.loads(response.text)
outlines_path = backup_path+"Outlines"+os.path.sep
if not os.path.isdir(outlines_path):
logging.info("Outlines directory did not exist. Creating...")
os.mkdir(outlines_path)
if type(result_json_parsed) == list:
if len(result_json_parsed) > 0:
for i in result_json_parsed:
try:
cur_outline_df = | pd.DataFrame(i["outline"]["children"]) | pandas.DataFrame |
import pandas as pd
from pathlib import Path
from utils import Config
from sklearn.model_selection import train_test_split
# dataset
data_dir = Path("data")
train = | pd.read_csv(data_dir / "kor_pair_train.csv") | pandas.read_csv |
from django.shortcuts import render
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from core import forms
import numpy as np
import pandas as pd
# Create your views here.
class HomePageView(TemplateView):
template_name = 'core/index.html'
form_class = forms.MainForm
@property
def reference_index(self):
return np.array([
"Asus A455LA",
"Asus X441UV",
"Asus VIVOBOOK",
"Asus Zenbook",
])
@property
def reference_header(self):
return np.array([
"Speed Processor",
"VGA",
"Layar",
"RAM",
"Jenis HD",
"Harga",
])
@property
def reference_header_function(self):
return np.array([max, max, max, max, max, min])
@property
def reference_data(self):
return np.array([
[8, 2, 7, 8, 7.5, 10],
[8, 2, 6, 9, 8, 6],
[10, 7, 7, 10, 9, 5],
[5, 6, 5, 4, 7, 10],
], dtype=float)
# def get_weights(self):
# form = forms.MainForm(request.GET)
# if form.is_valid():
# choice = form.cleaned_data['case']
# if case == 1:
# return np.array([[7.5, 2.5, 5.0, 7.0, 5.0, 0.0]])
# elif case == 2:
# return np.array([[7.5, 2.5, 5.0, 3.5, 5.0, 6.125]])
# elif case == 3:
# return np.array([[10.0, 10.0, 5.0, 10.0, 5.0, 6.125]])
# elif case ==
def solve(self):
form = forms.MainForm(self.request.GET)
if form.is_valid():
case = form.cleaned_data['case']
if case == "0":
focus = [0, 3, 5]
weights = np.array([0.6, 0.6, 10.0])
elif case == "1":
focus = [0, 3, 5]
weights = np.array([0.4, 0.4, 0.375])
elif case == "2":
focus = list(range(6))
weights = np.array([10., 10., 7.5, 9.0, 6.0, 0.375])
else:
focus = [0, 3, 4]
weights = np.array([0.3, 0.4, 0.7])
focus_table = self.reference_data[:, focus]
focus_header = self.reference_header[focus]
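        # Normalise each selected criterion (benefit criteria: value / column max; cost
        # criteria such as price: column min / value), then rank the alternatives by their
        # weighted sum of normalised scores (essentially the Simple Additive Weighting method).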
normalized_table = np.zeros(focus_table.shape)
for enu, col in enumerate(focus):
f = self.reference_header_function[col]
val = f(focus_table[:, enu])
if f is max:
normalized_table[:, enu] = focus_table[:, enu] / val
else:
normalized_table[:, enu] = val / focus_table[:, enu]
score = np.sum(normalized_table * weights, axis=1)
wut = np.max(score)
suggestion = np.argmax(score)
index = self.reference_index
return {
'case': case,
'focus': focus,
'weights': pd.DataFrame(weights[np.newaxis], columns=focus_header),
'table': pd.DataFrame(self.reference_data, index=index, columns=self.reference_header),
'focus_table': pd.DataFrame(focus_table, index=index, columns=focus_header),
'normalized_table': | pd.DataFrame(normalized_table, index=index, columns=focus_header) | pandas.DataFrame |
import torch
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import resample
from sklearn.metrics import mean_squared_error
import math
# from .models.DeepCOVID import DeepCOVID
from models.DeepCOVID import DeepCOVID
# params
#N_SAMPLES = 20
#N=3 # stochastic repetitions for each combination of prev predictions
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from joblib import Parallel, delayed
from joblib import wrap_non_picklable_objects
import os
device = torch.device("cpu")
dtype = torch.float
train_since_23_death = ['NJ','NY']
train_since_23 = ['NJ','AK','IN','MA', 'WA', 'WV']
train_since_29 = ['AL']
train_since_35 = ['AZ','KY','NE']
def get_region_data(datapath,next,region,target_name, include_col, exclude_col, n_samples,bootstrap=False,preds=None):
'''
Get regional data, will open only the file for that week
@param target_name: 'death' or 'hosp'
@param next: 1 or 2
@param datapath: e.g. "./data/merged-data3.csv"
    @param include_col: candidate feature columns to keep
    @param exclude_col: columns to drop from include_col before building features
@param preds: list of 1 or more past predictions
'''
start_col =4
# NOTE: change this later
df = pd.read_csv(datapath, header=0)
# df = pd.read_csv("./data/merged-data3.csv", header=0)
# df = df.drop(['deathIncrease','death_jhu_cumulative'],axis=1)
if target_name=='hosp':
if region in train_since_23:
df = df[(df.loc[:,'epiweek'] >= 23)]
elif region in train_since_29:
df = df[(df.loc[:,'epiweek'] >= 29)]
elif region in train_since_35:
df = df[(df.loc[:,'epiweek'] >= 35)]
elif target_name=='death':
if region in train_since_23:
df = df[(df.loc[:,'epiweek'] >= 23)]
###Select features columns based on include_col
# print('exclude_col',exclude_col)
for col in exclude_col:
# include_col.remove(col)
if col in include_col:
include_col.remove(col)
main = df.iloc[:,:start_col].copy()
rest = df[include_col].copy()
##Combine the first 4 columns of df and the include_col as a new dataframe
df = | pd.concat([main,rest], axis =1) | pandas.concat |
# coding: utf-8
# In[ ]:
import pandas as pd
import numpy as np
import sklearn
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
# In[ ]:
def filtering(level): # filter class data based on the level selected by the student
db=pd.read_csv('/Users/surin/Documents/201720723/3-1/도메인분석및SW설계/Final/rcmd/finalAcademy.csv', engine='python') # load database
filtered=db[db.level==level] # filter class data
data=filtered.values.tolist() # change dataframe to list
return data
# In[ ]:
def getRecommendation(data, preference):
    pref=[preference] # wrap the student's preference values in a single-row list
std= | pd.DataFrame(pref,columns=['class_size','tuition','careerOfTeacher','ageDistribution']) | pandas.DataFrame |
# coding: utf-8
# # Parameter Calibration
# This notebook describes a mathematical framework for selecting policy parameters - namely the emissions intensity baseline and permit price. Please be aware of the following key assumptions underlying this model:
#
# * Generators bid into the market at their short-run marginal cost (SRMC);
# * the market for electricity is perfectly competitive;
# * the policy maker is able to directly control the emissions intensity baseline and permit price.
#
# Steps taken to conduct the analysis:
# 1. Import packages and declare paths to files
# 2. Load data
# 3. Organise data
# 4. Construct model used to select scheme parameters. The model consists of three blocks of equations (sketched below):
# * Primal block - contains constraints related to a standard DCOPF model;
# * Dual block - dual constraints associated with the dual program of the standard DCOPF model;
# * Strong duality constraint block - block of constraints linking the primal and dual objectives.
# 5. Run DCOPF model to find business-as-usual emissions and wholesale prices.
# 6. Run model used to select policy parameters, save output
#
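#
# For reference, the price-targeting problem solved below can be summarised as the following
# bilevel program (a rough sketch only, not the exact notation used in the code):
#
# $$\min_{\phi,\;\tau}\;\left|\,\bar{\lambda}(\phi,\tau)-\lambda^{\mathrm{target}}\,\right|$$
#
# subject to, for every operating scenario: (i) primal feasibility of the DCOPF problem,
# (ii) feasibility of its dual, and (iii) a strong duality equality linking the primal and
# dual objectives. Here $\phi$ is the emissions intensity baseline, $\tau$ the permit price,
# and $\bar{\lambda}$ the demand-weighted average wholesale price recovered from the duals of
# the nodal power balance constraints.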
# ## Import packages
# In[1]:
import os
import re
import time
import pickle
import random
from math import pi
import numpy as np
import pandas as pd
import datetime as dt
from pyomo.environ import *
import matplotlib.pyplot as plt
# Seed random number generator
np.random.seed(seed=10)
# ## Declare paths to files
# In[2]:
# Identifier used to update paths depending on the number of scenarios investigated
number_of_scenarios = '100_scenarios'
class DirectoryPaths(object):
"Paths to relevant directories"
def __init__(self):
self.data_dir = os.path.join(os.path.curdir, os.path.pardir, os.path.pardir, 'data')
self.scenarios_dir = os.path.join(os.path.curdir, os.path.pardir, '1_create_scenarios')
self.output_dir = os.path.join(os.path.curdir, 'output', number_of_scenarios)
paths = DirectoryPaths()
# ## Model data
# ### Input data
# In[3]:
class RawData(object):
"Collect input data"
def __init__(self):
# Paths to directories
DirectoryPaths.__init__(self)
# Network data
# ------------
# Nodes
self.df_n = pd.read_csv(os.path.join(self.data_dir, 'network_nodes.csv'), index_col='NODE_ID')
# AC edges
self.df_e = pd.read_csv(os.path.join(self.data_dir, 'network_edges.csv'), index_col='LINE_ID')
# HVDC links
self.df_hvdc_links = pd.read_csv(os.path.join(self.data_dir, 'network_hvdc_links.csv'), index_col='HVDC_LINK_ID')
# AC interconnector links
self.df_ac_i_links = pd.read_csv(os.path.join(self.data_dir, 'network_ac_interconnector_links.csv'), index_col='INTERCONNECTOR_ID')
# AC interconnector flow limits
self.df_ac_i_limits = pd.read_csv(os.path.join(self.data_dir, 'network_ac_interconnector_flow_limits.csv'), index_col='INTERCONNECTOR_ID')
# Generators
# ----------
# Generating unit information
self.df_g = pd.read_csv(os.path.join(self.data_dir, 'generators.csv'), index_col='DUID', dtype={'NODE': int})
self.df_g['SRMC_2016-17'] = self.df_g['SRMC_2016-17'].map(lambda x: x + np.random.uniform(0, 2))
# Operating scenarios
# -------------------
with open(os.path.join(paths.scenarios_dir, 'output', '{0}.pickle'.format(number_of_scenarios)), 'rb') as f:
self.df_scenarios = pickle.load(f)
# Create object containing raw model data
raw_data = RawData()
# ### Organise data for model
# In[4]:
class OrganiseData(object):
"Organise data to be used in mathematical program"
def __init__(self):
# Load model data
RawData.__init__(self)
def reindex_nodes(self):
# Original node indices
df_index_map = self.df_n.index.to_frame().rename(columns={'NODE_ID': 'original'}).reset_index().drop('NODE_ID',axis=1)
# New node indices
df_index_map['new'] = df_index_map.apply(lambda x: x.name + 1, axis=1)
# Create dictionary mapping original node indices to new node indices
index_map = df_index_map.set_index('original')['new'].to_dict()
# Network nodes
# -------------
# Construct new index and assign to dataframe
new_index = pd.Index(self.df_n.apply(lambda x: index_map[x.name], axis=1), name=self.df_n.index.name)
self.df_n.index = new_index
# Network edges
# -------------
# Reindex 'from' and 'to' nodes in network edges dataframe
def _reindex_from_and_to_nodes(row, order=False):
"""Re-index 'from' and 'to' nodes. If required, change node order such that 'from' node index < 'to' node index"""
# Original 'from' and 'to' nodes
n_1 = index_map[row['FROM_NODE']]
n_2 = index_map[row['TO_NODE']]
if order:
# If original 'from' node index is less than original 'to' node index keep same order, else reverse order
if n_1 < n_2:
f, t = n_1, n_2
else:
f, t = n_2, n_1
return pd.Series({'FROM_NODE': f, 'TO_NODE': t})
else:
return pd.Series({'FROM_NODE': n_1, 'TO_NODE': n_2})
self.df_e[['FROM_NODE', 'TO_NODE']] = self.df_e.apply(_reindex_from_and_to_nodes, args=(True,), axis=1)
# Sort lines by 'from' and 'to' node indices
self.df_e.sort_values(by=['FROM_NODE', 'TO_NODE'], inplace=True)
# Generators
# ----------
self.df_g['NODE'] = self.df_g['NODE'].map(lambda x: df_index_map.set_index('original')['new'].loc[x])
# Network HVDC links
# ------------------
self.df_hvdc_links[['FROM_NODE', 'TO_NODE']] = self.df_hvdc_links.apply(_reindex_from_and_to_nodes, axis=1)
# Network interconnectors
# -----------------------
self.df_ac_i_links[['FROM_NODE', 'TO_NODE']] = self.df_ac_i_links.apply(_reindex_from_and_to_nodes, axis=1)
# Operating scenarios
# -------------------
df_temp = self.df_scenarios.reset_index()
df_temp['NODE_ID'] = df_temp.apply(lambda x: index_map[x['NODE_ID']] if type(x['NODE_ID']) == int else x['NODE_ID'], axis=1)
self.df_scenarios = df_temp.set_index(['level', 'NODE_ID']).T
reindex_nodes(self)
def get_admittance_matrix(self):
"Construct admittance matrix for network"
# Initialise dataframe
df_Y = pd.DataFrame(data=0j, index=self.df_n.index, columns=self.df_n.index)
# Off-diagonal elements
for index, row in self.df_e.iterrows():
fn, tn = row['FROM_NODE'], row['TO_NODE']
df_Y.loc[fn, tn] += - (1 / (row['R_PU'] + 1j * row['X_PU'])) * row['NUM_LINES']
df_Y.loc[tn, fn] += - (1 / (row['R_PU'] + 1j * row['X_PU'])) * row['NUM_LINES']
# Diagonal elements
for i in self.df_n.index:
df_Y.loc[i, i] = - df_Y.loc[i, :].sum()
# Add shunt susceptance to diagonal elements
for index, row in self.df_e.iterrows():
fn, tn = row['FROM_NODE'], row['TO_NODE']
df_Y.loc[fn, fn] += (row['B_PU'] / 2) * row['NUM_LINES']
df_Y.loc[tn, tn] += (row['B_PU'] / 2) * row['NUM_LINES']
return df_Y
def get_HVDC_incidence_matrix(self):
"Incidence matrix for HVDC links"
# Incidence matrix for HVDC links
df = pd.DataFrame(index=self.df_n.index, columns=self.df_hvdc_links.index, data=0)
for index, row in self.df_hvdc_links.iterrows():
# From nodes assigned a value of 1
df.loc[row['FROM_NODE'], index] = 1
# To nodes assigned a value of -1
df.loc[row['TO_NODE'], index] = -1
return df
def get_reference_nodes(self):
"Get reference node IDs"
# Filter Regional Reference Nodes (RRNs) in Tasmania and Victoria.
mask = (model_data.df_n['RRN'] == 1) & (model_data.df_n['NEM_REGION'].isin(['TAS1', 'VIC1']))
reference_node_ids = model_data.df_n[mask].index
return reference_node_ids
def get_generator_node_map(self, generators):
"Get set of generators connected to each node"
generator_node_map = (self.df_g.reindex(index=generators)
.reset_index()
.rename(columns={'OMEGA_G': 'DUID'})
.groupby('NODE').agg(lambda x: set(x))['DUID']
.reindex(self.df_n.index, fill_value=set()))
return generator_node_map
# Create object containing organised model data
model_data = OrganiseData()
# Perturb generator SRMCs by a random number uniformly distributed between 0 and 1. Unique SRMCs assist the solver to find a unique solution.
# In[5]:
model_data.df_g['SRMC_2016-17'] = model_data.df_g['SRMC_2016-17'].map(lambda x: x + np.random.uniform(0, 1))
model_data.df_g['SRMC_2016-17'].head()
# Save generator, node, and scenario information so they can be used in later processing and plotting steps.
# In[6]:
with open(os.path.join(paths.output_dir, 'df_g.pickle'), 'wb') as f:
pickle.dump(model_data.df_g, f)
with open(os.path.join(paths.output_dir, 'df_n.pickle'), 'wb') as f:
pickle.dump(model_data.df_n, f)
with open(os.path.join(paths.output_dir, 'df_scenarios.pickle'), 'wb') as f:
pickle.dump(model_data.df_scenarios, f)
# ## Model
# Wrap optimisation model in function. Pass parameters to solve for different scenarios.
# In[7]:
def run_model(model_type=None, mode=None, tau_list=None, phi_list=None, E_list=None, R_list=None, target_bau_average_price_multiple_list=None, bau_average_price=None, fix_phi=None, fix_tau=None, stream_solver=False):
"""Construct and run model used to calibrate REP scheme parameters
Parameters
----------
model_type : str
Either DCOPF or MPPDC
mode : str
Mode in which to run model. E.g. compute baseline given fixed permit price and price target
tau_list : list
Fixed permit prices for which the model should be run [$/tCO2]
phi_list : list
Fixed emissions intensity baselines for which the model should be run [tCO2/MWh]
E_list : list
Average emissions intensity constraints [tCO2/MWh]
R_list : list
Minimum scheme revenue constraint [$]
target_bau_average_price_multiple_list : list
Wholesale electricity price target as multiple of the business-as-usual (BAU) price [$/MWh]
bau_average_price : float
Business-as-usual average wholesale electricity price [$/MWh]
fix_phi : float
Fixed value of emissions intensity baseline (only applies to DCOPF model)
fix_tau : float
Fixed value of permit price (only applies to DCOPF model)
stream_solver : bool
Indicator if solver output should be streamed to terminal
"""
# Checking model options are correctly inputted
# ---------------------------------------------
if model_type not in ['DCOPF', 'MPPDC']:
raise(Exception("Must specify either 'DCOPF' or 'MPPDC' as the model type"))
    if model_type == 'MPPDC' and mode not in ['find_price_targeting_baseline', 'fixed_policy_parameters', 'find_permit_price_and_baseline']:
raise(Exception("If model_type 'MPPDC' specified, must choose from 'find_price_targeting_baseline', 'fixed_policy_parameters', 'find_permit_price_and_baseline'"))
# Initialise model object
# -----------------------
model = ConcreteModel(name='DCOPF')
# Setup solver
# ------------
solver = 'cplex'
solver_io = 'lp'
keepfiles = False
solver_opt = {}
opt = SolverFactory(solver, solver_io=solver_io)
# Sets
# ----
# Operating scenarios
    if model_type == 'DCOPF': model.T = Set(initialize=['DUMMY'])
    if model_type == 'MPPDC': model.T = Set(initialize=model_data.df_scenarios.index)
# Nodes
model.I = Set(initialize=model_data.df_n.index)
# Network reference nodes (Mainland and Tasmania)
reference_nodes = model_data.get_reference_nodes()
model.N = Set(initialize=reference_nodes)
# AC network edges
df_Y = model_data.get_admittance_matrix()
ac_edges = [(df_Y.columns[i], df_Y.columns[j]) for i, j in zip(np.where(df_Y != 0)[0], np.where(df_Y != 0)[1]) if (i < j)]
model.K = Set(initialize=ac_edges)
# HVDC links
hvdc_incidence_matrix = model_data.get_HVDC_incidence_matrix().T
model.M = Set(initialize=hvdc_incidence_matrix.index)
# Generators - only non-hydro dispatchable plant
mask = (model_data.df_g['SCHEDULE_TYPE'] == 'SCHEDULED') & ~(model_data.df_g['FUEL_CAT'] == 'Hydro')
model.G = Set(initialize=model_data.df_g[mask].index)
# Parameters
# ----------
# Generation lower bound [MW]
def P_MIN_RULE(model, g):
return 0
model.P_MIN = Param(model.G, initialize=P_MIN_RULE)
# Generation upper bound [MW]
def P_MAX_RULE(model, g):
return float(model_data.df_g.loc[g, 'REG_CAP'])
model.P_MAX = Param(model.G, initialize=P_MAX_RULE)
# Voltage angle difference between connected nodes i and j lower bound [rad]
model.VANG_MIN = Param(initialize=float(-pi / 2))
# Voltage angle difference between connected nodes i and j upper bound [rad]
model.VANG_MAX = Param(initialize=float(pi / 2))
# Susceptance matrix [pu]
def B_RULE(model, i, j):
return float(np.imag(df_Y.loc[i, j]))
model.B = Param(model.I, model.I, initialize=B_RULE)
# HVDC incidence matrix
def C_RULE(model, m, i):
return float(hvdc_incidence_matrix.loc[m, i])
model.C = Param(model.M, model.I, initialize=C_RULE)
# HVDC reverse flow from node i to j lower bound [MW]
def H_MIN_RULE(model, m):
return - float(model_data.df_hvdc_links.loc[m, 'REVERSE_LIMIT_MW'])
model.H_MIN = Param(model.M, initialize=H_MIN_RULE)
# HVDC forward flow from node i to j upper bound [MW]
def H_MAX_RULE(model, m):
return float(model_data.df_hvdc_links.loc[m, 'FORWARD_LIMIT_MW'])
model.H_MAX = Param(model.M, initialize=H_MAX_RULE)
# AC power flow limits on branches
def F_MIN_RULE(model, i, j):
return -99999
model.F_MIN = Param(model.K, initialize=F_MIN_RULE, mutable=True)
def F_MAX_RULE(model, i, j):
return 99999
model.F_MAX = Param(model.K, initialize=F_MAX_RULE, mutable=True)
# Adjust power flow limits for major AC interconnectors
for index, row in model_data.df_ac_i_links.drop('VIC1-NSW1').iterrows():
i, j = row['FROM_NODE'], row['TO_NODE']
# Take into account direction of branch flow
if i < j:
model.F_MAX[i, j] = model_data.df_ac_i_limits.loc[index, 'FORWARD_LIMIT_MW']
model.F_MIN[i, j] = - model_data.df_ac_i_limits.loc[index, 'REVERSE_LIMIT_MW']
else:
model.F_MAX[j, i] = model_data.df_ac_i_limits.loc[index, 'REVERSE_LIMIT_MW']
model.F_MIN[j, i] = - model_data.df_ac_i_limits.loc[index, 'FORWARD_LIMIT_MW']
# Generator emissions intensities [tCO2/MWh]
def E_RULE(model, g):
return float(model_data.df_g.loc[g, 'EMISSIONS'])
model.E = Param(model.G, initialize=E_RULE)
# Generator short run marginal costs [$/MWh]
def A_RULE(model, g):
return float(model_data.df_g.loc[g, 'SRMC_2016-17'])
model.A = Param(model.G, initialize=A_RULE)
# System base power [MVA]
model.S = Param(initialize=100)
# Revenue constraint [$] - Initialise to very large negative value (loose constraint)
model.R = Param(initialize=-9e9, mutable=True)
    # Target wholesale electricity price [$/MWh]
model.TARGET_PRICE = Param(initialize=30, mutable=True)
# Target REP scheme revenue
model.MIN_SCHEME_REVENUE = Param(initialize=-float(5e9), mutable=True)
# Upper-level program
# -------------------
# Primal variables
# ----------------
# Emissions intensity baseline [tCO2/MWh]
model.phi = Var(initialize=0, within=NonNegativeReals)
# Permit price [$/tCO2]
model.tau = Var(initialize=0)
# Lower-level program
# -------------------
# Primal block (indexed over T) (model.LL_PRIM)
# ---------------------------------------------
def LL_PRIM_RULE(b, t):
# Parameters
# ----------
# Demand at each node (to be set prior to running model)
b.P_D = Param(model.I, initialize=0, mutable=True)
# Intermittent power injection at each node (to be set prior to running model)
b.P_R = Param(model.I, initialize=0, mutable=True)
# Trading interval length [h] - use 1hr if running DCOPF model
if t == 'DUMMY':
b.L = 1
else:
b.L = Param(initialize=float(model_data.df_scenarios.loc[t, ('hours', 'duration')]))
# Variables
# ---------
b.P = Var(model.G)
b.vang = Var(model.I)
b.H = Var(model.M)
# Constraints
# -----------
# Power output lower bound
def P_LB_RULE(b, g):
return model.P_MIN[g] - b.P[g] <= 0
b.P_LB = Constraint(model.G, rule=P_LB_RULE)
# Power output upper bound
def P_UB_RULE(b, g):
return b.P[g] - model.P_MAX[g] <= 0
b.P_UB = Constraint(model.G, rule=P_UB_RULE)
# Voltage angle difference between connected nodes lower bound
def VANG_DIFF_LB_RULE(b, i, j):
return model.VANG_MIN - b.vang[i] + b.vang[j] <= 0
b.VANG_DIFF_LB = Constraint(model.K, rule=VANG_DIFF_LB_RULE)
# Voltage angle difference between connected nodes upper bound
def VANG_DIFF_UB_RULE(b, i, j):
return b.vang[i] - b.vang[j] - model.VANG_MAX <= 0
b.VANG_DIFF_UB = Constraint(model.K, rule=VANG_DIFF_UB_RULE)
# Fix voltage angle = 0 for reference nodes
def VANG_REF_RULE(b, n):
return b.vang[n] == 0
b.VANG_REF = Constraint(model.N, rule=VANG_REF_RULE)
# Map between nodes and generators connected to each node
generator_node_map = model_data.get_generator_node_map([g for g in model.G])
# Nodal power balance constraint
def POWER_BALANCE_RULE(b, i):
# Branches connected to node i
K_i = [k for k in model.K if i in k]
# Nodes connected to node i
I_i = [ii for branch in K_i for ii in branch if (ii != i)]
return (-model.S * sum(model.B[i, j] * (b.vang[i] - b.vang[j]) for j in I_i)
- sum(model.C[m, i] * b.H[m] for m in model.M)
- b.P_D[i]
+ sum(b.P[g] for g in generator_node_map.loc[i] if g in model.G)
+ b.P_R[i] == 0)
b.POWER_BALANCE = Constraint(model.I, rule=POWER_BALANCE_RULE)
        # AC branch flow limits lower bound
def AC_FLOW_LB_RULE(b, i, j):
return model.F_MIN[i, j] - model.S * model.B[i, j] * (b.vang[i] - b.vang[j]) <= 0
b.AC_FLOW_LB = Constraint(model.K, rule=AC_FLOW_LB_RULE)
        # AC branch flow limits upper bound
def AC_FLOW_UB_RULE(b, i, j):
return model.S * model.B[i, j] * (b.vang[i] - b.vang[j]) - model.F_MAX[i, j] <= 0
b.AC_FLOW_UB = Constraint(model.K, rule=AC_FLOW_UB_RULE)
# HVDC branch flow limits lower bound
def HVDC_FLOW_LB_RULE(b, m):
return model.H_MIN[m] - b.H[m] <= 0
b.HVDC_FLOW_LB = Constraint(model.M, rule=HVDC_FLOW_LB_RULE)
# HVDC branch flow limits upper bound
def HVDC_FLOW_UB_RULE(b, m):
return b.H[m] - model.H_MAX[m] <= 0
b.HVDC_FLOW_UB = Constraint(model.M, rule=HVDC_FLOW_UB_RULE)
# Dual block (indexed over T) (model.LL_DUAL)
# -------------------------------------------
def LL_DUAL_RULE(b, t):
# Variables
# ---------
b.alpha = Var(model.G, within=NonNegativeReals)
b.beta = Var(model.G, within=NonNegativeReals)
b.gamma = Var(model.K, within=NonNegativeReals)
b.delta = Var(model.K, within=NonNegativeReals)
b.zeta = Var(model.N)
b.lambda_var = Var(model.I)
b.kappa = Var(model.K, within=NonNegativeReals)
b.eta = Var(model.K, within=NonNegativeReals)
b.omega = Var(model.M, within=NonNegativeReals)
b.psi = Var(model.M, within=NonNegativeReals)
# Constraints
# -----------
def DUAL_CONS_1_RULE(b, g):
# Node at which generator g is located
f_g = model_data.df_g.loc[g, 'NODE']
# Don't apply scheme to existing hydro plant
if model_data.df_g.loc[g, 'FUEL_CAT'] == 'Hydro':
return model.A[g] - b.alpha[g] + b.beta[g] - b.lambda_var[f_g] == 0
else:
return model.A[g] + ((model.E[g] - model.phi) * model.tau) - b.alpha[g] + b.beta[g] - b.lambda_var[f_g] == 0
b.DUAL_CONS_1 = Constraint(model.G, rule=DUAL_CONS_1_RULE)
def DUAL_CONS_2_RULE(b, i):
# Branches connected to node i
K_i = [k for k in model.K if i in k]
# Nodes connected to node i
I_i = [ii for branch in K_i for ii in branch if (ii != i)]
return (sum( (b.gamma[k] - b.delta[k] + (model.B[k] * model.S * (b.kappa[k] - b.eta[k])) ) * (np.sign(i - k[0]) + np.sign(i - k[1])) for k in K_i)
+ sum(model.S * ((b.lambda_var[i] * model.B[i, j]) - (b.lambda_var[j] * model.B[j, i])) for j in I_i)
+ sum(b.zeta[n] for n in model.N if n == i) == 0)
b.DUAL_CONS_2 = Constraint(model.I, rule=DUAL_CONS_2_RULE)
def DUAL_CONS_3_RULE(b, m):
return sum(b.lambda_var[i] * model.C[m, i] for i in model.I) - b.omega[m] + b.psi[m] == 0
b.DUAL_CONS_3 = Constraint(model.M, rule=DUAL_CONS_3_RULE)
# Strong duality constraints (indexed over T)
# -------------------------------------------
def SD_CONS_RULE(model, t):
return (sum(model.LL_PRIM[t].P[g] * ( model.A[g] + (model.E[g] - model.phi) * model.tau ) if model_data.df_g.loc[g, 'FUEL_CAT'] == 'Fossil' else model.LL_PRIM[t].P[g] * model.A[g] for g in model.G)
== sum(model.LL_PRIM[t].P_D[i] * model.LL_DUAL[t].lambda_var[i] - (model.LL_PRIM[t].P_R[i] * model.LL_DUAL[t].lambda_var[i]) for i in model.I)
+ sum((model.LL_DUAL[t].omega[m] * model.H_MIN[m]) - (model.LL_DUAL[t].psi[m] * model.H_MAX[m]) for m in model.M)
+ sum(model.LL_DUAL[t].alpha[g] * model.P_MIN[g] for g in model.G)
- sum(model.LL_DUAL[t].beta[g] * model.P_MAX[g] for g in model.G)
+ sum((model.VANG_MIN * model.LL_DUAL[t].gamma[k]) - (model.VANG_MAX * model.LL_DUAL[t].delta[k]) + (model.LL_DUAL[t].kappa[k] * model.F_MIN[k]) - (model.LL_DUAL[t].eta[k] * model.F_MAX[k]) for k in model.K))
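    # For a linear lower-level problem, imposing primal feasibility, dual feasibility and this
    # primal-objective-equals-dual-objective condition is equivalent to imposing the lower-level
    # optimality (KKT) conditions, which is what allows the bilevel problem to be solved as a
    # single-level MPPDC.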
# Run DCOPF
# ---------
    if model_type == 'DCOPF':
# Keep dual variables for DCOPF scenario
model.dual = Suffix(direction=Suffix.IMPORT)
# Build model
model.LL_PRIM = Block(model.T, rule=LL_PRIM_RULE)
# Fix phi and tau
model.phi.fix(fix_phi)
model.tau.fix(fix_tau)
# DCOPF OBJECTIVE
# ---------------
def DCOPF_OBJECTIVE_RULE(model):
return sum(model.LL_PRIM[t].P[g] * (model.A[g] + ((model.E[g] - model.phi) * model.tau) ) if model_data.df_g.loc[g, 'FUEL_CAT'] == 'Fossil' else model.LL_PRIM[t].P[g] * model.A[g] for t in model.T for g in model.G)
model.DCOPF_OBJECTIVE = Objective(rule=DCOPF_OBJECTIVE_RULE, sense=minimize)
# Container to store results
results = []
# Solve model for each time period
for t in model_data.df_scenarios.index:
# Update demand and intermittent power injections at each node
for i in model.I:
# Demand
model.LL_PRIM['DUMMY'].P_D[i] = float(model_data.df_scenarios.loc[t, ('demand', i)])
# Intermittent injections fixed power injections from hydro plant
model.LL_PRIM['DUMMY'].P_R[i] = float(model_data.df_scenarios.loc[t, ('intermittent', i)] + model_data.df_scenarios.loc[t, ('hydro', i)])
# Solve model
r = opt.solve(model, keepfiles=keepfiles, tee=stream_solver, options=solver_opt)
print('Finished solving DCOPF for period {}'.format(t))
# Store model output
model.solutions.store_to(r)
# Convert to DataFrame
try:
df_results = pd.DataFrame(r['Solution'][0])
df_results['SCENARIO_ID'] = t
df_results['FIXED_PHI'] = fix_phi
df_results['FIXED_TAU'] = fix_tau
            except Exception:
df_results = 'infeasible'
# If model not infeasible store results in list to be concatenated
if type(df_results) != str:
results.append(df_results)
return pd.concat(results)
# Run MPPDC
# ---------
    if model_type == 'MPPDC':
# Build model
print('Building primal block')
model.LL_PRIM = Block(model.T, rule=LL_PRIM_RULE)
print('Building dual block')
model.LL_DUAL = Block(model.T, rule=LL_DUAL_RULE)
print('Building strong duality constraints')
model.SD_CONS = Constraint(model.T, rule=SD_CONS_RULE)
print('Finished building blocks')
# Add revenue constraint to model if values of R are specified
# Note: Exclude existing hydro and renewables from scheme (prevent windfall profits to existing generators)
eligible_gens = [g for g in model.G if model_data.df_g.loc[g, 'FUEL_CAT'] == 'Fossil']
model.R_CONS = Constraint(expr=sum((model.E[g] - model.phi) * model.tau * model.LL_PRIM[t].P[g] * model.LL_PRIM[t].L for t in model.T for g in eligible_gens) >= model.R)
# Dummy variables used to minimise difference between average price and target price
model.x_1 = Var(within=NonNegativeReals)
model.x_2 = Var(within=NonNegativeReals)
# Expressions for total revenue, total demand, and average wholesale price
model.TOTAL_REVENUE = Expression(expr=sum(model.LL_DUAL[t].lambda_var[i] * model.LL_PRIM[t].L * model.LL_PRIM[t].P_D[i] for t in model.T for i in model.I))
model.TOTAL_DEMAND = Expression(expr=sum(model.LL_PRIM[t].L * model.LL_PRIM[t].P_D[i] for t in model.T for i in model.I))
model.AVERAGE_PRICE = Expression(expr=model.TOTAL_REVENUE / model.TOTAL_DEMAND)
# Expression for total emissions and average emissions intensity
model.TOTAL_EMISSIONS = Expression(expr=sum(model.LL_PRIM[t].P[g] * model.LL_PRIM[t].L * model.E[g] for g in model.G for t in model.T))
model.AVERAGE_EMISSIONS_INTENSITY = Expression(expr=model.TOTAL_EMISSIONS / model.TOTAL_DEMAND)
# Expression for net scheme revenue
model.NET_SCHEME_REVENUE = Expression(expr=sum((model.E[g] - model.phi) * model.tau * model.LL_PRIM[t].P[g] * model.LL_PRIM[t].L for g in model.G for t in model.T))
# Constraints used to minimise difference between average wholesale price and target
model.x_1_CONS = Constraint(expr=model.x_1 >= model.AVERAGE_PRICE - model.TARGET_PRICE)
model.x_2_CONS = Constraint(expr=model.x_2 >= model.TARGET_PRICE - model.AVERAGE_PRICE)
# MPPDC objective function
def MPPDC_OBJECTIVE_RULE(model):
return model.x_1 + model.x_2
model.MPPDC_OBJECTIVE = Objective(rule=MPPDC_OBJECTIVE_RULE, sense=minimize)
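        # Because x_1 and x_2 are non-negative and appear only in this minimised objective,
        # at the optimum x_1 + x_2 equals |AVERAGE_PRICE - TARGET_PRICE|; the two constraints
        # above are the standard linearisation of an absolute-value objective term.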
# Useful functions
# ----------------
def _fix_LLPRIM_vars():
"Fix generator output, voltage angles, and HVDC power flows."
for t in model.T:
for g in model.G:
model.LL_PRIM[t].P[g].fix()
for m in model.M:
model.LL_PRIM[t].H[m].fix()
for i in model.I:
model.LL_PRIM[t].vang[i].fix()
def _unfix_LLPRIM_vars():
"Unfix generator output, voltage angles, and HVDC power flows"
for t in model.T:
for g in model.G:
model.LL_PRIM[t].P[g].unfix()
for m in model.M:
model.LL_PRIM[t].H[m].unfix()
for i in model.I:
model.LL_PRIM[t].vang[i].unfix()
def _store_output(results):
"Store fixed variable values in solutions set of model object"
print('Storing fixed variables')
for t in model.T:
# Store generator output
for g in model.G:
results['Solution'][0]['Variable']['LL_PRIM[{0}].P[{1}]'.format(t, g)] = {'Value': model.LL_PRIM[t].P[g].value}
# Store voltage angles
for i in model.I:
results['Solution'][0]['Variable']['LL_PRIM[{0}].vang[{1}]'.format(t, i)] = {'Value': model.LL_PRIM[t].vang[i].value}
# Store HVDC power flows
for m in model.M:
results['Solution'][0]['Variable']['LL_PRIM[{0}].H[{1}]'.format(t, m)] = {'Value': model.LL_PRIM[t].H[m].value}
return results
# Solve model for each policy parameter scenario
# ----------------------------------------------
print('Updating demand and fixed power injection parameters')
# Initialise dictionary to store model output
results = {}
# Loop through scenarios - initialise parameters for demand and fixed power injections
for t in model.T:
# Loop through nodes
for i in model.I:
# Node demand [MW]
model.LL_PRIM[t].P_D[i] = float(model_data.df_scenarios.loc[t, ('demand', i)])
# Power injections from intermittent sources + hydro [MW]
model.LL_PRIM[t].P_R[i] = float(model_data.df_scenarios.loc[t, ('intermittent', i)] + model_data.df_scenarios.loc[t, ('hydro', i)])
        if mode == 'find_price_targeting_baseline':
# Loop through permit prices
for tau in tau_list:
# Loop through price targets
for target_bau_average_price_multiple in target_bau_average_price_multiple_list:
# Loop through revenue targets
for R in R_list:
# Start time
t0 = time.time()
# Fix phi and tau, solve model
model.phi.fix(0)
model.tau.fix(tau)
model.TARGET_PRICE = target_bau_average_price_multiple * bau_average_price
model.R = R
# Solve model for fixed permit price and baseline
r = opt.solve(model, keepfiles=keepfiles, tee=stream_solver, options=solver_opt)
print('Finished first stage')
# Fix lower level primal variables to their current values
_fix_LLPRIM_vars()
# Free phi
model.phi.unfix()
                        # Re-run model to compute the baseline that minimises the difference between the average price and the target
r = opt.solve(model, keepfiles=keepfiles, tee=stream_solver, options=solver_opt)
# Store solutions in results object
model.solutions.store_to(r)
# Add fixed generator and node data to model object
r = _store_output(r)
# Convert results object to DataFrame
try:
df_results = | pd.DataFrame(r['Solution'][0]) | pandas.DataFrame |
"""Utility functions, mostly for internal use."""
import os
import colorsys
import warnings
from urllib.request import urlopen, urlretrieve
from http.client import HTTPException
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.colors as mplcol
import matplotlib.pyplot as plt
__all__ = ["desaturate", "saturate", "set_hls_values",
"despine", "get_dataset_names", "get_data_home", "load_dataset"]
def remove_na(arr):
"""Helper method for removing NA values from array-like.
Parameters
----------
arr : array-like
The array-like from which to remove NA values.
Returns
-------
clean_arr : array-like
The original array with NA values removed.
"""
return arr[pd.notnull(arr)]
def sort_df(df, *args, **kwargs):
"""Wrapper to handle different pandas sorting API pre/post 0.17."""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg)
try:
return df.sort_values(*args, **kwargs)
except AttributeError:
return df.sort(*args, **kwargs)
def ci_to_errsize(cis, heights):
"""Convert intervals to error arguments relative to plot heights.
Parameters
----------
cis: 2 x n sequence
sequence of confidence interval limits
heights : n sequence
sequence of plot heights
Returns
-------
errsize : 2 x n array
sequence of error size relative to height values in correct
format as argument for plt.bar
"""
cis = np.atleast_2d(cis).reshape(2, -1)
heights = np.atleast_1d(heights)
errsize = []
for i, (low, high) in enumerate(np.transpose(cis)):
h = heights[i]
elow = h - low
ehigh = high - h
errsize.append([elow, ehigh])
errsize = np.asarray(errsize).T
return errsize
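# Example (illustrative, not part of the original module): lower/upper CI limits
# [2, 3] and [5, 6] around bar heights [3, 4] become a (2, n) array of lower/upper
# error sizes suitable for plt.bar's yerr argument:
#   ci_to_errsize([[2, 3], [5, 6]], [3, 4]) -> array([[1, 1], [2, 2]])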
def pmf_hist(a, bins=10):
"""Return arguments to plt.bar for pmf-like histogram of an array.
DEPRECATED: will be removed in a future version.
Parameters
----------
a: array-like
array to make histogram of
bins: int
number of bins
Returns
-------
x: array
left x position of bars
h: array
height of bars
w: float
width of bars
"""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg)
n, x = np.histogram(a, bins)
h = n / n.sum()
w = x[1] - x[0]
return x[:-1], h, w
def desaturate(color, prop):
"""Decrease the saturation channel of a color by some percent.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
prop : float
saturation channel of color will be multiplied by this value
Returns
-------
new_color : rgb tuple
desaturated color code in RGB tuple representation
"""
# Check inputs
if not 0 <= prop <= 1:
raise ValueError("prop must be between 0 and 1")
# Get rgb tuple rep
rgb = mplcol.colorConverter.to_rgb(color)
# Convert to hls
h, l, s = colorsys.rgb_to_hls(*rgb)
# Desaturate the saturation channel
s *= prop
# Convert back to rgb
new_color = colorsys.hls_to_rgb(h, l, s)
return new_color
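# Example (illustrative, not part of the original module): halving the saturation of pure red,
#   desaturate("red", 0.5) -> (0.75, 0.25, 0.25)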
def saturate(color):
"""Return a fully saturated color with the same hue.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
Returns
-------
new_color : rgb tuple
saturated color code in RGB tuple representation
"""
return set_hls_values(color, s=1)
def set_hls_values(color, h=None, l=None, s=None): # noqa
"""Independently manipulate the h, l, or s channels of a color.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
h, l, s : floats between 0 and 1, or None
new values for each channel in hls space
Returns
-------
new_color : rgb tuple
new color code in RGB tuple representation
"""
# Get an RGB tuple representation
rgb = mplcol.colorConverter.to_rgb(color)
vals = list(colorsys.rgb_to_hls(*rgb))
for i, val in enumerate([h, l, s]):
if val is not None:
vals[i] = val
rgb = colorsys.hls_to_rgb(*vals)
return rgb
def axlabel(xlabel, ylabel, **kwargs):
"""Grab current axis and label it."""
ax = plt.gca()
ax.set_xlabel(xlabel, **kwargs)
ax.set_ylabel(ylabel, **kwargs)
def despine(fig=None, ax=None, top=True, right=True, left=False,
bottom=False, offset=None, trim=False):
"""Remove the top and right spines from plot(s).
fig : matplotlib figure, optional
Figure to despine all axes of, default uses current figure.
ax : matplotlib axes, optional
Specific axes object to despine.
top, right, left, bottom : boolean, optional
If True, remove that spine.
offset : int or dict, optional
Absolute distance, in points, spines should be moved away
from the axes (negative values move spines inward). A single value
applies to all spines; a dict can be used to set offset values per
side.
trim : bool, optional
If True, limit spines to the smallest and largest major tick
on each non-despined axis.
Returns
-------
None
"""
# Get references to the axes we want
if fig is None and ax is None:
axes = plt.gcf().axes
elif fig is not None:
axes = fig.axes
elif ax is not None:
axes = [ax]
for ax_i in axes:
for side in ["top", "right", "left", "bottom"]:
# Toggle the spine objects
is_visible = not locals()[side]
ax_i.spines[side].set_visible(is_visible)
if offset is not None and is_visible:
try:
val = offset.get(side, 0)
except AttributeError:
val = offset
ax_i.spines[side].set_position(('outward', val))
# Potentially move the ticks
if left and not right:
maj_on = any(
t.tick1line.get_visible()
for t in ax_i.yaxis.majorTicks
)
min_on = any(
t.tick1line.get_visible()
for t in ax_i.yaxis.minorTicks
)
ax_i.yaxis.set_ticks_position("right")
for t in ax_i.yaxis.majorTicks:
t.tick2line.set_visible(maj_on)
for t in ax_i.yaxis.minorTicks:
t.tick2line.set_visible(min_on)
if bottom and not top:
maj_on = any(
t.tick1line.get_visible()
for t in ax_i.xaxis.majorTicks
)
min_on = any(
t.tick1line.get_visible()
for t in ax_i.xaxis.minorTicks
)
ax_i.xaxis.set_ticks_position("top")
for t in ax_i.xaxis.majorTicks:
t.tick2line.set_visible(maj_on)
for t in ax_i.xaxis.minorTicks:
t.tick2line.set_visible(min_on)
if trim:
# clip off the parts of the spines that extend past major ticks
xticks = np.asarray(ax_i.get_xticks())
if xticks.size:
firsttick = np.compress(xticks >= min(ax_i.get_xlim()),
xticks)[0]
lasttick = np.compress(xticks <= max(ax_i.get_xlim()),
xticks)[-1]
ax_i.spines['bottom'].set_bounds(firsttick, lasttick)
ax_i.spines['top'].set_bounds(firsttick, lasttick)
newticks = xticks.compress(xticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
ax_i.set_xticks(newticks)
yticks = np.asarray(ax_i.get_yticks())
if yticks.size:
firsttick = np.compress(yticks >= min(ax_i.get_ylim()),
yticks)[0]
lasttick = np.compress(yticks <= max(ax_i.get_ylim()),
yticks)[-1]
ax_i.spines['left'].set_bounds(firsttick, lasttick)
ax_i.spines['right'].set_bounds(firsttick, lasttick)
newticks = yticks.compress(yticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
ax_i.set_yticks(newticks)
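# Example usage (illustrative, not part of the original module):
#   fig, ax = plt.subplots()
#   ax.plot([0, 1, 2], [0, 1, 4])
#   despine(ax=ax, offset=10, trim=True) # drop top/right spines, offset and trim the others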
def _kde_support(data, bw, gridsize, cut, clip):
"""Establish support for a kernel density estimate."""
support_min = max(data.min() - bw * cut, clip[0])
support_max = min(data.max() + bw * cut, clip[1])
return np.linspace(support_min, support_max, gridsize)
def percentiles(a, pcts, axis=None):
"""Like scoreatpercentile but can take and return array of percentiles.
DEPRECATED: will be removed in a future version.
Parameters
----------
a : array
data
pcts : sequence of percentile values
percentile or percentiles to find score at
axis : int or None
if not None, computes scores over this axis
Returns
-------
scores: array
array of scores at requested percentiles
first dimension is length of object passed to ``pcts``
"""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg)
scores = []
try:
n = len(pcts)
except TypeError:
pcts = [pcts]
n = 0
for i, p in enumerate(pcts):
if axis is None:
score = stats.scoreatpercentile(a.ravel(), p)
else:
score = np.apply_along_axis(stats.scoreatpercentile, axis, a, p)
scores.append(score)
scores = np.asarray(scores)
if not n:
scores = scores.squeeze()
return scores
def ci(a, which=95, axis=None):
"""Return a percentile range from an array of values."""
p = 50 - which / 2, 50 + which / 2
return np.percentile(a, p, axis)
def sig_stars(p):
"""Return a R-style significance string corresponding to p values.
DEPRECATED: will be removed in a future version.
"""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg)
if p < 0.001:
return "***"
elif p < 0.01:
return "**"
elif p < 0.05:
return "*"
elif p < 0.1:
return "."
return ""
def iqr(a):
"""Calculate the IQR for an array of numbers."""
a = np.asarray(a)
q1 = stats.scoreatpercentile(a, 25)
q3 = stats.scoreatpercentile(a, 75)
return q3 - q1
def get_dataset_names():
"""Report available example datasets, useful for reporting issues."""
# delayed import to not demand bs4 unless this function is actually used
from bs4 import BeautifulSoup
http = urlopen('https://github.com/mwaskom/seaborn-data/')
gh_list = BeautifulSoup(http)
return [l.text.replace('.csv', '')
for l in gh_list.find_all("a", {"class": "js-navigation-open"})
if l.text.endswith('.csv')]
def get_data_home(data_home=None):
"""Return a path to the cache directory for example datasets.
This directory is then used by :func:`load_dataset`.
If the ``data_home`` argument is not specified, it tries to read from the
``SEABORN_DATA`` environment variable and defaults to ``~/seaborn-data``.
"""
if data_home is None:
data_home = os.environ.get('SEABORN_DATA',
os.path.join('~', 'seaborn-data'))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
def load_dataset(name, cache=True, data_home=None, **kws):
"""Load an example dataset from the online repository (requires internet).
This function provides quick access to a small number of example datasets
that are useful for documenting seaborn or generating reproducible examples
for bug reports. It is not necessary for normal usage.
Note that some of the datasets have a small amount of preprocessing applied
to define a proper ordering for categorical variables.
Use :func:`get_dataset_names` to see a list of available datasets.
Parameters
----------
name : str
Name of the dataset (``{name}.csv`` on
https://github.com/mwaskom/seaborn-data).
cache : boolean, optional
If True, try to load from the local cache first, and save to the cache
if a download is required.
data_home : string, optional
The directory in which to cache data; see :func:`get_data_home`.
kws : keys and values, optional
        Additional keyword arguments are passed through to
:func:`pandas.read_csv`.
Returns
-------
df : :class:`pandas.DataFrame`
Tabular data, possibly with some preprocessing applied.
"""
path = ("https://raw.githubusercontent.com/"
"mwaskom/seaborn-data/master/{}.csv")
full_path = path.format(name)
if cache:
cache_path = os.path.join(get_data_home(data_home),
os.path.basename(full_path))
if not os.path.exists(cache_path):
urlretrieve(full_path, cache_path)
full_path = cache_path
df = pd.read_csv(full_path, **kws)
if df.iloc[-1].isnull().all():
df = df.iloc[:-1]
# Set some columns as a categorical type with ordered levels
if name == "tips":
df["day"] = pd.Categorical(df["day"], ["Thur", "Fri", "Sat", "Sun"])
df["sex"] = pd.Categorical(df["sex"], ["Male", "Female"])
df["time"] = pd.Categorical(df["time"], ["Lunch", "Dinner"])
df["smoker"] = pd.Categorical(df["smoker"], ["Yes", "No"])
if name == "flights":
df["month"] = pd.Categorical(df["month"], df.month.unique())
if name == "exercise":
df["time"] = pd.Categorical(df["time"], ["1 min", "15 min", "30 min"])
df["kind"] = pd.Categorical(df["kind"], ["rest", "walking", "running"])
df["diet"] = pd.Categorical(df["diet"], ["no fat", "low fat"])
if name == "titanic":
df["class"] = | pd.Categorical(df["class"], ["First", "Second", "Third"]) | pandas.Categorical |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from datetime import datetime
from sys import stdout
from sklearn.preprocessing import scale
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel
from sklearn.utils.validation import indexable
from sklearn.model_selection import check_cv
from sklearn.metrics.scorer import check_scoring
from sklearn.model_selection._validation import _fit_and_score
from sklearn.externals.joblib import Parallel, delayed
def compute_features():
# Load json data
with open('json_file.json') as data_file:
patients = json.load(data_file)
print("JSON file loaded")
# Features computation
print("Features computation launched")
visits = []
for patient in patients.values():
for i in range(1, len(patient['visits']) + 1):
visits.append(patient['visits'][str(i)])
n_visits = len(visits)
print("n_visits = %s" % n_visits)
# Features DataFrame with encounter_nums index
encounter_nums = [int(visit.get('encounter_num')) for visit in visits]
X = pd.DataFrame(index=encounter_nums)
# Time vector & censoring indicator
print("Adding labels...", end="")
next_visit = [visit.get('next_visit') for visit in visits]
T = np.array([1e10 if str(t) == 'none' else t for t in next_visit]).astype(
int)
end_dates = pd.to_datetime([visit.get('end_date') for visit in visits])
C = pd.to_datetime('2016-01-15 00:00:00') - end_dates
days, seconds = C.days, C.seconds
C = days * 24 + seconds // 3600 # in hours (discrete)
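    # Right-censoring construction: delta = 1 when the next visit (T) is observed before the
    # administrative censoring time (C), 0 otherwise; the observed label Y is then T for
    # uncensored visits and C for censored ones.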
delta = (T <= C).astype(int)
Y = T
Y[delta == 0] = C[delta == 0]
labels = pd.DataFrame({'Y': Y, 'delta': delta}, index=encounter_nums)
X = pd.concat([X, labels], axis=1)
print(" done")
# Basic features
print("Adding basic features...", end="")
# Add also patient_num & encounter_num for future random choice
patient_num, encounter_num = [], []
sex, baseline_HB, genotype_SS, age, transfu_count = [], [], [], [], []
LS_ALONE, LS_INACTIVE, MH_ACS, MH_AVN, MH_DIALISIS = [], [], [], [], []
MH_HEART_FAILURE, MH_ISCHEMIC_STROKE, MH_LEG_ULCER = [], [], []
MH_NEPHROPATHY, MH_PHTN, MH_PRIAPISM, MH_RETINOPATHY = [], [], [], []
OPIOID_TO_DISCHARGE, ORAL_OPIOID, USED_MORPHINE = [], [], []
USED_OXYCODONE, duration, previous_visit, rea = [], [], [], []
for patient in patients.values():
for _ in range(1, len(patient['visits']) + 1):
patient_num.append(patient['patient_num'])
sex.append(1 if int(patient['sex']) == 1 else 0)
baseline_HB.append(patient['baseline_HB'])
genotype_SS.append(patient['genotype_SS'])
for visit in visits:
encounter_num.append(visit.get('encounter_num'))
age.append(visit.get('age'))
rea.append(visit.get('rea'))
LS_ALONE.append(visit.get('LS_ALONE'))
LS_INACTIVE.append(visit.get('LS_INACTIVE'))
MH_ACS.append(visit.get('MH_ACS'))
MH_AVN.append(visit.get('MH_AVN'))
MH_DIALISIS.append(visit.get('MH_DIALISIS'))
MH_HEART_FAILURE.append(visit.get('MH_HEART_FAILURE'))
MH_ISCHEMIC_STROKE.append(visit.get('MH_ISCHEMIC_STROKE'))
MH_LEG_ULCER.append(visit.get('MH_LEG_ULCER'))
MH_NEPHROPATHY.append(visit.get('MH_NEPHROPATHY'))
MH_PHTN.append(visit.get('MH_PHTN'))
MH_PRIAPISM.append(visit.get('MH_PRIAPISM'))
MH_RETINOPATHY.append(visit.get('MH_RETINOPATHY'))
ORAL_OPIOID.append(visit.get('ORAL_OPIOID'))
USED_MORPHINE.append(visit.get('USED_MORPHINE'))
USED_OXYCODONE.append(visit.get('USED_OXYCODONE'))
duration.append(visit.get('duration'))
previous_visit.append(visit.get('previous_visit'))
transfu_count.append(visit.get('transfu_count'))
threshold = 24 * 30 * 18 # 18 months
previous_visit = [0 if (t == 'none' or t > threshold) else 1 for t in
previous_visit]
MH_ACS = [1 if int(x) == 2 else x for x in MH_ACS]
MH_AVN = [1 if int(x) == 2 else x for x in MH_AVN]
MH_DIALISIS = [1 if int(x) == 2 else x for x in MH_DIALISIS]
MH_HEART_FAILURE = [1 if int(x) == 2 else x for x in MH_HEART_FAILURE]
MH_ISCHEMIC_STROKE = [1 if int(x) == 2 else x for x in MH_ISCHEMIC_STROKE]
MH_LEG_ULCER = [1 if int(x) == 2 else x for x in MH_LEG_ULCER]
MH_NEPHROPATHY = [1 if int(x) == 2 else x for x in MH_NEPHROPATHY]
MH_PHTN = [1 if int(x) == 2 else x for x in MH_PHTN]
MH_PRIAPISM = [1 if int(x) == 2 else x for x in MH_PRIAPISM]
MH_RETINOPATHY = [1 if int(x) == 2 else x for x in MH_RETINOPATHY]
X_basic = pd.DataFrame(
{'patient_num': patient_num, 'encounter_num': encounter_num, 'sex': sex,
'genotype_SS': genotype_SS, 'age': age, 'rea': rea,
'LS_INACTIVE': LS_INACTIVE, 'MH_ACS': MH_ACS, 'MH_AVN': MH_AVN,
'MH_DIALISIS': MH_DIALISIS, 'MH_HEART_FAILURE': MH_HEART_FAILURE,
'MH_ISCHEMIC_STROKE': MH_ISCHEMIC_STROKE,
'MH_LEG_ULCER': MH_LEG_ULCER, 'LS_ALONE': LS_ALONE,
'MH_NEPHROPATHY': MH_NEPHROPATHY, 'MH_PHTN': MH_PHTN,
'MH_PRIAPISM': MH_PRIAPISM, 'MH_RETINOPATHY': MH_RETINOPATHY,
'ORAL_OPIOID': ORAL_OPIOID, 'baseline_HB': baseline_HB,
'USED_MORPHINE': USED_MORPHINE, 'USED_OXYCODONE': USED_OXYCODONE,
'duration': duration, 'previous_visit': previous_visit,
'transfu_count': transfu_count},
index=encounter_nums)
X = pd.concat([X, X_basic], axis=1)
print(" done")
# Bio data
print("Adding bio features...", end="")
bio_data, bio_names = pd.DataFrame(), []
for visit in visits:
encounter_num = int(visit.get('encounter_num'))
tmp = pd.DataFrame(index=[encounter_num])
end_date = pd.to_datetime(visit.get('end_date'))
for bio_name, bio_values in visit.get('bio').items():
# keep last value
bio_names.append(bio_name)
values = [val['nval_num'] for val in bio_values.values()]
tmp[bio_name] = values[-1]
# only keep last 48h values
offset = end_date - pd.DateOffset(hours=48)
values, index = [], []
for dic in bio_values.values():
val_time = pd.to_datetime(dic['date_bio'])
if val_time > offset:
values.append(float(dic['nval_num']))
index.append(float(
(val_time - offset) / pd.Timedelta(
'1 hour')))
# if at least 2 pts, add slope
if len(values) > 1:
x, y = index, values
# least-squares
A = np.vstack([np.array(x), np.ones(len(x))]).T
slope, _ = np.linalg.lstsq(A, y)[0]
else:
slope = np.nan
bio_names.append(bio_name + ' slope')
tmp[bio_name + ' slope'] = slope
bio_data = bio_data.append(tmp)
bio_names_count = pd.Series(
bio_names).value_counts() * 100 / n_visits
bio_percentage = 35
bio_param_kept = bio_names_count[bio_names_count > bio_percentage]
bio_data = bio_data[bio_param_kept.index]
print(" done")
X = pd.concat([X, bio_data], axis=1)
# Vital parameters data
print("\nAdding vital parameters features...")
param_no_gp = ['Poids [kg]', 'Taille [cm]',
'Débit O2 [L/min]']
param_gp = ['Fréquence cardiaque [bpm]',
'Fréquence respiratoire [mvt/min]', 'PA max [mmHg]',
'PA min [mmHg]', 'Température [°C]',
'Saturation en oxygène [%]']
plot_curves_for_visits = np.random.randint(1, n_visits + 1, 3)
print("\nPlot Gaussian Processes learned for a few random sampled visits")
vital_parameter_data = | pd.DataFrame() | pandas.DataFrame |
"""
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager
import datetime
from functools import partial, wraps
import inspect
import re
import types
from typing import (
Callable,
Dict,
FrozenSet,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas._config.config import option_context
from pandas._libs import Timestamp
import pandas._libs.groupby as libgroupby
from pandas._typing import FrameOrSeries, Scalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
ensure_float,
is_bool_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, DatetimeArray
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base, ops
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
_common_see_also = """
See Also
--------
Series.%(name)s
DataFrame.%(name)s
"""
_apply_docs = dict(
template="""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
then take care of combining the results back together into a single
dataframe or series. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
    like `agg` or `transform`. Pandas offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
Parameters
----------
func : callable
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Series or DataFrame
See Also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: below the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `apply` takes a DataFrame as
its argument and returns a Series. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `apply` takes a DataFrame as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: The function passed to `apply` takes a Series as
its argument and returns a Series. `apply` combines the result for
each group together into a new Series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `apply` takes a Series as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
Notes
-----
In the current implementation `apply` calls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
""",
)
_pipe_template = """
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
Positional arguments passed into `func`.
kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values.
Parameters
----------
f : function
Function to apply to each group.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.aggregate
%(klass)s.transform
Notes
-----
Each group is endowed with the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
Broadcast result of the transformation
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
_agg_template = """
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.transform
%(klass)s.aggregate
Notes
-----
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
%(examples)s
"""
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects.
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = "plot"
return self._groupby.apply(f)
def __getattr__(self, name: str):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
@contextmanager
def _group_selection_context(groupby):
"""
Set / reset the _group_selection_context.
"""
groupby._set_group_selection()
yield groupby
groupby._reset_group_selection()
_KeysArgType = Union[
Hashable,
List[Hashable],
Callable[[Hashable], Hashable],
List[Callable[[Hashable], Hashable]],
Mapping[Hashable, Hashable],
]
class _GroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]):
_group_selection = None
_apply_whitelist: FrozenSet[str] = frozenset()
def __init__(
self,
obj: FrameOrSeries,
keys: Optional[_KeysArgType] = None,
axis: int = 0,
level=None,
grouper: "Optional[ops.BaseGrouper]" = None,
exclusions=None,
selection=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
dropna: bool = True,
):
self._selection = selection
assert isinstance(obj, NDFrame), type(obj)
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError("as_index=False only valid with DataFrame")
if axis != 0:
raise ValueError("as_index=False only valid for axis=0")
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = mutated
self.dropna = dropna
if grouper is None:
from pandas.core.groupby.grouper import get_grouper
grouper, exclusions, obj = get_grouper(
obj,
keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated,
dropna=self.dropna,
)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self) -> int:
return len(self.groups)
def __repr__(self) -> str:
# TODO: Better repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
        We create the grouper on instantiation; sub-classes may have a
        different policy.
"""
pass
@property
def groups(self):
"""
Dict {group name -> group labels}.
"""
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
"""
Dict {group name -> group indices}.
"""
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
        Safely get multiple indices, translating datelike keys to their
        underlying repr.
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, datetime.datetime):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = "must supply a tuple to get_group with multiple grouping keys"
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError as err:
# turns out it wasn't a tuple
msg = (
"must supply a same-length tuple to get_group "
"with multiple grouping keys"
)
raise ValueError(msg) from err
converters = [get_converter(s) for s in index_sample]
names = (tuple(f(n) for f, n in zip(converters, name)) for name in names)
else:
converter = get_converter(index_sample)
names = (converter(name) for name in names)
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
"""
        Safely get a single index, translating datelike keys to their underlying repr.
"""
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
# Note: _selected_obj is always just `self.obj` for SeriesGroupBy
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection.
Used for methods needing to return info on each group regardless of
whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache("_selected_obj")
def _set_group_selection(self):
"""
Create group based selection.
Used when selection is not passed directly but instead via a grouper.
NOTE: this should be paired with a call to _reset_group_selection
"""
grp = self.grouper
if not (
self.as_index
and getattr(grp, "groupings", None) is not None
and self.obj.ndim > 1
and self._group_selection is None
):
return
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
self._reset_cache("_selected_obj")
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
@Substitution(
klass="GroupBy",
versionadded=".. versionadded:: 0.21.0",
examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each group's maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""",
)
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
assert name in self._apply_whitelist
self._set_group_selection()
        # need to set up the selection
        # as the columns are not passed directly but via the grouper
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
sig = inspect.signature(f)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
if "axis" in sig.parameters:
if kwargs.get("axis", None) is None:
kwargs["axis"] = self.axis
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in base.plotting_methods:
return self.apply(curried)
try:
return self.apply(curried)
except TypeError as err:
if not re.search(
"reduction operation '.*' not allowed for this dtype", str(err)
):
# We don't have a cython implementation
# TODO: is the above comment accurate?
raise
if self.obj.ndim == 1:
# this can be called recursively, so need to raise ValueError
raise ValueError
# GH#3688 try to operate item-by-item
result = self._aggregate_item_by_item(name, *args, **kwargs)
return result
wrapper.__name__ = name
return wrapper
def get_group(self, name, obj=None):
"""
Construct DataFrame from group with provided name.
Parameters
----------
name : object
The name of the group to get as a DataFrame.
obj : DataFrame, default None
            The DataFrame to take the group out of. If it is None,
            the object the groupby was called on will be used.
Returns
-------
group : same type as obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj._take_with_is_copy(inds, axis=self.axis)
def __iter__(self):
"""
Groupby iterator.
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Appender(
_apply_docs["template"].format(
input="dataframe", examples=_apply_docs["dataframe_examples"]
)
)
def apply(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all="ignore"):
return func(g, *args, **kwargs)
elif hasattr(nanops, "nan" + func):
# TODO: should we wrap this in to e.g. _is_builtin_func?
f = getattr(nanops, "nan" + func)
else:
raise ValueError(
"func must be a callable if args or kwargs are supplied"
)
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context("mode.chained_assignment", None):
try:
result = self._python_apply_general(f)
except TypeError:
# gh-20949
# try again, with .apply acting as a filtering
# operation, by excluding the grouping column
# This would normally not be triggered
# except if the udf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
with _group_selection_context(self):
return self._python_apply_general(f)
return result
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj, self.axis)
return self._wrap_applied_output(
keys, values, not_indexed_same=mutated or self.mutated
)
def _iterate_slices(self) -> Iterable[Series]:
raise AbstractMethodError(self)
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending: bool = True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
def _transform_should_cast(self, func_nm: str) -> bool:
"""
Parameters
----------
func_nm: str
The name of the aggregation function being performed
Returns
-------
bool
Whether transform should attempt to cast the result of aggregation
"""
return (self.size().fillna(0) > 0).any() and (
func_nm not in base.cython_cast_blacklist
)
def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs):
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, _ = self.grouper.transform(obj.values, how, **kwargs)
except NotImplementedError:
continue
if self._transform_should_cast(how):
result = maybe_cast_result(result, obj, how=how)
key = base.OutputKey(label=name, position=idx)
output[key] = result
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_transformed_output(output)
def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False):
raise AbstractMethodError(self)
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
):
output: Dict[base.OutputKey, Union[np.ndarray, DatetimeArray]] = {}
# Ideally we would be able to enumerate self._iterate_slices and use
# the index from enumeration as the key of output, but ohlc in particular
# returns a (n x 4) array. Output requires 1D ndarrays as values, so we
# need to slice that up into 1D arrays
idx = 0
for obj in self._iterate_slices():
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
result, agg_names = self.grouper.aggregate(
obj._values, how, min_count=min_count
)
if agg_names:
# e.g. ohlc
assert len(agg_names) == result.shape[1]
for result_column, result_name in zip(result.T, agg_names):
key = base.OutputKey(label=result_name, position=idx)
output[key] = maybe_cast_result(result_column, obj, how=how)
idx += 1
else:
assert result.ndim == 1
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, how=how)
idx += 1
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_aggregated_output(output)
def _python_agg_general(
self, func, *args, engine="cython", engine_kwargs=None, **kwargs
):
func = self._is_builtin_func(func)
if engine != "numba":
f = lambda x: func(x, *args, **kwargs)
        # iterate through "columns" (excluding exclusions) to populate the output dict
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
if self.grouper.ngroups == 0:
# agg_series below assumes ngroups > 0
continue
if engine == "numba":
result, counts = self.grouper.agg_series(
obj,
func,
*args,
engine=engine,
engine_kwargs=engine_kwargs,
**kwargs,
)
else:
try:
# if this function is invalid for this dtype, we will ignore it.
result, counts = self.grouper.agg_series(obj, f)
except TypeError:
continue
assert result is not None
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, numeric_only=True)
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for key, result in output.items():
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = ensure_float(values)
output[key] = maybe_cast_result(values[mask], result)
return self._wrap_aggregated_output(output)
def _concat_objects(self, keys, values, not_indexed_same: bool = False):
from pandas.core.reshape.concat import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in com.not_none(*values):
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
            result = concat(values, axis=self.axis)
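# Standalone illustration (not pandas internals): `_concat_objects` above leans
# on `pandas.concat` to stitch per-group results back together, optionally with
# the group labels as an outer index level via ``keys``. A minimal sketch:
def _concat_groups_sketch():
    import pandas as pd
    pieces = {'a': pd.Series([1, 2]), 'b': pd.Series([3])}
    return pd.concat(list(pieces.values()), keys=list(pieces.keys()))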
import os
import pickle
import pandas as pd
from collections import Counter
from numpy.random import choice
import random
import re
import simplejson
data_dir = '/home/hsinghal/workspace/DB_AS_A_SERVICE/input_data'
store_into = '/home/hsinghal/workspace/DB_AS_A_SERVICE/custom_scripts'
# -------------------------------
# Identify the population distribution across cities
# -------------------------------
# World cities data
world_cities = pd.read_table(os.path.join(data_dir, 'worldcitiespop_sample.txt'), sep=",", index_col=None)
# Keep only US cities
us_cities = world_cities[world_cities.Country == 'us']
# Keep cities with available population numbers
us_cities_with_pop = us_cities[us_cities.Population > 0]
# Get population probabilities
us_cities_with_pop['population_prob'] = us_cities_with_pop.Population / us_cities_with_pop.Population.sum()
# Keep only relevant columns
us_cities_data = us_cities_with_pop[['City', 'Region', 'population_prob']]
# Create list of (city,state) and a list of probabilities
city_state = []
city_state_population_prob = []
for each_row in us_cities_data.iterrows():
row = each_row[1]
# city_state.append((row['City'].title(), row['Region']))
city_state.append(row['City'].title())
city_state_population_prob.append(row['population_prob'])
choice(city_state, p=city_state_population_prob)
# Store pickled
with open(os.path.join(store_into, 'city_pop_distr.pkl'), 'wb') as fout:
pickle.dump([city_state, city_state_population_prob], fout, -1)
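# Quick sanity check (illustrative, mirrors the smoke test above): reload the
# pickled distribution and draw a handful of city names weighted by population.
with open(os.path.join(store_into, 'city_pop_distr.pkl'), 'rb') as fin:
    _cities, _city_probs = pickle.load(fin)
choice(_cities, size=5, p=_city_probs)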
# -----------------
# Get Names data
# -----------------
# Read all names from NationalNames.csv
national_names = pd.read_table(os.path.join(data_dir, 'NationalNames_sample.csv'), sep=",", index_col=False)
national_names.columns
national_names.head()
firstnames_namedb = pd.read_table(os.path.join(data_dir, 'namedb_first_names_us.txt'), header=None)
firstnames_namedb.columns = ['firstname']
firstnames_namedb.head()
# Get all first names
first_names = list(set(national_names.Name.tolist() + firstnames_namedb.firstname.tolist()))
# Get last names
lastnames_namedb = pd.read_table(os.path.join(data_dir, 'namedb_surnames_us.txt'),header=None)
lastnames_namedb.columns = ['lastname']
last_names = list(set(lastnames_namedb.lastname.tolist()))
len(last_names)
# Get last names for uk
lastnames_namedb_uk = pd.read_table(os.path.join(data_dir, 'namedb_surnames_uk.txt'),header=None)
lastnames_namedb_uk.columns = ['lastname']
last_names_uk = list(set(lastnames_namedb_uk.lastname.tolist()))
len(last_names_uk)
last_names_list = list(set(last_names + last_names_uk))
with open(os.path.join(store_into,'first_last_names.pkl'),'wb') as fout:
pickle.dump([first_names, last_names_list], fout, -1)
# -----------------
# Get Email Address
# -----------------
# Get domains
popular_domains_distr = [
("gmail.com", 0.1),
("yahoo.com", 0.08),
("hotmail.com", 0.07),
("aol.com", 0.05),
("msn.com", 0.04)]
# ref https://github.com/mailcheck/mailcheck/wiki/List-of-Popular-Domains
# the list below is not used
email_domains_all = ["aol.com", "att.net", "comcast.net", "facebook.com", "gmail.com", "gmx.com", "googlemail.com",
"google.com", "hotmail.com", "hotmail.co.uk", "mac.com", "me.com", "mail.com", "msn.com",
"live.com", "sbcglobal.net", "verizon.net", "yahoo.com", "yahoo.co.uk",
"email.com", "games.com", "gmx.net", "hush.com", "hushmail.com", "icloud.com", "inbox.com",
"lavabit.com", "love.com" , "outlook.com", "pobox.com", "rocketmail.com",
"safe-mail.net", "wow.com", "ygm.com" , "ymail.com", "zoho.com", "fastmail.fm",
"yandex.com","iname.com",
"bellsouth.net", "charter.net", "comcast.net", "cox.net", "earthlink.net", "juno.com",
"btinternet.com", "virginmedia.com", "blueyonder.co.uk", "freeserve.co.uk", "live.co.uk",
"ntlworld.com", "o2.co.uk", "orange.net", "sky.com", "talktalk.co.uk", "tiscali.co.uk",
"virgin.net", "wanadoo.co.uk", "bt.com",
"sina.com", "qq.com", "naver.com", "hanmail.net", "daum.net", "nate.com", "yahoo.co.jp", "yahoo.co.kr", "yahoo.co.id", "yahoo.co.in", "yahoo.com.sg", "yahoo.com.ph",
"hotmail.fr", "live.fr", "laposte.net", "yahoo.fr", "wanadoo.fr", "orange.fr", "gmx.fr", "sfr.fr", "neuf.fr", "free.fr",
"gmx.de", "hotmail.de", "live.de", "online.de", "t-online.de", "web.de", "yahoo.de",
"mail.ru", "rambler.ru", "yandex.ru", "ya.ru", "list.ru",
"hotmail.be", "live.be", "skynet.be", "voo.be", "tvcablenet.be", "telenet.be",
"hotmail.com.ar", "live.com.ar", "yahoo.com.ar", "fibertel.com.ar", "speedy.com.ar", "arnet.com.ar",
"hotmail.com", "gmail.com", "yahoo.com.mx", "live.com.mx", "yahoo.com", "hotmail.es", "live.com", "hotmail.com.mx", "prodigy.net.mx", "msn.com",
"yahoo.com.br", "hotmail.com.br", "outlook.com.br", "uol.com.br", "bol.com.br", "terra.com.br", "ig.com.br", "itelefonica.com.br", "r7.com", "zipmail.com.br", "globo.com", "globomail.com", "oi.com.br"]
email_domains = pd.read_table(os.path.join(data_dir, 'email_domains.txt'), header=None, index_col=False)
email_domains.columns = ['email_domain']
email_domains_more = [k for k in email_domains.email_domain.tolist() if re.search("com$", k) and 'mail2' not in k]
prob_of_other_domains = (1 - sum([k[1] for k in popular_domains_distr])) / len(email_domains_more)
email_domains_items = [k[0] for k in popular_domains_distr] + email_domains_more
email_domains_prob = [k[1] for k in popular_domains_distr] + [prob_of_other_domains] * len(email_domains_more)
choice(email_domains_items, p=email_domains_prob)
with open(os.path.join(store_into,'email_domains_distr.pkl'),'wb') as fout:
pickle.dump([email_domains_items, email_domains_prob], fout, -1)
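# Illustrative only: one way the pieces built above could be combined into a
# fake email address. The first.last@domain scheme is an assumption made for
# this example, not something prescribed elsewhere in the script.
def sample_email():
    first = str(random.choice(first_names)).lower()
    last = str(random.choice(last_names_list)).lower()
    domain = choice(email_domains_items, p=email_domains_prob)
    return f"{first}.{last}@{domain}"
sample_email()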
# ----------------
# Get Git user name for email address
# ----------------
gitdata = open(os.path.join(data_dir, 'git_names.json'), 'r').readlines()
git_userids = []
for i in gitdata:
i = i.strip()
d = simplejson.loads(i)
git_userids.append(d['actor']['login'])
with open(os.path.join(store_into,'git_user_names.pkl'),'wb') as fout:
pickle.dump(git_userids, fout, -1)
# ***************************************************
# ***************************************************
# ---------------------
# Create Table Hotels
# ---------------------
def address_clean(addr):
if len(str(addr)) < 4:
return ''
elif str(addr) == 'NaN':
return ''
return str(addr)
"""
CREATE TABLE Hotels (
hotel_id int NOT NULL,
hotel_full_name varchar(255) NOT NULL,
hotel_latitude decimal(8,3),
hotel_longitude decimal(8,3),
hotel_address varchar(255) NOT NULL,
hotel_country varchar(255) NOT NULL,
hotel_currency varchar(10) NOT NULL,
hotel_star_rating varchar(10) NOT NULL,
hotel_location varchar(255) NOT NULL
);
"""
# Read Active Properties List
df_active_props = pd.read_table(os.path.join(data_dir, 'ActivePropertyList.txt'), sep="|", index_col=None)
df_active_props.head(10)
# Iterate over each row and only keep US hotels
result_rows = []
START_ID = 53689
for each_row in df_active_props.iterrows():
row = each_row[1]
temp_dict = {
'hotel_id': each_row[0] + START_ID,
'hotel_full_name': row['Name'],
'hotel_latitude': row['Latitude'],
'hotel_longitude': row['Longitude'],
'hotel_address': ' '.join([address_clean(row['Address1']), address_clean(row['Address2'])]),
'hotel_country': row['Country'],
'hotel_currency': row['PropertyCurrency'],
'hotel_star_rating': row['StarRating'],
'hotel_location': row['Location']
}
if row['Country'] == 'US':
result_rows.append(temp_dict)
# Create pandas data frame
hotels_df = pd.DataFrame.from_records(result_rows)
hotels_df.head()
hotels_df = hotels_df[['hotel_id', 'hotel_full_name', 'hotel_latitude', 'hotel_longitude', 'hotel_address', 'hotel_country', 'hotel_currency','hotel_star_rating','hotel_location']]
# Store results in results_data directory
hotels_df.to_pickle(os.path.join(store_into, 'hotels.pkl'))
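# Optional illustration: the CREATE TABLE statement above describes the target
# schema. With a SQLAlchemy engine the frame could be loaded directly; the
# connection string below is hypothetical and no engine is created in this script.
def load_hotels_to_sql(df, connection_string='sqlite:///hotels.db'):
    from sqlalchemy import create_engine
    engine = create_engine(connection_string)
    # if_exists='append' keeps any rows already present in the Hotels table
    df.to_sql('Hotels', engine, if_exists='append', index=False)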
# ------------------
# Hotel Facilities Reference List
# -----------------
"""
CREATE TABLE Ref_Hotel_Facilities (
facility_code int NOT NULL,
facility_description varchar(50) NOT NULL
);
"""
# Read hotel facilities list
df_ref_hotel_facilities = pd.read_table(os.path.join(data_dir, 'AttributeList.txt'), sep="|", index_col=None)
df_ref_hotel_facilities.head()
hotel_facilities_list = df_ref_hotel_facilities['AttributeDesc'].values.tolist()
result_rows = []
START_ID = 34213
ctr = 0
for each_row in hotel_facilities_list:
temp_dict = {
'facility_code': ctr + START_ID,
'facility_description': each_row
}
result_rows.append(temp_dict)
ctr += 1
# Create pandas data frame
ref_hotel_facilities = pd.DataFrame.from_records(result_rows)
# Store results in results_data directory
ref_hotel_facilities[['facility_code', 'facility_description']].to_pickle(os.path.join(store_into, 'ref_hotel_facilities.pkl'))
# ------------------
# Hotel Facilities - For Each Hotel
# -----------------
"""
CREATE TABLE Hotel_Facilities (
hotel_id int NOT NULL,
facility_code int NOT NULL
);
"""
# Read in hotels.pkl and ref_hotel_facilities.pkl
# For each hotel, identify hotel facilities
# Read ref_hotel_facilities.pkl
ref_hotel_facilities = pd.read_pickle('ref_hotel_facilities.pkl')
ref_hotel_facilities.head()
# Read hotels.pkl
hotels_df = pd.read_pickle('hotels.pkl')
# Iterate over each hotel
# For each hotel, get a random selection of facilities
all_results = []
for each_row in hotels_df.iterrows():
row = each_row[1]
# Get a random selection of facilities
random_percent = random.uniform(1, 5) / 100
hotels_ref = ref_hotel_facilities.sample(frac=random_percent, replace=False)
for each_facility in hotels_ref.iterrows():
temp_dict = {
'hotel_id': row['hotel_id'],
'facility_code': each_facility[1]['facility_code']
}
all_results.append(temp_dict)
# Create data frame
hotel_facilities_df = hotel_facilities = pd.DataFrame.from_records(all_results)
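# Illustrative sanity check (not part of the original pipeline): every
# facility_code assigned to a hotel should exist in the reference table.
assert hotel_facilities_df['facility_code'].isin(ref_hotel_facilities['facility_code']).all()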
import streamlit as st
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
from matplotlib import pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
st.title('Machine Learning - CLASSIFICATION')
st.sidebar.write("""
This is a web app demo using Python libraries such as Streamlit and scikit-learn (sklearn).
""")
#st.sidebar.write ("For more info, please contact:")
#st.sidebar.write("<a href='https://www.linkedin.com/in/yong-poh-yu/'>Dr. <NAME> </a>", unsafe_allow_html=True)
choice = st.sidebar.radio(
"Choose a dataset",
('Default', 'User-defined '),
index = 0
)
st.write(f"## You Have Selected <font color='Aquamarine'>{choice}</font> Dataset", unsafe_allow_html=True)
def get_default_dataset(name):
data = None
if name == 'Iris':
data = datasets.load_iris()
elif name == 'Wine':
data = datasets.load_wine()
else:
data = datasets.load_breast_cancer()
X = data.data
y = data.target
return X, y
def add_dataset_ui(choice_name):
X=[]
y=[]
X_names = []
X1 = []
if choice_name == 'Default':
dataset_name = st.sidebar.selectbox(
'Select Dataset',
('Iris', 'Breast Cancer', 'Wine')
)
X, y = get_default_dataset (dataset_name)
X_names = X
else:
uploaded_file = st.sidebar.file_uploader(
"Upload a CSV",
type='csv' )
        if uploaded_file is not None:
st.write(uploaded_file)
data = pd.read_csv(uploaded_file)
y_name = st.sidebar.selectbox(
'Select Label @ y variable',
sorted(data)
)
X_names = st.sidebar.multiselect(
'Select Predictors @ X variables.',
sorted(data),
default = sorted(data)[1],
help = "You may select more than one predictor"
)
y = data.loc[:,y_name]
X = data.loc[:,X_names]
X1 = X.select_dtypes(include=['object'])
X2 = X.select_dtypes(exclude=['object'])
if sorted(X1) != []:
X1 = X1.apply(LabelEncoder().fit_transform)
X = pd.concat([X2,X1],axis=1)
y = LabelEncoder().fit_transform(y)
else:
st.write(f"## <font color='Aquamarine'>Note: Please upload a CSV file to analyze the data.</font>", unsafe_allow_html=True)
return X,y, X_names, X1
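# Small self-contained sketch mirroring the branch above: object columns are
# label-encoded and re-joined with the numeric columns before modelling. The
# helper name and toy usage are illustrative additions, not part of the app.
def _encode_mixed_frame(frame):
    cat = frame.select_dtypes(include=['object'])
    num = frame.select_dtypes(exclude=['object'])
    if not cat.empty:
        cat = cat.apply(LabelEncoder().fit_transform)
    return pd.concat([num, cat], axis=1)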
X, y , X_names, cat_var= add_dataset_ui (choice)
classifier_name = st.sidebar.selectbox(
'Select classifier',
('KNN', 'SVM', 'Random Forest')
)
test_data_ratio = st.sidebar.slider('Select testing size or ratio',
min_value= 0.10,
max_value = 0.50,
value=0.2)
random_state = st.sidebar.slider('Select random state', 1, 9999,value=1234)
st.write("## 1: Summary (X variables)")
if len(X)==0:
st.write("<font color='Aquamarine'>Note: Predictors @ X variables have not been selected.</font>", unsafe_allow_html=True)
else:
st.write('Shape of predictors @ X variables :', X.shape)
    st.write('Summary of predictors @ X variables:', pd.DataFrame(X))
#!/usr/bin/env python3
import os
from collections import defaultdict, namedtuple
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegressionCV
from sklearn import metrics
from functools import partial
import gc
import pickle as pkl
import gzip
import json
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from camaptools.EnhancedFutures import EnhancedProcessPoolExecutor, EnhancedMPIPoolExecutor
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html : Note that in binary classification, recall of the positive class is also known as “sensitivity”; recall of the negative class is “specificity”.
Results = namedtuple('Results', ['df_results', 'df_results_max', 'df_coef', 'df_scores', 'auc_data'])
class RegressionMetaManager(object):
"""A wrapper around RegressionManagers that also takes care of running and distributing jobs for Datasets
"""
def __init__(self, datasets, out_dir, workers=0, executor=None):
self.out_dir = out_dir
self.workers = workers
self.executor = EnhancedProcessPoolExecutor if executor is None else executor
self.managers = [RegressionManager(dat, self.workers, self.executor) for dat in datasets]
for rem in self.managers:
rem.dataset.workers = self.workers
rem.dataset.executor = self.executor
def set_load_peptides_options(self, *args, **kwargs):
for rem in self.managers:
rem.dataset.load_peptides_options(*args, **kwargs)
def run(self):
tex = ThreadPoolExecutor(max_workers=1)
future = tex.submit(lambda: defaultdict(lambda: defaultdict(list)))
for rem in self.managers:
print(rem.name)
self._run(rem)
self.results = future.result()
future = tex.submit(self._save, rem, self.results, self.out_dir)
self.results = future.result()
tex.shutdown()
def join(self):
results = self.results
for subname in results:
df_results = pd.concat(results[subname]['df_results'], axis=1)
df_results_max = pd.concat(results[subname]['df_results_max'], axis=1)
df_coef = pd.concat(results[subname]['df_coef'], axis=0)
results[subname] = Results._make([df_results, df_results_max, df_coef, pd.DataFrame(), {}])
self.results = dict(results)
self.write(self.results, self.out_dir)
@staticmethod
def _run(rem):
#rem.dataset.pepfiles = [x for x in rem.dataset.pepfiles if 'W9' in x]
rem.dataset.load_peptides()
rem.dataset.construct_datasets()
rem.dataset.clear_unused()
rem.initialize_trainers()
rem.start(optimize_mem_usage=True)
rem.join()
@staticmethod
def _save(rem, results, out_sub_dir):
for subname in list(rem.results.keys()):
res = rem.results[subname]
out_dir = os.path.join(out_sub_dir, subname)
out_bak_dir = os.path.join(out_sub_dir, subname, '_bak')
out_scores_dir = os.path.join(out_sub_dir, subname, 'scores')
out_auc_dir = os.path.join(out_sub_dir, subname, 'auc')
os.makedirs(out_bak_dir, exist_ok=True)
os.makedirs(out_scores_dir, exist_ok=True)
os.makedirs(out_auc_dir, exist_ok=True)
res.df_results.to_csv(os.path.join(out_bak_dir, rem.name + '.results.tsv'),
sep='\t', float_format='%g')
res.df_results_max.to_csv(os.path.join(out_bak_dir, rem.name + '.results.max.tsv'),
sep='\t', float_format='%g')
res.df_coef.to_csv(os.path.join(out_bak_dir, rem.name + '.coefficients.tsv'),
sep='\t', float_format='%g')
res.df_scores.to_csv(os.path.join(out_scores_dir, rem.name + '.scores.tsv.gz'),
sep='\t', float_format='%g', compression='gzip')
with gzip.open(os.path.join(out_auc_dir, rem.name + '.auc_data.json.gz'), 'wb') as f:
f.write(str.encode(json.dumps(res.auc_data) + '\n'))
results[subname]['df_results'].append(res.df_results)
results[subname]['df_results_max'].append(res.df_results_max)
results[subname]['df_coef'].append(res.df_coef)
del res
del rem.results[subname]
gc.collect()
return results
@staticmethod
def write(results, out_base_dir):
for subname, res in results.items():
out_dir = os.path.join(out_base_dir, subname)
os.makedirs(out_dir, exist_ok=True)
out_file_results = os.path.join(out_dir, 'metrics.tsv')
out_file_results_max = os.path.join(out_dir, 'metrics.redmax.tsv')
out_file_coefficients = os.path.join(out_dir, 'coefficients.tsv')
print('Saving %s at %s' % (out_file_results, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
print('Saving %s at %s' % (out_file_results_max, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
print('Saving %s at %s' % (out_file_coefficients, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
res.df_results.to_csv(out_file_results, sep='\t', float_format='%g', na_rep='nan')
res.df_results_max.to_csv(out_file_results_max, sep='\t', float_format='%g', na_rep='nan')
res.df_coef.to_csv(out_file_coefficients, sep='\t', float_format='%g', na_rep='nan')
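# Hedged usage sketch (not taken from this code base): the intended call order
# for the manager above appears to be construct -> configure -> run -> join.
# The `datasets` argument and worker count are placeholders.
def _meta_manager_usage_sketch(datasets, out_dir):
    mgr = RegressionMetaManager(datasets, out_dir, workers=4)
    # mgr.set_load_peptides_options(...)  # forwarded to each Dataset; options unknown here
    mgr.run()   # loads peptides, trains, and saves per-dataset outputs
    mgr.join()  # concatenates per-dataset results and writes the summary tables
    return mgr.results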
class RegressionManager(object):
"""
"""
def __init__(self, dataset, workers=0, executor=None):
self.dataset = dataset
self.workers = workers
self.executor = EnhancedProcessPoolExecutor if executor is None else executor
self.allele = list(self.dataset.alleles)[0] if len(self.dataset.alleles) == 1 else 'HLA-MinScore'
self.name = self.dataset.dataset_name
def initialize_trainers(self):
self.trainers = []
subgroups = defaultdict(list)
for i, (dat, (seed, subname)) in enumerate(zip(self.dataset.datasets, self.dataset.metadata)):
self.trainers.append(RegressionTrainer(dat, seed))
subgroups[subname].append(i)
self.subgroups = dict(subgroups)
def start(self, optimize_mem_usage=False):
with self.executor(max_workers=self.workers, use_threads=False) as ex:
for t in self.trainers:
t.submit(ex)
print(ex)
if optimize_mem_usage:
for t in self.trainers:
del t.dataset
del self.dataset.datasets
gc.collect()
def join(self):
for t in self.trainers:
t.join()
self.results = {}
for subname, indexes in self.subgroups.items():
trainers = [self.trainers[i] for i in indexes]
metadata = [self.dataset.metadata[i] for i in indexes]
self.results[subname] = Results._make(self._join(trainers, metadata))
def _join(self, trainers, metadata):
results_dct = defaultdict(list)
results_max_dct = defaultdict(list)
coef_dct = defaultdict(list)
auc_data = defaultdict(list)
df_scores_list = []
for t, (replicate, subname) in zip(trainers, metadata):
for k in t.results_dct:
results_dct[k].append(t.results_dct[k])
for k in t.results_max_dct:
results_max_dct[k].append(t.results_max_dct[k])
for k in t.coef_dct:
coef_dct[k].append(t.coef_dct[k])
for k in t.auc_data:
auc_data[k].append(t.auc_data[k])
df_sc = t.df_scores.copy().replace(np.nan, None)
df_sc.index = pd.Index([(self.name, self.allele, replicate, x[0], x[1])
for x in df_sc.index], name= ['Dataset', 'Allele', 'N'] + df_sc.index.names)
df_scores_list.append(df_sc)
df_results = pd.DataFrame(results_dct).sort_index(axis=1)
df_results.index = df_results.index + 1
df_results.columns = pd.Index([(self.allele, x[0], x[1]) for x in df_results.columns])
df_results.index = pd.Index([(self.name, x) for x in df_results.index])
df_results = df_results.transpose()
df_results.index.names = ['Allele', 'Metric', 'Regression']
df_results_max = pd.DataFrame(results_max_dct).sort_index(axis=1)
df_results_max.index = df_results_max.index + 1
df_results_max.columns = pd.Index([(self.allele, x[0], x[1]) for x in df_results_max.columns])
df_results_max.index = pd.Index([(self.name, x) for x in df_results_max.index])
df_results_max = df_results_max.transpose()
df_results_max.index.names = ['Allele', 'Metric', 'Regression']
auc_data = {self.name + ':::' + self.allele: dict(auc_data)}
dct = {}
for prefix, val_lst in coef_dct.items():
for rep, values in enumerate(val_lst):
rep += 1
minidct = {var: val for var, val in zip(values['variable'], values['coefficient'])}
dct[(self.name, self.allele, prefix, rep)] = minidct
df_coef = pd.DataFrame(dct).transpose()
df_coef.index.names = ['Dataset', 'Allele', 'Regression', 'N']
df_scores = pd.concat(df_scores_list, copy=False, axis=0).sort_index(axis=1)
return df_results, df_results_max, df_coef, df_scores, auc_data
class RegressionTrainer(object):
"""
"""
def __init__(self, dataset, seed, workers=0, executor=None):
""" NOTE: Cannot fork if using MPIPoolExecutor (must use workers=0)
"""
self.workers = workers
# careful with parallelization as LogisticRegression spawns its own processes with all available CPUs
self.executor = EnhancedProcessPoolExecutor if executor is None else executor
self.dataset = dataset
self.seed = seed
self.all_combinations = [
["BS"],
["TPM"],
["CAMAPShuff"],
["CAMAP"],
["BS", "TPM"],
["BS", "CAMAPShuff"],
["BS", "CAMAP"],
["TPM", "CAMAPShuff"],
["TPM", "CAMAP"],
["BS", "TPM", "CAMAPShuff"],
["BS", "TPM", "CAMAP"]
]
def submit(self, ex=None):
# expects an active executor
ex = EnhancedProcessPoolExecutor(max_workers=0) if ex is None else ex # use sequential pseudo-threads
df_training, df_test = self.dataset
self.regressions = []
#w = self.workers + 1
#sub_x_labels_groups = [self.all_combinations[n:n+w] for n in range(0, len(self.all_combinations), w)]
#for sub_x_labels in sub_x_labels_groups:
# p = ex.submit(self._submit, sub_x_labels, df_training, df_test, self.seed, self.executor)
# self.regressions.append(p)
for x_labels in self.all_combinations:
p = ex.submit(self.train_regression, x_labels, df_training, df_test, self.seed)
self.regressions.append(p)
#@classmethod
#def _submit(cls, sub_x_labels, df_training, df_test, seed, executor):
# workers = len(sub_x_labels) - 1
# sub_regressions = []
# with executor(max_workers=workers, use_threads=True) as sex: # secondary ex
# for x_labels in sub_x_labels:
# p = sex.submit(cls.train_regression, x_labels, df_training, df_test, seed)
# sub_regressions.append(p)
# print(sex)
# return [p.result() for p in sub_regressions]
def join(self):
#regressions = [res for p in self.regressions for res in p.result()]
regressions = [p.result() for p in self.regressions]
results = {}
results_max = {}
coef_dct = {}
auc_data = {}
df_scores = pd.DataFrame()
for r, rm, coef, auc, df_sc in regressions:
results = {**results, **r}
results_max = {**results_max, **rm}
coef_dct = {**coef_dct, **coef}
auc_data = {**auc_data, **auc}
            df_scores = pd.concat([df_scores, df_sc], axis=1)
#!/usr/bin/env python3
import os
import sys
import re
import pandas as pd, geopandas as gpd
import numpy as np
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
from multiprocessing import Pool
from os.path import isfile, join
import shutil
import warnings
from pathlib import Path
import time
warnings.simplefilter(action='ignore', category=FutureWarning)
import rasterio
from rasterio import features as riofeatures
from rasterio import plot as rioplot
from shapely.geometry import Polygon
"""
Plot Rating Curves and Compare to USGS Gages
Parameters
----------
fim_dir : str
Directory containing FIM output folders.
output_dir : str
Directory containing rating curve plots and tables.
usgs_gages_filename : str
File name of USGS rating curves.
nwm_flow_dir : str
Directory containing NWM recurrence flows files.
number_of_jobs : str
Number of jobs.
stat_groups : str
string of columns to group eval metrics.
"""
def check_file_age(file):
'''
Checks if file exists, determines the file age, and recommends
updating if older than 1 month.
Returns
-------
    check : str
        A message describing the file age, or noting that the file is missing.
    '''
    file = Path(file)
    check = f'{file.name} does not exist.'
    if file.is_file():
modification_time = file.stat().st_mtime
current_time = time.time()
file_age_days = (current_time - modification_time)/86400
if file_age_days > 30:
check = f'{file.name} is {int(file_age_days)} days old, consider updating.\nUpdate with rating_curve_get_usgs_curves.py'
else:
check = f'{file.name} is {int(file_age_days)} days old.'
return check
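# Illustrative usage (the path below is hypothetical): surface the age check for
# the USGS gages file this script consumes.
def _report_gage_file_age(usgs_gages_filename='/data/inputs/usgs_gages/usgs_rating_curves.csv'):
    print(check_file_age(usgs_gages_filename))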
# recurr_intervals = ['recurr_1_5_cms.csv','recurr_5_0_cms.csv','recurr_10_0_cms.csv']
def generate_rating_curve_metrics(args):
elev_table_filename = args[0]
branches_folder = args[1]
usgs_gages_filename = args[2]
usgs_recurr_stats_filename = args[3]
nwm_recurr_data_filename = args[4]
rc_comparison_plot_filename = args[5]
nwm_flow_dir = args[6]
catfim_flows_filename = args[7]
huc = args[8]
alt_plot = args[9]
elev_table = pd.read_csv(elev_table_filename,dtype={'location_id': object, 'feature_id':object,'HydroID':object, 'levpa_id':object})
elev_table.dropna(subset=['location_id'], inplace=True)
usgs_gages = pd.read_csv(usgs_gages_filename,dtype={'location_id': object, 'feature_id':object})
# Aggregate FIM4 hydroTables
hydrotable = pd.DataFrame()
for branch in elev_table.levpa_id.unique():
branch_elev_table = elev_table.loc[elev_table.levpa_id == branch].copy()
branch_hydrotable = pd.read_csv(join(branches_folder, str(branch), f'hydroTable_{branch}.csv'),dtype={'HydroID':object,'feature_id':object})
# Only pull SRC for hydroids that are in this branch
branch_hydrotable = branch_hydrotable.loc[branch_hydrotable.HydroID.isin(branch_elev_table.HydroID)]
branch_hydrotable.drop(columns=['order_'], inplace=True)
# Join SRC with elevation data
branch_elev_table.rename(columns={'feature_id':'fim_feature_id'}, inplace=True)
branch_hydrotable = branch_hydrotable.merge(branch_elev_table, on="HydroID")
# Append to full rating curve dataframe
if hydrotable.empty:
hydrotable = branch_hydrotable
else:
hydrotable = hydrotable.append(branch_hydrotable)
# Join rating curves with elevation data
#elev_table.rename(columns={'feature_id':'fim_feature_id'}, inplace=True)
#hydrotable = hydrotable.merge(elev_table, on="HydroID")
relevant_gages = list(hydrotable.location_id.unique())
usgs_gages = usgs_gages[usgs_gages['location_id'].isin(relevant_gages)]
usgs_gages = usgs_gages.reset_index(drop=True)
if len(usgs_gages) > 0:
# Adjust rating curve to elevation
hydrotable['elevation_ft'] = (hydrotable.stage + hydrotable.dem_adj_elevation) * 3.28084 # convert from m to ft
# hydrotable['raw_elevation_ft'] = (hydrotable.stage + hydrotable.dem_elevation) * 3.28084 # convert from m to ft
hydrotable['discharge_cfs'] = hydrotable.discharge_cms * 35.3147
usgs_gages = usgs_gages.rename(columns={"flow": "discharge_cfs", "elevation_navd88": "elevation_ft"})
hydrotable['source'] = "FIM"
usgs_gages['source'] = "USGS"
limited_hydrotable = hydrotable.filter(items=['location_id','elevation_ft','discharge_cfs','source', 'HydroID', 'levpa_id', 'dem_adj_elevation'])
select_usgs_gages = usgs_gages.filter(items=['location_id', 'elevation_ft', 'discharge_cfs','source'])
if 'default_discharge_cms' in hydrotable.columns: # check if both "FIM" and "FIM_default" SRCs are available
hydrotable['default_discharge_cfs'] = hydrotable.default_discharge_cms * 35.3147
limited_hydrotable_default = hydrotable.filter(items=['location_id','elevation_ft', 'default_discharge_cfs'])
limited_hydrotable_default['discharge_cfs'] = limited_hydrotable_default.default_discharge_cfs
limited_hydrotable_default['source'] = "FIM_default"
rating_curves = limited_hydrotable.append(select_usgs_gages)
rating_curves = rating_curves.append(limited_hydrotable_default)
else:
rating_curves = limited_hydrotable.append(select_usgs_gages)
# Add stream order
stream_orders = hydrotable.filter(items=['location_id','order_']).drop_duplicates()
rating_curves = rating_curves.merge(stream_orders, on='location_id')
rating_curves['order_'] = rating_curves['order_'].astype('int')
# NWM recurr intervals
recurr_intervals = ("2","5","10","25","50","100")
recurr_dfs = []
for interval in recurr_intervals:
recurr_file = join(nwm_flow_dir, 'nwm21_17C_recurr_{}_0_cms.csv'.format(interval))
            df = pd.read_csv(recurr_file, dtype={'feature_id': str})
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import datetime
from datetime import timedelta
from functools import partial
from textwrap import dedent
from copy import deepcopy
import logbook
import toolz
from logbook import TestHandler, WARNING
from parameterized import parameterized
from six import iteritems, itervalues, string_types
from six.moves import range
from testfixtures import TempDirectory
import numpy as np
import pandas as pd
import pytz
from pandas.errors import PerformanceWarning
from trading_calendars import get_calendar, register_calendar
import zipline.api
from zipline.api import FixedSlippage
from zipline.assets import Equity, Future, Asset
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.assets.synthetic import (
make_jagged_equity_info,
make_simple_equity_info,
)
from zipline.errors import (
AccountControlViolation,
CannotOrderDelistedAsset,
IncompatibleSlippageModel,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetCancelPolicyPostInit,
SymbolNotFound,
TradingControlViolation,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
ZeroCapitalError
)
from zipline.finance.commission import PerShare, PerTrade
from zipline.finance.execution import LimitOrder
from zipline.finance.order import ORDER_STATUS
from zipline.finance.trading import SimulationParameters
from zipline.finance.asset_restrictions import (
Restriction,
HistoricalRestrictions,
StaticRestrictions,
RESTRICTION_STATES,
)
from zipline.finance.controls import AssetDateBounds
from zipline.testing import (
FakeDataPortal,
create_daily_df_for_asset,
create_data_portal_from_trade_history,
create_minute_df_for_asset,
make_test_handler,
make_trade_data_for_asset_info,
parameter_space,
str_to_seconds,
to_utc,
)
from zipline.testing import RecordBatchBlotter
import zipline.testing.fixtures as zf
from zipline.test_algorithms import (
access_account_in_init,
access_portfolio_in_init,
api_algo,
api_get_environment_algo,
api_symbol_algo,
handle_data_api,
handle_data_noop,
initialize_api,
initialize_noop,
noop_algo,
record_float_magic,
record_variables,
call_with_kwargs,
call_without_kwargs,
call_with_bad_kwargs_current,
call_with_bad_kwargs_history,
bad_type_history_assets,
bad_type_history_fields,
bad_type_history_bar_count,
bad_type_history_frequency,
bad_type_history_assets_kwarg_list,
bad_type_current_assets,
bad_type_current_fields,
bad_type_can_trade_assets,
bad_type_is_stale_assets,
bad_type_history_assets_kwarg,
bad_type_history_fields_kwarg,
bad_type_history_bar_count_kwarg,
bad_type_history_frequency_kwarg,
bad_type_current_assets_kwarg,
bad_type_current_fields_kwarg,
call_with_bad_kwargs_get_open_orders,
call_with_good_kwargs_get_open_orders,
call_with_no_kwargs_get_open_orders,
empty_positions,
no_handle_data,
)
from zipline.testing.predicates import assert_equal
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.context_tricks import CallbackManager, nop_context
from zipline.utils.events import (
date_rules,
time_rules,
Always,
ComposedRule,
Never,
OncePerDay,
)
import zipline.utils.factory as factory
# Because test cases appear to reuse some resources.
_multiprocess_can_split_ = False
class TestRecord(zf.WithMakeAlgo, zf.ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (133,)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
def test_record_incr(self):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
zipline.api.record(name, self.incr, 'name2', 2, name3=self.incr)
output = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
np.testing.assert_array_equal(output['incr'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name2'].values,
[2] * len(output))
np.testing.assert_array_equal(output['name3'].values,
range(1, len(output) + 1))
class TestMiscellaneousAPI(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'minute'
sids = 1, 2
# FIXME: Pass a benchmark source instead of this.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
return pd.concat((
make_simple_equity_info(cls.sids, '2002-02-1', '2007-01-01'),
pd.DataFrame.from_dict(
{3: {'symbol': 'PLAY',
'start_date': '2002-01-01',
'end_date': '2004-01-01',
'exchange': 'TEST'},
4: {'symbol': 'PLAY',
'start_date': '2005-01-01',
'end_date': '2006-01-01',
'exchange': 'TEST'}},
orient='index',
),
))
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
5: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'exchange': 'TEST'
},
6: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC'),
'exchange': 'TEST',
},
7: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC'),
'exchange': 'TEST',
},
8: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC'),
'exchange': 'TEST',
}
},
orient='index',
)
def test_cancel_policy_outside_init(self):
code = """
from zipline.api import cancel_policy, set_cancel_policy
def initialize(algo):
pass
def handle_data(algo, data):
set_cancel_policy(cancel_policy.NeverCancel())
"""
algo = self.make_algo(script=code)
with self.assertRaises(SetCancelPolicyPostInit):
algo.run()
def test_cancel_policy_invalid_param(self):
code = """
from zipline.api import set_cancel_policy
def initialize(algo):
set_cancel_policy("foo")
def handle_data(algo, data):
pass
"""
algo = self.make_algo(script=code)
with self.assertRaises(UnsupportedCancelPolicy):
algo.run()
def test_zipline_api_resolves_dynamically(self):
# Make a dummy algo.
algo = self.make_algo(
initialize=lambda context: None,
handle_data=lambda context, data: None,
)
# Verify that api methods get resolved dynamically by patching them out
# and then calling them
for method in algo.all_api_methods():
name = method.__name__
sentinel = object()
def fake_method(*args, **kwargs):
return sentinel
setattr(algo, name, fake_method)
with ZiplineAPI(algo):
self.assertIs(sentinel, getattr(zipline.api, name)())
def test_sid_datetime(self):
algo_text = """
from zipline.api import sid, get_datetime
def initialize(context):
pass
def handle_data(context, data):
aapl_dt = data.current(sid(1), "last_traded")
assert_equal(aapl_dt, get_datetime())
"""
self.run_algorithm(
script=algo_text,
namespace={'assert_equal': self.assertEqual},
)
def test_datetime_bad_params(self):
algo_text = """
from zipline.api import get_datetime
from pytz import timezone
def initialize(context):
pass
def handle_data(context, data):
get_datetime(timezone)
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError):
algo.run()
@parameterized.expand([
(-1000, 'invalid_base'),
(0, 'invalid_base'),
])
def test_invalid_capital_base(self, cap_base, name):
"""
Test that the appropriate error is being raised and orders aren't
filled for algos with capital base <= 0
"""
algo_text = """
def initialize(context):
pass
def handle_data(context, data):
order(sid(24), 1000)
"""
sim_params = SimulationParameters(
start_session=pd.Timestamp("2006-01-04", tz='UTC'),
end_session=pd.Timestamp("2006-01-06", tz='UTC'),
capital_base=cap_base,
data_frequency="minute",
trading_calendar=self.trading_calendar
)
with self.assertRaises(ZeroCapitalError) as exc:
# make_algo will trace to TradingAlgorithm,
# where the exception will be raised
self.make_algo(script=algo_text, sim_params=sim_params)
# Make sure the correct error was raised
error = exc.exception
self.assertEqual(str(error),
'initial capital base must be greater than zero')
def test_get_environment(self):
expected_env = {
'arena': 'backtest',
'data_frequency': 'minute',
'start': pd.Timestamp('2006-01-04 14:31:00+0000', tz='utc'),
'end': pd.Timestamp('2006-01-05 21:00:00+0000', tz='utc'),
'capital_base': 100000.0,
'platform': 'zipline'
}
def initialize(algo):
self.assertEqual('zipline', algo.get_environment())
self.assertEqual(expected_env, algo.get_environment('*'))
def handle_data(algo, data):
pass
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_get_open_orders(self):
def initialize(algo):
algo.minute = 0
def handle_data(algo, data):
if algo.minute == 0:
# Should be filled by the next minute
algo.order(algo.sid(1), 1)
# Won't be filled because the price is too low.
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [1, 2])
self.assertEqual(all_orders[1], algo.get_open_orders(1))
self.assertEqual(len(all_orders[1]), 1)
self.assertEqual(all_orders[2], algo.get_open_orders(2))
self.assertEqual(len(all_orders[2]), 3)
if algo.minute == 1:
# First order should have filled.
# Second order should still be open.
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [2])
self.assertEqual([], algo.get_open_orders(1))
orders_2 = algo.get_open_orders(2)
self.assertEqual(all_orders[2], orders_2)
self.assertEqual(len(all_orders[2]), 3)
for order_ in orders_2:
algo.cancel_order(order_)
all_orders = algo.get_open_orders()
self.assertEqual(all_orders, {})
algo.minute += 1
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_schedule_function_custom_cal(self):
        # run a simulation on the XSHG cal, and schedule functions
        # using the CN_EQUITIES calendar
algotext = """
from zipline.api import (
schedule_function, get_datetime, time_rules, date_rules, calendars,
)
def initialize(context):
schedule_function(
func=log_nyse_open,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
calendar=calendars.CN_EQUITIES,
)
schedule_function(
func=log_nyse_close,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_close(),
calendar=calendars.CN_EQUITIES,
)
context.nyse_opens = []
context.nyse_closes = []
def log_nyse_open(context, data):
context.nyse_opens.append(get_datetime())
def log_nyse_close(context, data):
context.nyse_closes.append(get_datetime())
"""
algo = self.make_algo(
script=algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("XSHG"),
)
)
algo.run()
nyse = get_calendar("XSHG")
for minute in algo.nyse_opens:
            # each minute should be a session open on this calendar
session_label = nyse.minute_to_session_label(minute)
session_open = nyse.session_open(session_label)
self.assertEqual(session_open, minute)
for minute in algo.nyse_closes:
            # each minute should be the minute before a session close
            # on this calendar
session_label = nyse.minute_to_session_label(minute)
session_close = nyse.session_close(session_label)
self.assertEqual(session_close - timedelta(minutes=1), minute)
# Test that passing an invalid calendar parameter raises an error.
erroring_algotext = dedent(
"""
from zipline.api import schedule_function
from trading_calendars import get_calendar
def initialize(context):
schedule_function(func=my_func, calendar=get_calendar('XNYS'))
def my_func(context, data):
pass
"""
)
algo = self.make_algo(
script=erroring_algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("CMES"),
),
)
with self.assertRaises(ScheduleFunctionInvalidCalendar):
algo.run()
def test_schedule_function(self):
us_eastern = pytz.timezone('US/Eastern')
def incrementer(algo, data):
algo.func_called += 1
curdt = algo.get_datetime().tz_convert(pytz.utc)
self.assertEqual(
curdt,
us_eastern.localize(
datetime.datetime.combine(
curdt.date(),
datetime.time(9, 31)
),
),
)
def initialize(algo):
algo.func_called = 0
algo.days = 1
algo.date = None
algo.schedule_function(
func=incrementer,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
)
def handle_data(algo, data):
if not algo.date:
algo.date = algo.get_datetime().date()
if algo.date < algo.get_datetime().date():
algo.days += 1
algo.date = algo.get_datetime().date()
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
)
algo.run()
self.assertEqual(algo.func_called, algo.days)
def test_event_context(self):
expected_data = []
collected_data_pre = []
collected_data_post = []
function_stack = []
def pre(data):
function_stack.append(pre)
collected_data_pre.append(data)
def post(data):
function_stack.append(post)
collected_data_post.append(data)
def initialize(context):
context.add_event(Always(), f)
context.add_event(Always(), g)
def handle_data(context, data):
function_stack.append(handle_data)
expected_data.append(data)
def f(context, data):
function_stack.append(f)
def g(context, data):
function_stack.append(g)
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
create_event_context=CallbackManager(pre, post),
)
algo.run()
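        # handle_data runs once per bar; the 480 bars here appear to
        # correspond to two sessions of 240 minutes each on this suite's
        # trading calendar. Every bar triggers pre, handle_data, f, g and
        # post, hence 480 * 5 == 2400 entries on the function stack.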
self.assertEqual(len(expected_data), 480)
self.assertEqual(collected_data_pre, expected_data)
self.assertEqual(collected_data_post, expected_data)
self.assertEqual(
len(function_stack),
2400,
'Incorrect number of functions called: %s != 2400' %
len(function_stack),
)
        expected_functions = [pre, handle_data, f, g, post] * 480
for n, (f, g) in enumerate(zip(function_stack, expected_functions)):
self.assertEqual(
f,
g,
'function at position %d was incorrect, expected %s but got %s'
% (n, g.__name__, f.__name__),
)
@parameterized.expand([
('daily',),
        ('minute',),
])
def test_schedule_function_rule_creation(self, mode):
def nop(*args, **kwargs):
return None
self.sim_params.data_frequency = mode
algo = self.make_algo(
initialize=nop,
handle_data=nop,
sim_params=self.sim_params,
)
# Schedule something for NOT Always.
# Compose two rules to ensure calendar is set properly.
algo.schedule_function(nop, time_rule=Never() & Always())
event_rule = algo.event_manager._events[1].rule
self.assertIsInstance(event_rule, OncePerDay)
self.assertEqual(event_rule.cal, algo.trading_calendar)
inner_rule = event_rule.rule
self.assertIsInstance(inner_rule, ComposedRule)
self.assertEqual(inner_rule.cal, algo.trading_calendar)
first = inner_rule.first
second = inner_rule.second
composer = inner_rule.composer
self.assertIsInstance(first, Always)
self.assertEqual(first.cal, algo.trading_calendar)
self.assertEqual(second.cal, algo.trading_calendar)
if mode == 'daily':
self.assertIsInstance(second, Always)
else:
self.assertIsInstance(second, ComposedRule)
self.assertIsInstance(second.first, Never)
self.assertEqual(second.first.cal, algo.trading_calendar)
self.assertIsInstance(second.second, Always)
self.assertEqual(second.second.cal, algo.trading_calendar)
self.assertIs(composer, ComposedRule.lazy_and)
def test_asset_lookup(self):
algo = self.make_algo()
# this date doesn't matter
start_session = pd.Timestamp("2000-01-01", tz="UTC")
# Test before either PLAY existed
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2001-12-01', tz='UTC')
)
with self.assertRaises(SymbolNotFound):
algo.symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.symbols('PLAY')
# Test when first PLAY exists
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2002-12-01', tz='UTC')
)
list_result = algo.symbols('PLAY')
self.assertEqual(3, list_result[0])
# Test after first PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2004-12-01', tz='UTC')
)
self.assertEqual(3, algo.symbol('PLAY'))
# Test after second PLAY begins
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2005-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
# Test after second PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2006-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
list_result = algo.symbols('PLAY')
self.assertEqual(4, list_result[0])
# Test lookup SID
self.assertIsInstance(algo.sid(3), Equity)
self.assertIsInstance(algo.sid(4), Equity)
# Supplying a non-string argument to symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.symbol(1)
with self.assertRaises(TypeError):
algo.symbol((1,))
with self.assertRaises(TypeError):
algo.symbol({1})
with self.assertRaises(TypeError):
algo.symbol([1])
with self.assertRaises(TypeError):
algo.symbol({'foo': 'bar'})
def test_future_symbol(self):
""" Tests the future_symbol API function.
"""
algo = self.make_algo()
algo.datetime = pd.Timestamp('2006-12-01', tz='UTC')
# Check that we get the correct fields for the CLG06 symbol
cl = algo.future_symbol('CLG06')
self.assertEqual(cl.sid, 5)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
algo.future_symbol('')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('FOOBAR')
# Supplying a non-string argument to future_symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.future_symbol(1)
with self.assertRaises(TypeError):
algo.future_symbol((1,))
with self.assertRaises(TypeError):
algo.future_symbol({1})
with self.assertRaises(TypeError):
algo.future_symbol([1])
with self.assertRaises(TypeError):
algo.future_symbol({'foo': 'bar'})
class TestSetSymbolLookupDate(zf.WithMakeAlgo, zf.ZiplineTestCase):
# January 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30 31
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-06', tz='UTC')
SIM_PARAMS_START_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
BENCHMARK_SID = 3
@classmethod
def make_equity_info(cls):
        dates = pd.date_range(cls.START_DATE, periods=4)
        assert len(dates) == 4, "Expected four dates."
# Two assets with the same ticker, ending on days[1] and days[3], plus
# a benchmark that spans the whole period.
cls.sids = [1, 2, 3]
cls.asset_starts = [dates[0], dates[2]]
cls.asset_ends = [dates[1], dates[3]]
return pd.DataFrame.from_records([
{'symbol': 'DUP',
'start_date': cls.asset_starts[0],
'end_date': cls.asset_ends[0],
'exchange': 'TEST',
'asset_name': 'FIRST'},
{'symbol': 'DUP',
'start_date': cls.asset_starts[1],
'end_date': cls.asset_ends[1],
'exchange': 'TEST',
'asset_name': 'SECOND'},
{'symbol': 'BENCH',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'exchange': 'TEST',
'asset_name': 'BENCHMARK'},
], index=cls.sids)
def test_set_symbol_lookup_date(self):
"""
Test the set_symbol_lookup_date API method.
"""
set_symbol_lookup_date = zipline.api.set_symbol_lookup_date
def initialize(context):
set_symbol_lookup_date(self.asset_ends[0])
self.assertEqual(zipline.api.symbol('DUP').sid, self.sids[0])
set_symbol_lookup_date(self.asset_ends[1])
self.assertEqual(zipline.api.symbol('DUP').sid, self.sids[1])
with self.assertRaises(UnsupportedDatetimeFormat):
set_symbol_lookup_date('foobar')
self.run_algorithm(initialize=initialize)
class TestPositions(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2020-09-01', tz='utc')
END_DATE = pd.Timestamp('2020-09-04', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 1000
ASSET_FINDER_EQUITY_SIDS = (1, 133)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
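        # Daily closes step up by 5 each session (90, 95, 100, 105); the
        # position-weight expectations below are derived from these prices.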
frame = pd.DataFrame(
{
'open': [90, 95, 100, 105],
'high': [90, 95, 100, 105],
'low': [90, 95, 100, 105],
'close': [90, 95, 100, 105],
'volume': 100,
},
index=cls.equity_daily_bar_days,
)
return ((sid, frame) for sid in sids)
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
1000: {
'symbol': 'CLF06',
'root_symbol': 'CL',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'auto_close_date': cls.END_DATE + cls.trading_calendar.day,
'exchange': 'CMES',
'multiplier': 100,
},
},
orient='index',
)
@classmethod
def make_future_minute_bar_data(cls):
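        # Constant 2.0 prices for the futures contract, so its notional value
        # (price * multiplier of 100) stays at 200.0 throughout the test.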
trading_calendar = cls.trading_calendars[Future]
sids = cls.asset_finder.futures_sids
minutes = trading_calendar.minutes_for_sessions_in_range(
cls.future_minute_bar_days[0],
cls.future_minute_bar_days[-1],
)
frame = pd.DataFrame(
{
'open': 2.0,
'high': 2.0,
'low': 2.0,
'close': 2.0,
'volume': 100,
},
index=minutes,
)
return ((sid, frame) for sid in sids)
def test_portfolio_exited_position(self):
        # This test ensures that 'phantom' positions do not appear in
# context.portfolio.positions in the case that a position has been
# entered and fully exited.
def initialize(context, sids):
context.ordered = False
context.exited = False
context.sids = sids
def handle_data(context, data):
if not context.ordered:
for s in context.sids:
context.order(context.sid(s), 1)
context.ordered = True
if not context.exited:
amounts = [pos.amount for pos
in itervalues(context.portfolio.positions)]
if (
len(amounts) > 0 and
all([(amount == 1) for amount in amounts])
):
for stock in context.portfolio.positions:
context.order(context.sid(stock), -1)
context.exited = True
# Should be 0 when all positions are exited.
context.record(num_positions=len(context.portfolio.positions))
result = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
sids=self.ASSET_FINDER_EQUITY_SIDS,
)
expected_position_count = [
0, # Before entering the first position
2, # After entering, exiting on this date
0, # After exiting
0,
]
for i, expected in enumerate(expected_position_count):
self.assertEqual(result.iloc[i,:]['num_positions'], expected)
def test_noop_orders(self):
asset = self.asset_finder.retrieve_asset(1)
# Algorithm that tries to buy with extremely low stops/limits and tries
# to sell with extremely high versions of same. Should not end up with
# any positions for reasonable data.
def handle_data(algo, data):
########
# Buys #
########
# Buy with low limit, shouldn't trigger.
algo.order(asset, 100, limit_price=1)
            # Buy with high stop, shouldn't trigger.
algo.order(asset, 100, stop_price=10000000)
# Buy with high limit (should trigger) but also high stop (should
# prevent trigger).
algo.order(asset, 100, limit_price=10000000, stop_price=10000000)
# Buy with low stop (should trigger), but also low limit (should
# prevent trigger).
algo.order(asset, 100, limit_price=1, stop_price=1)
#########
# Sells #
#########
# Sell with high limit, shouldn't trigger.
algo.order(asset, -100, limit_price=1000000)
# Sell with low stop, shouldn't trigger.
algo.order(asset, -100, stop_price=1)
# Sell with low limit (should trigger), but also high stop (should
# prevent trigger).
algo.order(asset, -100, limit_price=1000000, stop_price=1000000)
# Sell with low limit (should trigger), but also low stop (should
# prevent trigger).
algo.order(asset, -100, limit_price=1, stop_price=1)
###################
# Rounding Checks #
###################
algo.order(asset, 100, limit_price=.00000001)
algo.order(asset, -100, stop_price=.00000001)
daily_stats = self.run_algorithm(handle_data=handle_data)
# Verify that positions are empty for all dates.
empty_positions = daily_stats.positions.map(lambda x: len(x) == 0)
self.assertTrue(empty_positions.all())
def test_position_weights(self):
sids = (1, 133, 1000)
equity_1, equity_133, future_1000 = \
self.asset_finder.retrieve_all(sids)
def initialize(algo, sids_and_amounts, *args, **kwargs):
algo.ordered = False
algo.sids_and_amounts = sids_and_amounts
algo.set_commission(
us_equities=PerTrade(0), us_futures=PerTrade(0),
)
algo.set_slippage(
us_equities=FixedSlippage(0),
us_futures=FixedSlippage(0),
)
def handle_data(algo, data):
if not algo.ordered:
for s, amount in algo.sids_and_amounts:
algo.order(algo.sid(s), amount)
algo.ordered = True
algo.record(
position_weights=algo.portfolio.current_portfolio_weights,
)
daily_stats = self.run_algorithm(
sids_and_amounts=zip(sids, [2, -1, 1]),
initialize=initialize,
handle_data=handle_data,
)
expected_position_weights = [
# No positions held on the first day.
pd.Series({}),
# Each equity's position value is its price times the number of
# shares held. In this example, we hold a long position in 2 shares
# of equity_1 so its weight is (95.0 * 2) = 190.0 divided by the
# total portfolio value. The total portfolio value is the sum of
# cash ($905.00) plus the value of all equity positions.
#
# For a futures contract, its weight is the unit price times number
# of shares held times the multiplier. For future_1000, this is
# (2.0 * 1 * 100) = 200.0 divided by total portfolio value.
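            #
            # Worked example for the second day (fills at 95 with zero
            # commission and slippage): cash = 1000 - 2 * 95 + 95 = 905,
            # equity value = 190 - 95 = 95, so the portfolio value is
            # 905 + 95 = 1000. The futures position affects cash only through
            # commissions, which are zeroed here, so it does not change the
            # denominator.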
pd.Series({
equity_1: 190.0 / (190.0 - 95.0 + 905.0),
equity_133: -95.0 / (190.0 - 95.0 + 905.0),
future_1000: 200.0 / (190.0 - 95.0 + 905.0),
}),
pd.Series({
equity_1: 200.0 / (200.0 - 100.0 + 905.0),
equity_133: -100.0 / (200.0 - 100.0 + 905.0),
future_1000: 200.0 / (200.0 - 100.0 + 905.0),
}),
pd.Series({
equity_1: 210.0 / (210.0 - 105.0 + 905.0),
equity_133: -105.0 / (210.0 - 105.0 + 905.0),
future_1000: 200.0 / (210.0 - 105.0 + 905.0),
}),
]
for i, expected in enumerate(expected_position_weights):
assert_equal(daily_stats.iloc[i]['position_weights'], expected)
class TestBeforeTradingStart(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-06', tz='utc')
END_DATE = pd.Timestamp('2016-01-07', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 10000
SIM_PARAMS_DATA_FREQUENCY = 'minute'
EQUITY_DAILY_BAR_LOOKBACK_DAYS = EQUITY_MINUTE_BAR_LOOKBACK_DAYS = 1
DATA_PORTAL_FIRST_TRADING_DAY = pd.Timestamp("2016-01-05", tz='UTC')
EQUITY_MINUTE_BAR_START_DATE = pd.Timestamp("2016-01-05", tz='UTC')
FUTURE_MINUTE_BAR_START_DATE = pd.Timestamp("2016-01-05", tz='UTC')
data_start = ASSET_FINDER_EQUITY_START_DATE = pd.Timestamp(
'2016-01-05',
tz='utc',
)
SPLIT_ASSET_SID = 3
ASSET_FINDER_EQUITY_SIDS = 1, 2, SPLIT_ASSET_SID
@classmethod
def make_equity_minute_bar_data(cls):
asset_minutes = \
cls.trading_calendar.minutes_in_range(
cls.data_start,
cls.END_DATE,
)
minutes_count = len(asset_minutes)
minutes_arr = np.arange(minutes_count) + 1
split_data = pd.DataFrame(
{
'open': minutes_arr + 1,
'high': minutes_arr + 2,
'low': minutes_arr - 1,
'close': minutes_arr,
'volume': 100 * minutes_arr,
},
index=asset_minutes,
)
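        # The sid-3 asset has a 2:1 split effective 2016-01-07 (ratio 0.5 in
        # make_splits_data below). Bars from index 480 onward, which appears
        # to be where the 2016-01-07 session starts on this calendar, are
        # halved so the raw data already reflects post-split prices.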
split_data.iloc[480:] = split_data.iloc[480:] / 2.0
for sid in (1, 8554):
yield sid, create_minute_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
)
yield 2, create_minute_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
50,
)
yield cls.SPLIT_ASSET_SID, split_data
@classmethod
def make_splits_data(cls):
return pd.DataFrame.from_records([
{
'effective_date': str_to_seconds('2016-01-07'),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
}
])
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
for sid in sids:
yield sid, create_daily_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
)
def test_data_in_bts_minute(self):
algo_code = dedent("""
from zipline.api import record, sid
def initialize(context):
context.history_values = []
def before_trading_start(context, data):
record(the_price1=data.current(sid(1), "price"))
record(the_high1=data.current(sid(1), "high"))
record(the_price2=data.current(sid(2), "price"))
record(the_high2=data.current(sid(2), "high"))
context.history_values.append(data.history(
[sid(1), sid(2)],
["price", "high"],
60,
"1m"
))
def handle_data(context, data):
pass
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# fetching data at midnight gets us the previous market minute's data
self.assertEqual(240, results.iloc[0].the_price1)
self.assertEqual(242, results.iloc[0].the_high1)
# make sure that price is ffilled, but not other fields
self.assertEqual(350, results.iloc[0].the_price2)
self.assertTrue(np.isnan(results.iloc[0].the_high2))
        # 60-minute history requested in before_trading_start
# asset1 day1 price should be 331-390
np.testing.assert_array_equal(
range(331, 391), algo.history_values[0]["price"][1]
)
# asset1 day1 high should be 333-392
np.testing.assert_array_equal(
range(333, 393), algo.history_values[0]["high"][1]
)
# asset2 day1 price should be 19 300s, then 40 350s
np.testing.assert_array_equal(
[300] * 19, algo.history_values[0]["price"][2][0:19]
)
np.testing.assert_array_equal(
[350] * 40, algo.history_values[0]["price"][2][20:]
)
# asset2 day1 high should be all NaNs except for the 19th item
# = 2016-01-05 20:20:00+00:00
np.testing.assert_array_equal(
np.full(19, np.nan), algo.history_values[0]["high"][2][0:19]
)
self.assertEqual(352, algo.history_values[0]["high"][2][19])
np.testing.assert_array_equal(
np.full(40, np.nan), algo.history_values[0]["high"][2][20:]
)
def test_data_in_bts_daily(self):
algo_code = dedent("""
from zipline.api import record, sid
def initialize(context):
context.history_values = []
def before_trading_start(context, data):
record(the_price1=data.current(sid(1), "price"))
record(the_high1=data.current(sid(1), "high"))
record(the_price2=data.current(sid(2), "price"))
record(the_high2=data.current(sid(2), "high"))
context.history_values.append(data.history(
[sid(1), sid(2)],
["price", "high"],
1,
"1d",
))
def handle_data(context, data):
pass
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
self.assertEqual(392, results.the_high1[0])
self.assertEqual(390, results.the_price1[0])
# nan because asset2 only trades every 50 minutes
self.assertTrue(np.isnan(results.the_high2[0]))
        self.assertEqual(350, results.the_price2[0])
self.assertEqual(392, algo.history_values[0]["high"][1][0])
self.assertEqual(390, algo.history_values[0]["price"][1][0])
self.assertEqual(352, algo.history_values[0]["high"][2][0])
self.assertEqual(350, algo.history_values[0]["price"][2][0])
def test_portfolio_bts(self):
algo_code = dedent("""
from zipline.api import order, sid, record
def initialize(context):
context.ordered = False
context.hd_portfolio = context.portfolio
def before_trading_start(context, data):
bts_portfolio = context.portfolio
# Assert that the portfolio in BTS is the same as the last
# portfolio in handle_data
assert (context.hd_portfolio == bts_portfolio)
record(pos_value=bts_portfolio.positions_value)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_portfolio = context.portfolio
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# Asset starts with price 1 on 1/05 and increases by 1 every minute.
# Simulation starts on 1/06, where the price in bts is 390, and
# positions_value is 0. On 1/07, price is 780, and after buying one
# share on the first bar of 1/06, positions_value is 780
self.assertEqual(results.pos_value.iloc[0], 0)
self.assertEqual(results.pos_value.iloc[1], 780)
def test_account_bts(self):
algo_code = dedent("""
from zipline.api import order, sid, record, set_slippage, slippage
def initialize(context):
context.ordered = False
context.hd_account = context.account
set_slippage(slippage.VolumeShareSlippage())
def before_trading_start(context, data):
bts_account = context.account
# Assert that the account in BTS is the same as the last account
# in handle_data
assert (context.hd_account == bts_account)
record(port_value=context.account.equity_with_loan)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_account = context.account
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# Starting portfolio value is 10000. Order for the asset fills on the
        # second bar of 1/06, where the price is 392, and costs the default
# commission of 0. On 1/07, the price is 780, and the increase in
# portfolio value is 780-392-0
self.assertEqual(results.port_value.iloc[0], 10000)
self.assertAlmostEqual(results.port_value.iloc[1],
10000 + 780 - 392 - 0,
places=2)
def test_portfolio_bts_with_overnight_split(self):
algo_code = dedent("""
from zipline.api import order, sid, record
def initialize(context):
context.ordered = False
context.hd_portfolio = context.portfolio
def before_trading_start(context, data):
bts_portfolio = context.portfolio
# Assert that the portfolio in BTS is the same as the last
# portfolio in handle_data, except for the positions
for k in bts_portfolio.__dict__:
if k != 'positions':
assert (context.hd_portfolio.__dict__[k]
== bts_portfolio.__dict__[k])
record(pos_value=bts_portfolio.positions_value)
record(pos_amount=bts_portfolio.positions[sid(3)].amount)
record(
last_sale_price=bts_portfolio.positions[sid(3)].last_sale_price
)
def handle_data(context, data):
if not context.ordered:
order(sid(3), 1)
context.ordered = True
context.hd_portfolio = context.portfolio
""")
results = self.run_algorithm(script=algo_code)
        # On 1/07, positions value should be 780, same as without split
self.assertEqual(results.pos_value.iloc[0], 0)
self.assertEqual(results.pos_value.iloc[1], 780)
# On 1/07, after applying the split, 1 share becomes 2
self.assertEqual(results.pos_amount.iloc[0], 0)
self.assertEqual(results.pos_amount.iloc[1], 2)
# On 1/07, after applying the split, last sale price is halved
self.assertEqual(results.last_sale_price.iloc[0], 0)
self.assertEqual(results.last_sale_price.iloc[1], 390)
def test_account_bts_with_overnight_split(self):
algo_code = dedent("""
from zipline.api import order, sid, record, set_slippage, slippage
def initialize(context):
context.ordered = False
context.hd_account = context.account
set_slippage(slippage.VolumeShareSlippage())
def before_trading_start(context, data):
bts_account = context.account
# Assert that the account in BTS is the same as the last account
# in handle_data
assert (context.hd_account == bts_account)
record(port_value=bts_account.equity_with_loan)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_account = context.account
""")
results = self.run_algorithm(script=algo_code)
# On 1/07, portfolio value is the same as without split
self.assertEqual(results.port_value.iloc[0], 10000)
self.assertAlmostEqual(results.port_value.iloc[1],
10000 + 780 - 392 - 0, places=2)
class TestAlgoScript(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='utc')
END_DATE = pd.Timestamp('2006-12-31', tz='utc')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
EQUITY_DAILY_BAR_LOOKBACK_DAYS = 5 # max history window length
STRING_TYPE_NAMES = [s.__name__ for s in string_types]
STRING_TYPE_NAMES_STRING = ', '.join(STRING_TYPE_NAMES)
ASSET_TYPE_NAME = Asset.__name__
CONTINUOUS_FUTURE_NAME = ContinuousFuture.__name__
ASSET_OR_STRING_TYPE_NAMES = ', '.join([ASSET_TYPE_NAME] +
STRING_TYPE_NAMES)
ASSET_OR_STRING_OR_CF_TYPE_NAMES = ', '.join([ASSET_TYPE_NAME,
CONTINUOUS_FUTURE_NAME] +
STRING_TYPE_NAMES)
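    # Each case is (name, (script making a badly-typed data.* call, the type
    # name(s) expected in the error message, whether an iterable of that type
    # is also accepted)). Consumed by test_arg_types below.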
ARG_TYPE_TEST_CASES = (
('history__assets', (bad_type_history_assets,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history__fields', (bad_type_history_fields,
STRING_TYPE_NAMES_STRING,
True)),
('history__bar_count', (bad_type_history_bar_count, 'int', False)),
('history__frequency', (bad_type_history_frequency,
STRING_TYPE_NAMES_STRING,
False)),
('current__assets', (bad_type_current_assets,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('current__fields', (bad_type_current_fields,
STRING_TYPE_NAMES_STRING,
True)),
('is_stale__assets', (bad_type_is_stale_assets, 'Asset', True)),
('can_trade__assets', (bad_type_can_trade_assets, 'Asset', True)),
('history_kwarg__assets',
(bad_type_history_assets_kwarg,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history_kwarg_bad_list__assets',
(bad_type_history_assets_kwarg_list,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history_kwarg__fields',
(bad_type_history_fields_kwarg, STRING_TYPE_NAMES_STRING, True)),
('history_kwarg__bar_count',
(bad_type_history_bar_count_kwarg, 'int', False)),
('history_kwarg__frequency',
(bad_type_history_frequency_kwarg, STRING_TYPE_NAMES_STRING, False)),
('current_kwarg__assets',
(bad_type_current_assets_kwarg,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('current_kwarg__fields',
(bad_type_current_fields_kwarg, STRING_TYPE_NAMES_STRING, True)),
)
sids = 0, 1, 3, 133
# FIXME: Pass a benchmark explicitly here.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
register_calendar("TEST", get_calendar("NYSE"), force=True)
data = make_simple_equity_info(
cls.sids,
cls.START_DATE,
cls.END_DATE,
)
data.loc[3, 'symbol'] = 'TEST'
return data
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
cal = cls.trading_calendars[Equity]
sessions = cal.sessions_in_range(cls.START_DATE, cls.END_DATE)
frame = pd.DataFrame({
'close': 10., 'high': 10.5, 'low': 9.5, 'open': 10., 'volume': 100,
}, index=sessions)
for sid in sids:
yield sid, frame
def test_noop(self):
self.run_algorithm(
initialize=initialize_noop,
handle_data=handle_data_noop,
)
def test_noop_string(self):
self.run_algorithm(script=noop_algo)
def test_no_handle_data(self):
self.run_algorithm(script=no_handle_data)
def test_api_calls(self):
self.run_algorithm(
initialize=initialize_api,
handle_data=handle_data_api,
)
def test_api_calls_string(self):
self.run_algorithm(script=api_algo)
def test_api_get_environment(self):
platform = 'zipline'
algo = self.make_algo(
script=api_get_environment_algo,
platform=platform,
)
algo.run()
self.assertEqual(algo.environment, platform)
def test_api_symbol(self):
self.run_algorithm(script=api_symbol_algo)
def test_fixed_slippage(self):
# verify order -> transaction -> portfolio position.
# --------------
test_algo = self.make_algo(
script="""
from zipline.api import (slippage,
commission,
set_slippage,
set_commission,
order,
record,
sid)
def initialize(context):
model = slippage.FixedSlippage(spread=0.10)
set_slippage(model)
set_commission(commission.PerTrade(100.00))
context.count = 1
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
order(sid(0), -1000)
record(price=data.current(sid(0), "price"))
context.incr += 1""",
)
results = test_algo.run()
# flatten the list of txns
all_txns = [val for sublist in results["transactions"].tolist()
for val in sublist]
self.assertEqual(len(all_txns), 1)
txn = all_txns[0]
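        # FixedSlippage fills at the bar price adjusted by half the configured
        # spread per side, so this sell executes spread / 2 = 0.05 below the
        # recorded price.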
expected_spread = 0.05
expected_price = test_algo.recorded_vars["price"] - expected_spread
self.assertEqual(expected_price, txn['price'])
# make sure that the $100 commission was applied to our cash
# the txn was for -1000 shares at 9.95, means -9.95k. our capital_used
# for that day was therefore 9.95k, but after the $100 commission,
# it should be 9.85k.
self.assertEqual(9850, results.capital_used[1])
self.assertEqual(100, results["orders"].iloc[1][0]["commission"])
@parameterized.expand(
[
('no_minimum_commission', 0,),
('default_minimum_commission', 0,),
('alternate_minimum_commission', 2,),
]
)
def test_volshare_slippage(self, name, minimum_commission):
tempdir = TempDirectory()
try:
if name == "default_minimum_commission":
commission_line = "set_commission(commission.PerShare(0.02))"
else:
commission_line = \
"set_commission(commission.PerShare(0.02, " \
"min_trade_cost={0}))".format(minimum_commission)
# verify order -> transaction -> portfolio position.
# --------------
# XXX: This is the last remaining consumer of
# create_daily_trade_source.
trades = factory.create_daily_trade_source(
[0], self.sim_params, self.asset_finder, self.trading_calendar
)
data_portal = create_data_portal_from_trade_history(
self.asset_finder, self.trading_calendar, tempdir,
self.sim_params, {0: trades}
)
test_algo = self.make_algo(
data_portal=data_portal,
script="""
from zipline.api import *
def initialize(context):
model = slippage.VolumeShareSlippage(
volume_limit=.3,
price_impact=0.05
)
set_slippage(model)
{0}
context.count = 2
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
# order small lots to be sure the
# order will fill in a single transaction
order(sid(0), 5000)
record(price=data.current(sid(0), "price"))
record(volume=data.current(sid(0), "volume"))
record(incr=context.incr)
context.incr += 1
""".format(commission_line),
)
results = test_algo.run()
all_txns = [
val for sublist in results["transactions"].tolist()
for val in sublist]
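            # VolumeShareSlippage(volume_limit=.3) caps each fill at 30% of a
            # bar's volume, so the two 5000-share orders are filled
            # incrementally, one transaction per bar with a partial fill.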
self.assertEqual(len(all_txns), 67)
# all_orders are all the incremental versions of the
# orders as each new fill comes in.
all_orders = list(toolz.concat(results['orders']))
if minimum_commission == 0:
# for each incremental version of each order, the commission
# should be its filled amount * 0.02
for order_ in all_orders:
self.assertAlmostEqual(
order_["filled"] * 0.02,
order_["commission"]
)
else:
# the commission should be at least the min_trade_cost
for order_ in all_orders:
if order_["filled"] > 0:
self.assertAlmostEqual(
max(order_["filled"] * 0.02, minimum_commission),
order_["commission"]
)
else:
self.assertEqual(0, order_["commission"])
finally:
tempdir.cleanup()
def test_incorrectly_set_futures_slippage_model(self):
code = dedent(
"""
from zipline.api import set_slippage, slippage
class MySlippage(slippage.FutureSlippageModel):
def process_order(self, data, order):
return data.current(order.asset, 'price'), order.amount
def initialize(context):
set_slippage(MySlippage())
"""
)
test_algo = self.make_algo(script=code)
with self.assertRaises(IncompatibleSlippageModel):
# Passing a futures slippage model as the first argument, which is
# for setting equity models, should fail.
test_algo.run()
def test_algo_record_vars(self):
test_algo = self.make_algo(script=record_variables)
results = test_algo.run()
for i in range(1, 252):
self.assertEqual(results.iloc[i-1]["incr"], i)
def test_algo_record_nan(self):
test_algo = self.make_algo(script=record_float_magic % 'nan')
results = test_algo.run()
for i in range(1, 252):
self.assertTrue(np.isnan(results.iloc[i-1]["data"]))
def test_batch_market_order_matches_multiple_manual_orders(self):
share_counts = pd.Series([50, 100])
multi_blotter = RecordBatchBlotter()
multi_test_algo = self.make_algo(
script=dedent("""\
from collections import OrderedDict
from six import iteritems
from zipline.api import sid, order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
it = zip(context.assets, {share_counts})
for asset, shares in it:
order(asset, shares)
context.placed = True
""").format(share_counts=list(share_counts)),
blotter=multi_blotter,
)
multi_stats = multi_test_algo.run()
self.assertFalse(multi_blotter.order_batch_called)
batch_blotter = RecordBatchBlotter()
batch_test_algo = self.make_algo(
script=dedent("""\
import pandas as pd
from zipline.api import sid, batch_market_order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
orders = batch_market_order(pd.Series(
index=context.assets, data={share_counts}
))
assert len(orders) == 2, \
"len(orders) was %s but expected 2" % len(orders)
for o in orders:
assert o is not None, "An order is None"
context.placed = True
""").format(share_counts=list(share_counts)),
blotter=batch_blotter,
)
batch_stats = batch_test_algo.run()
self.assertTrue(batch_blotter.order_batch_called)
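        # Order and transaction ids differ between the two runs, so strip them
        # before comparing the remaining per-day records.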
for stats in (multi_stats, batch_stats):
stats.orders = stats.orders.apply(
lambda orders: [toolz.dissoc(o, 'id') for o in orders]
)
stats.transactions = stats.transactions.apply(
lambda txns: [toolz.dissoc(txn, 'order_id') for txn in txns]
)
assert_equal(multi_stats, batch_stats)
def test_batch_market_order_filters_null_orders(self):
share_counts = [50, 0]
batch_blotter = RecordBatchBlotter()
batch_test_algo = self.make_algo(
script=dedent("""\
import pandas as pd
from zipline.api import sid, batch_market_order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
orders = batch_market_order(pd.Series(
index=context.assets, data={share_counts}
))
assert len(orders) == 1, \
"len(orders) was %s but expected 1" % len(orders)
for o in orders:
assert o is not None, "An order is None"
context.placed = True
""").format(share_counts=share_counts),
blotter=batch_blotter,
)
batch_test_algo.run()
self.assertTrue(batch_blotter.order_batch_called)
def test_order_dead_asset(self):
# after asset 0 is dead
params = SimulationParameters(
start_session=pd.Timestamp("2007-01-03", tz='UTC'),
end_session=pd.Timestamp("2007-01-05", tz='UTC'),
trading_calendar=self.trading_calendar,
)
# order method shouldn't blow up
self.run_algorithm(
script="""
from zipline.api import order, sid
def initialize(context):
pass
def handle_data(context, data):
order(sid(0), 10)
""",
            sim_params=params,
        )
# order_value and order_percent should blow up
for order_str in ["order_value", "order_percent"]:
test_algo = self.make_algo(
script="""
from zipline.api import order_percent, order_value, sid
def initialize(context):
pass
def handle_data(context, data):
{0}(sid(0), 10)
""".format(order_str),
sim_params=params,
)
with self.assertRaises(CannotOrderDelistedAsset):
test_algo.run()
def test_portfolio_in_init(self):
"""
Test that accessing portfolio in init doesn't break.
"""
self.run_algorithm(script=access_portfolio_in_init)
def test_account_in_init(self):
"""
Test that accessing account in init doesn't break.
"""
self.run_algorithm(script=access_account_in_init)
def test_without_kwargs(self):
"""
Test that api methods on the data object can be called with positional
arguments.
"""
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(sim_params=params, script=call_without_kwargs)
def test_good_kwargs(self):
"""
Test that api methods on the data object can be called with keyword
arguments.
"""
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(script=call_with_kwargs, sim_params=params)
@parameterized.expand([('history', call_with_bad_kwargs_history),
('current', call_with_bad_kwargs_current)])
def test_bad_kwargs(self, name, algo_text):
"""
Test that api methods on the data object called with bad kwargs return
a meaningful TypeError that we create, rather than an unhelpful cython
error
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError) as cm:
algo.run()
self.assertEqual("%s() got an unexpected keyword argument 'blahblah'"
% name, cm.exception.args[0])
@parameterized.expand(ARG_TYPE_TEST_CASES)
def test_arg_types(self, name, inputs):
keyword = name.split('__')[1]
algo = self.make_algo(script=inputs[0])
with self.assertRaises(TypeError) as cm:
algo.run()
expected = "Expected %s argument to be of type %s%s" % (
keyword,
'or iterable of type ' if inputs[2] else '',
inputs[1]
)
self.assertEqual(expected, cm.exception.args[0])
def test_empty_asset_list_to_history(self):
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(
script=dedent("""
def initialize(context):
pass
def handle_data(context, data):
data.history([], "price", 5, '1d')
"""),
sim_params=params,
)
@parameterized.expand(
[('bad_kwargs', call_with_bad_kwargs_get_open_orders),
('good_kwargs', call_with_good_kwargs_get_open_orders),
('no_kwargs', call_with_no_kwargs_get_open_orders)]
)
def test_get_open_orders_kwargs(self, name, script):
algo = self.make_algo(script=script)
if name == 'bad_kwargs':
with self.assertRaises(TypeError) as cm:
algo.run()
self.assertEqual('Keyword argument `sid` is no longer '
'supported for get_open_orders. Use `asset` '
'instead.', cm.exception.args[0])
else:
algo.run()
def test_empty_positions(self):
"""
Test that when we try context.portfolio.positions[stock] on a stock
for which we have no positions, we return a Position with values 0
(but more importantly, we don't crash) and don't save this Position
to the user-facing dictionary PositionTracker._positions_store
"""
results = self.run_algorithm(script=empty_positions)
num_positions = results.num_positions
amounts = results.amounts
self.assertTrue(all(num_positions == 0))
self.assertTrue(all(amounts == 0))
def test_schedule_function_time_rule_positionally_misplaced(self):
"""
Test that when a user specifies a time rule for the date_rule argument,
but no rule in the time_rule argument
        (e.g. schedule_function(func, <time_rule>)), we assume they meant to
        pass a time rule but no date rule, and warn accordingly
"""
sim_params = factory.create_simulation_parameters(
start=pd.Timestamp('2006-01-12', tz='UTC'),
end=pd.Timestamp('2006-01-13', tz='UTC'),
data_frequency='minute'
)
        algocode = dedent("""
        from zipline.api import time_rules, schedule_function

        def do_at_open(context, data):
            context.done_at_open.append(context.get_datetime())

        def do_at_close(context, data):
            context.done_at_close.append(context.get_datetime())

        def initialize(context):
            context.done_at_open = []
            context.done_at_close = []
            schedule_function(do_at_open, time_rules.market_open())
            schedule_function(do_at_close, time_rules.market_close())

        def handle_data(algo, data):
            pass
        """)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", PerformanceWarning)
algo = self.make_algo(script=algocode, sim_params=sim_params)
algo.run()
self.assertEqual(len(w), 2)
for i, warning in enumerate(w):
self.assertIsInstance(warning.message, UserWarning)
self.assertEqual(
warning.message.args[0],
'Got a time rule for the second positional argument '
'date_rule. You should use keyword argument '
'time_rule= when calling schedule_function without '
'specifying a date_rule'
)
                # The warnings come from lines 13 and 14 in the algocode
self.assertEqual(warning.lineno, 13 + i)
self.assertEqual(
algo.done_at_open,
[pd.Timestamp('2006-01-12 14:31:00', tz='UTC'),
pd.Timestamp('2006-01-13 14:31:00', tz='UTC')]
)
self.assertEqual(
algo.done_at_close,
[pd.Timestamp('2006-01-12 20:59:00', tz='UTC'),
pd.Timestamp('2006-01-13 20:59:00', tz='UTC')]
)
class TestCapitalChanges(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-09', tz='UTC')
# XXX: This suite only has daily data for sid 0 and only has minutely data
# for sid 1.
sids = ASSET_FINDER_EQUITY_SIDS = (0, 1)
DAILY_SID = 0
MINUTELY_SID = 1
# FIXME: Pass a benchmark source explicitly here.
BENCHMARK_SID = None
@classmethod
def make_equity_minute_bar_data(cls):
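        # Minute closes for the minutely sid start at 100 and rise by 1 each
        # minute; the capital-change expectations below are derived from this.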
minutes = cls.trading_calendar.minutes_in_range(
cls.START_DATE,
cls.END_DATE,
)
closes = np.arange(100, 100 + len(minutes), 1)
opens = closes
highs = closes + 5
lows = closes - 5
frame = pd.DataFrame(
index=minutes,
data={
'open': opens,
'high': highs,
'low': lows,
'close': closes,
'volume': 10000,
},
)
yield cls.MINUTELY_SID, frame
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
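        # Daily closes for the daily sid start at 10.0 and rise by 1.0 each
        # session, which drives the daily-mode expectations below.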
days = cls.trading_calendar.sessions_in_range(
cls.START_DATE,
cls.END_DATE,
)
closes = np.arange(10.0, 10.0 + len(days), 1.0)
opens = closes
highs = closes + 0.5
lows = closes - 0.5
frame = pd.DataFrame(
index=days,
data={
'open': opens,
'high': highs,
'low': lows,
'close': closes,
'volume': 10000,
},
)
yield cls.DAILY_SID, frame
@parameterized.expand([
('target', 151000.0), ('delta', 50000.0)
])
def test_capital_changes_daily_mode(self, change_type, value):
capital_changes = {
pd.Timestamp('2006-01-06', tz='UTC'):
{'type': change_type, 'value': value}
}
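        # Per the expected values below, the portfolio is worth 101000.0 at
        # the end of the third session (100000.0 starting cash plus 1000.0 of
        # unrealized pnl), so a 'target' of 151000.0 is equivalent to a
        # 'delta' of +50000.0; both parameterizations expect the same packet.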
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(0), 1000)
"""
algo = self.make_algo(
script=algocode,
capital_changes=capital_changes,
sim_params=SimulationParameters(
start_session=self.START_DATE,
end_session=self.END_DATE,
trading_calendar=self.nyse_calendar,
)
)
# We call get_generator rather than `run()` here because we care about
# the raw capital change packets.
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), 1)
self.assertEqual(
capital_change_packets[0],
{'date': pd.Timestamp('2006-01-06', tz='UTC'),
'type': 'cash',
'target': 151000.0 if change_type == 'target' else None,
'delta': 50000.0})
# 1/03: price = 10, place orders
# 1/04: orders execute at price = 11, place orders
# 1/05: orders execute at price = 12, place orders
# 1/06: +50000 capital change,
# orders execute at price = 13, place orders
# 1/09: orders execute at price = 14, place orders
expected_daily = {}
expected_capital_changes = np.array([
0.0, 0.0, 0.0, 50000.0, 0.0
])
# Day 1, no transaction. Day 2, we transact, but the price of our stock
# does not change. Day 3, we start getting returns
expected_daily['returns'] = np.array([
0.0,
0.0,
# 1000 shares * gain of 1
(100000.0 + 1000.0) / 100000.0 - 1.0,
# 2000 shares * gain of 1, capital change of +50000
(151000.0 + 2000.0) / 151000.0 - 1.0,
# 3000 shares * gain of 1
(153000.0 + 3000.0) / 153000.0 - 1.0,
])
expected_daily['pnl'] = np.array([
0.0,
0.0,
1000.00, # 1000 shares * gain of 1
2000.00, # 2000 shares * gain of 1
3000.00, # 3000 shares * gain of 1
])
expected_daily['capital_used'] = np.array([
0.0,
-11000.0, # 1000 shares at price = 11
-12000.0, # 1000 shares at price = 12
-13000.0, # 1000 shares at price = 13
-14000.0, # 1000 shares at price = 14
])
expected_daily['ending_cash'] = \
np.array([100000.0] * 5) + \
np.cumsum(expected_capital_changes) + \
np.cumsum(expected_daily['capital_used'])
expected_daily['starting_cash'] = \
expected_daily['ending_cash'] - \
expected_daily['capital_used']
expected_daily['starting_value'] = np.array([
0.0,
0.0,
11000.0, # 1000 shares at price = 11
24000.0, # 2000 shares at price = 12
39000.0, # 3000 shares at price = 13
])
expected_daily['ending_value'] = \
expected_daily['starting_value'] + \
expected_daily['pnl'] - \
expected_daily['capital_used']
expected_daily['portfolio_value'] = \
expected_daily['ending_value'] + \
expected_daily['ending_cash']
stats = [
'returns', 'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value'
]
expected_cumulative = {
'returns': np.cumprod(expected_daily['returns'] + 1) - 1,
'pnl': np.cumsum(expected_daily['pnl']),
'capital_used': np.cumsum(expected_daily['capital_used']),
'starting_cash':
np.repeat(expected_daily['starting_cash'][0:1], 5),
'ending_cash': expected_daily['ending_cash'],
'starting_value':
np.repeat(expected_daily['starting_value'][0:1], 5),
'ending_value': expected_daily['ending_value'],
'portfolio_value': expected_daily['portfolio_value'],
}
for stat in stats:
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in daily_perf]),
expected_daily[stat],
err_msg='daily ' + stat,
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat],
err_msg='cumulative ' + stat,
)
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-06', tz='UTC'): 50000.0}
)
@parameterized.expand([
('interday_target', [('2006-01-05', 2388.0)]),
('interday_delta', [('2006-01-05', 1000.0)]),
('intraday_target', [('2006-01-05 17:00', 2184.0),
('2006-01-05 18:00', 2804.0)]),
('intraday_delta', [('2006-01-05 17:00', 500.0),
('2006-01-05 18:00', 500.0)]),
])
def test_capital_changes_minute_mode_daily_emission(self, change, values):
change_loc, change_type = change.split('_')
sim_params = SimulationParameters(
start_session=pd.Timestamp('2006-01-04', tz='UTC'),
end_session=pd.Timestamp('2006-01-05', tz='UTC'),
data_frequency='minute',
capital_base=1000.0,
trading_calendar=self.nyse_calendar,
)
capital_changes = {
pd.Timestamp(datestr, tz='UTC'): {
'type': change_type,
'value': value
}
for datestr, value in values
}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(1), 1)
"""
algo = self.make_algo(
script=algocode,
sim_params=sim_params,
capital_changes=capital_changes
)
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), len(capital_changes))
expected = [
{'date': pd.Timestamp(val[0], tz='UTC'),
'type': 'cash',
'target': val[1] if change_type == 'target' else None,
'delta': 1000.0 if len(values) == 1 else 500.0}
for val in values]
self.assertEqual(capital_change_packets, expected)
# 1/03: place orders at price = 100, execute at 101
# 1/04: place orders at price = 490, execute at 491,
# +500 capital change at 17:00 and 18:00 (intraday)
# or +1000 at 00:00 (interday),
# 1/05: place orders at price = 880, execute at 881
expected_daily = {}
expected_capital_changes = np.array([0.0, 1000.0, 0.0])
if change_loc == 'intraday':
# Fills at 491, +500 capital change comes at 638 (17:00) and
# 698 (18:00), ends day at 879
day2_return = (
(1388.0 + 149.0 + 147.0) / 1388.0 *
(2184.0 + 60.0 + 60.0) / 2184.0 *
(2804.0 + 181.0 + 181.0) / 2804.0 - 1.0
)
else:
# Fills at 491, ends day at 879, capital change +1000
day2_return = (2388.0 + 390.0 + 388.0) / 2388.0 - 1
expected_daily['returns'] = np.array([
# Fills at 101, ends day at 489
(1000.0 + 489 - 101) / 1000.0 - 1.0,
day2_return,
# Fills at 881, ends day at 1269
(3166.0 + 390.0 + 390.0 + 388.0) / 3166.0 - 1.0,
])
expected_daily['pnl'] = np.array([
388.0,
390.0 + 388.0,
390.0 + 390.0 + 388.0,
])
expected_daily['capital_used'] = np.array([
-101.0, -491.0, -881.0
])
expected_daily['ending_cash'] = \
np.array([1000.0] * 3) + \
np.cumsum(expected_capital_changes) + \
np.cumsum(expected_daily['capital_used'])
expected_daily['starting_cash'] = \
expected_daily['ending_cash'] - \
expected_daily['capital_used']
if change_loc == 'intraday':
# Capital changes come after day start
expected_daily['starting_cash'] -= expected_capital_changes
expected_daily['starting_value'] = np.array([
0.0, 489.0, 879.0 * 2
])
expected_daily['ending_value'] = \
expected_daily['starting_value'] + \
expected_daily['pnl'] - \
expected_daily['capital_used']
expected_daily['portfolio_value'] = \
expected_daily['ending_value'] + \
expected_daily['ending_cash']
stats = [
'returns', 'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value'
]
expected_cumulative = {
'returns': np.cumprod(expected_daily['returns'] + 1) - 1,
'pnl': np.cumsum(expected_daily['pnl']),
'capital_used': np.cumsum(expected_daily['capital_used']),
'starting_cash':
np.repeat(expected_daily['starting_cash'][0:1], 3),
'ending_cash': expected_daily['ending_cash'],
'starting_value':
np.repeat(expected_daily['starting_value'][0:1], 3),
'ending_value': expected_daily['ending_value'],
'portfolio_value': expected_daily['portfolio_value'],
}
for stat in stats:
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in daily_perf]),
expected_daily[stat]
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat]
)
if change_loc == 'interday':
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-05', tz='UTC'): 1000.0}
)
else:
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-05 17:00', tz='UTC'): 500.0,
pd.Timestamp('2006-01-05 18:00', tz='UTC'): 500.0}
)
@parameterized.expand([
('interday_target', [('2006-01-05', 2388.0)]),
('interday_delta', [('2006-01-05', 1000.0)]),
('intraday_target', [('2006-01-05 17:00', 2184.0),
('2006-01-05 18:00', 2804.0)]),
('intraday_delta', [('2006-01-05 17:00', 500.0),
('2006-01-05 18:00', 500.0)]),
])
def test_capital_changes_minute_mode_minute_emission(self, change, values):
change_loc, change_type = change.split('_')
sim_params = SimulationParameters(
start_session=pd.Timestamp('2006-01-04', tz='UTC'),
end_session=pd.Timestamp('2006-01-05', tz='UTC'),
data_frequency='minute',
emission_rate='minute',
capital_base=1000.0,
trading_calendar=self.nyse_calendar,
)
capital_changes = {pd.Timestamp(val[0], tz='UTC'): {
'type': change_type, 'value': val[1]} for val in values}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(1), 1)
"""
algo = self.make_algo(
script=algocode,
sim_params=sim_params,
capital_changes=capital_changes
)
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
minute_perf = [r['minute_perf'] for r in results if 'minute_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), len(capital_changes))
expected = [
{'date': pd.Timestamp(val[0], tz='UTC'),
'type': 'cash',
'target': val[1] if change_type == 'target' else None,
'delta': 1000.0 if len(values) == 1 else 500.0}
for val in values]
self.assertEqual(capital_change_packets, expected)
# 1/03: place orders at price = 100, execute at 101
# 1/04: place orders at price = 490, execute at 491,
# +500 capital change at 17:00 and 18:00 (intraday)
# or +1000 at 00:00 (interday),
# 1/05: place orders at price = 880, execute at 881
# Minute perfs are cumulative for the day
expected_minute = {}
capital_changes_after_start = np.array([0.0] * 1170)
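        # Minute indices below assume 390-minute sessions opening at 14:31
        # UTC (US winter hours): 17:00 UTC is the 150th minute of a session,
        # so the 17:00 and 18:00 capital changes on day 2 land at offsets
        # 390 + 149 = 539 and 390 + 209 = 599, and day 3 starts at 780.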
if change_loc == 'intraday':
capital_changes_after_start[539:599] = 500.0
capital_changes_after_start[599:780] = 1000.0
expected_minute['pnl'] = np.array([0.0] * 1170)
expected_minute['pnl'][:2] = 0.0
expected_minute['pnl'][2:392] = 1.0
expected_minute['pnl'][392:782] = 2.0
expected_minute['pnl'][782:] = 3.0
for start, end in ((0, 390), (390, 780), (780, 1170)):
expected_minute['pnl'][start:end] = \
np.cumsum(expected_minute['pnl'][start:end])
expected_minute['capital_used'] = np.concatenate((
[0.0] * 1, [-101.0] * 389,
[0.0] * 1, [-491.0] * 389,
[0.0] * 1, [-881.0] * 389,
))
# +1000 capital changes comes before the day start if interday
day2adj = 0.0 if change_loc == 'intraday' else 1000.0
expected_minute['starting_cash'] = np.concatenate((
[1000.0] * 390,
# 101 spent on 1/03
[1000.0 - 101.0 + day2adj] * 390,
# 101 spent on 1/03, 491 on 1/04, +1000 capital change on 1/04
[1000.0 - 101.0 - 491.0 + 1000] * 390
))
expected_minute['ending_cash'] = \
expected_minute['starting_cash'] + \
expected_minute['capital_used'] + \
capital_changes_after_start
expected_minute['starting_value'] = np.concatenate((
[0.0] * 390,
[489.0] * 390,
[879.0 * 2] * 390
))
expected_minute['ending_value'] = \
expected_minute['starting_value'] + \
expected_minute['pnl'] - \
expected_minute['capital_used']
expected_minute['portfolio_value'] = \
expected_minute['ending_value'] + \
expected_minute['ending_cash']
expected_minute['returns'] = \
expected_minute['pnl'] / \
(expected_minute['starting_value'] +
expected_minute['starting_cash'])
# If the change is interday, we can just calculate the returns from
# the pnl, starting_value and starting_cash. If the change is intraday,
# the returns after the change have to be calculated from two
# subperiods
if change_loc == 'intraday':
# The last packet (at 1/04 16:59) before the first capital change
prev_subperiod_return = expected_minute['returns'][538]
# From 1/04 17:00 to 17:59
cur_subperiod_pnl = \
expected_minute['pnl'][539:599] - expected_minute['pnl'][538]
cur_subperiod_starting_value = \
np.array([expected_minute['ending_value'][538]] * 60)
cur_subperiod_starting_cash = \
np.array([expected_minute['ending_cash'][538] + 500] * 60)
cur_subperiod_returns = cur_subperiod_pnl / \
(cur_subperiod_starting_value + cur_subperiod_starting_cash)
expected_minute['returns'][539:599] = \
(cur_subperiod_returns + 1.0) * \
(prev_subperiod_return + 1.0) - \
1.0
# The last packet (at 1/04 17:59) before the second capital change
prev_subperiod_return = expected_minute['returns'][598]
# From 1/04 18:00 to 21:00
cur_subperiod_pnl = \
expected_minute['pnl'][599:780] - expected_minute['pnl'][598]
cur_subperiod_starting_value = \
np.array([expected_minute['ending_value'][598]] * 181)
cur_subperiod_starting_cash = \
np.array([expected_minute['ending_cash'][598] + 500] * 181)
cur_subperiod_returns = cur_subperiod_pnl / \
(cur_subperiod_starting_value + cur_subperiod_starting_cash)
expected_minute['returns'][599:780] = \
(cur_subperiod_returns + 1.0) * \
(prev_subperiod_return + 1.0) - \
1.0
# The last minute packet of each day
expected_daily = {
k: np.array([v[389], v[779], v[1169]])
for k, v in iteritems(expected_minute)
}
stats = [
'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value', 'returns'
]
expected_cumulative = deepcopy(expected_minute)
# "Add" daily return from 1/03 to minute returns on 1/04 and 1/05
# "Add" daily return from 1/04 to minute returns on 1/05
expected_cumulative['returns'][390:] = \
(expected_cumulative['returns'][390:] + 1) * \
(expected_daily['returns'][0] + 1) - 1
expected_cumulative['returns'][780:] = \
(expected_cumulative['returns'][780:] + 1) * \
(expected_daily['returns'][1] + 1) - 1
# Add daily pnl/capital_used from 1/03 to 1/04 and 1/05
# Add daily pnl/capital_used from 1/04 to 1/05
expected_cumulative['pnl'][390:] += expected_daily['pnl'][0]
expected_cumulative['pnl'][780:] += expected_daily['pnl'][1]
expected_cumulative['capital_used'][390:] += \
expected_daily['capital_used'][0]
expected_cumulative['capital_used'][780:] += \
expected_daily['capital_used'][1]
# starting_cash, starting_value are same as those of the first daily
# packet
expected_cumulative['starting_cash'] = \
np.repeat(expected_daily['starting_cash'][0:1], 1170)
expected_cumulative['starting_value'] = \
np.repeat(expected_daily['starting_value'][0:1], 1170)
# extra cumulative packet per day from the daily packet
for stat in stats:
for i in (390, 781, 1172):
expected_cumulative[stat] = np.insert(
expected_cumulative[stat],
i,
expected_cumulative[stat][i-1]
)
for stat in stats:
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in minute_perf]),
expected_minute[stat]
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in daily_perf]),
expected_daily[stat]
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat]
)
if change_loc == 'interday':
self.assertEqual(
algo.capital_change_deltas,
                {pd.Timestamp('2006-01-04', tz='UTC'): 1000.0}
)
else:
self.assertEqual(
algo.capital_change_deltas,
                {pd.Timestamp('2006-01-04 17:00', tz='UTC'): 500.0,
                 pd.Timestamp('2006-01-04 18:00', tz='UTC'): 500.0}
)
class TestGetDatetime(zf.WithMakeAlgo, zf.ZiplineTestCase):
SIM_PARAMS_DATA_FREQUENCY = 'minute'
START_DATE = to_utc('2014-01-02 9:31')
END_DATE = to_utc('2014-01-03 9:31')
ASSET_FINDER_EQUITY_SIDS = 0, 1
# FIXME: Pass a benchmark source explicitly here.
BENCHMARK_SID = None
@parameterized.expand(
[
('default', None,),
('utc', 'UTC',),
('us_east', 'US/Eastern',),
]
)
def test_get_datetime(self, name, tz):
algo = dedent(
"""
import pandas as pd
from zipline.api import get_datetime
def initialize(context):
context.tz = {tz} or 'UTC'
context.first_bar = True
def handle_data(context, data):
dt = get_datetime({tz})
if dt.tz.zone != context.tz:
raise ValueError("Mismatched Zone")
if context.first_bar:
if dt.tz_convert("US/Eastern").hour != 9:
raise ValueError("Mismatched Hour")
elif dt.tz_convert("US/Eastern").minute != 31:
raise ValueError("Mismatched Minute")
context.first_bar = False
""".format(tz=repr(tz))
)
algo = self.make_algo(script=algo)
algo.run()
self.assertFalse(algo.first_bar)
class TestTradingControls(zf.WithMakeAlgo,
zf.ZiplineTestCase):
    START_DATE = pd.Timestamp('2006-01-03', tz='utc')
END_DATE = pd.Timestamp('2006-01-06', tz='utc')
sid = 133
sids = ASSET_FINDER_EQUITY_SIDS = 133, 134
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = True
@classmethod
def init_class_fixtures(cls):
super(TestTradingControls, cls).init_class_fixtures()
cls.asset = cls.asset_finder.retrieve_asset(cls.sid)
cls.another_asset = cls.asset_finder.retrieve_asset(134)
def _check_algo(self,
algo,
expected_order_count,
expected_exc):
with self.assertRaises(expected_exc) if expected_exc else nop_context:
algo.run()
self.assertEqual(algo.order_count, expected_order_count)
def check_algo_succeeds(self, algo, order_count=4):
# Default for order_count assumes one order per handle_data call.
self._check_algo(algo, order_count, None)
def check_algo_fails(self, algo, order_count):
self._check_algo(algo,
order_count,
TradingControlViolation)
def test_set_max_position_size(self):
def initialize(self, asset, max_shares, max_notional):
self.set_slippage(FixedSlippage())
self.order_count = 0
self.set_max_position_size(asset=asset,
max_shares=max_shares,
max_notional=max_notional)
# Buy one share four times. Should be fine.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 1)
algo.order_count += 1
algo = self.make_algo(
asset=self.asset,
max_shares=10,
max_notional=500.0,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_succeeds(algo)
# Buy three shares four times. Should bail on the fourth before it's
# placed.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 3)
algo.order_count += 1
algo = self.make_algo(
asset=self.asset,
max_shares=10,
max_notional=500.0,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 3)
# Buy three shares four times. Should bail due to max_notional on the
# third attempt.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 3)
algo.order_count += 1
algo = self.make_algo(
asset=self.asset,
max_shares=10,
max_notional=67.0,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 2)
# Set the trading control to a different sid, then BUY ALL THE THINGS!.
# Should continue normally.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = self.make_algo(
asset=self.another_asset,
max_shares=10,
max_notional=67.0,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_succeeds(algo)
# Set the trading control sid to None, then BUY ALL THE THINGS!. Should
# fail because setting sid to None makes the control apply to all sids.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = self.make_algo(
max_shares=10,
max_notional=61.0,
asset=None,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 0)
def test_set_asset_restrictions(self):
def initialize(algo, sid, restrictions, on_error):
algo.order_count = 0
algo.set_asset_restrictions(restrictions, on_error)
def handle_data(algo, data):
algo.could_trade = data.can_trade(algo.sid(self.sid))
algo.order(algo.sid(self.sid), 100)
algo.order_count += 1
# Set HistoricalRestrictions for one sid for the entire simulation,
# and fail.
rlm = HistoricalRestrictions([
Restriction(
self.sid,
self.sim_params.start_session,
RESTRICTION_STATES.FROZEN)
])
algo = self.make_algo(
sid=self.sid,
restrictions=rlm,
on_error='fail',
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 0)
self.assertFalse(algo.could_trade)
# Set StaticRestrictions for one sid and fail.
rlm = StaticRestrictions([self.sid])
algo = self.make_algo(
sid=self.sid,
restrictions=rlm,
on_error='fail',
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 0)
self.assertFalse(algo.could_trade)
# just log an error on the violation if we choose not to fail.
algo = self.make_algo(
sid=self.sid,
restrictions=rlm,
on_error='log',
initialize=initialize,
handle_data=handle_data,
)
with make_test_handler(self) as log_catcher:
self.check_algo_succeeds(algo)
logs = [r.message for r in log_catcher.records]
self.assertIn("Order for 100 shares of Equity(133 [A]) at "
"2006-01-04 21:00:00+00:00 violates trading constraint "
"RestrictedListOrder({})", logs)
self.assertFalse(algo.could_trade)
# set the restricted list to exclude the sid, and succeed
rlm = HistoricalRestrictions([
Restriction(
sid,
self.sim_params.start_session,
RESTRICTION_STATES.FROZEN) for sid in [134, 135, 136]
])
algo = self.make_algo(
sid=self.sid,
restrictions=rlm,
on_error='fail',
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_succeeds(algo)
self.assertTrue(algo.could_trade)
@parameterized.expand([
('order_first_restricted_sid', 0),
('order_second_restricted_sid', 1)
])
def test_set_multiple_asset_restrictions(self, name, to_order_idx):
def initialize(algo, restrictions1, restrictions2, on_error):
algo.order_count = 0
algo.set_asset_restrictions(restrictions1, on_error)
algo.set_asset_restrictions(restrictions2, on_error)
def handle_data(algo, data):
algo.could_trade1 = data.can_trade(algo.sid(self.sids[0]))
algo.could_trade2 = data.can_trade(algo.sid(self.sids[1]))
algo.order(algo.sid(self.sids[to_order_idx]), 100)
algo.order_count += 1
rl1 = StaticRestrictions([self.sids[0]])
rl2 = StaticRestrictions([self.sids[1]])
algo = self.make_algo(
restrictions1=rl1,
restrictions2=rl2,
initialize=initialize,
handle_data=handle_data,
on_error='fail',
)
self.check_algo_fails(algo, 0)
self.assertFalse(algo.could_trade1)
self.assertFalse(algo.could_trade2)
def test_set_do_not_order_list(self):
def initialize(self, restricted_list):
self.order_count = 0
self.set_do_not_order_list(restricted_list, on_error='fail')
def handle_data(algo, data):
algo.could_trade = data.can_trade(algo.sid(self.sid))
algo.order(algo.sid(self.sid), 100)
algo.order_count += 1
rlm = [self.sid]
algo = self.make_algo(
restricted_list=rlm,
initialize=initialize,
handle_data=handle_data,
)
self.check_algo_fails(algo, 0)
self.assertFalse(algo.could_trade)
def test_set_max_order_size(self):
def initialize(algo, asset, max_shares, max_notional):
algo.order_count = 0
algo.set_max_order_size(asset=asset,
max_shares=max_shares,
max_notional=max_notional)
# Buy one share.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 1)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=self.asset,
max_shares=10,
max_notional=500.0,
)
self.check_algo_succeeds(algo)
# Buy 1, then 2, then 3, then 4 shares. Bail on the last attempt
# because we exceed shares.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), algo.order_count + 1)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=self.asset,
max_shares=3,
max_notional=500.0,
)
self.check_algo_fails(algo, 3)
# Buy 1, then 2, then 3, then 4 shares. Bail on the last attempt
# because we exceed notional.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), algo.order_count + 1)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=self.asset,
max_shares=10,
max_notional=40.0,
)
self.check_algo_fails(algo, 3)
# Set the trading control to a different sid, then BUY ALL THE THINGS!.
# Should continue normally.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=self.another_asset,
max_shares=1,
max_notional=1.0,
)
self.check_algo_succeeds(algo)
# Set the trading control sid to None, then BUY ALL THE THINGS!.
# Should fail because not specifying a sid makes the trading control
# apply to all sids.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
asset=None,
max_shares=1,
max_notional=1.0,
)
self.check_algo_fails(algo, 0)
def test_set_max_order_count(self):
def initialize(algo, count):
algo.order_count = 0
algo.set_max_order_count(count)
def handle_data(algo, data):
for i in range(5):
algo.order(self.asset, 1)
algo.order_count += 1
algo = self.make_algo(
count=3,
initialize=initialize,
handle_data=handle_data,
)
with self.assertRaises(TradingControlViolation):
algo.run()
self.assertEqual(algo.order_count, 3)
def test_set_max_order_count_minutely(self):
sim_params = self.make_simparams(data_frequency='minute')
def initialize(algo, max_orders_per_day):
algo.minute_count = 0
algo.order_count = 0
algo.set_max_order_count(max_orders_per_day)
# Order 5 times twice in a single day, and set a max order count of
# 9. The last order of the second batch should fail.
def handle_data(algo, data):
if algo.minute_count == 0 or algo.minute_count == 100:
for i in range(5):
algo.order(self.asset, 1)
algo.order_count += 1
algo.minute_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
max_orders_per_day=9,
sim_params=sim_params,
)
with self.assertRaises(TradingControlViolation):
algo.run()
self.assertEqual(algo.order_count, 9)
# Set a limit of 5 orders per day, and order 5 times in the first
# minute of each day. This should succeed because the counter gets
# reset each day.
def handle_data(algo, data):
if (algo.minute_count % 390) == 0:
for i in range(5):
algo.order(self.asset, 1)
algo.order_count += 1
algo.minute_count += 1
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
max_orders_per_day=5,
sim_params=sim_params,
)
algo.run()
# 5 orders per day times 4 days.
self.assertEqual(algo.order_count, 20)
def test_long_only(self):
def initialize(algo):
algo.order_count = 0
algo.set_long_only()
# Sell immediately -> fail immediately.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), -1)
algo.order_count += 1
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
self.check_algo_fails(algo, 0)
# Buy on even days, sell on odd days. Never takes a short position, so
# should succeed.
def handle_data(algo, data):
if (algo.order_count % 2) == 0:
algo.order(algo.sid(self.sid), 1)
else:
algo.order(algo.sid(self.sid), -1)
algo.order_count += 1
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
self.check_algo_succeeds(algo)
# Buy on first three days, then sell off holdings. Should succeed.
def handle_data(algo, data):
amounts = [1, 1, 1, -3]
algo.order(algo.sid(self.sid), amounts[algo.order_count])
algo.order_count += 1
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
self.check_algo_succeeds(algo)
# Buy on first three days, then sell off holdings plus an extra share.
# Should fail on the last sale.
def handle_data(algo, data):
amounts = [1, 1, 1, -4]
algo.order(algo.sid(self.sid), amounts[algo.order_count])
algo.order_count += 1
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
self.check_algo_fails(algo, 3)
def test_register_post_init(self):
def initialize(algo):
algo.initialized = True
def handle_data(algo, data):
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_max_position_size(self.sid, 1, 1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_max_order_size(self.sid, 1, 1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_max_order_count(1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_long_only()
self.run_algorithm(initialize=initialize, handle_data=handle_data)
class TestAssetDateBounds(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2014-01-02', tz='UTC')
END_DATE = pd.Timestamp('2014-01-03', tz='UTC')
SIM_PARAMS_START_DATE = END_DATE # Only run for one day.
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
BENCHMARK_SID = 3
@classmethod
def make_equity_info(cls):
T = partial(pd.Timestamp, tz='UTC')
return pd.DataFrame.from_records([
{'sid': 1,
'symbol': 'OLD',
'start_date': T('1990'),
'end_date': T('1991'),
'exchange': 'TEST'},
{'sid': 2,
'symbol': 'NEW',
'start_date': T('2017'),
'end_date': T('2018'),
'exchange': 'TEST'},
{'sid': 3,
'symbol': 'GOOD',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'exchange': 'TEST'},
])
def test_asset_date_bounds(self):
def initialize(algo):
algo.ran = False
algo.register_trading_control(AssetDateBounds(on_error='fail'))
def handle_data(algo, data):
# This should work because sid 3 is valid during the algo lifetime.
algo.order(algo.sid(3), 1)
# Sid already expired.
with self.assertRaises(TradingControlViolation):
algo.order(algo.sid(1), 1)
# Sid doesn't exist yet.
with self.assertRaises(TradingControlViolation):
algo.order(algo.sid(2), 1)
algo.ran = True
algo = self.make_algo(initialize=initialize, handle_data=handle_data)
algo.run()
self.assertTrue(algo.ran)
class TestAccountControls(zf.WithMakeAlgo,
zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='utc')
END_DATE = pd.Timestamp('2006-01-06', tz='utc')
sidint, = ASSET_FINDER_EQUITY_SIDS = (133,)
BENCHMARK_SID = None
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
frame = pd.DataFrame(data={
'close': [10., 10., 11., 11.],
'open': [10., 10., 11., 11.],
'low': [9.5, 9.5, 10.45, 10.45],
'high': [10.5, 10.5, 11.55, 11.55],
'volume': [100, 100, 100, 300],
}, index=cls.equity_daily_bar_days)
yield cls.sidint, frame
def _check_algo(self, algo, expected_exc):
with self.assertRaises(expected_exc) if expected_exc else nop_context:
algo.run()
def check_algo_succeeds(self, algo):
# Default for order_count assumes one order per handle_data call.
self._check_algo(algo, None)
def check_algo_fails(self, algo):
self._check_algo(algo, AccountControlViolation)
def test_set_max_leverage(self):
def initialize(algo, max_leverage):
algo.set_max_leverage(max_leverage=max_leverage)
def handle_data(algo, data):
algo.order(algo.sid(self.sidint), 1)
algo.record(latest_time=algo.get_datetime())
# Set max leverage to 0 so buying one share fails.
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
max_leverage=0,
)
self.check_algo_fails(algo)
self.assertEqual(
algo.recorded_vars['latest_time'],
pd.Timestamp('2006-01-05 21:00:00', tz='UTC'),
)
# Set max leverage to 1 so buying one share passes
def handle_data(algo, data):
algo.order(algo.sid(self.sidint), 1)
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
max_leverage=1,
)
self.check_algo_succeeds(algo)
def test_set_min_leverage(self):
def initialize(algo, min_leverage, grace_period):
algo.set_min_leverage(
min_leverage=min_leverage, grace_period=grace_period
)
def handle_data(algo, data):
algo.order_target_percent(algo.sid(self.sidint), .5)
algo.record(latest_time=algo.get_datetime())
# Helper for not having to pass init/handle_data at each callsite.
def make_algo(min_leverage, grace_period):
return self.make_algo(
initialize=initialize,
handle_data=handle_data,
min_leverage=min_leverage,
grace_period=grace_period,
)
# Set min leverage to 1.
# The algorithm will succeed because it doesn't run for more
# than 10 days.
offset = pd.Timedelta('10 days')
algo = make_algo(min_leverage=1, grace_period=offset)
self.check_algo_succeeds(algo)
# The algorithm will fail because it doesn't reach a min leverage of 1
# after 1 day.
offset = pd.Timedelta('1 days')
algo = make_algo(min_leverage=1, grace_period=offset)
self.check_algo_fails(algo)
self.assertEqual(
algo.recorded_vars['latest_time'],
pd.Timestamp('2006-01-05 21:00:00', tz='UTC'),
)
# Increase the offset to 2 days, and the algorithm fails a day later
offset = pd.Timedelta('2 days')
algo = make_algo(min_leverage=1, grace_period=offset)
self.check_algo_fails(algo)
self.assertEqual(
algo.recorded_vars['latest_time'],
            pd.Timestamp('2006-01-06 21:00:00', tz='UTC'),
)
# Set the min_leverage to .0001 and the algorithm succeeds.
algo = make_algo(min_leverage=.0001, grace_period=offset)
self.check_algo_succeeds(algo)
class TestFuturesAlgo(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-06', tz='utc')
END_DATE = pd.Timestamp('2016-01-07', tz='utc')
FUTURE_MINUTE_BAR_START_DATE = pd.Timestamp('2016-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'minute'
TRADING_CALENDAR_STRS = ('us_futures',)
TRADING_CALENDAR_PRIMARY_CAL = 'us_futures'
BENCHMARK_SID = None
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
1: {
'symbol': 'CLG16',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2015-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2016-01-20', tz='UTC'),
'expiration_date': pd.Timestamp('2016-02-19', tz='UTC'),
'auto_close_date': pd.Timestamp('2016-01-18', tz='UTC'),
'exchange': 'TEST',
},
},
orient='index',
)
def test_futures_history(self):
algo_code = dedent(
"""
from datetime import time
from zipline.api import (
date_rules,
get_datetime,
schedule_function,
sid,
time_rules,
)
def initialize(context):
context.history_values = []
schedule_function(
make_history_call,
date_rules.every_day(),
time_rules.market_open(),
)
schedule_function(
check_market_close_time,
date_rules.every_day(),
time_rules.market_close(),
)
def make_history_call(context, data):
# Ensure that the market open is 6:31am US/Eastern.
open_time = get_datetime().tz_convert('US/Eastern').time()
assert open_time == time(6, 31)
context.history_values.append(
data.history(sid(1), 'close', 5, '1m'),
)
def check_market_close_time(context, data):
# Ensure that this function is called at 4:59pm US/Eastern.
# By default, `market_close()` uses an offset of 1 minute.
close_time = get_datetime().tz_convert('US/Eastern').time()
assert close_time == time(16, 59)
"""
)
algo = self.make_algo(
script=algo_code,
trading_calendar=get_calendar('us_futures'),
)
algo.run()
# Assert that we were able to retrieve history data for minutes outside
# of the 6:31am US/Eastern to 5:00pm US/Eastern futures open times.
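        # A 5-bar minute lookback ending at the 6:31 open spans 6:27-6:31, so
        # the first four bars fall outside those open times.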
np.testing.assert_array_equal(
algo.history_values[0].index,
pd.date_range(
'2016-01-06 6:27',
'2016-01-06 6:31',
freq='min',
tz='US/Eastern',
),
)
np.testing.assert_array_equal(
algo.history_values[1].index,
pd.date_range(
'2016-01-07 6:27',
'2016-01-07 6:31',
freq='min',
tz='US/Eastern',
),
)
# Expected prices here are given by the range values created by the
# default `make_future_minute_bar_data` method.
np.testing.assert_array_equal(
algo.history_values[0].values, list(map(float, range(2196, 2201))),
)
np.testing.assert_array_equal(
algo.history_values[1].values, list(map(float, range(3636, 3641))),
)
@staticmethod
def algo_with_slippage(slippage_model):
return dedent(
"""
from zipline.api import (
commission,
order,
set_commission,
set_slippage,
sid,
slippage,
get_datetime,
)
def initialize(context):
commission_model = commission.PerFutureTrade(0)
set_commission(us_futures=commission_model)
slippage_model = slippage.{model}
set_slippage(us_futures=slippage_model)
context.ordered = False
def handle_data(context, data):
if not context.ordered:
order(sid(1), 10)
context.ordered = True
context.order_price = data.current(sid(1), 'price')
"""
).format(model=slippage_model)
def test_fixed_future_slippage(self):
algo_code = self.algo_with_slippage('FixedSlippage(spread=0.10)')
algo = self.make_algo(
script=algo_code,
trading_calendar=get_calendar('us_futures'),
)
results = algo.run()
# Flatten the list of transactions.
all_txns = [
val for sublist in results['transactions'].tolist()
for val in sublist
]
self.assertEqual(len(all_txns), 1)
txn = all_txns[0]
# Add 1 to the expected price because the order does not fill until the
# bar after the price is recorded.
expected_spread = 0.05
expected_price = (algo.order_price + 1) + expected_spread
self.assertEqual(txn['price'], expected_price)
self.assertEqual(results['orders'][0][0]['commission'], 0.0)
def test_volume_contract_slippage(self):
algo_code = self.algo_with_slippage(
'VolumeShareSlippage(volume_limit=0.05, price_impact=0.1)',
)
algo = self.make_algo(
script=algo_code,
trading_calendar=get_calendar('us_futures'),
)
results = algo.run()
# There should be no commissions.
self.assertEqual(results['orders'][0][0]['commission'], 0.0)
# Flatten the list of transactions.
all_txns = [
val for sublist in results['transactions'].tolist()
for val in sublist
]
# With a volume limit of 0.05, and a total volume of 100 contracts
# traded per minute, we should require 2 transactions to order 10
# contracts.
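        # (0.05 volume limit * 100 contracts per bar = 5 contracts filled per
        # bar, so the 10-contract order takes 2 bars.)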
self.assertEqual(len(all_txns), 2)
for i, txn in enumerate(all_txns):
# Add 1 to the order price because the order does not fill until
# the bar after the price is recorded.
order_price = algo.order_price + i + 1
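            # Expected impact follows the VolumeShareSlippage model:
            # price * price_impact * volume_share**2, with volume_share capped
            # at the 0.05 limit.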
expected_impact = order_price * 0.1 * (0.05 ** 2)
expected_price = order_price + expected_impact
self.assertEqual(txn['price'], expected_price)
class TestAnalyzeAPIMethod(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-05', tz='utc')
END_DATE = pd.Timestamp('2016-01-05', tz='utc')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
def test_analyze_called(self):
self.perf_ref = None
def initialize(context):
pass
def handle_data(context, data):
pass
def analyze(context, perf):
self.perf_ref = perf
algo = self.make_algo(
initialize=initialize, handle_data=handle_data, analyze=analyze,
)
results = algo.run()
self.assertIs(results, self.perf_ref)
class TestOrderCancelation(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-05', tz='utc')
END_DATE = pd.Timestamp('2016-01-07', tz='utc')
ASSET_FINDER_EQUITY_SIDS = (1,)
ASSET_FINDER_EQUITY_SYMBOLS = ('ASSET1',)
BENCHMARK_SID = None
code = dedent(
"""
from zipline.api import (
sid, order, set_slippage, slippage, VolumeShareSlippage,
set_cancel_policy, cancel_policy, EODCancel
)
def initialize(context):
set_slippage(
slippage.VolumeShareSlippage(
volume_limit=1,
price_impact=0
)
)
{0}
context.ordered = False
def handle_data(context, data):
if not context.ordered:
order(sid(1), {1})
context.ordered = True
""",
)
@classmethod
def make_equity_minute_bar_data(cls):
asset_minutes = \
cls.trading_calendar.minutes_for_sessions_in_range(
cls.START_DATE,
cls.END_DATE,
)
minutes_count = len(asset_minutes)
minutes_arr = np.arange(1, 1 + minutes_count)
# normal test data, but volume is pinned at 1 share per minute
yield 1, pd.DataFrame(
{
'open': minutes_arr + 1,
'high': minutes_arr + 2,
'low': minutes_arr - 1,
'close': minutes_arr,
'volume': np.full(minutes_count, 1.0),
},
index=asset_minutes,
)
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
yield 1, pd.DataFrame(
{
'open': np.full(3, 1, dtype=np.float64),
'high': np.full(3, 1, dtype=np.float64),
'low': np.full(3, 1, dtype=np.float64),
'close': np.full(3, 1, dtype=np.float64),
'volume': np.full(3, 1, dtype=np.float64),
},
index=cls.equity_daily_bar_days,
)
def prep_algo(self,
cancelation_string,
data_frequency="minute",
amount=1000,
minute_emission=False):
code = self.code.format(cancelation_string, amount)
return self.make_algo(
script=code,
sim_params=self.make_simparams(
data_frequency=data_frequency,
emission_rate='minute' if minute_emission else 'daily',
)
)
@parameter_space(
direction=[1, -1],
minute_emission=[True, False],
)
def test_eod_order_cancel_minute(self, direction, minute_emission):
"""
Test that EOD order cancel works in minute mode for both shorts and
longs, and both daily emission and minute emission
"""
# order 1000 shares of asset1. the volume is only 1 share per bar,
# so the order should be cancelled at the end of the day.
algo = self.prep_algo(
"set_cancel_policy(cancel_policy.EODCancel())",
amount=np.copysign(1000, direction),
minute_emission=minute_emission
)
log_catcher = TestHandler()
with log_catcher:
results = algo.run()
for daily_positions in results.positions:
self.assertEqual(1, len(daily_positions))
self.assertEqual(
np.copysign(389, direction),
daily_positions[0]["amount"],
)
self.assertEqual(1, results.positions[0][0]["sid"])
# should be an order on day1, but no more orders afterwards
np.testing.assert_array_equal([1, 0, 0],
list(map(len, results.orders)))
# should be 389 txns on day 1, but no more afterwards
np.testing.assert_array_equal([389, 0, 0],
list(map(len, results.transactions)))
the_order = results.orders[0][0]
self.assertEqual(ORDER_STATUS.CANCELLED, the_order["status"])
self.assertEqual(np.copysign(389, direction), the_order["filled"])
warnings = [record for record in log_catcher.records if
record.level == WARNING]
self.assertEqual(1, len(warnings))
if direction == 1:
self.assertEqual(
"Your order for 1000 shares of ASSET1 has been partially "
"filled. 389 shares were successfully purchased. "
"611 shares were not filled by the end of day and "
"were canceled.",
str(warnings[0].message)
)
elif direction == -1:
self.assertEqual(
"Your order for -1000 shares of ASSET1 has been partially "
"filled. 389 shares were successfully sold. "
"611 shares were not filled by the end of day and "
"were canceled.",
str(warnings[0].message)
)
def test_default_cancelation_policy(self):
algo = self.prep_algo("")
log_catcher = TestHandler()
with log_catcher:
results = algo.run()
# order stays open throughout simulation
np.testing.assert_array_equal([1, 1, 1],
list(map(len, results.orders)))
        # one txn per minute. 389 on the first day (since no order until the
        # end of the first minute). 390 on the second day. 221 on the
        # last day, sum = 1000.
np.testing.assert_array_equal([389, 390, 221],
list(map(len, results.transactions)))
self.assertFalse(log_catcher.has_warnings)
def test_eod_order_cancel_daily(self):
# in daily mode, EODCancel does nothing.
algo = self.prep_algo(
"set_cancel_policy(cancel_policy.EODCancel())",
"daily"
)
log_catcher = TestHandler()
with log_catcher:
results = algo.run()
# order stays open throughout simulation
np.testing.assert_array_equal([1, 1, 1],
list(map(len, results.orders)))
# one txn per day
np.testing.assert_array_equal([0, 1, 1],
list(map(len, results.transactions)))
self.assertFalse(log_catcher.has_warnings)
class TestDailyEquityAutoClose(zf.WithMakeAlgo, zf.ZiplineTestCase):
"""
Tests if delisted equities are properly removed from a portfolio holding
positions in said equities.
"""
# January 2015
# Su Mo Tu We Th Fr Sa
# 1 2 3
# 4 5 6 7 8 9 10
# 11 12 13 14 15 16 17
# 18 19 20 21 22 23 24
# 25 26 27 28 29 30 31
START_DATE = pd.Timestamp('2015-01-05', tz='UTC')
END_DATE = pd.Timestamp('2015-01-13', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
BENCHMARK_SID = None
@classmethod
def init_class_fixtures(cls):
super(TestDailyEquityAutoClose, cls).init_class_fixtures()
cls.assets = (
cls.asset_finder.retrieve_all(cls.asset_finder.equities_sids)
)
@classmethod
def make_equity_info(cls):
cls.test_days = cls.trading_calendar.sessions_in_range(
cls.START_DATE, cls.END_DATE,
)
assert len(cls.test_days) == 7, "Number of days in test changed!"
cls.first_asset_expiration = cls.test_days[2]
# Assets start on start date and delist every two days:
#
# start_date end_date auto_close_date
# 0 2015-01-05 2015-01-07 2015-01-09
# 1 2015-01-05 2015-01-09 2015-01-13
# 2 2015-01-05 2015-01-13 2015-01-15
cls.asset_info = make_jagged_equity_info(
num_assets=3,
start_date=cls.test_days[0],
first_end=cls.first_asset_expiration,
frequency=cls.trading_calendar.day,
periods_between_ends=2,
auto_close_delta=2 * cls.trading_calendar.day,
)
return cls.asset_info
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
cls.daily_data = make_trade_data_for_asset_info(
dates=cls.test_days,
asset_info=cls.asset_info,
price_start=10,
price_step_by_sid=10,
price_step_by_date=1,
volume_start=100,
volume_step_by_sid=100,
volume_step_by_date=10,
)
return cls.daily_data.items()
def daily_prices_on_tick(self, row):
return [
trades.iloc[row].close for trades in itervalues(self.daily_data)
]
def final_daily_price(self, asset):
return self.daily_data[asset.sid].loc[asset.end_date].close
def default_initialize(self):
"""
Initialize function shared between test algos.
"""
def initialize(context):
context.ordered = False
context.set_commission(PerShare(0, 0))
context.set_slippage(FixedSlippage(spread=0))
context.num_positions = []
context.cash = []
return initialize
def default_handle_data(self, assets, order_size):
"""
Handle data function shared between test algos.
"""
def handle_data(context, data):
if not context.ordered:
for asset in assets:
context.order(asset, order_size)
context.ordered = True
context.cash.append(context.portfolio.cash)
context.num_positions.append(len(context.portfolio.positions))
return handle_data
@parameter_space(
order_size=[10, -10],
capital_base=[1, 100000],
__fail_fast=True,
)
def test_daily_delisted_equities(self,
order_size,
capital_base):
"""
Make sure that after an equity gets delisted, our portfolio holds the
correct number of equities and correct amount of cash.
"""
assets = self.assets
final_prices = {
asset.sid: self.final_daily_price(asset)
for asset in assets
}
# Prices at which we expect our orders to be filled.
initial_fill_prices = self.daily_prices_on_tick(1)
cost_basis = sum(initial_fill_prices) * order_size
# Last known prices of assets that will be auto-closed.
fp0 = final_prices[0]
fp1 = final_prices[1]
algo = self.make_algo(
initialize=self.default_initialize(),
handle_data=self.default_handle_data(assets, order_size),
sim_params=self.make_simparams(
capital_base=capital_base,
data_frequency='daily',
),
)
output = algo.run()
initial_cash = capital_base
after_fills = initial_cash - cost_basis
after_first_auto_close = after_fills + fp0 * (order_size)
after_second_auto_close = after_first_auto_close + fp1 * (order_size)
# Day 1: Order 10 shares of each equity; there are 3 equities.
# Day 2: Order goes through at the day 2 price of each equity.
# Day 3: End date of Equity 0.
# Day 4: Nothing happens.
# Day 5: End date of Equity 1. Auto close of equity 0.
# Add cash == (fp0 * size).
# Day 6: Nothing happens.
# Day 7: End date of Equity 2 and auto-close date of Equity 1.
# Add cash equal to (fp1 * size).
expected_cash = [
initial_cash,
after_fills,
after_fills,
after_fills,
after_first_auto_close,
after_first_auto_close,
after_second_auto_close,
]
expected_num_positions = [0, 3, 3, 3, 2, 2, 1]
# Check expected cash.
self.assertEqual(expected_cash, list(output['ending_cash']))
# The cash recorded by the algo should be behind by a day from the
# computed ending cash.
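        # (The extra after_fills entry accounts for handle_data already seeing
        # the fills on day 2, while each auto-close credit is only visible the
        # session after it is booked; the final ending-cash value is never
        # observed by handle_data, hence the dropped last element.)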
expected_cash.insert(3, after_fills)
self.assertEqual(algo.cash, expected_cash[:-1])
# Check expected long/short counts.
# We have longs if order_size > 0.
# We have shorts if order_size < 0.
if order_size > 0:
self.assertEqual(
expected_num_positions,
list(output['longs_count']),
)
self.assertEqual(
[0] * len(self.test_days),
list(output['shorts_count']),
)
else:
self.assertEqual(
expected_num_positions,
list(output['shorts_count']),
)
self.assertEqual(
[0] * len(self.test_days),
list(output['longs_count']),
)
# The number of positions recorded by the algo should be behind by a
# day from the computed long/short counts.
expected_num_positions.insert(3, 3)
self.assertEqual(algo.num_positions, expected_num_positions[:-1])
# Check expected transactions.
# We should have a transaction of order_size shares per sid.
transactions = output['transactions']
initial_fills = transactions.iloc[1]
self.assertEqual(len(initial_fills), len(assets))
last_minute_of_session = \
self.trading_calendar.session_close(self.test_days[1])
for asset, txn in zip(assets, initial_fills):
self.assertDictContainsSubset(
{
'amount': order_size,
'commission': None,
'dt': last_minute_of_session,
'price': initial_fill_prices[asset],
'sid': asset,
},
txn,
)
# This will be a UUID.
self.assertIsInstance(txn['order_id'], str)
def transactions_for_date(date):
return transactions.iloc[self.test_days.get_loc(date)]
# We should have exactly one auto-close transaction on the close date
# of asset 0.
(first_auto_close_transaction,) = transactions_for_date(
assets[0].auto_close_date
)
self.assertEqual(
first_auto_close_transaction,
{
'amount': -order_size,
'commission': None,
'dt': self.trading_calendar.session_close(
assets[0].auto_close_date,
),
'price': fp0,
'sid': assets[0],
'order_id': None, # Auto-close txns emit Nones for order_id.
},
)
(second_auto_close_transaction,) = transactions_for_date(
assets[1].auto_close_date
)
self.assertEqual(
second_auto_close_transaction,
{
'amount': -order_size,
'commission': None,
'dt': self.trading_calendar.session_close(
assets[1].auto_close_date,
),
'price': fp1,
'sid': assets[1],
'order_id': None, # Auto-close txns emit Nones for order_id.
},
)
def test_cancel_open_orders(self):
"""
Test that any open orders for an equity that gets delisted are
canceled. Unless an equity is auto closed, any open orders for that
equity will persist indefinitely.
"""
assets = self.assets
first_asset_end_date = assets[0].end_date
first_asset_auto_close_date = assets[0].auto_close_date
def initialize(context):
pass
def handle_data(context, data):
# The only order we place in this test should never be filled.
assert (
context.portfolio.cash == context.portfolio.starting_cash
)
today_session = self.trading_calendar.minute_to_session_label(
context.get_datetime()
)
day_after_auto_close = self.trading_calendar.next_session_label(
first_asset_auto_close_date,
)
if today_session == first_asset_end_date:
# Equity 0 will no longer exist tomorrow, so this order will
# never be filled.
assert len(context.get_open_orders()) == 0
context.order(context.sid(0), 10)
assert len(context.get_open_orders()) == 1
elif today_session == first_asset_auto_close_date:
# We do not cancel open orders until the end of the auto close
# date, so our open order should still exist at this point.
assert len(context.get_open_orders()) == 1
elif today_session == day_after_auto_close:
assert len(context.get_open_orders()) == 0
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
sim_params=self.make_simparams(
data_frequency='daily',
),
)
results = algo.run()
orders = results['orders']
def orders_for_date(date):
return orders.iloc[self.test_days.get_loc(date)]
original_open_orders = orders_for_date(first_asset_end_date)
assert len(original_open_orders) == 1
last_close_for_asset = \
algo.trading_calendar.session_close(first_asset_end_date)
self.assertDictContainsSubset(
{
'amount': 10,
'commission': 0.0,
'created': last_close_for_asset,
'dt': last_close_for_asset,
'sid': assets[0],
'status': ORDER_STATUS.OPEN,
'filled': 0,
},
original_open_orders[0],
)
orders_after_auto_close = orders_for_date(first_asset_auto_close_date)
assert len(orders_after_auto_close) == 1
self.assertDictContainsSubset(
{
'amount': 10,
'commission': 0.0,
'created': last_close_for_asset,
'dt': algo.trading_calendar.session_close(
first_asset_auto_close_date,
),
'sid': assets[0],
'status': ORDER_STATUS.CANCELLED,
'filled': 0,
},
orders_after_auto_close[0],
)
# NOTE: This suite is almost the same as TestDailyEquityAutoClose, except it
# uses minutely data instead of daily data, and the auto_close_date for
# equities is one day after their end_date instead of two.
class TestMinutelyEquityAutoClose(zf.WithMakeAlgo,
zf.ZiplineTestCase):
# January 2015
# Su Mo Tu We Th Fr Sa
# 1 2 3
# 4 5 6 7 8 9 10
# 11 12 13 14 15 16 17
# 18 19 20 21 22 23 24
# 25 26 27 28 29 30 31
START_DATE = pd.Timestamp('2015-01-05', tz='UTC')
END_DATE = pd.Timestamp('2015-01-13', tz='UTC')
BENCHMARK_SID = None
@classmethod
def init_class_fixtures(cls):
super(TestMinutelyEquityAutoClose, cls).init_class_fixtures()
cls.assets = (
cls.asset_finder.retrieve_all(cls.asset_finder.equities_sids)
)
@classmethod
def make_equity_info(cls):
cls.test_days = cls.trading_calendar.sessions_in_range(
cls.START_DATE, cls.END_DATE,
)
cls.test_minutes = cls.trading_calendar.minutes_for_sessions_in_range(
cls.START_DATE, cls.END_DATE,
)
cls.first_asset_expiration = cls.test_days[2]
# Assets start on start date and delist every two days:
#
# start_date end_date auto_close_date
# 0 2015-01-05 2015-01-07 2015-01-09
# 1 2015-01-05 2015-01-09 2015-01-13
# 2 2015-01-05 2015-01-13 2015-01-15
cls.asset_info = make_jagged_equity_info(
num_assets=3,
start_date=cls.test_days[0],
first_end=cls.first_asset_expiration,
frequency=cls.trading_calendar.day,
periods_between_ends=2,
auto_close_delta=1 * cls.trading_calendar.day,
)
return cls.asset_info
# XXX: This test suite uses inconsistent data for minutely and daily bars.
@classmethod
def make_equity_minute_bar_data(cls):
cls.minute_data = make_trade_data_for_asset_info(
dates=cls.test_minutes,
asset_info=cls.asset_info,
price_start=10,
price_step_by_sid=10,
price_step_by_date=1,
volume_start=100,
volume_step_by_sid=100,
volume_step_by_date=10,
)
return cls.minute_data.items()
def minute_prices_on_tick(self, row):
return [
trades.iloc[row].close for trades in itervalues(self.minute_data)
]
def final_minute_price(self, asset):
return self.minute_data[asset.sid].loc[
self.trading_calendar.session_close(asset.end_date)
].close
def default_initialize(self):
"""
Initialize function shared between test algos.
"""
def initialize(context):
context.ordered = False
context.set_commission(PerShare(0, 0))
context.set_slippage(FixedSlippage(spread=0))
context.num_positions = []
context.cash = []
return initialize
def default_handle_data(self, assets, order_size):
"""
Handle data function shared between test algos.
"""
def handle_data(context, data):
if not context.ordered:
for asset in assets:
context.order(asset, order_size)
context.ordered = True
context.cash.append(context.portfolio.cash)
context.num_positions.append(len(context.portfolio.positions))
return handle_data
def test_minutely_delisted_equities(self):
assets = self.assets
final_prices = {
asset.sid: self.final_minute_price(asset)
for asset in assets
}
backtest_minutes = self.minute_data[0].index.tolist()
order_size = 10
capital_base = 100000
algo = self.make_algo(
initialize=self.default_initialize(),
handle_data=self.default_handle_data(assets, order_size),
sim_params=self.make_simparams(
capital_base=capital_base,
data_frequency='minute',
)
)
output = algo.run()
initial_fill_prices = self.minute_prices_on_tick(1)
cost_basis = sum(initial_fill_prices) * order_size
# Last known prices of assets that will be auto-closed.
fp0 = final_prices[0]
fp1 = final_prices[1]
initial_cash = capital_base
after_fills = initial_cash - cost_basis
after_first_auto_close = after_fills + fp0 * (order_size)
after_second_auto_close = after_first_auto_close + fp1 * (order_size)
expected_cash = [initial_cash]
expected_position_counts = [0]
# We have the rest of the first sim day, plus the second, third and
# fourth days' worth of minutes with cash spent.
expected_cash.extend([after_fills] * (389 + 390 + 390 + 390))
expected_position_counts.extend([3] * (389 + 390 + 390 + 390))
# We then have two days with the cash refunded from asset 0.
expected_cash.extend([after_first_auto_close] * (390 + 390))
expected_position_counts.extend([2] * (390 + 390))
# We then have one day with cash refunded from asset 1.
expected_cash.extend([after_second_auto_close] * 390)
expected_position_counts.extend([1] * 390)
# Check list lengths first to avoid expensive comparison
self.assertEqual(len(algo.cash), len(expected_cash))
# TODO find more efficient way to compare these lists
self.assertEqual(algo.cash, expected_cash)
self.assertEqual(
list(output['ending_cash']),
[
after_fills,
after_fills,
after_fills,
after_first_auto_close,
after_first_auto_close,
after_second_auto_close,
after_second_auto_close,
],
)
self.assertEqual(algo.num_positions, expected_position_counts)
self.assertEqual(
list(output['longs_count']),
[3, 3, 3, 2, 2, 1, 1],
)
# Check expected transactions.
# We should have a transaction of order_size shares per sid.
transactions = output['transactions']
# Note that the transactions appear on the first day rather than the
# second in minute mode, because the fills happen on the second tick of
# the backtest, which is still on the first day in minute mode.
initial_fills = transactions.iloc[0]
self.assertEqual(len(initial_fills), len(assets))
for asset, txn in zip(assets, initial_fills):
self.assertDictContainsSubset(
{
'amount': order_size,
'commission': None,
'dt': backtest_minutes[1],
'price': initial_fill_prices[asset],
'sid': asset,
},
txn,
)
# This will be a UUID.
self.assertIsInstance(txn['order_id'], str)
def transactions_for_date(date):
return transactions.iloc[self.test_days.get_loc(date)]
# We should have exactly one auto-close transaction on the close date
# of asset 0.
(first_auto_close_transaction,) = transactions_for_date(
assets[0].auto_close_date
)
self.assertEqual(
first_auto_close_transaction,
{
'amount': -order_size,
'commission': None,
'dt': algo.trading_calendar.session_close(
assets[0].auto_close_date,
),
'price': fp0,
'sid': assets[0],
'order_id': None, # Auto-close txns emit Nones for order_id.
},
)
(second_auto_close_transaction,) = transactions_for_date(
assets[1].auto_close_date
)
self.assertEqual(
second_auto_close_transaction,
{
'amount': -order_size,
'commission': None,
'dt': algo.trading_calendar.session_close(
assets[1].auto_close_date,
),
'price': fp1,
'sid': assets[1],
'order_id': None, # Auto-close txns emit Nones for order_id.
},
)
class TestOrderAfterDelist(zf.WithMakeAlgo, zf.ZiplineTestCase):
start = pd.Timestamp('2016-01-05', tz='utc')
day_1 = pd.Timestamp('2016-01-06', tz='utc')
day_4 = pd.Timestamp('2016-01-11', tz='utc')
end = pd.Timestamp('2016-01-15', tz='utc')
# FIXME: Pass a benchmark source here.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
return pd.DataFrame.from_dict(
{
# Asset whose auto close date is after its end date.
1: {
'start_date': cls.start,
'end_date': cls.day_1,
'auto_close_date': cls.day_4,
'symbol': "ASSET1",
'exchange': "TEST",
},
# Asset whose auto close date is before its end date.
2: {
'start_date': cls.start,
'end_date': cls.day_4,
'auto_close_date': cls.day_1,
'symbol': 'ASSET2',
'exchange': 'TEST',
},
},
orient='index',
)
# XXX: This suite doesn't use the data in its DataPortal; it uses a
# FakeDataPortal with different mock data.
def init_instance_fixtures(self):
super(TestOrderAfterDelist, self).init_instance_fixtures()
self.data_portal = FakeDataPortal(self.asset_finder)
@parameterized.expand([
('auto_close_after_end_date', 1),
('auto_close_before_end_date', 2),
])
def test_order_in_quiet_period(self, name, sid):
asset = self.asset_finder.retrieve_asset(sid)
algo_code = dedent("""
from zipline.api import (
sid,
order,
order_value,
order_percent,
order_target,
order_target_percent,
order_target_value
)
def initialize(context):
pass
def handle_data(context, data):
order(sid({sid}), 1)
order_value(sid({sid}), 100)
order_percent(sid({sid}), 0.5)
order_target(sid({sid}), 50)
order_target_percent(sid({sid}), 0.5)
order_target_value(sid({sid}), 50)
""").format(sid=sid)
# run algo from 1/6 to 1/7
algo = self.make_algo(
script=algo_code,
sim_params=SimulationParameters(
start_session=pd.Timestamp("2016-01-06", tz='UTC'),
                end_session=pd.Timestamp("2016-01-07", tz='UTC'),
#!/usr/bin/env python
import os
import sys
import pandas as pd
import numpy as np
import time
from sqlalchemy import create_engine
from itertools import repeat
import multiprocessing
import tqdm
import genes
import eqtls
def find_snps(
inter_df,
gene_info_df,
tissues,
output_dir,
C,
genotypes_fp,
_eqtl_project_db,
covariates_dir,
expression_dir,
pval_threshold,
maf_threshold,
fdr_threshold,
num_processes,
_db,
logger,
suppress_intermediate_files=False
):
start_time = time.time()
global eqtl_project_db
eqtl_project_db = _eqtl_project_db
global db
db = _db
enzymes = inter_df['enzyme'].drop_duplicates().tolist()
hic_libs = genes.fetch_hic_libs(db)
hic_libs = hic_libs.rename(columns={'rep_count': 'cell_line_replicates'})
inter_df = inter_df.merge(
hic_libs, how='left',
left_on=['cell_line', 'enzyme'], right_on=['library', 'enzyme'])
default_chrom = ['chr' + str(i)
for i in list(range(1, 23))] + ['X', 'Y', 'M']
chrom_list = inter_df['fragment_chr'].drop_duplicates().tolist()
chrom_list = [i for i in default_chrom if i in chrom_list]
inter_df = inter_df[inter_df['fragment_chr'].isin(default_chrom)]
inter_df = inter_df.astype({'fragment': int})
gene_info_df = gene_info_df.rename(
columns={
'name': 'gene',
'chr': 'gene_chr',
'start': 'gene_start',
'end': 'gene_end',
'fragment': 'gene_fragment',
'id': 'gene_id'})
all_snps = []
all_genes = []
all_eqtls = []
logger.write('Finding SNPs within fragments interacting with genes in...')
for chrom in sorted(chrom_list):
chrom_dir = os.path.join(output_dir, chrom)
#if os.path.exists(os.path.join(chrom_dir, 'eqtls.txt')):
# logger.write(' Warning: {} already exists. Skipping.'.format(
# os.path.join(chrom_dir, 'eqtls.txt')))
# continue
logger.write(' Chromosome {}'.format(chrom))
snp_cols = ['snp', 'variant_id', 'chr',
'locus', 'id', 'fragment', 'enzyme']
chrom_df = inter_df[inter_df['fragment_chr'] == chrom]
chrom_df = chrom_df.astype({'fragment': int,
'fragment_chr': object})
enzymes = chrom_df['enzyme'].drop_duplicates().tolist()
snp_df = []
for enzyme in enzymes:
enzyme_df = chrom_df[chrom_df['enzyme'] == enzyme]
enzyme_df = enzyme_df.merge(
gene_info_df, how='inner',
left_on=['query_chr', 'query_fragment', 'enzyme'],
right_on=['chrom', 'gene_fragment', 'enzyme'])
fragment_df = enzyme_df[
['gencode_id', 'fragment_chr', 'fragment']].drop_duplicates()
enzyme_df = enzyme_df.sort_values(by=['fragment'])
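            # Split this enzyme's interactions into 20,000-row chunks so the
            # worker pool below processes bounded batches.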
chunksize = 20000
enzyme_chunks = [enzyme_df[i:i+chunksize]
for i in range(0, enzyme_df.shape[0], chunksize)]
manager = multiprocessing.Manager()
snps = manager.list()
desc = ' * Hi-C libraries restricted with {}'.format(
enzyme)
bar_format = '{desc}: {percentage:3.0f}% |{bar}| {n_fmt}/{total_fmt} {unit}'
'''
for df in tqdm.tqdm(enzyme_chunks, desc=desc, unit='batches',
ncols=80, bar_format=bar_format):
find_gene_snps(
df,
enzyme,
snps)
'''
with multiprocessing.Pool(processes=4) as pool:
for _ in tqdm.tqdm(
pool.istarmap(
find_gene_snps,
zip(enzyme_chunks,
repeat(enzyme),
repeat(snps))
),
total=len(enzyme_chunks), desc=desc, unit='batches',
ncols=80, bar_format=bar_format):
pass
for df in snps:
df['enzyme'] = enzyme
snp_df.append(df)
if len(snp_df) == 0:
continue
snp_df = pd.concat(snp_df)
logger.verbose = False
gene_df, snp_df = filter_snp_fragments(
snp_df, logger)
snp_df.sort_values(by=['variant_id'], inplace=True)
snp_list = snp_df['variant_id'].drop_duplicates().tolist()
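        # eQTLs are mapped in batches of 2,000 SNPs, presumably to keep each
        # map_eqtls call (and its genotype queries) to a manageable size.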
batchsize = 2000
snp_batches = [snp_list[i:i + batchsize]
for i in range(0, len(snp_list), batchsize)]
chrom_eqtl_df = []
for batch_num, snp_batch in enumerate(snp_batches):
if len(snp_batches) > 1:
logger.verbose = True
logger.write(' Mapping eQTLs batch {} of {}'.format(
batch_num+1, len(snp_batches)))
logger.verbose = False
batch_gene_df = gene_df[gene_df['variant_id'].isin(snp_batch)]
eqtl_df = eqtls.map_eqtls(
batch_gene_df,
tissues,
output_dir,
C,
genotypes_fp,
num_processes,
eqtl_project_db,
covariates_dir,
expression_dir,
pval_threshold,
maf_threshold,
fdr_threshold,
logger)
if eqtl_df is None:
continue
chrom_eqtl_df.append(eqtl_df)
if len(chrom_eqtl_df) > 0:
chrom_eqtl_df = pd.concat(chrom_eqtl_df)
else:
chrom_eqtl_df = pd.DataFrame()
if not suppress_intermediate_files:
os.makedirs(chrom_dir, exist_ok=True)
snp_df.to_csv(os.path.join(chrom_dir, 'snps.txt'),
sep='\t', index=False)
gene_df.to_csv(os.path.join(chrom_dir, 'genes.txt'),
sep='\t', index=False)
chrom_eqtl_df.to_csv(os.path.join(chrom_dir, 'eqtls.txt'),
sep='\t', index=False)
all_eqtls.append(chrom_eqtl_df)
all_snps.append(snp_df)
all_genes.append(gene_df)
logger.verbose = True
if len(all_eqtls) == 0:
        snp_df = pd.DataFrame()
# Author: <NAME>
import pandas as pd
import sys
def create_means_contralateral_average(means_input_file, contralateral_means_output_file):
    means_df = pd.read_csv(means_input_file)
"""
Tests whether ColumnPropagation works
"""
from inspect import cleandoc
import pandas
from pandas import DataFrame
from mlinspect._pipeline_inspector import PipelineInspector
from mlinspect.inspections import ColumnPropagation
def test_propagation_merge():
"""
Tests whether ColumnPropagation works for joins
"""
test_code = cleandoc("""
import pandas as pd
df_a = pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c', 'cat_b'], 'B': [1, 2, 4, 5, 7]})
df_b = pd.DataFrame({'B': [1, 2, 3, 4, 5], 'C': [1, 5, 4, 11, None]})
df_merged = df_a.merge(df_b, on='B')
""")
inspector_result = PipelineInspector \
.on_pipeline_from_string(test_code) \
.add_required_inspection(ColumnPropagation(["A"], 2)) \
.execute()
inspection_results = list(inspector_result.dag_node_to_inspection_results.values())
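    # One inspection result per DAG node, in pipeline order: df_a, df_b, then
    # the merged DataFrame (see the expected frames below).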
propagation_output = inspection_results[0][ColumnPropagation(["A"], 2)]
expected_df = DataFrame([['cat_a', 1, 'cat_a'], ['cat_b', 2, 'cat_b']], columns=['A', 'B', 'mlinspect_A'])
pandas.testing.assert_frame_equal(propagation_output.reset_index(drop=True), expected_df.reset_index(drop=True))
propagation_output = inspection_results[1][ColumnPropagation(["A"], 2)]
expected_df = DataFrame([[1, 1., None], [2, 5., None]], columns=['B', 'C', 'mlinspect_A'])
pandas.testing.assert_frame_equal(propagation_output.reset_index(drop=True), expected_df.reset_index(drop=True))
propagation_output = inspection_results[2][ColumnPropagation(["A"], 2)]
expected_df = DataFrame([['cat_a', 1, 1., 'cat_a'], ['cat_b', 2, 5., 'cat_b']],
columns=['A', 'B', 'C', 'mlinspect_A'])
pandas.testing.assert_frame_equal(propagation_output.reset_index(drop=True), expected_df.reset_index(drop=True))
def test_propagation_projection():
"""
Tests whether ColumnPropagation works for projections
"""
test_code = cleandoc("""
import pandas as pd
pandas_df = pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c', 'cat_b'],
'B': [1, 2, 4, 5, 7], 'C': [2, 2, 10, 5, 7]})
pandas_df = pandas_df[['B', 'C']]
pandas_df = pandas_df[['C']]
""")
inspector_result = PipelineInspector \
.on_pipeline_from_string(test_code) \
.add_required_inspection(ColumnPropagation(["A"], 2)) \
.execute()
inspection_results = list(inspector_result.dag_node_to_inspection_results.values())
propagation_output = inspection_results[0][ColumnPropagation(["A"], 2)]
    expected_df = DataFrame([['cat_a', 1, 2, 'cat_a'], ['cat_b', 2, 2, 'cat_b']],
                            columns=['A', 'B', 'C', 'mlinspect_A'])
import pandas as pd
from functools import reduce
from pathlib import Path
def merge_benefits(cps, year, data_path, export=True):
"""
Merge the benefit variables onto the CPS files. TaxData use the
following variables imputed by C-TAM:
Medicaid: MedicaidX
Medicare: MedicareX
Veterans Benefits: vb_impute
SNAP: SNAP_Imputation
SSI: ssi_impute
Social Security: ss_val (renamed to ss_impute)
Housing assistance: housing_impute
TANF: tanf_impute
Unemployment Insurance: UI_impute
WIC: (women, children, infants): wic_impute (renamed wic_women,
wic_children, and wic_infants)
"""
def read_ben(path_prefix, usecols):
path = Path(data_path, path_prefix + str(year) + ".csv")
return pd.read_csv(path, usecols=usecols)
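    # e.g. read_ben("medicaid", [...]) loads <data_path>/medicaid<year>.csv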
start_len = len(cps)
# read in benefit imputations
mcaid = read_ben("medicaid", ["MedicaidX", "peridnum"])
mcare = read_ben("medicare", ["MedicareX", "peridnum"])
vb = read_ben("VB_Imputation", ["vb_impute", "peridnum"])
snap = read_ben("SNAP_Imputation_", ["h_seq", "snap_impute"])
ssi = read_ben("SSI_Imputation", ["ssi_impute", "peridnum"])
ss = read_ben("SS_augmentation_", ["ss_val", "peridnum"]).rename(
columns={"ss_val": "ss_impute"}
)
housing = read_ben(
"Housing_Imputation_logreg_", ["fh_seq", "ffpos", "housing_impute"]
)
tanf = read_ben("TANF_Imputation_", ["peridnum", "tanf_impute"])
# drop duplicated people in tanf
tanf.drop_duplicates("peridnum", inplace=True)
ui = read_ben("UI_imputation_logreg_", ["peridnum", "UI_impute"])
WIC_STR = "WIC_imputation_{}_logreg_"
wic_children = read_ben(
WIC_STR.format("children"), ["peridnum", "WIC_impute"]
).rename(columns={"WIC_impute": "wic_children"})
wic_infants = read_ben(
WIC_STR.format("infants"), ["peridnum", "WIC_impute"]
).rename(columns={"WIC_impute": "wic_infants"})
wic_women = read_ben(WIC_STR.format("women"), ["peridnum", "WIC_impute"]).rename(
columns={"WIC_impute": "wic_women"}
)
# combine all WIC imputation into one variable
wic = reduce(
lambda left, right: pd.merge(left, right, on="peridnum"),
[wic_children, wic_infants, wic_women],
)
wic["wic_impute"] = wic[["wic_women", "wic_infants", "wic_children"]].sum(axis=1)
# merge housing and snap
cps_merged = cps.merge(housing, on=["fh_seq", "ffpos"], how="left")
cps_merged = cps_merged.merge(snap, on="h_seq", how="left")
# merge other variables
peridnum_dfs = [cps_merged, vb, ssi, ss, tanf, ui, wic, mcaid, mcare]
cps_merged = reduce(
        lambda left, right: pd.merge(left, right, on="peridnum", how="left"),
        peridnum_dfs,
    )
import pandas as pd
from skimage.measure import regionprops
from .compute_fsd_features import compute_fsd_features
from .compute_gradient_features import compute_gradient_features
from .compute_haralick_features import compute_haralick_features
from .compute_intensity_features import compute_intensity_features
from .compute_morphometry_features import compute_morphometry_features
from ..segmentation import label as htk_label
def compute_nuclei_features(im_label, im_nuclei, im_cytoplasm=None,
fsd_bnd_pts=128, fsd_freq_bins=6, cyto_width=8,
num_glcm_levels=32,
morphometry_features_flag=True,
fsd_features_flag=True,
intensity_features_flag=True,
gradient_features_flag=True,
haralick_features_flag=True
):
"""
Calculates features for nuclei classification
Parameters
----------
im_label : array_like
A labeled mask image wherein intensity of a pixel is the ID of the
object it belongs to. Non-zero values are considered to be foreground
objects.
im_nuclei : array_like
Nucleus channel intensity image.
im_cytoplasm : array_like
Cytoplasm channel intensity image.
fsd_bnd_pts : int, optional
Number of points for boundary resampling to calculate fourier
descriptors. Default value = 128.
fsd_freq_bins : int, optional
Number of frequency bins for calculating FSDs. Default value = 6.
cyto_width : float, optional
Estimated width of the ring-like neighborhood region around each
nucleus to be considered as its cytoplasm. Default value = 8.
num_glcm_levels: int, optional
        An integer specifying the number of gray levels. For example, if
`NumLevels` is 32, the intensity values of the input image are
scaled so they are integers between 0 and 31. The number of gray
levels determines the size of the gray-level co-occurrence matrix.
Default: 32
morphometry_features_flag : bool, optional
A flag that can be used to specify whether or not to compute
morphometry (size and shape) features.
See histomicstk.features.compute_morphometry_features for more details.
fsd_features_flag : bool, optional
A flag that can be used to specify whether or not to compute
        Fourier shape descriptor (FSD) features.
See `histomicstk.features.compute_fsd_features` for more details.
intensity_features_flag : bool, optional
A flag that can be used to specify whether or not to compute
intensity features from the nucleus and cytoplasm channels.
        See `histomicstk.features.compute_intensity_features` for more details.
gradient_features_flag : bool, optional
A flag that can be used to specify whether or not to compute
gradient/edge features from intensity and cytoplasm channels.
See `histomicstk.features.compute_gradient_features` for more details.
haralick_features_flag : bool, optional
A flag that can be used to specify whether or not to compute
haralick features from intensity and cytoplasm channels.
See `histomicstk.features.compute_haralick_features` for more details.
Returns
-------
fdata : pandas.DataFrame
A pandas data frame containing the features listed below for each
object/label
Notes
-----
List of features computed by this function
Morphometry (size and shape) features of the nuclei
See histomicstk.features.compute_morphometry_features for more details.
Feature names prefixed by *Size.* or *Shape.*.
Fourier shape descriptor features
See `histomicstk.features.compute_fsd_features` for more details.
Feature names are prefixed by *FSD*.
Intensity features for the nucleus and cytoplasm channels
        See `histomicstk.features.compute_intensity_features` for more details.
Feature names are prefixed by *Nucleus.Intensity.* for nucleus features
and *Cytoplasm.Intensity.* for cytoplasm features.
Gradient/edge features for the nucleus and cytoplasm channels
See `histomicstk.features.compute_gradient_features` for more details.
Feature names are prefixed by *Nucleus.Gradient.* for nucleus features
and *Cytoplasm.Gradient.* for cytoplasm features.
Haralick features for the nucleus and cytoplasm channels
See `histomicstk.features.compute_haralick_features` for more details.
Feature names are prefixed by *Nucleus.Haralick.* for nucleus features
and *Cytoplasm.Haralick.* for cytoplasm features.
See Also
--------
histomicstk.features.compute_morphometry_features,
histomicstk.features.compute_fsd_features,
histomicstk.features.compute_intensity_features,
histomicstk.features.compute_gradient_features,
histomicstk.features.compute_haralick_features
"""
feature_list = []
# get the number of objects in im_label
nuclei_props = regionprops(im_label)
# compute cytoplasm mask
if im_cytoplasm is not None:
cyto_mask = htk_label.dilate_xor(im_label, neigh_width=cyto_width)
cytoplasm_props = regionprops(cyto_mask)
# compute morphometry features
if morphometry_features_flag:
fmorph = compute_morphometry_features(im_label, rprops=nuclei_props)
feature_list.append(fmorph)
# compute FSD features
if fsd_features_flag:
ffsd = compute_fsd_features(im_label, fsd_bnd_pts, fsd_freq_bins,
cyto_width, rprops=nuclei_props)
feature_list.append(ffsd)
# compute nuclei intensity features
if intensity_features_flag:
fint_nuclei = compute_intensity_features(im_label, im_nuclei,
rprops=nuclei_props)
fint_nuclei.columns = ['Nucleus.' + col
for col in fint_nuclei.columns]
feature_list.append(fint_nuclei)
# compute cytoplasm intensity features
if intensity_features_flag and im_cytoplasm is not None:
fint_cytoplasm = compute_intensity_features(cyto_mask, im_cytoplasm,
rprops=cytoplasm_props)
fint_cytoplasm.columns = ['Cytoplasm.' + col
for col in fint_cytoplasm.columns]
feature_list.append(fint_cytoplasm)
# compute nuclei gradient features
if gradient_features_flag:
fgrad_nuclei = compute_gradient_features(im_label, im_nuclei,
rprops=nuclei_props)
fgrad_nuclei.columns = ['Nucleus.' + col
for col in fgrad_nuclei.columns]
feature_list.append(fgrad_nuclei)
# compute cytoplasm gradient features
if gradient_features_flag and im_cytoplasm is not None:
fgrad_cytoplasm = compute_gradient_features(cyto_mask, im_cytoplasm,
rprops=cytoplasm_props)
fgrad_cytoplasm.columns = ['Cytoplasm.' + col
for col in fgrad_cytoplasm.columns]
feature_list.append(fgrad_cytoplasm)
# compute nuclei haralick features
if haralick_features_flag:
fharalick_nuclei = compute_haralick_features(
im_label, im_nuclei,
num_levels=num_glcm_levels,
rprops=nuclei_props
)
fharalick_nuclei.columns = ['Nucleus.' + col
for col in fharalick_nuclei.columns]
feature_list.append(fharalick_nuclei)
# compute cytoplasm haralick features
if haralick_features_flag and im_cytoplasm is not None:
fharalick_cytoplasm = compute_haralick_features(
cyto_mask, im_cytoplasm,
num_levels=num_glcm_levels,
rprops=cytoplasm_props
)
fharalick_cytoplasm.columns = ['Cytoplasm.' + col
for col in fharalick_cytoplasm.columns]
feature_list.append(fharalick_cytoplasm)
# Merge all features
    fdata = pd.concat(feature_list, axis=1)

    return fdata
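# Hedged usage sketch (not part of the original module): the label/intensity
# images below are synthetic stand-ins so the call signature is clear.
# >>> import numpy as np
# >>> im_label = np.zeros((128, 128), dtype=int); im_label[40:60, 40:60] = 1
# >>> im_nuclei = np.random.rand(128, 128)
# >>> fdata = compute_nuclei_features(im_label, im_nuclei, fsd_features_flag=False)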
### mkwc_util.py : Contains utilities for extracting and processing data from the MKWC website
### Author : <NAME>
### Date : 6/1/2021
import os
import numpy as np
import pandas as pd
from . import times
### NOTE: Seeing data is STORED by UT, with data in HST
### CFHT data is STORED by HST, with data in HST
mkwc_url = 'http://mkwc.ifa.hawaii.edu/'
year_url = mkwc_url+'archive/wx/cfht/cfht-wx.{}.dat'
# Cutoff for daily-vs-yearly CFHT files on MKWC website
cfht_cutoff = 55927.41666667 # 01/01/2012 12:00 am HST
# Time columns from MKWC data
time_cols = ['year', 'month', 'day', 'hour', 'minute', 'second']
# Data-specific fields
data_types = {
'cfht': {
'web_pat': mkwc_url+'archive/wx/cfht/indiv-days/cfht-wx.{}.dat',
'file_pat': "{}cfht-wx.{}.dat",
'data_cols': ['wind_speed', 'wind_direction', 'temperature',
'relative_humidity', 'pressure'],
},
'mass': {
'web_pat': mkwc_url+'current/seeing/mass/{}.mass.dat',
'file_pat': '{}mass/{}.mass.dat',
'data_cols': ['mass'],
},
'dimm': {
'web_pat': mkwc_url+'current/seeing/dimm/{}.dimm.dat',
'file_pat': '{}dimm/{}.dimm.dat',
'data_cols': ['dimm'],
},
'masspro': {
'web_pat': mkwc_url+'current/seeing/masspro/{}.masspro.dat',
'file_pat': '{}masspro/{}.masspro.dat',
'data_cols': ['masspro_half', 'masspro_1', 'masspro_2',
'masspro_4', 'masspro_8', 'masspro_16', 'masspro'],
},
}
# Mix and match data & time columns
for dtype in data_types:
# CFHT files don't have seconds
tcols = time_cols if dtype != 'cfht' else time_cols[:-1]
# Format web columns
data_types[dtype]['web_cols'] = tcols+data_types[dtype]['data_cols']
# Format file columns
data_types[dtype]['cols'] = [dtype+'_mjd']+data_types[dtype]['data_cols']
# Different file storage timezones
data_types[dtype]['file_zone'] = 'hst' if dtype=='cfht' else 'utc'
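# Illustrative result of the loop above (values follow directly from the
# definitions; shown only for clarity):
# data_types['mass']['web_cols'] -> ['year', 'month', 'day', 'hour', 'minute', 'second', 'mass']
# data_types['mass']['cols'] -> ['mass_mjd', 'mass']
# data_types['cfht']['cols'] -> ['cfht_mjd', 'wind_speed', 'wind_direction',
#                                'temperature', 'relative_humidity', 'pressure']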
#############################
######### Functions #########
#############################
def cfht_from_year(datestrings, year):
"""
Gets pre-2012 MKWC data from the year-long file instead of the by-date files.
datestrings: dates to pull data from, as {yyyymmdd} strings
year: the year to pull datestring data from
returns: dataframe with cfht data from requested dates
"""
# Get year-long file URL
url = year_url.format(year)
# Read in data
try:
web_cols = data_types['cfht']['web_cols']
year_data = pd.read_csv(url, delim_whitespace=True, header=None,
names=web_cols, usecols=range(len(web_cols)))
except: # No data, return blank
return pd.DataFrame(columns=data_types['cfht']['cols'])
# Full dataset
all_data = [pd.DataFrame(columns=data_types['cfht']['cols'])]
# Slice up dataframe
for ds in datestrings:
month, day = int(ds[4:6]), int(ds[6:])
# Get data by month and day
df = year_data.loc[(year_data.month==month) & (year_data.day==day)].copy()
# Format columns
if not df.empty:
format_columns(df, 'cfht')
# Add to full dataset
all_data.append(df)
    return pd.concat(all_data)
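# Hedged usage sketch (dates are arbitrary examples, not taken from the original file):
# pre2012_cfht = cfht_from_year(['20110101', '20110102'], 2011)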
# Generate max, mean, and std from computed feature value comparison
from __future__ import print_function
import csv
import pandas as pd
# input_file = 'output.csv'
# output_file = 'validation.csv'
input_file = 'output1.csv'
output_file = 'validation1.csv'
df1 = pd.read_csv(input_file)
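# Minimal sketch of the intended summary, inferred from the header comment
# ("max, mean, and std"); the exact aggregation used in the original script is
# not recoverable, so this is an assumption.
summary = df1.describe().loc[['max', 'mean', 'std']]
summary.to_csv(output_file)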
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split
# load the data sets
def read_data(base_info_path,
annual_report_info_path,
tax_info_path,
change_info_path,
news_info_path,
other_info_path,
entprise_info_path,
):
    base_info = pd.read_csv(base_info_path)  # basic company information
annual_report_info = pd.read_csv(annual_report_info_path)
    tax_info = pd.read_csv(tax_info_path)
change_info = pd.read_csv(change_info_path)
news_info = pd.read_csv(news_info_path)
other_info = pd.read_csv(other_info_path)
    entprise_info = pd.read_csv(entprise_info_path)
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 18 11:31:56 2021
@author: nguy0936
I increased the number of layers from conv1 to embedding to see if more layers
could result in better performance. I did this for only set 1 - Hallett
"""
# load packages
import pandas as pd
import umap
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import xgboost as xgb
from xgboost import XGBClassifier
from xgboost import cv
from sklearn.metrics import roc_auc_score
from hyperopt import STATUS_OK, Trials, fmin, hp, tpe
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_curve
##========== load low-high level features
mdir1 = 'R:\\CMPH-Windfarm Field Study\\Duc Phuc Nguyen\\4. Machine Listening\Data set\\set1\\'
mdir2 = 'R:\\CMPH-Windfarm Field Study\\Duc Phuc Nguyen\\4. Machine Listening\Data set\\set2\\'
def load_feature(mdir): # function to load data
conv1 = pd.read_csv(mdir + 'result_conv1.csv', header=None) # conv1
conv2 = pd.read_csv(mdir + 'result_conv2.csv', header=None) # conv2
conv3 = pd.read_csv(mdir + 'result_conv3.csv', header=None) # conv3
conv4 = pd.read_csv(mdir + 'result_conv4.csv', header=None) # conv4
embedding = pd.read_csv(mdir + 'result_embedding.csv', header=None) # embedding
X_hand = pd.read_csv(mdir + 'X_hand.csv') # bias features
X_hand = X_hand.fillna(0)
Y = pd.read_csv(mdir + 'Y.csv', header=None) # score
y = Y
y[:]=np.where(y<3,0,1)
# combine data
lowd_conv1 = PCA(n_components=10).fit_transform(conv1)
lowd_conv2 = PCA(n_components=10).fit_transform(conv2)
lowd_embedding = PCA(n_components=20).fit_transform(embedding)
lowd_frames = [pd.DataFrame(lowd_conv1), pd.DataFrame(lowd_conv2), pd.DataFrame(lowd_embedding)]
    lowd_df = pd.concat(lowd_frames, axis=1)
# Import python modules
import os, sys
# data handling libraries
import pandas as pd
import numpy as np
import pickle
import json
import dask
from multiprocessing import Pool
# graphical control libraries
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
# shape and layer libraries
import fiona
from shapely.geometry import MultiPolygon, shape, point, box
from descartes import PolygonPatch
from matplotlib.collections import PatchCollection
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable
import geopandas as gpd
# data wrangling libraries
import ftplib, urllib.request, wget, bz2
from bs4 import BeautifulSoup as bs
class ogh_meta:
"""
The json file that describes the Gridded climate data products
"""
def __init__(self):
self.__meta_data = dict(json.load(open('ogh_meta.json','rb')))
# key-value retrieval
def __getitem__(self, key):
return(self.__meta_data[key])
# key list
def keys(self):
return(self.__meta_data.keys())
# value list
def values(self):
return(self.__meta_data.values())
# print('Version '+datetime.fromtimestamp(os.path.getmtime('ogh.py')).strftime('%Y-%m-%d %H:%M:%S')+' jp')
def saveDictOfDf(outfilename, dictionaryObject):
    # write a dictionary of dataframes to a file using pickle
with open(outfilename, 'wb') as f:
pickle.dump(dictionaryObject, f)
f.close()
def readDictOfDf(infilename):
# read a dictionary of dataframes from a json file using pickle
with open(infilename, 'rb') as f:
dictionaryObject = pickle.load(f)
f.close()
return(dictionaryObject)
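# Round-trip usage sketch for the two helpers above (the file name is arbitrary):
# saveDictOfDf('dict_of_frames.pickle', {'demo': pd.DataFrame({'a': [1, 2]})})
# restored = readDictOfDf('dict_of_frames.pickle')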
def reprojShapefile(sourcepath, newprojdictionary={'proj':'longlat', 'ellps':'WGS84', 'datum':'WGS84'}, outpath=None):
"""
sourcepath: (dir) the path to the .shp file
newprojdictionary: (dict) the new projection definition in the form of a dictionary (default provided)
outpath: (dir) the output path for the new shapefile
"""
# if outpath is none, treat the reprojection as a file replacement
if isinstance(outpath, type(None)):
outpath = sourcepath
shpfile = gpd.GeoDataFrame.from_file(sourcepath)
shpfile = shpfile.to_crs(newprojdictionary)
shpfile.to_file(outpath)
def getFullShape(shapefile):
"""
Generate a MultiPolygon to represent each shape/polygon within the shapefile
shapefile: (dir) a path to the ESRI .shp shapefile
"""
shp = fiona.open(shapefile)
mp = [shape(pol['geometry']) for pol in shp]
mp = MultiPolygon(mp)
shp.close()
return(mp)
def getShapeBbox(polygon):
"""
Generate a geometric box to represent the bounding box for the polygon, shapefile connection, or MultiPolygon
polygon: (geometry) a geometric polygon, MultiPolygon, or shapefile connection
"""
# identify the cardinal bounds
minx, miny, maxx, maxy = polygon.bounds
bbox = box(minx, miny, maxx, maxy, ccw=True)
return(bbox)
def readShapefileTable(shapefile):
"""
read in the datatable captured within the shapefile properties
shapefile: (dir) a path to the ESRI .shp shapefile
"""
#cent_df = gpd.read_file(shapefile)
shp = fiona.open(shapefile)
centroid = [eachpol['properties'] for eachpol in shp]
cent_df = pd.DataFrame.from_dict(centroid, orient='columns')
shp.close()
return(cent_df)
def filterPointsinShape(shape, points_lat, points_lon, points_elev=None, buffer_distance=0.06, buffer_resolution=16,
labels=['LAT', 'LONG_', 'ELEV']):
"""
filter for datafiles that can be used
shape: (geometry) a geometric polygon or MultiPolygon
points_lat: (series) a series of latitude points in WGS84 projection
points_lon: (series) a series of longitude points in WGS84 projection
points_elev: (series) a series of elevation points in meters; optional - default is None
buffer_distance: (float64) a numerical multiplier to increase the geodetic boundary area
buffer_resolution: (float64) the increments between geodetic longlat degrees
labels: (list) a list of preferred labels for latitude, longitude, and elevation
"""
# add buffer region
region = shape.buffer(buffer_distance, resolution=buffer_resolution)
# construct points_elev if null
if isinstance(points_elev, type(None)):
points_elev=np.repeat(np.nan, len(points_lon))
# Intersection each coordinate with the region
limited_list = []
for lon, lat, elev in zip(points_lon, points_lat, points_elev):
gpoint = point.Point(lon, lat)
if gpoint.intersects(region):
limited_list.append([lat, lon, elev])
maptable = pd.DataFrame.from_records(limited_list, columns=labels)
## dask approach ##
#intersection=[]
#for lon, lat, elev in zip(points_lon, points_lat, points_elev):
# gpoint = point.Point(lon, lat)
# intersection.append(dask.delayed(gpoint.intersects(region)))
# limited_list.append([intersection, lat, lon, elev])
# convert to dataframe
#maptable = pd.DataFrame({labels[0]:points_lat, labels[1]:points_lon, labels[2]:points_elev}
# .loc[dask.compute(intersection)[0],:]
# .reset_index(drop=True)
return(maptable)
def scrapeurl(url, startswith=None, hasKeyword=None):
"""
scrape the gridded datafiles from a url of interest
url: (str) the web folder path to be scraped for hyperlink references
startswith: (str) the starting keywords for a webpage element; default is None
hasKeyword: (str) keywords represented in a webpage element; default is None
"""
# grab the html of the url, and prettify the html structure
    page = urllib.request.urlopen(url).read()
page_soup = bs(page, 'lxml')
page_soup.prettify()
# loop through and filter the hyperlinked lines
if pd.isnull(startswith):
temp = [anchor['href'] for anchor in page_soup.findAll('a', href=True) if hasKeyword in anchor['href']]
else:
temp = [anchor['href'] for anchor in page_soup.findAll('a', href=True) if anchor['href'].startswith(startswith)]
# convert to dataframe then separate the lon and lat as float coordinate values
temp = pd.DataFrame(temp, columns = ['filenames'])
return(temp)
def treatgeoself(shapefile, NAmer, folder_path=os.getcwd(), outfilename='mappingfile.csv', buffer_distance=0.06):
"""
TreatGeoSelf to some [data] lovin'!
shapefile: (dir) the path to an ESRI shapefile for the region of interest
Namer: (dir) the path to an ESRI shapefile, which has each 1/16th coordinate and elevation information from a DEM
folder_path: (dir) the destination folder path for the mappingfile output; default is the current working directory
outfilename: (str) the name of the output file; default name is 'mappingfile.csv'
buffer_distance: (float64) the multiplier to be applied for increasing the geodetic boundary area; default is 0.06
"""
# conform projections to longlat values in WGS84
reprojShapefile(shapefile, newprojdictionary={'proj':'longlat', 'ellps':'WGS84', 'datum':'WGS84'}, outpath=None)
# read shapefile into a multipolygon shape-object
shape_mp = getFullShape(shapefile)
# read in the North American continental DEM points for the station elevations
NAmer_datapoints = readShapefileTable(NAmer).rename(columns={'Lat':'LAT','Long':'LONG_','Elev':'ELEV'})
# generate maptable
maptable = filterPointsinShape(shape_mp,
points_lat=NAmer_datapoints.LAT,
points_lon=NAmer_datapoints.LONG_,
points_elev=NAmer_datapoints.ELEV,
buffer_distance=buffer_distance, buffer_resolution=16, labels=['LAT', 'LONG_', 'ELEV'])
maptable.reset_index(inplace=True)
maptable = maptable.rename(columns={"index":"FID"})
print(maptable.shape)
print(maptable.tail())
# print the mappingfile
mappingfile=os.path.join(folder_path, outfilename)
maptable.to_csv(mappingfile, sep=',', header=True, index=False)
return(mappingfile)
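# Hedged usage sketch (shapefile paths are placeholders, not from the original):
# mappingfile = treatgeoself(shapefile='watershed.shp',
#                            NAmer='NAmer_dem_list.shp',
#                            folder_path=os.getcwd(),
#                            outfilename='mappingfile.csv',
#                            buffer_distance=0.06)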
def mapContentFolder(resid):
"""
map the content folder within HydroShare
resid: (str) a string hash that represents the hydroshare resource that has been migrated
"""
path = os.path.join('/home/jovyan/work/notebooks/data', str(resid), str(resid), 'data/contents')
return(path)
# ### CIG (DHSVM)-oriented functions
def compile_bc_Livneh2013_locations(maptable):
"""
compile a list of file URLs for bias corrected Livneh et al. 2013 (CIG)
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/Livneh/bcLivneh_WWA_2013/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
def compile_Livneh2013_locations(maptable):
"""
compile a list of file URLs for Livneh et al. 2013 (CIG)
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://www.cses.washington.edu/rocinante/Livneh/Livneh_WWA_2013/forcs_dhsvm/',basename]
locations.append(''.join(url))
return(locations)
### VIC-oriented functions
def compile_VICASCII_Livneh2015_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2015 VIC.ASCII outputs
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Fluxes_Livneh_NAmerExt_15Oct2014', str(row['LAT']), str(row['LONG_'])])
url=["ftp://192.168.127.12/pub/dcp/archive/OBS/livneh2014.1_16deg/VIC.ASCII/latitude.",str(row['LAT']),'/',loci,'.bz2']
locations.append(''.join(url))
return(locations)
def compile_VICASCII_Livneh2013_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2013 VIC.ASCII outputs for the USA
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
# identify the subfolder blocks
blocks = scrape_domain(domain='livnehpublicstorage.colorado.edu',
subdomain='/public/Livneh.2013.CONUS.Dataset/Fluxes.asc.v.1.2.1915.2011.bz2/',
startswith='fluxes')
# map each coordinate to the subfolder
maptable = mapToBlock(maptable, blocks)
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['VIC_fluxes_Livneh_CONUSExt_v.1.2_2013', str(row['LAT']), str(row['LONG_'])])
url='/'.join(["ftp://livnehpublicstorage.colorado.edu/public/Livneh.2013.CONUS.Dataset/Fluxes.asc.v.1.2.1915.2011.bz2", str(row['blocks']), loci+".bz2"])
locations.append(url)
return(locations)
### Climate (Meteorological observations)-oriented functions
def canadabox_bc():
"""
Establish the Canadian (north of the US bounding boxes) Columbia river basin bounding box
"""
# left, bottom, right top
return(box(-138.0, 49.0, -114.0, 53.0))
def scrape_domain(domain, subdomain, startswith=None):
"""
scrape the gridded datafiles from a url of interest
domain: (str) the web folder path
subdomain: (str) the subfolder path to be scraped for hyperlink references
startswith: (str) the starting keywords for a webpage element; default is None
"""
# connect to domain
ftp = ftplib.FTP(domain)
ftp.login()
ftp.cwd(subdomain)
# scrape for data directories
tmp = [dirname for dirname in ftp.nlst() if dirname.startswith(startswith)]
geodf = pd.DataFrame(tmp, columns=['dirname'])
# conform to bounding box format
tmp = geodf['dirname'].apply(lambda x: x.split('.')[1:])
tmp = tmp.apply(lambda x: list(map(float,x)) if len(x)>2 else x)
# assemble the boxes
geodf['bbox']=tmp.apply(lambda x: box(x[0]*-1, x[2]-1, x[1]*-1, x[3]) if len(x)>2 else canadabox_bc())
return(geodf)
def mapToBlock(df_points, df_regions):
for index, eachblock in df_regions.iterrows():
for ind, row in df_points.iterrows():
if point.Point(row['LONG_'], row['LAT']).intersects(eachblock['bbox']):
df_points.loc[ind, 'blocks'] = str(eachblock['dirname'])
return(df_points)
def compile_dailyMET_Livneh2013_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2013 Daily Meteorology data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
# identify the subfolder blocks
blocks = scrape_domain(domain='livnehpublicstorage.colorado.edu',
subdomain='/public/Livneh.2013.CONUS.Dataset/Meteorology.asc.v.1.2.1915.2011.bz2/',
startswith='data')
# map each coordinate to the subfolder
maptable = mapToBlock(maptable, blocks)
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Meteorology_Livneh_CONUSExt_v.1.2_2013', str(row['LAT']), str(row['LONG_'])])
url='/'.join(["ftp://livnehpublicstorage.colorado.edu/public/Livneh.2013.CONUS.Dataset/Meteorology.asc.v.1.2.1915.2011.bz2", str(row['blocks']), loci+".bz2"])
locations.append(url)
return(locations)
def compile_dailyMET_Livneh2015_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2015 Daily Meteorology data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Meteorology_Livneh_NAmerExt_15Oct2014', str(row['LAT']), str(row['LONG_'])])
url=["ftp://192.168.127.12/pub/dcp/archive/OBS/livneh2014.1_16deg/ascii/daily/latitude.", str(row['LAT']),"/",loci,".bz2"]
locations.append(''.join(url))
return(locations)
# ### WRF-oriented functions
def compile_wrfnnrp_raw_Salathe2014_locations(maptable):
"""
compile a list of file URLs for Salathe et al., 2014 raw WRF NNRP data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/WRF/NNRP/vic_16d/WWA_1950_2010/raw/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
def compile_wrfnnrp_bc_Salathe2014_locations(maptable):
"""
compile a list of file URLs for the Salathe et al., 2014 bias corrected WRF NNRP data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/WRF/NNRP/vic_16d/WWA_1950_2010/bc/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
# ## Data file migration functions
def ensure_dir(f):
"""
check if the destination folder directory exists; if not, create it and set it as the working directory
f: (dir) the directory to create and/or set as working directory
"""
if not os.path.exists(f):
os.makedirs(f)
os.chdir(f)
def wget_download(listofinterest):
"""
Download files from an http domain
listofinterest: (list) a list of urls to request
"""
# check and download each location point, if it doesn't already exist in the download directory
for fileurl in listofinterest:
basename = os.path.basename(fileurl)
try:
ping = urllib.request.urlopen(fileurl)
if ping.getcode()!=404:
wget.download(fileurl)
print('downloaded: ' + basename)
except:
print('File does not exist at this URL: ' + basename)
# Download the files to the subdirectory
def wget_download_one(fileurl):
"""
Download a file from an http domain
fileurl: (url) a url to request
"""
# check and download each location point, if it doesn't already exist in the download directory
basename=os.path.basename(fileurl)
# if it exists, remove for new download (overwrite mode)
if os.path.isfile(basename):
os.remove(basename)
try:
ping = urllib.request.urlopen(fileurl)
if ping.getcode()!=404:
wget.download(fileurl)
print('downloaded: ' + basename)
except:
print('File does not exist at this URL: ' + basename)
def wget_download_p(listofinterest, nworkers=20):
"""
Download files from an http domain in parallel
listofinterest: (list) a list of urls to request
nworkers: (int) the number of processors to distribute tasks; default is 10
"""
pool = Pool(int(nworkers))
pool.map(wget_download_one, listofinterest)
pool.close()
pool.terminate()
def ftp_download(listofinterest):
"""
Download and decompress files from an ftp domain
listofinterest: (list) a list of urls to request
"""
for loci in listofinterest:
# establish path info
fileurl=loci.replace('ftp://','') # loci is already the url with the domain already appended
ipaddress=fileurl.split('/',1)[0] # ip address
path=os.path.dirname(fileurl.split('/',1)[1]) # folder path
filename=os.path.basename(fileurl) # filename
# download the file from the ftp server
ftp=ftplib.FTP(ipaddress)
ftp.login()
ftp.cwd(path)
try:
ftp.retrbinary("RETR " + filename ,open(filename, 'wb').write)
ftp.close()
# decompress the file
decompbz2(filename)
except:
os.remove(filename)
print('File does not exist at this URL: '+fileurl)
def ftp_download_one(loci):
"""
Download and decompress a file from an ftp domain
loci: (url) a url to request
"""
# establish path info
fileurl=loci.replace('ftp://','') # loci is already the url with the domain already appended
ipaddress=fileurl.split('/',1)[0] # ip address
path=os.path.dirname(fileurl.split('/',1)[1]) # folder path
filename=os.path.basename(fileurl) # filename
# download the file from the ftp server
ftp=ftplib.FTP(ipaddress)
ftp.login()
ftp.cwd(path)
try:
ftp.retrbinary("RETR " + filename ,open(filename, 'wb').write)
ftp.close()
# decompress the file
decompbz2(filename)
except:
os.remove(filename)
print('File does not exist at this URL: '+fileurl)
def ftp_download_p(listofinterest, nworkers=5):
"""
Download and decompress files from an ftp domain in parallel
listofinterest: (list) a list of urls to request
nworkers: (int) the number of processors to distribute tasks; default is 5
"""
pool = Pool(int(nworkers))
pool.map(ftp_download_one, listofinterest)
pool.close()
pool.terminate()
def decompbz2(filename):
"""
Extract a file from a bz2 file of the same name, then remove the bz2 file
filename: (dir) the file path for a bz2 compressed file
"""
with open(filename.split(".bz2",1)[0], 'wb') as new_file, open(filename, 'rb') as zipfile:
decompressor = bz2.BZ2Decompressor()
for data in iter(lambda : zipfile.read(100 * 1024), b''):
new_file.write(decompressor.decompress(data))
os.remove(filename)
zipfile.close()
new_file.close()
print(os.path.splitext(filename)[0] + ' unzipped')
def catalogfiles(folderpath):
"""
make a catalog of the gridded files within a folderpath
folderpath: (dir) the folder of files to be catalogged, which have LAT and LONG_ as the last two filename features
"""
# read in downloaded files
temp = [eachfile for eachfile in os.listdir(folderpath) if not os.path.isdir(eachfile)]
if len(temp)==0:
# no files were available; setting default catalog output structure
catalog = pd.DataFrame([], columns=['filenames','LAT','LONG_'])
else:
# create the catalog dataframe and extract the filename components
catalog = pd.DataFrame(temp, columns=['filenames'])
catalog[['LAT','LONG_']] = catalog['filenames'].apply(lambda x: pd.Series(str(x).rsplit('_',2))[1:3]).astype(float)
# convert the filenames column to a filepath
catalog['filenames'] = catalog['filenames'].apply(lambda x: os.path.join(folderpath, x))
return(catalog)
def addCatalogToMap(outfilepath, maptable, folderpath, catalog_label):
"""
Update the mappingfile with a new column, a vector of filepaths for the downloaded files
outfilepath: (dir) the path for the output file
maptable: (dataframe) a dataframe containing the FID, LAT, LONG_, and ELEV information
folderpath: (dir) the folder of files to be catalogged, which have LAT and LONG_ as the last two filename features
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# assert catalog_label as a string-object
catalog_label = str(catalog_label)
# catalog the folder directory
catalog = catalogfiles(folderpath).rename(columns={'filenames':catalog_label})
# drop existing column
if catalog_label in maptable.columns:
maptable = maptable.drop(labels=catalog_label, axis=1)
# update with a vector for the catalog of files
maptable = maptable.merge(catalog, on=['LAT','LONG_'], how='left')
# remove blocks, if they were needed
if 'blocks' in maptable.columns:
maptable = maptable.drop(labels=['blocks'], axis=1)
# write the updated mappingfile
maptable.to_csv(outfilepath, header=True, index=False)
# Wrapper scripts
def getDailyMET_livneh2013(homedir, mappingfile, subdir='livneh2013/Daily_MET_1915_2011/raw', catalog_label='dailymet_livneh2013'):
"""
Get the Livneh el al., 2013 Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate DailyMET livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_dailyMET_Livneh2013_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyMET_livneh2015(homedir, mappingfile, subdir='livneh2015/Daily_MET_1950_2013/raw', catalog_label='dailymet_livneh2015'):
"""
Get the Livneh el al., 2015 Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate Daily MET livneh 2015 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_dailyMET_Livneh2015_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyMET_bcLivneh2013(homedir, mappingfile, subdir='livneh2013/Daily_MET_1915_2011/bc', catalog_label='dailymet_bclivneh2013'):
"""
Get the Livneh el al., 2013 bias corrected Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate baseline_corrected livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_bc_Livneh2013_locations(maptable)
# download the files
wget_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyVIC_livneh2013(homedir, mappingfile, subdir='livneh2013/Daily_VIC_1915_2011', catalog_label='dailyvic_livneh2013'):
"""
Get the Livneh el al., 2013 Daily VIC files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# FIRST RUN
# check and generate VIC_ASCII Flux model livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points for USA
locations = compile_VICASCII_Livneh2013_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyVIC_livneh2015(homedir, mappingfile, subdir='livneh2015/Daily_VIC_1950_2013', catalog_label='dailyvic_livneh2015'):
"""
Get the Livneh el al., 2015 Daily VIC files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate Daily VIC.ASCII Flux model livneh 2015 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_VICASCII_Livneh2015_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyWRF_salathe2014(homedir, mappingfile, subdir='salathe2014/WWA_1950_2010/raw', catalog_label='dailywrf_salathe2014'):
"""
Get the Salathe el al., 2014 raw Daily WRF files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate the Daily Meteorology raw WRF Salathe 2014 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# read in the longitude and latitude points from the reference mapping file
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_wrfnnrp_raw_Salathe2014_locations(maptable)
# download the data
wget_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyWRF_bcsalathe2014(homedir, mappingfile, subdir='salathe2014/WWA_1950_2010/bc', catalog_label='dailywrf_bcsalathe2014'):
"""
Get the Salathe el al., 2014 bias corrected Daily WRF files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate the Daily Meteorology bias corrected WRF Salathe 2014 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# read in the longitude and latitude points from the reference mapping file
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_wrfnnrp_bc_Salathe2014_locations(maptable)
# download the data
wget_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
# # Data Processing libraries
def filesWithPath(folderpath):
"""
Create a list of filepaths for the files
folderpath: (dir) the folder of interest
"""
files =[os.path.join(folderpath, eachfile) for eachfile in os.listdir(folderpath)
if not eachfile.startswith('.') and not os.path.isdir(eachfile)] # exclude hidden files
return(files)
def compareonvar(map_df, colvar='all'):
"""
subsetting a dataframe based on some columns of interest
map_df: (dataframe) the dataframe of the mappingfile table
colvar: (str or list) the column(s) to use for subsetting; 'None' will return an outerjoin, 'all' will return an innerjoin
"""
# apply row-wise inclusion based on a subset of columns
if isinstance(colvar, type(None)):
return(map_df)
    if colvar == 'all':
# compare on all columns except the station info
return(map_df.dropna())
else:
# compare on only the listed columns
return(map_df.dropna(subset=colvar))
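# Example (column name is illustrative): keep only rows that have a catalogged
# file for a given dataset, mirroring how mappingfileToDF calls this helper.
# subset = compareonvar(map_df, colvar=['dailymet_livneh2013'])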
def mappingfileToDF(mappingfile, colvar='all'):
"""
read in a dataframe and subset based on columns of interest
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
colvar: (str or list) the column(s) to use for subsetting; 'None' will return an outerjoin, 'all' will return an innerjoin
"""
# Read in the mappingfile as a data frame
map_df = pd.read_csv(mappingfile)
# select rows (datafiles) based on the colvar(s) chosen, default is
map_df = compareonvar(map_df=map_df, colvar=colvar)
# compile summaries
print(map_df.head())
print('Number of gridded data files:'+ str(len(map_df)))
print('Minimum elevation: ' + str(np.min(map_df.ELEV))+ 'm')
print('Mean elevation: '+ str(np.mean(map_df.ELEV))+ 'm')
print('Maximum elevation: '+ str(np.max(map_df.ELEV))+ 'm')
return(map_df, len(map_df))
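# Hedged usage sketch:
# map_df, nstations = mappingfileToDF(mappingfile, colvar='all')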
def read_in_all_files(map_df, dataset, metadata, file_start_date, file_end_date, file_time_step, file_colnames, file_delimiter, subset_start_date, subset_end_date):
"""
Read in files based on dataset label
map_df: (dataframe) the mappingfile clipped to the subset that will be read-in
dataset: (str) the name of the dataset catalogged into map_df
metadata (str) the dictionary that contains the metadata explanations; default is None
file_colnames: (list) the list of shorthand variables; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_time_step: (str) the timedelta code that represents the difference between time points; default is 'D' (daily)
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
"""
# extract metadata if the information are not provided
if pd.notnull(metadata):
if isinstance(file_start_date, type(None)):
file_start_date = metadata[dataset]['start_date']
if isinstance(file_end_date, type(None)):
file_end_date = metadata[dataset]['end_date']
if isinstance(file_time_step, type(None)):
file_time_step = metadata[dataset]['temporal_resolution']
if isinstance(file_colnames, type(None)):
file_colnames = metadata[dataset]['variable_list']
if isinstance(file_delimiter, type(None)):
file_delimiter = metadata[dataset]['delimiter']
#initialize dictionary and time sequence
df_dict=dict()
met_daily_dates=pd.date_range(file_start_date, file_end_date, freq=file_time_step) # daily
# import data for all climate stations
for ind, row in map_df.iterrows():
tmp = pd.read_table(row[dataset], header=None, delimiter=file_delimiter, names=file_colnames)
tmp.set_index(met_daily_dates, inplace=True)
# subset to the date range of interest (default is file date range)
tmp = tmp.iloc[(met_daily_dates>=subset_start_date) & (met_daily_dates<=subset_end_date),:]
# set row indices
df_dict[tuple(row[['FID','LAT','LONG_']].tolist())] = tmp
return(df_dict)
def read_files_to_vardf(map_df, df_dict, gridclimname, dataset, metadata,
file_start_date, file_end_date, file_delimiter, file_time_step, file_colnames,
subset_start_date, subset_end_date, min_elev, max_elev):
"""
# reads in the files to generate variables dataframes
map_df: (dataframe) the mappingfile clipped to the subset that will be read-in
df_dict: (dict) an existing dictionary where new computations will be stored
gridclimname: (str) the suffix for the dataset to be named; if None is provided, default to the dataset name
dataset: (str) the name of the dataset catalogged into map_df
metadata: (str) the dictionary that contains the metadata explanations; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_delimiter: (str) a file parsing character to be used for file reading
file_time_step: (str) the timedelta code that represents the difference between time points; default is 'D' (daily)
file_colnames: (list) the list of shorthand variables; default is None
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
"""
# start time
starttime = pd.datetime.now()
# date range from ogh_meta file
met_daily_dates=pd.date_range(file_start_date, file_end_date, freq=file_time_step)
met_daily_subdates=pd.date_range(subset_start_date, subset_end_date, freq=file_time_step)
# omit null entries or missing data file
map_df = map_df.loc[pd.notnull(map_df[dataset]),:]
print('Number of data files within elevation range ('+str(min_elev)+':'+str(max_elev)+'): '+str(len(map_df)))
# iterate through each data file
for eachvar in metadata[dataset]['variable_list']:
# identify the variable column index
usecols = [metadata[dataset]['variable_list'].index(eachvar)]
# initiate df as a list
df_list=[]
# loop through each file
for ind, row in map_df.iterrows():
# consider rewriting the params to just select one column by index at a time
var_series = dask.delayed(pd.read_table)(filepath_or_buffer=row[dataset],
delimiter=file_delimiter,header=None,usecols=usecols,
names=[tuple(row[['FID','LAT','LONG_']])])
# append the series into the list of series
df_list.append(var_series)
# concatenate list of series (axis=1 is column-wise) into a dataframe
df1 = dask.delayed(pd.concat)(df_list, axis=1)
# set and subset date_range index
df2 = df1.set_index(met_daily_dates, inplace=False).loc[met_daily_subdates]
# end of variable table
print(eachvar+ ' dataframe reading to start: ' + str(pd.datetime.now()-starttime))
# assign dataframe to dictionary object
df_dict['_'.join([eachvar, gridclimname])] = dask.compute(df2)[0]
print(eachvar+ ' dataframe complete:' + str(pd.datetime.now()-starttime))
return(df_dict)
def read_daily_streamflow(file_name, drainage_area_m2, file_colnames=None, delimiter='\t', header='infer'):
# read in a daily streamflow data set
# if file_colnames are supplied, use header=None
if file_colnames is not None:
header=None
# read in the data
daily_data=pd.read_table(file_name, delimiter=delimiter, header=header)
# set columns, if header=None
if file_colnames is not None:
daily_data.columns=file_colnames
else:
file_colnames=list(daily_data.columns)
# calculate cfs to cms conversion, or vice versa
if 'flow_cfs' in daily_data.columns:
flow_cfs=daily_data['flow_cfs']
flow_cms=flow_cfs/(3.28084**3)
flow_mmday=flow_cms*1000*3600*24/drainage_area_m2
elif 'flow_cms' in daily_data.columns:
flow_cms=daily_data['flow_cms']
flow_cfs=flow_cms*(3.28084**3)
flow_mmday=flow_cms*1000*3600*24/drainage_area_m2
# determine the datetime
date_index=[file_colnames.index(each) for each in ['year','month','day']]
row_dates=pd.to_datetime(daily_data[date_index])
# generate the daily_flow and set the datetime as row indices
    daily_flow = pd.concat([flow_cfs, flow_cms, flow_mmday], axis=1)
"""
Authors: <NAME>, <NAME>, <NAME>, <NAME>
"""
import pandas as pd
import numpy as np
from scipy.linalg import eig
import matplotlib.pyplot as plt
import quantecon as qe
# == model parameters == #
a_0 = 100
a_1 = 0.5
ρ = 0.9
σ_d = 0.05
β = 0.95
c = 2
γ = 50.0
θ = 0.002
ac = (a_0 - c) / 2.0
# == Define LQ matrices == #
R = np.array([[0., ac, 0.],
[ac, -a_1, 0.5],
[0., 0.5, 0.]])
R = -R # For minimization
Q = γ / 2
A = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., ρ]])
B = np.array([[0.],
[1.],
[0.]])
C = np.array([[0.],
[0.],
[σ_d]])
# -------------------------------------------------------------------------- #
# Functions
# -------------------------------------------------------------------------- #
def evaluate_policy(θ, F):
"""
Given θ (scalar, dtype=float) and policy F (array_like), returns the
value associated with that policy under the worst case path for {w_t}, as
well as the entropy level.
"""
rlq = qe.robustlq.RBLQ(Q, R, A, B, C, β, θ)
K_F, P_F, d_F, O_F, o_F = rlq.evaluate_F(F)
x0 = np.array([[1.], [0.], [0.]])
value = - x0.T @ P_F @ x0 - d_F
entropy = x0.T @ O_F @ x0 + o_F
return list(map(float, (value, entropy)))
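# Illustrative check (an assumption, not from the original script): evaluate the
# do-nothing rule F = 0 at the module's θ.
# F_zero = np.zeros((1, 3))
# value_0, entropy_0 = evaluate_policy(θ, F_zero)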
def value_and_entropy(emax, F, bw, grid_size=1000):
"""
Compute the value function and entropy levels for a θ path
increasing until it reaches the specified target entropy value.
Parameters
==========
emax: scalar
The target entropy value
F: array_like
The policy function to be evaluated
bw: str
A string specifying whether the implied shock path follows best
or worst assumptions. The only acceptable values are 'best' and
'worst'.
Returns
=======
df: pd.DataFrame
A pandas DataFrame containing the value function and entropy
values up to the emax parameter. The columns are 'value' and
'entropy'.
"""
if bw == 'worst':
θs = 1 / np.linspace(1e-8, 1000, grid_size)
else:
θs = -1 / np.linspace(1e-8, 1000, grid_size)
    df = pd.DataFrame(index=θs, columns=('value', 'entropy'))
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import scipy.io as sio
import matconv
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import Normalize
'''################### Set direcotories and open files #####################'''
bhalla_paths = matconv.set_paths.Paths('projects/biophys_glm_show', 'bhalla')
alon_paths = matconv.set_paths.Paths('projects/biophys_glm_show', 'alon')
bhalla_glm0 = sio.loadmat(os.path.join(bhalla_paths.survival, 'kA_1_CV.mat'))
bhalla_glm0['model_list'][0, 0]['channel_scalar']
bhalla_glm0['model_list'][0].shape
'''notes on how the matlab file is organized:
matlab['model_list'][0, scale_index][data_type]
'''
matlab = bhalla_glm0
structure = matlab['model_list'][0]
n_scales = len(structure)
stim_time_index = pd.DataFrame({'parameter': 'kt', 'scale': 1.0}, index=range(1))
stim_time_index = pd.MultiIndex.from_frame(stim_time_index)
stim_time = pd.DataFrame(matlab['model_list'][0, 0]['kt'][0][0], columns=stim_time_index)
hist_time_index = pd.DataFrame({'parameter': 'ht', 'scale': 1.0}, index=range(1))
hist_time_index = pd.MultiIndex.from_frame(hist_time_index)
hist_time = pd.DataFrame(matlab['model_list'][0, 0]['ht'][0][0], columns=hist_time_index)
data_list = ['k', 'h', 'dc']
glm_df = pd.DataFrame()
for data in data_list:
data_df = pd.DataFrame()
labels_df = pd.DataFrame()
for i in range(n_scales):
parameter = matlab['model_list'][0, i][data][0][0]
parameter = pd.DataFrame(parameter)
scale = matlab['model_list'][0, i]['channel_scalar'][0][0][0][0]
label = pd.DataFrame({'parameter': data, 'scale': scale}, index=range(1))
labels_df = pd.concat([labels_df, label])
data_df = pd.concat([data_df, parameter], axis=1)
data_index = pd.MultiIndex.from_frame(labels_df)
data_df.columns = data_index
glm_df = pd.concat([glm_df, data_df], axis=1)
glm_df = pd.concat([stim_time, hist_time, glm_df], axis=1)
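# Note (illustrative, based on the MultiIndex built above): glm_df['k'] gathers
# the stimulus filters across channel scalings, while glm_df['kt'] and
# glm_df['ht'] hold the shared filter time bases.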
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import hail as hl
'''Module with helper functions used in both projects (and for mt + tRNA).'''
def find_subset(df, column_name, factor, condition):
'''Returns df subsetted by factor in specified column (==, !=, <, >, <=, >=).'''
if condition == "=":
return df[df[column_name] == factor]
elif condition == "!=":
return df[df[column_name] != factor]
elif condition == "<":
return df[df[column_name] < factor]
elif condition == ">":
return df[df[column_name] > factor]
elif condition == "<=":
return df[df[column_name] <= factor]
elif condition == ">=":
return df[df[column_name] >= factor]
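# Example on a toy frame (illustrative values): rows with p below 0.05.
# toy = pd.DataFrame({"gene": ["g1", "g2"], "p": [0.01, 0.20]})
# find_subset(toy, "p", 0.05, "<")  # -> only the g1 row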
def get_geneIDs(file, gene_type, column_name, sheet=None):
'''Extracts genes names as series, writes them into csv, and returns it'''
#out_file = Path("../data/{}_gene_IDs.csv".format(gene_type))
if sheet is None:
df = pd.read_excel(file)
else:
df = pd.read_excel(file, sheet_name=sheet)
gene_IDs = df[column_name].dropna().reset_index(drop=True)
#gene_IDs.to_csv(out_file, index=False)
return gene_IDs
def get_genes_of_interest(df, gene_file, gene_type, column_name, sheet=None):
'''Finds genes of interest, writes them into csv, and returns df'''
gene_file = Path(gene_file)
out_file = Path("../data/{}_gene_variants.csv".format(gene_type))
gene = get_geneIDs(gene_file, gene_type, column_name, sheet)
gene.rename("gene", inplace=True)
gene = df.merge(gene, how="inner", on="gene")
gene.to_csv(out_file, index=False)
return gene
def generate_table(df, group_list, index, columns):
'''Groups data together based on columns of interest TODO explain index and columns.'''
# ex. of how to use: print(generate_table(df, ["gene", "case_control"], "gene", "case_control"))
new_df = pd.DataFrame({"count": df.groupby(group_list).size()}).reset_index()
return new_df.pivot(index=index, columns=columns).droplevel(0, axis=1)
def get_case_control_per_variant(df, file_name=None):
'''Returns df with number of cases and controls per gene (and number of samples for mutation type).'''
# find case count for each variant
case_control = pd.DataFrame({"count": df.groupby(["gene", "consequence", "case_control"]).size()}).reset_index()
# merge consequence and case_control column and add to the df
case_control["consequence_case_control"] = case_control["case_control"].str.lower() + "_" + case_control["consequence"]
# remove unneeded labels, switch consequence_case_control to column header
# then drop multilevel index (ie count and consequence_case_control)
# rename columns as needed and reorder them
case_control = (case_control.drop(labels=["consequence", "case_control"], axis=1)
.pivot(index="gene", columns="consequence_case_control")
.droplevel(0, axis=1)
.rename({"case_lof": "case_PTVs", "control_lof": "control_PTVs"}, axis=1)
[["case_synonymous", "control_synonymous", "case_missense", "control_missense", "case_PTVs", "control_PTVs"]]
)
# get counts per mutations
#mutation_count = pd.DataFrame({"count": df.groupby(["gene", "consequence"]).size()}).reset_index()
mutation_count = (generate_table(df, ["gene", "consequence"], "gene", "consequence")
.rename({"missense": "sample_missense", "lof": "sample_PTVs", "synonymous": "sample_synonymous"}, axis=1)
[["sample_synonymous", "sample_missense", "sample_PTVs"]] )
# merge, fill NaN as 0, and convert everything to type int
complete_df = pd.merge(case_control, mutation_count, on="gene").fillna(0).astype(int)
    # if given a file_name, write output to a csv named after it
    if file_name is not None:
out_file = Path("../data/{}_case_control_variants_per_gene.csv".format(file_name))
complete_df.to_csv(out_file)
return complete_df
def get_chr_num(chrom):
'''Returns chrom number extracted from hg19 chrom string.'''
if "_" in chrom:
return chrom[3:chrom.index("_")]
return chrom[3:]
def fishers_test(df, variant):
'''Runs fisher's test on specified column and returns tuple with a list of pvalues, OR, and CI.'''
p_list = []
oddsratio_list = []
lowci_list = []
highci_list = []
case = "cases_" + variant
control = "control_" + variant
for i in range(df.shape[0]):
case_carrier = df[case].iloc[i].astype(np.int32)
control_carrier = df[control].iloc[i].astype(np.int32)
case_noncarrier = int(3864 - df[case].iloc[i].astype(np.int32))
control_noncarrier = int(7839 - df[control].iloc[i].astype(np.int32))
result = hl.eval(hl.fisher_exact_test(case_carrier, case_noncarrier, control_carrier, control_noncarrier))
p_list.append(hl.eval(result["p_value"]))
oddsratio_list.append(hl.eval(result["odds_ratio"]))
lowci_list.append(hl.eval(result["ci_95_lower"]))
highci_list.append(hl.eval(result["ci_95_upper"]))
return (p_list, oddsratio_list, lowci_list, highci_list)
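# The 2x2 table handed to Hail's fisher_exact_test is laid out as follows
# (the totals 3864 cases and 7839 controls are hard-coded above for this cohort):
#                carrier            non-carrier
#   cases        case_carrier       3864 - case_carrier
#   controls     control_carrier    7839 - control_carrier
# ex. of how to use: p, OR, low_ci, high_ci = fishers_test(complete_df, "missense")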
def all_stats(df, variants=["synonymous", "missense", "PTVs"], out_file=None):
    '''Runs Fisher's exact test for each variant type and returns the df with p-values, ORs, and CIs added.'''
    df_copy = df.copy()
    for variant in variants:
        col_p = "pval_" + variant
        col_or = "OR_" + variant
        col_lowci = "lowci_" + variant
        col_highci = "highci_" + variant
        p_variant, or_variant, low_ci, high_ci = fishers_test(df_copy, variant)
        df_copy[col_p] = p_variant
        df_copy[col_or] = or_variant
        df_copy[col_lowci] = low_ci
        df_copy[col_highci] = high_ci
    df_copy.sort_values(by="gene", inplace=True)
    if out_file is not None:
        out_file = Path("../data/summaryData/{}_fishers.csv".format(out_file))
        df_copy.to_csv(out_file, index=False)
    return df_copy
def plot_qq(df, name, variant):
    '''Plots a QQ graph of expected vs. observed -log10 p-values using a matplotlib scatter plot; confidence intervals are not plotted yet.'''
# finds relevant column names
col_p = "pval_" + variant
col_low = "lowci_" + variant
col_high = "highci_" + variant
# removes all NaN
cleaned = df.dropna(subset=[col_p, col_low, col_high])
cleaned = cleaned.sort_values(col_p)
# -log to get observed values
p_list = cleaned[col_p].to_numpy()
obs = -1 * np.log10(p_list)
exp = -1 * np.log10(np.arange(1, p_list.shape[0]+1) / p_list.shape[0])
fig, ax = plt.subplots()
ax.scatter(exp, obs)
xpoints = ypoints = ax.get_xlim()
ax.plot(xpoints, ypoints, color='black', scalex=False, scaley=False)
plt.title("{} {} Q-Q Plot".format(name, variant))
plt.xlabel("Expected value")
plt.ylabel("Observed value")
plt.savefig("../data/figures/{}_{}_QQ.png".format(name, variant))
plt.show()
def find_significant_genes(df, variant, alpha=0.05, file_name=None):
    '''Finds genes whose p-value for the given variant type passes the Bonferroni-corrected threshold (and whose synonymous p-value does not).'''
# multiple test correction, divide alpha by # of genes
adjusted_p = alpha/df.shape[0]
col = "pval_" + variant
df.sort_values(col, ignore_index=True, inplace=True)
significant_genes = find_subset(df, col, adjusted_p, "<=")
significant_genes = find_subset(significant_genes, "pval_synonymous", adjusted_p, ">")
if file_name is not None:
out_file = Path("../data/datasets/{}_{}_significant_genes.csv".format(file_name, variant))
significant_genes.to_csv(out_file, index=False)
return significant_genes
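# The Bonferroni correction above divides alpha by the number of genes tested;
# e.g. with alpha=0.05 and 20,000 genes the per-gene threshold is 2.5e-06.
# The second find_subset call then keeps only genes whose synonymous p-value is
# NOT significant, presumably as a sanity filter.
# ex. of how to use: hits = find_significant_genes(fishers_df, "PTVs", file_name="mydataset")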
def find_sample_variants_for_genes(df, filename, name, variant):
all_genes = pd.read_csv(filename, usecols=["gene", "variant", "consequence", "case_control"])
df = df[["gene", "pval_{}".format(variant), "OR_{}".format(variant), "lowci_{}".format(variant), "highci_{}".format(variant)]]
    significant_gene_variants = | pd.merge(all_genes, df, how="inner", on="gene") | pandas.merge |
import util
import numpy as np
import pandas as pd
# model_1 = pd.read_csv('fold1_boostdt.csv')
# model_1 = pd.read_csv('small_boostdt.csv')
model_2 = pd.read_csv('~/Desktop/predictions_stiebels/full/predictions_xgboost_fold1.csv', names = ["pred"])
model_1 = pd.read_csv('~/Desktop/predictions_stiebels/full/predictions_nn_fold1.csv', names = ["pred"])
# model_2 = pd.read_csv("~/Desktop/predictions_stiebels/full/predictions_nn_fold1.csv", names = ["pred"])
model_1_label = 'Neural Net'
model_2_label = 'XGBoost'
if (len(model_1) + len(model_2)) == 2400 * 2:
df_all = pd.read_csv('small_boostdt.csv')
qid = df_all['qid']
true = df_all['label']
elif (len(model_1) + len(model_2)) == 4800 * 2:
df_all = | pd.read_csv('large_boostdt.csv') | pandas.read_csv |
"""
This module contains methods related to validation of csv data contained
in the CSVFile model.
"""
from collections import namedtuple
from marshmallow import fields, post_dump, Schema, validate
from pandas import Index, to_numeric
from viime.cache import region
SEVERITY_VALUES = ['error', 'warning']
CONTEXT_VALUES = ['table', 'column', 'row']
TYPE_VALUES = [
'primary-key-missing', 'header-missing', 'group-missing',
'invalid-primary-key', 'invalid-header', 'non-numeric-data', 'invalid-group',
'missing-data', 'low-variance'
]
_ValidationTuple = namedtuple(
    '_ValidationTuple', 'type_ title severity context row_index column_index data')
GROUP_MISSING_THRESHOLD = 0.25
LOW_VARIANCE_THRESHOLD = 1e-8
MAX_NAN_THRESHOLD = 0.95
class ValidationTuple(_ValidationTuple):
def __new__(cls, **kwargs):
kw = cls.defaults.copy()
kw.update(kwargs)
if kw.get('context') in ('table', 'column'):
kw.setdefault('row_index', None)
if kw.get('context') in ('table', 'row'):
kw.setdefault('column_index', None)
return _ValidationTuple.__new__(cls, **kw)
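# Illustration (using the subclasses defined below): InvalidHeader(row_index=3,
# data="Values are not unique") pulls type_/title/severity/context from the class
# `defaults`, and because its context is 'row', column_index is filled with None.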
class PrimaryKeyMissing(ValidationTuple):
defaults = {
        'title': 'No identifier column',
'type_': 'primary-key-missing',
'severity': 'error',
'context': 'table',
'data': None
}
class HeaderMissing(ValidationTuple):
defaults = {
'title': 'No header row',
'type_': 'header-missing',
'severity': 'error',
'context': 'table',
'data': None
}
class GroupMissing(ValidationTuple):
defaults = {
'title': 'No group column',
'type_': 'group-missing',
'severity': 'error',
'context': 'table',
'data': None
}
class InvalidPrimaryKey(ValidationTuple):
defaults = {
'title': 'Invalid identifier',
'type_': 'invalid-primary-key',
'severity': 'error',
'context': 'column'
}
class InvalidHeader(ValidationTuple):
defaults = {
'title': 'Invalid header',
'type_': 'invalid-header',
'severity': 'error',
'context': 'row'
}
class InvalidGroup(ValidationTuple):
defaults = {
'title': 'Invalid group',
'type_': 'invalid-group',
'severity': 'error',
'context': 'column'
}
class NonNumericRow(ValidationTuple):
defaults = {
'title': 'Non-numeric row',
'type_': 'non-numeric-row',
'severity': 'error',
'context': 'row'
}
class NonNumericColumn(ValidationTuple):
defaults = {
'title': 'Non-numeric column',
'type_': 'non-numeric-column',
'severity': 'error',
'context': 'column'
}
class NonNumericData(ValidationTuple):
defaults = {
'title': 'Non-numeric data',
'type_': 'non-numeric-data',
'severity': 'warning',
'context': 'table'
}
class MissingData(ValidationTuple):
defaults = {
'title': 'Missing data',
'type_': 'missing-data',
'severity': 'warning'
}
class LowVariance(ValidationTuple):
defaults = {
'title': 'Low data variance',
'type_': 'low-variance',
'severity': 'warning'
}
class ValidationSchema(Schema):
type_ = fields.Str(required=True, data_key='type')
title = fields.Str(required=True)
severity = fields.Str(required=True, validate=validate.OneOf(SEVERITY_VALUES))
context = fields.Str(required=True, validate=validate.OneOf(CONTEXT_VALUES))
row_index = fields.Int(required=False)
column_index = fields.Int(required=False)
group = fields.Str(required=False)
data = fields.Raw(required=False)
@post_dump
def remove_null_values(self, data, **kwargs):
return {k: v for k, v in data.items() if v is not None}
def get_validation_list(csv_file):
errors = get_fatal_index_errors(csv_file)
if not errors:
errors = get_warnings(csv_file)
return errors
def get_missing_index_errors(csv_file):
errors = []
if csv_file.key_column_index is None:
errors.append(PrimaryKeyMissing())
if csv_file.header_row_index is None:
errors.append(HeaderMissing())
if csv_file.group_column_index is None:
errors.append(GroupMissing())
return errors
@region.cache_on_arguments()
def get_fatal_index_errors(csv_file):
errors = get_missing_index_errors(csv_file)
if not errors:
errors = get_invalid_index_errors(csv_file)
if not errors:
errors = get_non_numeric_errors(csv_file)
return errors
def check_valid_index(series):
"""Check if pandas series can be a valid index."""
index = Index(series)
if index.hasnans:
return "Contains NaN's"
if not index.is_unique:
return 'Values are not unique'
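# ex.: check_valid_index(pd.Series([1, 2, 2])) -> 'Values are not unique'
#      check_valid_index(pd.Series([1, None, 3])) -> "Contains NaN's"
#      (a valid index returns None implicitly)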
def check_valid_groups(groups):
index = Index(groups.iloc[:, 0])
if index.hasnans or '' in index:
return 'Contains empty values'
def get_invalid_index_errors(csv_file):
from viime.models import TABLE_COLUMN_TYPES, TABLE_ROW_TYPES
errors = []
table = csv_file.filter_table_by_types(
TABLE_ROW_TYPES.DATA, TABLE_COLUMN_TYPES.INDEX).iloc[:, 0]
error_data = check_valid_index(table)
if error_data:
errors.append(InvalidPrimaryKey(column_index=csv_file.key_column_index, data=error_data))
table = csv_file.filter_table_by_types(
TABLE_ROW_TYPES.INDEX, TABLE_COLUMN_TYPES.DATA).iloc[0, :]
error_data = check_valid_index(table)
if error_data:
errors.append(InvalidHeader(row_index=csv_file.header_row_index, data=error_data))
error_data = check_valid_groups(csv_file.groups)
if error_data:
errors.append(InvalidGroup(column_index=csv_file.group_column_index, data=error_data))
return errors
def get_non_numeric_errors(csv_file):
errors = []
raw_table = csv_file.raw_measurement_table
for index in range(raw_table.shape[0]):
row = to_numeric(raw_table.iloc[index, :], errors='coerce')
nans = (row != row).sum()
if nans / raw_table.shape[1] > MAX_NAN_THRESHOLD:
row = csv_file.get_row_by_name(raw_table.index[index])
errors.append(
NonNumericRow(row_index=row.row_index,
data=f'Contains {nans} non-numeric values')
)
for index in range(raw_table.shape[1]):
column = | to_numeric(raw_table.iloc[:, index], errors='coerce') | pandas.to_numeric |
import os
import unittest
import pandas as pd
from context import technical as ti
# Change working directory
# This enable running tests from repository root
if os.getcwd() != os.path.abspath(os.path.dirname(__file__)):
os.chdir('tests/')
# Test results
class ResultsRSI(unittest.TestCase):
# Input data
test_data = pd.read_csv('test_data/correct_series.csv')
test_data_df = pd.read_csv('test_data/correct_ohlc.csv').drop(columns=['Close'])
# Expected results output
results_rsi = pd.Series([None, None, None, None, None, None,
None, None, None, None, None, None,
47.25280022804261, 46.88077998698816, 47.101267162264016, 47.00929322452039, 46.7165184370504, 47.15738539063636,
46.77617492442675, 46.89141568802567, 47.055962508174325, 46.75436640536967, 47.22730953343007, 48.15725812972951,
48.388571703427004, 48.80045436339292, 49.799615719433724, 50.749816167931876, 51.71693204342112, 52.62985533413443,
53.758601079486596, 53.89482377914421, 54.17495518260089, 54.736926654922605, 55.13423749963049, 55.442951738396005,
56.739537559474186, 57.26885485742762, 57.500127463438524, 57.14236479747808, 57.54149746743077, 57.905820742390425,
58.17383724213553, 59.059331336307984, 60.457448003690054, 61.11872163570835, 61.66694808843557, 62.074080067528016,
62.20694317850205, 63.45076515475267, 64.61890382304387, 66.27248731535836, 67.42124873044372, 67.9395808355508,
68.37488505254538, 68.84210071524612, 69.17189954309868, 70.12209998782939, 70.28368853923217, 70.03506574509129,
68.80640030241051, 67.91980536038358, 66.48142225875074, 64.7679765608068, 62.68677670273754, 61.158316243822775,
59.26206075171424, 57.9066342975084, 56.56970803414393, 55.33354873756501, 54.63870450844905])
# Input paramter
ma = 12
def test_result_rsi(self):
'''rsi function must return series equal to expected'''
results = ti.RSI(self.test_data, self.ma)
| pd.testing.assert_series_equal(self.results_rsi, results, check_names=False) | pandas.testing.assert_series_equal |
import numpy as np
import pandas as pd
import scipy as sc
import scipy.sparse  # needed for sc.sparse.issparse below
import scipy.spatial as spatial
from anndata import AnnData
from .het import create_grids
def lr(
adata: AnnData,
use_lr: str = "cci_lr",
distance: float = None,
verbose: bool = True,
) -> AnnData:
"""Calculate the proportion of known ligand-receptor co-expression among the neighbouring spots or within spots
Parameters
----------
adata: AnnData The data object to scan
use_lr: str object to keep the result (default: adata.uns['cci_lr'])
distance: float Distance to determine the neighbours (default: closest), distance=0 means within spot
Returns
-------
adata: AnnData The data object including the results
"""
# automatically calculate distance if not given, won't overwrite distance=0 which is within-spot
if not distance and distance != 0:
# for arranged-spots
scalefactors = next(iter(adata.uns["spatial"].values()))["scalefactors"]
library_id = list(adata.uns["spatial"].keys())[0]
distance = (
scalefactors["spot_diameter_fullres"]
* scalefactors[
"tissue_" + adata.uns["spatial"][library_id]["use_quality"] + "_scalef"
]
* 2
)
df = adata.to_df()
# expand the LR pairs list by swapping ligand-receptor positions
lr_pairs = adata.uns["lr"].copy()
lr_pairs += [item.split("_")[1] + "_" + item.split("_")[0] for item in lr_pairs]
# get neighbour spots for each spot according to the specified distance
coor = adata.obs[["imagerow", "imagecol"]]
point_tree = spatial.cKDTree(coor)
neighbours = []
for spot in adata.obs_names:
if distance == 0:
neighbours.append([spot])
else:
n_index = point_tree.query_ball_point(
np.array(
[adata.obs["imagerow"].loc[spot], adata.obs["imagecol"].loc[spot]]
),
distance,
)
neighbours.append(
[item for item in df.index[n_index] if not (item == spot)]
)
# filter out those LR pairs that do not exist in the dataset
lr1 = [item.split("_")[0] for item in lr_pairs]
lr2 = [item.split("_")[1] for item in lr_pairs]
avail = [
i for i, x in enumerate(lr1) if lr1[i] in df.columns and lr2[i] in df.columns
]
spot_lr1 = df[[lr1[i] for i in avail]]
spot_lr2 = df[[lr2[i] for i in avail]]
if verbose:
print("Altogether " + str(len(avail)) + " valid L-R pairs")
# function to calculate mean of lr2 expression between neighbours or within spot (distance==0) for each spot
def mean_lr2(x):
# get lr2 expressions from the neighbour(s)
nbs = spot_lr2.loc[neighbours[df.index.tolist().index(x.name)], :]
if nbs.shape[0] > 0: # if neighbour exists
return (nbs > 0).sum() / nbs.shape[0]
else:
return 0
# mean of lr2 expressions from neighbours of each spot
nb_lr2 = spot_lr2.apply(mean_lr2, axis=1)
# check whether neighbours exist
try:
nb_lr2.shape[1]
except:
raise ValueError("No neighbours found within given distance.")
# keep value of nb_lr2 only when lr1 is also expressed on the spots
spot_lr = pd.DataFrame(
spot_lr1.values * (nb_lr2.values > 0) + (spot_lr1.values > 0) * nb_lr2.values,
index=df.index,
columns=[lr_pairs[i] for i in avail],
).sum(axis=1)
adata.obsm[use_lr] = spot_lr.values / 2
if verbose:
print(
"L-R interactions with neighbours are counted and stored into adata.obsm['"
+ use_lr
+ "']"
)
# return adata
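# A minimal usage sketch (assumes an AnnData prepared for stLearn, with
# adata.uns["lr"] holding "LIGAND_RECEPTOR" strings and adata.obs containing
# "imagerow"/"imagecol"; distance=0 scores co-expression within each spot, so
# no spatial scalefactors are needed):
# lr(adata, use_lr="cci_lr", distance=0)
# scores = adata.obsm["cci_lr"]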
def lr_grid(
adata: AnnData,
num_row: int = 10,
num_col: int = 10,
use_lr: str = "cci_lr_grid",
radius: int = 1,
verbose: bool = True,
) -> AnnData:
"""Calculate the proportion of known ligand-receptor co-expression among the neighbouring grids or within each grid
Parameters
----------
adata: AnnData The data object to scan
num_row: int Number of grids on height
num_col: int Number of grids on width
use_lr: str object to keep the result (default: adata.uns['cci_lr'])
radius: int Distance to determine the neighbour grids (default: 1=nearest), radius=0 means within grid
Returns
-------
adata: AnnData The data object with the cci_lr grid result updated
"""
# prepare data as pd.dataframe
df = adata.to_df()
if not isinstance(df, pd.DataFrame):
if sc.sparse.issparse(df):
df = pd.DataFrame(
df.toarray(), index=adata.obs_names, columns=adata.var_names
)
else:
df = | pd.DataFrame(df, index=adata.obs_names, columns=adata.var_names) | pandas.DataFrame |
# routes related to the boba run monitor
import os
import time
import pandas as pd
import numpy as np
from flask import jsonify, request
from .util import read_csv, read_json, write_json
from bobaserver import app, socketio, scheduler
from bobaserver.bobastats import sampling, sensitivity
import bobaserver.common as common
class BobaWatcher:
# static attributes
header_outcome = ['n_samples', 'mean', 'lower', 'upper']
def __init__(self, order, weights=None):
self.start_time = None
self.prev_time = 0 # for resume
# sampling order and weights
self.order = [uid - 1 for uid in order] # convert to 0-indexed
self.weights = weights
# results
self.last_merge_index = 0
self.outcomes = []
self.decision_scores = []
@staticmethod
def get_fn_outcome():
return os.path.join(app.bobarun.dir_log, 'outcomes.csv')
@staticmethod
def get_fn_save():
return os.path.join(app.bobarun.dir_log, 'execution_plan.json')
@staticmethod
def get_fn_sensitivity():
return os.path.join(app.bobarun.dir_log, 'sensitivity.csv')
@staticmethod
def get_header_sensitivity():
return ['n_samples', 'type'] + common.get_decision_list()
def _append_csv(self, fn, header, data):
# append to the csv if it exists, or create one
if os.path.exists(fn):
f = open(fn, 'a')
else:
f = open(fn, 'w')
f.write(','.join(header) + '\n')
for r in data:
f.write(','.join([str(i) for i in r]) + '\n')
f.close()
def _impute_null_CI(self, data, previous, col=0):
# impute NaN in CIs, assuming data is a 2D list [..., mean, lower, upper]
# where col is the column index of mean. Modify data in place.
for i, d in enumerate(data):
for j in [col + 1, col + 2]:
if np.isnan(d[j]):
d[j] = data[i - 1][j] if i > 0 else (previous[-1][j] \
if len(previous) else d[col])
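  # Illustration: with col=1, a row shaped [n, mean, lower, upper] that has NaN
  # lower/upper inherits them from the previous row of `data`, else from the last
  # row of `previous`, and as a final fallback uses its own mean.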
def _NaN_to_string(self, arr):
# convert NaN to string 'nan'. It will happen in place in the input arr.
for i in range(len(arr)):
arr[i] = ['nan' if isinstance(r, float) and np.isnan(r) else r \
for r in arr[i]]
def _compute_dec_CI(self, df, col, indices, dec_list, i):
""" Compute bootstrap CI of decision sensitivity """
res = sampling.bootstrap_sensitivity(df, col, indices, dec_list)
out = [[i, c] + res[f'score_{c}'].tolist() for c in ['lower', 'upper']]
# convert NaN to string
self._NaN_to_string(out)
self.decision_scores += out
# write results to disk
self._append_csv(BobaWatcher.get_fn_sensitivity(),
BobaWatcher.get_header_sensitivity(), out)
# send to client
socketio.emit('update-sensitivity', {'data': self.decision_scores,
'header': BobaWatcher.get_header_sensitivity()})
def update_outcome(self, done):
step = min(5, max(1, int(app.bobarun.size / 50)))
if len(done) - self.last_merge_index <= step:
return
# merge result file
app.bobarun.run_after_execute()
df = common.read_results('point_estimate', float)
df = pd.merge(app.summary, df, on='uid', how='left')
col = common.get_field_name('point_estimate')
dec_list = common.get_decision_list()
# compute results since the last index
start = (int(self.last_merge_index / step) + 1) * step
self.last_merge_index = len(done) - 1
res = []
sen = []
indices = None
for i in range(start, len(done), step):
indices = self.order[:i+1]
# outcome mean
out = sampling.bootstrap_outcome(df, col, indices, self.weights)
res.append([i] + out)
# decision sensitivity, without CI
# FIXME: hard coded for AD test
ad = [sensitivity.ad_wrapper(df.iloc[indices], dec, col) \
for dec in dec_list]
sen.append([i, 'score'] + [s[0] for s in ad])
sen.append([i, 'p'] + [s[1] for s in ad])
# schedule a job to compute the decision CI, for the last index
if (not scheduler.get_job('compute_CI')) and (indices is not None):
scheduler.add_job(self._compute_dec_CI, id='compute_CI',
args=[df, col, indices, dec_list, i])
# impute null in CI and remove null in mean
self._impute_null_CI(res, self.outcomes, 1)
res = [r for r in res if not np.isnan(r[1])]
self.outcomes += res
# convert NaN in decision sensitivity to string 'nan'
self._NaN_to_string(sen)
self.decision_scores += sen
# write results to disk
self._append_csv(BobaWatcher.get_fn_outcome(), self.header_outcome, res)
self._append_csv(BobaWatcher.get_fn_sensitivity(),
BobaWatcher.get_header_sensitivity(), sen)
# send to client
socketio.emit('update-outcome', {'data': self.outcomes,
'header': self.header_outcome})
socketio.emit('update-sensitivity', {'data': self.decision_scores,
'header': BobaWatcher.get_header_sensitivity()})
def check_progress(self):
# remove self from scheduled jobs if boba run has finished
if not app.bobarun.is_running():
scheduler.remove_job('watcher')
print('check progress')
# estimate remaining time
logs = app.bobarun.exit_code
done = max(1, len(logs)) # avoid division by 0
elapsed = self.get_elapsed()
remain = app.bobarun.size - done
remain = int(elapsed * remain / done)
# schedule jobs to compute results
if not scheduler.get_job('update_outcome'):
scheduler.add_job(self.update_outcome, args=[logs], id='update_outcome')
res = {'status': 'success',
'logs': logs,
'time_left': remain,
'is_running': app.bobarun.is_running()}
socketio.emit('update', res)
def stop(self):
# stop timer
t = 0 if self.start_time is None else time.time() - self.start_time
self.prev_time += t
self.start_time = None
def start(self):
# start timer and add job
self.start_time = time.time()
scheduler.add_job(self.check_progress, 'interval', seconds=5,
id='watcher', replace_existing=True)
def get_elapsed(self):
t = 0 if self.start_time is None else time.time() - self.start_time
return t + self.prev_time
def save_to_file(self):
# save data to file, so it is possible to resume later
data = {'order': list(self.order), 'elapsed': self.get_elapsed()}
if self.weights is not None:
data['weights'] = list(self.weights)
write_json(data, self.get_fn_save())
def init_from_file(self):
# resume from the previous save
err, data = read_json(self.get_fn_save())
if not err:
self.order = data['order']
self.weights = np.asarray(data['weights']) if 'weights' in data else None
self.prev_time = data['elapsed']
# read outcome and sensitivity progress
fn = BobaWatcher.get_fn_outcome()
if os.path.exists(fn):
df = pd.read_csv(fn)
self.last_merge_index = df['n_samples'].max()
self.outcomes = df.values.tolist()
fn = BobaWatcher.get_fn_sensitivity()
if os.path.exists(fn):
# convert NaN to string 'nan'; client needs to convert it back to js NaN
df = pd.read_csv(fn).fillna('nan')
self.decision_scores = df.values.tolist()
def check_stopped():
# after client issued stop command, check if boba has indeed stopped
if not app.bobarun.is_running():
scheduler.remove_job('check_stopped')
socketio.emit('stopped')
def merge_error():
""" Merge the error logs into errors.csv """
fn = os.path.join(app.bobarun.dir_log, 'errors.csv')
logs = []
merged = []
df = None
# exit code
if os.path.exists(app.bobarun.file_log):
status = pd.read_csv(app.bobarun.file_log, index_col='uid')
logs = status.index.tolist()
# previous merged error
if os.path.exists(fn):
df = | pd.read_csv(fn, na_filter=False) | pandas.read_csv |
import pandas as pd
import numpy as np
import argparse
import random
def create_context_to_id_map(df, df_sent):
context_to_id = {}
c_context_id = 0
context_ids = []
relevant_sentence_ids_arr = []
df = df.reset_index()
for index, row in df.iterrows():
# add the relevant sentences to the main df
relevant_sentence_ids = df_sent.iloc[index]['ranked_matching_sentence_ids']
relevant_sentence_ids_arr.append(relevant_sentence_ids)
# map the ids
if not row['context'] in context_to_id:
context_id = c_context_id
context_to_id[row['context']] = c_context_id
c_context_id += 1
else:
context_id = context_to_id[row['context']]
context_ids.append(context_id)
print('Num context texts: ', len(context_to_id.keys()))
return context_ids, relevant_sentence_ids_arr
def train_val_split(df, frac):
train_context_ids = []
val_context_ids = []
df_train = pd.DataFrame()
df_val = pd.DataFrame()
num_in_val = int(np.floor(len(df) * frac))
print('num in validation set: ', num_in_val)
num = 0
while num < num_in_val:
for index, row in df.iterrows():
context_id = row['context_id']
n = random.random()
if context_id in train_context_ids:
df_train = df_train.append(row, ignore_index=True)
elif context_id in val_context_ids:
df_val = df_val.append(row, ignore_index=True)
num += 1
elif n < frac:
df_val = df_val.append(row, ignore_index=True)
val_context_ids.append(context_id)
num += 1
else:
df_train = df_train.append(row, ignore_index=True)
train_context_ids.append(context_id)
if num == num_in_val:
break
return df_train, df_val
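# A usage sketch (assuming df already has the 'context_id' column produced by
# create_context_to_id_map): all rows sharing a context_id land in the same split,
# and roughly `frac` of the rows end up in validation.
# ex.: df_train, df_val = train_val_split(df, 0.2)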
def main(args):
df = pd.read_pickle(args.data_path)
df_sent = | pd.read_pickle(args.sent_data_path) | pandas.read_pickle |
"""Functions for testing by means of pytest
"""
import sys
sys.path.append("/home/daniel/Schreibtisch/Projekte/avalanche-risk")
import pandas as pd
import numpy as np
from model.functions_model import preprocess_X_values, get_shifted_features
import pytest
@pytest.fixture
def df():
df = | pd.DataFrame([["a", "1"], ["b", "2"], ["c", "3"], ["d", "4"]], index = [1, 2, 3, 4], columns = ["A", "B"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error as MSE
from sklearn import preprocessing
import math
import re
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
acquired_dict = {
'tr':'tr',
'fa':'fa',
'dr':'dr',
"Traded": "tr",
"Free Agency": "fa",
"Amateur Draft": "dr",
"Amateur Free Agent": "fa",
"Waivers": "tr",
"Purchased":"tr",
"Rule 5 Draft": "dr",
"Expansion Draft": "dr",
"Conditional Deal": "tr",
"Amateur Draft--no sign": "dr",
"MinorLg Draft": "dr",
"Rune 5 returned": "tr"
}
def inflation_calc(row):
inf_dict = {
2017: 1.0,
2016: 1.021299290023666,
2015: 1.0341874211554445,
2014: 1.0354149770208165,
2013: 1.0522113523096537,
2012: 1.0676237183898534,
2011: 1.089717656786951,
2010: 1.1241149062626115,
2009: 1.1425534989302544,
2008: 1.1384885486964882,
2007: 1.1822013870802828,
2006: 1.215873015873016,
2005: 1.2550947260624679,
2004: 1.297617787188989,
2003: 1.3324635790389214,
2002: 1.3626862352679565,
2001: 1.3843112893206078,
2000: 1.4234610917537749
}
return int(row['salary']*inf_dict[row['year']])
def fixtm(t):
if t == '2TM' or t == '3TM' or t == '4TM':
return 'multiple'
elif t == 'TBD':
return 'TBR'
elif t == 'MON':
return "WSN"
elif t == 'ANA':
return 'LAA'
elif t == 'FLA':
return 'MIA'
else: return t
def fix_name(n):
n1 = (' ').join(n.split('\xa0'))
n2 = re.sub(r'[^\w\s]','',n1)
return n2
def train_and_test(cutoff = 1000000):
train_X,train_y,test_X,test_y = load_and_split_data(cutoff)
lr = LinearRegression()
lr.fit(train_X, train_y)
preds = lr.predict(test_X)
error = np.sqrt(MSE(test_y,preds))
return round(10**error,2)
def cutoff_df(df,cutoff):
log_10_cut = math.log10(cutoff)
df = df[df['log10_adj'] >= log_10_cut]
return df
def test_cutoffs():
test_cutoffs = [(i+1)*100000 for i in range(20)]
error_list = []
for i in test_cutoffs:
error = train_and_test(i)
error_list.append(error)
return test_cutoffs,error_list
def test_elastic_cutoffs():
test_cutoffs = [(i+1)*100000 for i in range(20)]
error_list = []
for i in test_cutoffs:
error = elastic(i)
error_list.append(error)
return test_cutoffs,error_list
def load_data():
train = pd.read_pickle('batting_00_16.pkl')
test = pd.read_pickle('batting_17.pkl')
return pd.concat([train,test])
def ordered(row):
if row['name'] == row['np']:
return row['next_sal']
else:
return np.nan
def get_salary_for_next_year():
df = load_data()
df = engineer_features(df)
df = df.sort_values(by = ['name','year'])
df['next_sal'] = df['log10_adj'].shift(-1)
df['np'] = df['name'].shift(-1)
df['next_sal'] = df.apply(ordered,axis=1)
df = df.dropna()
df['log10_adj'] = df['next_sal']
df = df.drop(['next_sal','np'],axis=1)
train = df[df['year']<2016]
test = df[df['year']==2016]
return train,test
def engineer_features(df):
df = df[df.pa>200]
df = df.reset_index()
df['pa/g'] = df['pa']/df['g']
df['name'] = df['name'].apply(fix_name)
#adjust team names
df['tm'] = df['tm'].apply(fixtm)
#drop position summary (too many classes), log_sal (unscaled by inflation), rk (same as index)
df.drop(['pos\xa0summary','log_sal','rk','index'],axis=1,inplace=True)
#map values in acquired to 3 classes
df['acquired'] = df['acquired'].map(acquired_dict)
#adjust salary for inflation and take the log-10 for target column
df['adj_salary'] = df.apply(inflation_calc,axis=1)
df['log10_adj'] = np.log10(df['adj_salary'])
#get dummy variables for team, hand, and acquired columns
df = pd.get_dummies(df,columns = ['acquired','bat_hand','tm']).drop(['tm_multiple','bat_hand_rhb','acquired_tr'],axis=1)
#filter datasets for only batters with more than 200 plate appearances in season
return df
def new_features(df):
df['ba'] = df['h']/df['ab']
df['obp'] = (df['h']+df['bb']+df['hbp'])/(df['ab']+df['bb']+df['hbp']+df['sh'])
df['slg'] = (df['h']+df['2b']+2*df['3b']+3*df['hr'])/df['ab']
return df
def scaleColumns(df, cols_to_scale):
min_max_scaler = preprocessing.MinMaxScaler()
for col in cols_to_scale:
df[col] = pd.DataFrame(min_max_scaler.fit_transform(pd.DataFrame(df[col])),columns=[col])
return df
def rescale_numeric(df):
df = df.reset_index().drop(['index'],axis=1)
cols = ['g','pa','rbat','rbaser','rdp',
'rfield',
'rpos',
'raa',
'waa',
'rrep',
'rar',
'war',
'waawl%',
'162wl%',
'owar',
'dwar',
'orar',
'year',
'ab', 'r', 'h', '2b', '3b', 'hr', 'rbi', 'sb', 'cs', 'bb', 'so', 'ibb',
'hbp', 'sh', 'sf', 'gidp', 'years_in_mlb','pa/g','ba','obp','slg']
df = scaleColumns(df,cols)
return df
def combine_with_lehman_data(df):
players = pd.read_csv('baseballdatabank-master/core/People.csv')
#players = players.set_index('playerID')
drop_cols = ['deathYear','deathMonth','deathDay','deathCountry','deathState','deathCity',
'birthYear','birthMonth','birthDay','birthCountry','birthState','birthCity',
'nameGiven','weight','height','bats','throws','finalGame','retroID','bbrefID']
players = players.drop(drop_cols,axis=1)
players['fullname'] = players['nameFirst'] + ' ' + players['nameLast']
players = players.dropna()
players['fullname'] = players['fullname'].apply(lambda x: ''.join(re.sub(r'[^\w\s]','',x).split(' ')).lower())
batting = | pd.read_csv('baseballdatabank-master/core/Batting.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import csv
from collections import defaultdict
import numpy as np
import re
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.tokenize.regexp import RegexpTokenizer
import pandas as pd
from funcy import lflatten  # lflatten is used in make_causal_input below
def clean_tokens(tokens, to_replace='[^\w\-\+\&\.\'\"]+'):
lemma = WordNetLemmatizer()
tokens = [re.sub(to_replace, ' ', token) for token in tokens]
tokens = [lemma.lemmatize(token) for token in tokens]
return tokens
def tokenize(mystr):
tokenizer = RegexpTokenizer('[^ ]+')
return tokenizer.tokenize(mystr)
def make_causal_input(lod, map_, silent=True):
"""
:param lod: list of dictionaries
:param map_: mapping of tags and values of interest, i.e. [('cause', 'C'), ('effect', 'E')]. The silent tags are by default taggerd as 'O'
:return: dict of list of tuples for each sentence
"""
dd = defaultdict(list)
dd_ = []
rx = re.compile(r"(\b[-']\b)|[\W_]")
rxlist = [r'("\\)', r'(\\")']
rx = re.compile('|'.join(rxlist))
for i in range(len(lod)):
line_ = lod[i]['sentence']
line = re.sub(rx, '', line_)
caus = lod[i]['cause']
caus = re.sub(rx, '', caus)
effe = lod[i]['effect']
effe = re.sub(rx, '', effe)
d = defaultdict(list)
index = 0
for idx, w in enumerate(word_tokenize(line)):
index = line.find(w, index)
if not index == -1:
d[idx].append([w, index])
index += len(w)
d_ = defaultdict(list)
for idx in d:
d_[idx].append([tuple([d[idx][0][0], 'O']), d[idx][0][1]])
init_e = line.find(effe)
init_e = 0 if init_e == -1 else init_e
init_c = line.find(caus)
init_c = 0 if init_c == -1 else init_c
for c, cl in enumerate(word_tokenize(caus)):
init_c = line.find(cl, init_c)
stop = line.find(cl, init_c) + len(cl)
word = line[init_c:stop]
for idx in d_:
if int(init_c) == int(d_[idx][0][1]):
und_ = defaultdict(list)
und_[idx].append([tuple([word, 'C']), line.find(word, init_c)])
d_[idx] = und_[idx]
init_c += len(cl)
for e, el in enumerate(word_tokenize(effe)):
init_e = line.find(el, init_e)
stop = line.find(el, init_e) + len(el)
word = line[init_e:stop]
for idx in d_:
if int(init_e) == int(d_[idx][0][1]):
und_ = defaultdict(list)
und_[idx].append([tuple([word, 'E']), line.find(word, init_e)])
d_[idx] = und_[idx]
init_e += len(word)
dd[i].append(d_)
for dict_ in dd:
dd_.append([item[0][0] for sub in [[j for j in i.values()] for i in lflatten(dd[dict_])] for item in sub])
return dd_
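# ex. of how to use (hypothetical record, keys as described in the docstring):
# lod = [{'sentence': 'Rain caused flooding', 'cause': 'Rain', 'effect': 'flooding'}]
# make_causal_input(lod, [('cause', 'C'), ('effect', 'E')])
# -> [[('Rain', 'C'), ('caused', 'O'), ('flooding', 'E')]]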
def s2dict(lines, lot):
d = defaultdict(list)
for line_, tag_ in zip(lines, lot):
d[tag_] = line_
return d
def make_data(df):
lodict_ = []
for rows in df.itertuples():
list_ = [rows[2], rows[3], rows[4]]
map1 = ['sentence', 'cause', 'effect']
dict_ = s2dict(list_, map1)
lodict_.append(dict_)
map_ = [('cause', 'C'), ('effect', 'E')]
return zip(*[tuple(zip(*x)) for x in make_causal_input(lodict_, map_)])
def make_data2(df):
lodict_ = []
for rows in df.itertuples():
list_ = [rows[2], rows[3], rows[4]]
map1 = ['sentence', 'cause', 'effect']
dict_ = s2dict(list_, map1)
lodict_.append(dict_)
map_ = [('cause', 'C'), ('effect', 'E')]
import itertools
return list(itertools.chain(*make_causal_input(lodict_, map_)))
def create_data_files(input_file_path, validation=False):
df = pd.read_csv(input_file_path, delimiter='; ', engine='python', header=0)
# Make train and test sets keeping multiple cause / effects blocks together.
df['IdxSplit'] = df.Index.apply(lambda x: ''.join(x.split(".")[0:2]))
df.set_index('IdxSplit', inplace=True)
np.random.seed(0)
testrows = np.random.choice(df.index.values, int(len(df) / 4))
test_sents = df.loc[testrows].drop_duplicates(subset='Index')
train_sents = df.drop(test_sents.index)
if validation is True:
validrows = np.random.choice(train_sents.index.values, int(len(train_sents) / 4))
valid_sents = train_sents.loc[validrows]
train_sents = df.drop(valid_sents.index)
pairs = make_data2(valid_sents)
pd.DataFrame(pairs).to_csv('valid_data.csv', sep=' ', index=None, header=False)
pairs = make_data2(train_sents)
pd.DataFrame(pairs).to_csv('train_data.csv', sep=' ', index=None, header=False)
pairs = make_data2(test_sents)
pd.DataFrame(pairs).to_csv('test_data.csv', sep=' ', index=None, header=False)
def create_data_files2(input_file_path, validation=False):
def write_list(lst, outfile):
with open(outfile, 'w') as f:
for item in lst:
f.write("%s\n" % item)
df = pd.read_csv(input_file_path, delimiter='; ', engine='python', header=0)
# Make train and test sets keeping multiple cause / effects blocks together.
df['IdxSplit'] = df.Index.apply(lambda x: ''.join(x.split(".")[0:2]))
df.set_index('IdxSplit', inplace=True)
np.random.seed(0)
testrows = np.random.choice(df.index.values, int(len(df) / 4))
test_sents = df.loc[testrows].drop_duplicates(subset='Index')
train_sents = df.drop(test_sents.index)
if validation is True:
validrows = np.random.choice(train_sents.index.values, int(len(train_sents) / 4))
valid_sents = train_sents.loc[validrows]
train_sents = train_sents.drop(valid_sents.index)
sentences, tags = make_data(valid_sents)
write_list(list(map(lambda x: ' '.join(x), sentences)), 'testa.words.txt')
write_list(list(map(lambda x: ' '.join(x), tags)), 'testa.tags.txt')
sentences, tags = make_data(train_sents)
write_list(list(map(lambda x: ' '.join(x), sentences)), 'train.words.txt')
write_list(list(map(lambda x: ' '.join(x), tags)), 'train.tags.txt')
sentences, tags = make_data(test_sents)
write_list(list(map(lambda x: ' '.join(x), sentences)), 'testb.words.txt')
write_list(list(map(lambda x: ' '.join(x), tags)), 'testb.tags.txt')
def create_data_files3(input_file_path, test_file_path, validation=False):
def write_list(lst, outfile):
with open(outfile, 'w') as f:
for item in lst:
f.write("%s\n" % item)
train_sents = pd.read_csv(input_file_path, delimiter='; ', engine='python', header=0)
train_sents['IdxSplit'] = train_sents.Index.apply(lambda x: ''.join(x.split(".")[0:2]))
train_sents.set_index('IdxSplit', inplace=True)
test_sents = pd.read_csv(test_file_path, delimiter='; ', engine='python', header=0)
test_sents['IdxSplit'] = test_sents.Index.apply(lambda x: ''.join(x.split(".")[0:2]))
test_sents.set_index('IdxSplit', inplace=True)
np.random.seed(0)
if validation is True:
validrows = np.random.choice(train_sents.index.values, int(len(train_sents) / 4))
valid_sents = train_sents.loc[validrows]
train_sents = train_sents.drop(valid_sents.index)
sentences, tags = make_data(valid_sents)
write_list(list(map(lambda x: ' '.join(x), sentences)), 'testa.words.txt')
write_list(list(map(lambda x: ' '.join(x), tags)), 'testa.tags.txt')
sentences, tags = make_data(train_sents)
write_list(list(map(lambda x: ' '.join(x), sentences)), 'train.words.txt')
write_list(list(map(lambda x: ' '.join(x), tags)), 'train.tags.txt')
sentences = [' '.join([ word for idx, word in enumerate(word_tokenize(row[2]))]) for row in test_sents.itertuples()]
write_list(sentences, 'testb.words.txt')
# Just temp tags
tags = [' '.join('O' for _ in word_tokenize(row[2])) for row in test_sents.itertuples()]
write_list(tags, 'testb.tags.txt')
def evaluate(test_file_path, modelpath='', args_idx = 1):
pred_file = '/mnt/DATA/python/tf_ner/models/chars_lstm_lstm_crf/results/score/testb.preds.txt'
with open(pred_file, 'r') as f:
predicted = []
sent_data = []
for line in f:
line = line.strip()
if len(line) > 0:
items = line.split(' ')
sent_data.append((items[0], items[1], items[2]))
else:
predicted.append(sent_data)
sent_data = []
if len(sent_data) > 0:
predicted.append(sent_data)
labels = {"C": 1, "E": 2, "O": 0}
predictions = np.array([labels[pred] for sent in predicted for _, _, pred in sent])
truths = np.array([labels[t] for sent in predicted for _, t, _ in sent])
print(np.sum(truths == predictions) / len(truths))
y_test = [[t for _, t, _ in sent] for sent in predicted]
y_pred = [[pred for __, _, pred in sent] for sent in predicted]
tokens_test = [[token for token, _, _ in sent] for sent in predicted]
ll = []
for i, (pred, token) in enumerate(zip(y_pred, tokens_test)):
l = defaultdict(list)
for j, (y, word) in enumerate(zip(pred, token)):
print(y, word)
l[j] = (word, y)
ll.append(l)
nl = []
for line, yt, yp in zip(ll, y_test, y_pred):
d_ = defaultdict(list)
d_["truth"] = yt
d_["pred"] = yp
d_["diverge"] = 0
for k, v in line.items():
d_[v[1]].append(''.join(v[0]))
if d_["truth"] != d_["pred"]:
d_["diverge"] = 1
d_['Cause'] = ' '.join(el for el in d_['C'])
cause_extend = len(d_['Cause']) + 1 # add 1 extra space at start
d_[' Cause'] = d_['Cause'].rjust(cause_extend)
d_['_'] = ' '.join(el for el in d_['_'])
d_['Effect'] = ' '.join(el for el in d_['E'])
effect_extend = len(d_['Effect']) + 1
d_[' Effect'] = d_['Effect'].rjust(effect_extend)
nl.append(d_)
fieldn = sorted(list(set(k for d in nl for k in d)))
with open(os.path.join(modelpath, ("controls_" + str(args_idx)) + ".csv"), "w+", encoding='utf-8') as f:
writer = csv.DictWriter(f, fieldnames=fieldn, delimiter="~")
writer.writeheader()
for line in nl:
writer.writerow(line)
test = pd.read_csv(test_file_path, delimiter='; ', engine='python', header=0)
test['IdxSplit'] = test.Index.apply(lambda x: ''.join(x.split(".")[0:2]))
test.set_index('IdxSplit', inplace=True)
tmp = pd.DataFrame.from_records(nl)[['Cause', 'Effect']].reset_index()
idx = pd.DataFrame(test['Index']).reset_index()
text = | pd.DataFrame(test['Text']) | pandas.DataFrame |
import datetime
import os
import tempfile
from collections import OrderedDict
import boto3
import pandas as pd
import pytest
import yaml
from moto import mock_s3
from numpy.testing import assert_almost_equal
from pandas.testing import assert_frame_equal
from unittest import mock
from triage.component.catwalk.storage import (
MatrixStore,
CSVMatrixStore,
FSStore,
S3Store,
ProjectStorage,
ModelStorageEngine,
)
from tests.utils import CallSpy
class SomeClass:
def __init__(self, val):
self.val = val
def test_S3Store():
with mock_s3():
client = boto3.client("s3")
client.create_bucket(Bucket="test_bucket", ACL="public-read-write")
store = S3Store(f"s3://test_bucket/a_path")
assert not store.exists()
store.write("val".encode("utf-8"))
assert store.exists()
newVal = store.load()
assert newVal.decode("utf-8") == "val"
store.delete()
assert not store.exists()
@mock_s3
def test_S3Store_large():
client = boto3.client('s3')
client.create_bucket(Bucket='test_bucket', ACL='public-read-write')
store = S3Store('s3://test_bucket/a_path')
assert not store.exists()
# NOTE: The issue under test (currently) arises when too large a "part"
# NOTE: is sent to S3 for upload -- greater than its 5 GiB limit on any
# NOTE: single upload request.
#
# NOTE: Though s3fs uploads file parts as soon as its buffer reaches
# NOTE: 5+ MiB, it does not ensure that its buffer -- and resulting
# NOTE: upload "parts" -- remain under this limit (as the result of a
# NOTE: single "write()").
#
# NOTE: Therefore, until s3fs adds handling to ensure it never attempts
# NOTE: to upload such large payloads, we'll handle this in S3Store,
# NOTE: by chunking out writes to s3fs.
#
# NOTE: This is all not only to explain the raison d'etre of this test,
# NOTE: but also as context for the following warning: The
# NOTE: payload we'll attempt to write, below, is far less than 5 GiB!!
# NOTE: (Attempting to provision a 5 GiB string in RAM just for this
# NOTE: test would be an ENORMOUS drag on test runs, and a conceivable
# NOTE: disruption, depending on the test environment's resources.)
#
# NOTE: As such, this test *may* fall out of sync with either the code
# NOTE: that it means to test or with the reality of the S3 API -- even
# NOTE: to the point of self-invalidation. (But, this should do the
# NOTE: trick; and, we can always increase the payload size here, or
# NOTE: otherwise tweak configuration, as necessary.)
one_mb = 2 ** 20
payload = b"0" * (10 * one_mb) # 10MiB text of all zeros
with CallSpy('botocore.client.BaseClient._make_api_call') as spy:
store.write(payload)
call_args = [call[0] for call in spy.calls]
call_methods = [args[1] for args in call_args]
assert call_methods == [
'CreateMultipartUpload',
'UploadPart',
'UploadPart',
'CompleteMultipartUpload',
]
upload_args = call_args[1]
upload_body = upload_args[2]['Body']
# NOTE: Why is this a BufferIO rather than the underlying buffer?!
# NOTE: (Would have expected the result of BufferIO.read() -- str.)
body_length = len(upload_body.getvalue())
assert body_length == 5 * one_mb
assert store.exists()
assert store.load() == payload
store.delete()
assert not store.exists()
def test_FSStore():
with tempfile.TemporaryDirectory() as tmpdir:
tmpfile = os.path.join(tmpdir, "tmpfile")
store = FSStore(tmpfile)
assert not store.exists()
store.write("val".encode("utf-8"))
assert store.exists()
newVal = store.load()
assert newVal.decode("utf-8") == "val"
store.delete()
assert not store.exists()
def test_ModelStorageEngine_nocaching(project_storage):
mse = ModelStorageEngine(project_storage)
mse.write('testobject', 'myhash')
assert mse.exists('myhash')
assert mse.load('myhash') == 'testobject'
assert 'myhash' not in mse.cache
def test_ModelStorageEngine_caching(project_storage):
mse = ModelStorageEngine(project_storage)
with mse.cache_models():
mse.write('testobject', 'myhash')
with mock.patch.object(mse, "_get_store") as get_store_mock:
assert mse.load('myhash') == 'testobject'
assert not get_store_mock.called
assert 'myhash' in mse.cache
# when cache_models goes out of scope the cache should be empty
assert 'myhash' not in mse.cache
DATA_DICT = OrderedDict(
[
("entity_id", [1, 2]),
("as_of_date", [datetime.date(2017, 1, 1), datetime.date(2017, 1, 1)]),
("k_feature", [0.5, 0.4]),
("m_feature", [0.4, 0.5]),
("label", [0, 1]),
]
)
METADATA = {"label_name": "label"}
def matrix_stores():
df = pd.DataFrame.from_dict(DATA_DICT).set_index(MatrixStore.indices)
with tempfile.TemporaryDirectory() as tmpdir:
project_storage = ProjectStorage(tmpdir)
tmpcsv = os.path.join(tmpdir, "df.csv.gz")
tmpyaml = os.path.join(tmpdir, "df.yaml")
with open(tmpyaml, "w") as outfile:
yaml.dump(METADATA, outfile, default_flow_style=False)
df.to_csv(tmpcsv, compression="gzip")
csv = CSVMatrixStore(project_storage, [], "df")
# first test with caching
with csv.cache():
yield csv
# with the caching out of scope they will be nuked
# and this last version will not have any cache
yield csv
def test_MatrixStore_empty():
for matrix_store in matrix_stores():
assert not matrix_store.empty
def test_MatrixStore_metadata():
for matrix_store in matrix_stores():
assert matrix_store.metadata == METADATA
def test_MatrixStore_columns():
for matrix_store in matrix_stores():
assert matrix_store.columns() == ["k_feature", "m_feature"]
def test_MatrixStore_resort_columns():
for matrix_store in matrix_stores():
result = matrix_store.matrix_with_sorted_columns(
["m_feature", "k_feature"]
).values.tolist()
expected = [[0.4, 0.5], [0.5, 0.4]]
assert_almost_equal(expected, result)
def test_MatrixStore_already_sorted_columns():
for matrix_store in matrix_stores():
result = matrix_store.matrix_with_sorted_columns(
["k_feature", "m_feature"]
).values.tolist()
expected = [[0.5, 0.4], [0.4, 0.5]]
assert_almost_equal(expected, result)
def test_MatrixStore_sorted_columns_subset():
with pytest.raises(ValueError):
for matrix_store in matrix_stores():
matrix_store.matrix_with_sorted_columns(["m_feature"]).values.tolist()
def test_MatrixStore_sorted_columns_superset():
with pytest.raises(ValueError):
for matrix_store in matrix_stores():
matrix_store.matrix_with_sorted_columns(
["k_feature", "l_feature", "m_feature"]
).values.tolist()
def test_MatrixStore_sorted_columns_mismatch():
with pytest.raises(ValueError):
for matrix_store in matrix_stores():
matrix_store.matrix_with_sorted_columns(
["k_feature", "l_feature"]
).values.tolist()
def test_MatrixStore_labels_idempotency():
for matrix_store in matrix_stores():
assert matrix_store.labels.tolist() == [0, 1]
assert matrix_store.labels.tolist() == [0, 1]
def test_MatrixStore_save():
data = {
"entity_id": [1, 2],
"as_of_date": [pd.Timestamp(2017, 1, 1), pd.Timestamp(2017, 1, 1)],
"feature_one": [0.5, 0.6],
"feature_two": [0.5, 0.6],
"label": [1, 0]
}
df = pd.DataFrame.from_dict(data)
labels = df.pop("label")
for matrix_store in matrix_stores():
matrix_store.metadata = METADATA
matrix_store.matrix_label_tuple = df, labels
matrix_store.save()
assert_frame_equal(
matrix_store.design_matrix,
df
)
def test_MatrixStore_caching():
for matrix_store in matrix_stores():
with matrix_store.cache():
matrix = matrix_store.design_matrix
with mock.patch.object(matrix_store, "_load") as load_mock:
assert_frame_equal(matrix_store.design_matrix, matrix)
assert not load_mock.called
def test_as_of_dates(project_storage):
data = {
"entity_id": [1, 2, 1, 2],
"feature_one": [0.5, 0.6, 0.5, 0.6],
"feature_two": [0.5, 0.6, 0.5, 0.6],
"as_of_date": [
pd.Timestamp(2016, 1, 1),
pd.Timestamp(2016, 1, 1),
pd.Timestamp(2017, 1, 1),
pd.Timestamp(2017, 1, 1),
],
"label": [1, 0, 1, 0]
}
df = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
'''
(c) 2014 <NAME> and <NAME>
This module contains functions for parsing various ldsc-defined file formats.
'''
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import logging
def series_eq(x, y):
'''Compare series, return False if lengths not equal.'''
return len(x) == len(y) and (x == y).all()
def read_csv(fh, **kwargs):
if fh.endswith('.hdf'):
df = pd.read_hdf(fh)
if 'usecols' in kwargs.keys():
df = df[kwargs['usecols']]
elif fh.endswith('.parquet'):
df = pd.read_parquet(fh)
if 'usecols' in kwargs.keys():
df = df[kwargs['usecols']]
else:
df = pd.read_csv(fh, delim_whitespace=True, na_values='.', **kwargs)
return df
def set_snpid_index(df):
df['A1_first'] = (df['A1'] < df['A2']) | (df['A1'].str.len()>1) | (df['A2'].str.len()>1)
df['A1s'] = df['A2'].copy()
df.loc[df['A1_first'], 'A1s'] = df.loc[df['A1_first'], 'A1'].copy()
df['A2s'] = df['A1'].copy()
df.loc[df['A1_first'], 'A2s'] = df.loc[df['A1_first'], 'A2'].copy()
df.index = df['CHR'].astype(str) + '.' + df['BP'].astype(str) + '.' + df['A1s'] + '.' + df['A2s']
df.index.name = 'snpid'
df.drop(columns=['A1_first', 'A1s', 'A2s'], inplace=True)
return df
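# ex.: a SNP with CHR=1, BP=12345 and alleles A/G (in either A1/A2 order) gets the
# index '1.12345.A.G', so the snpid is orientation-independent for simple SNPs;
# indels (multi-character alleles) keep A1 first.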
def sub_chr(s, chr):
'''Substitute chr for @, else append chr to the end of str.'''
if '@' not in s:
s += '@'
return s.replace('@', str(chr))
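# ex.: sub_chr('ld/weights.chr@', 22) -> 'ld/weights.chr22'
#      sub_chr('ld/weights', 22) -> 'ld/weights22' (the '@' is appended when absent)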
def which_compression(fh):
'''Given a file prefix, figure out what sort of compression to use.'''
#import ipdb; ipdb.set_trace()
if os.access(fh + '.parquet', 4):
suffix = '.parquet'
compression = 'parquet'
elif os.access(fh + '.hdf', 4):
suffix = '.hdf'
compression = 'hdf'
elif os.access(fh + '.bz2', 4):
suffix = '.bz2'
compression = 'bz2'
elif os.access(fh + '.gz', 4):
suffix = '.gz'
compression = 'gzip'
elif os.access(fh, 4):
suffix = ''
compression = None
else:
raise IOError('Could not open {F}[./gz/bz2/.hdf/.parquet]'.format(F=fh))
return suffix, compression
def get_compression(fh):
'''Which sort of compression should we use with read_csv?'''
if fh.endswith('gz'):
compression = 'gzip'
elif fh.endswith('bz2'):
compression = 'bz2'
else:
compression = None
return compression
def read_cts(fh, match_snps):
'''Reads files for --cts-bin.'''
compression = get_compression(fh)
cts = read_csv(fh, compression=compression, header=None, names=['SNP', 'ANNOT'])
if not series_eq(cts.SNP, match_snps):
raise ValueError('--cts-bin and the .bim file must have identical SNP columns.')
return cts.ANNOT.values
def sumstats(fh, alleles=True, dropna=True):
'''Parses .sumstats files. See docs/file_formats_sumstats.txt.'''
dtype_dict = {'SNP': str, 'Z': float, 'N': float, 'A1': str, 'A2': str}
compression = get_compression(fh)
usecols = ['SNP', 'CHR', 'BP', 'Z', 'N']
#if alleles:
usecols += ['A1', 'A2']
try:
x = read_csv(fh, usecols=usecols, dtype=dtype_dict, compression=compression)
except (AttributeError, ValueError) as e:
raise ValueError('Improperly formatted sumstats file: ' + str(e.args))
if dropna:
x = x.dropna(how='any')
x = set_snpid_index(x)
x.drop(columns=['CHR', 'BP'], inplace=True)
return x
def ldscore_fromlist(flist, num=None):
'''Sideways concatenation of a list of LD Score files.'''
ldscore_array = []
for fh_i, fh in enumerate(flist):
y = ldscore(fh, num)
if len(ldscore_array)>0:
if (not series_eq(y.SNP, ldscore_array[0].SNP)) or (not series_eq(y.index, ldscore_array[0].index)):
raise ValueError('LD Scores for concatenation must have identical SNP columns (and A1/A2 columns if such columns exist).')
else: # keep SNP and CHR column from only the first file
y = y.drop(columns=['SNP', 'CHR'], axis=1)
new_col_dict = {c: c + '_' + str(fh_i) for c in y.columns if c not in ['SNP', 'CHR']}
y.rename(columns=new_col_dict, inplace=True)
ldscore_array.append(y)
if len(ldscore_array)==1:
ldscores_all = ldscore_array[0]
else:
#ldscores_all = pd.concat(ldscore_array, axis=1)
ldscores_all = | pd.concat(ldscore_array, axis=1) | pandas.concat |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import re
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import preprocessing, model_selection, metrics
import lightgbm as lgb
import gc
train_df = pd.read_csv('../input/train.csv', parse_dates=["activation_date"])
test_df = pd.read_csv('../input/test.csv', parse_dates=["activation_date"])
import seaborn as sns
import random
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from fuzzywuzzy import fuzz
from nltk.corpus import stopwords
from tqdm import tqdm
from scipy.stats import skew, kurtosis
from scipy.spatial.distance import cosine, cityblock, jaccard, canberra, euclidean, minkowski, braycurtis
from nltk import word_tokenize
stopwords = stopwords.words('russian')
def genFeatures(x):
x["activation_weekday"] = x["activation_date"].dt.weekday
x["monthday"] = x["activation_date"].dt.day
x["weekinmonday"] = x["monthday"] // 7
##################Added in set 1 - 0.01 Improvement
x['price_new'] = np.log1p(x.price) # log transform improves co-relation with deal_price
x['count_null_in_row'] = x.isnull().sum(axis=1)# works
x['has_description'] = x.description.isnull().astype(int)
x['has_image'] = x.image.isnull().astype(int)
x['has_image_top'] = x.image_top_1.isnull().astype(int)
x['has_param1'] = x.param_1.isnull().astype(int)
x['has_param2'] = x.param_2.isnull().astype(int)
x['has_param3'] = x.param_3.isnull().astype(int)
x['has_price'] = x.price.isnull().astype(int)
#################Added in set 2 - 0.00x Improvement
x["description"].fillna("NA", inplace=True)
x["desc_nwords"] = x["description"].apply(lambda x: len(x.split()))
x['len_description'] = x['description'].apply(lambda x: len(x))
x["title_nwords"] = x["title"].apply(lambda x: len(x.split()))
x['len_title'] = x['title'].apply(lambda x: len(x))
x['params'] = x['param_1'].fillna('') + ' ' + x['param_2'].fillna('') + ' ' + x['param_3'].fillna('')
x['params'] = x['params'].str.strip()
x['len_params'] = x['params'].apply(lambda x: len(x))
x['words_params'] = x['params'].apply(lambda x: len(x.split()))
x['symbol1_count'] = x['description'].str.count('↓')
x['symbol2_count'] = x['description'].str.count('\*')
x['symbol3_count'] = x['description'].str.count('✔')
x['symbol4_count'] = x['description'].str.count('❀')
x['symbol5_count'] = x['description'].str.count('➚')
x['symbol6_count'] = x['description'].str.count('ஜ')
    x['symbol7_count'] = x['description'].str.count('\.')
x['symbol8_count'] = x['description'].str.count('!')
x['symbol9_count'] = x['description'].str.count('\?')
x['symbol10_count'] = x['description'].str.count(' ')
x['symbol11_count'] = x['description'].str.count('-')
x['symbol12_count'] = x['description'].str.count(',')
####################
return x
train_df = genFeatures(train_df)
test_df = genFeatures(test_df)
test_df['deal_probability']=10.0
############################
english_stemmer = nltk.stem.SnowballStemmer('russian')
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
def stem_tokens(tokens, stemmer):
stemmed = []
for token in tokens:
#stemmed.append(stemmer.lemmatize(token))
stemmed.append(stemmer.stem(token))
return stemmed
def preprocess_data(line,
exclude_stopword=True,
encode_digit=False):
## tokenize
line = clean_text(line)
tokens = [x.lower() for x in nltk.word_tokenize(line)]
## stem
tokens_stemmed = stem_tokens(tokens, english_stemmer)#english_stemmer
if exclude_stopword:
tokens_stemmed = [x for x in tokens_stemmed if x not in stopwords]
return ' '.join(tokens_stemmed)
train_test = pd.concat((train_df, test_df), axis = 'rows')
## After cleaning => then find intersection
train_test["title_clean"]= list(train_test[["title"]].apply(lambda x: preprocess_data(x["title"]), axis=1))
train_test["desc_clean"]= list(train_test[["description"]].apply(lambda x: preprocess_data(x["description"]), axis=1))
train_test["params_clean"]= list(train_test[["params"]].apply(lambda x: preprocess_data(x["params"]), axis=1))
train_test['count_common_words_title_desc'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
train_test['count_common_words_title_params'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['params_clean']).lower().split()))), axis=1)
train_test['count_common_words_params_desc'] = train_test.apply(lambda x: len(set(str(x['params_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
print("Cleaned texts..")
###################
# Count Nouns
import pymorphy2
morph = pymorphy2.MorphAnalyzer(result_type=None)
from fastcache import clru_cache as lru_cache
@lru_cache(maxsize=1000000)
def lemmatize_pos(word):
_, tag, norm_form, _, _ = morph.parse(word)[0]
return norm_form, tag.POS
def getPOS(x, pos1 = 'NOUN'):
lemmatized = []
x = clean_text(x)
#x = re.sub(u'[.]', ' ', x)
for s in x.split():
s, pos = lemmatize_pos(s)
if pos != None:
if pos1 in pos:
lemmatized.append(s)
return ' '.join(lemmatized)
train_test['get_nouns_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'NOUN'), axis=1))
train_test['get_nouns_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'NOUN'), axis=1))
train_test['get_adj_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'ADJ'), axis=1))
train_test['get_adj_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'ADJ'), axis=1))
train_test['get_verb_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'VERB'), axis=1))
train_test['get_verb_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'VERB'), axis=1))
# Count digits
def count_digit(x):
x = clean_text(x)
return len(re.findall(r'\b\d+\b', x))
train_test['count_of_digit_in_title'] = list(train_test.apply(lambda x: count_digit(x['title']), axis=1))
train_test['count_of_digit_in_desc'] = list(train_test.apply(lambda x: count_digit(x['description']), axis=1))
train_test['count_of_digit_in_params'] = list(train_test.apply(lambda x: count_digit(x['params']), axis=1))
## get unicode features
count_unicode = lambda x: len([c for c in x if ord(c) > 1105])
count_distunicode = lambda x: len({c for c in x if ord(c) > 1105})
train_test['count_of_unicode_in_title'] = list(train_test.apply(lambda x: count_unicode(x['title']), axis=1))
train_test['count_of_unicode_in_desc'] = list(train_test.apply(lambda x: count_unicode(x['description']), axis=1))
train_test['count_of_distuni_in_title'] = list(train_test.apply(lambda x: count_distunicode(x['title']), axis=1))
train_test['count_of_distuni_in_desc'] = list(train_test.apply(lambda x: count_distunicode(x['description']), axis=1))
###
count_caps = lambda x: len([c for c in x if c.isupper()])
train_test['count_caps_in_title'] = list(train_test.apply(lambda x: count_caps(x['title']), axis=1))
train_test['count_caps_in_desc'] = list(train_test.apply(lambda x: count_caps(x['description']), axis=1))
import string
count_punct = lambda x: len([c for c in x if c in string.punctuation])
train_test['count_punct_in_title'] = list(train_test.apply(lambda x: count_punct(x['title']), axis=1))
train_test['count_punct_in_desc'] = list(train_test.apply(lambda x: count_punct(x['description']), axis=1))
print("Computed POS Features and others..")
train_test['count_common_nouns'] = train_test.apply(lambda x: len(set(str(x['get_nouns_title']).lower().split()).intersection(set(str(x['get_nouns_desc']).lower().split()))), axis=1)
train_test['count_common_adj'] = train_test.apply(lambda x: len(set(str(x['get_adj_title']).lower().split()).intersection(set(str(x['get_adj_desc']).lower().split()))), axis=1)
train_test['ratio_of_unicode_in_title'] = train_test['count_of_unicode_in_title'] / train_test['len_title']
train_test['ratio_of_unicode_in_desc'] = train_test['count_of_unicode_in_desc'] / train_test['len_description']
train_test['ratio_of_punct_in_title'] = train_test['count_punct_in_title'] / train_test['len_title']
train_test['ratio_of_punct_in_desc'] = train_test['count_punct_in_desc'] / train_test['len_description']
train_test['ratio_of_cap_in_title'] = train_test['count_caps_in_title'] / train_test['len_title']
train_test['ratio_of_cap_in_desc'] = train_test['count_caps_in_desc'] / train_test['len_description']
train_test['count_nouns_in_title'] = train_test["get_nouns_title"].apply(lambda x: len(x.split()))
train_test['count_nouns_in_desc'] = train_test['get_nouns_desc'].apply(lambda x: len(x.split()))
train_test['count_adj_in_title'] = train_test["get_adj_title"].apply(lambda x: len(x.split()))
train_test['count_adj_in_desc'] = train_test['get_adj_desc'].apply(lambda x: len(x.split()))
train_test['count_verb_title'] = train_test['get_verb_title'].apply(lambda x: len(x.split()))
train_test['count_verb_desc'] = train_test['get_verb_desc'].apply(lambda x: len(x.split()))
train_test['ratio_nouns_in_title'] = train_test["count_nouns_in_title"] / train_test["title_nwords"]
train_test['ratio_nouns_in_desc'] = train_test["count_nouns_in_desc"] / train_test["desc_nwords"]
train_test['ratio_adj_in_title'] = train_test["count_adj_in_title"] / train_test["title_nwords"]
train_test['ratio_adj_in_desc'] = train_test["count_adj_in_desc"] / train_test["desc_nwords"]
train_test['ratio_vrb_in_title'] = train_test["count_verb_title"] / train_test["title_nwords"]
train_test['ratio_vrb_in_desc'] = train_test["count_verb_desc"] / train_test["desc_nwords"]
train_test["title"]= list(train_test[["title"]].apply(lambda x: clean_text(x["title"]), axis=1))
train_test["description"]= list(train_test[["description"]].apply(lambda x: clean_text(x["description"]), axis=1))
train_test["params"]= list(train_test[["params"]].apply(lambda x: clean_text(x["params"]), axis=1))
#######################
### Save
#######################
train_df = train_test.loc[train_test.deal_probability != 10].reset_index(drop = True)
test_df = train_test.loc[train_test.deal_probability == 10].reset_index(drop = True)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
train_df.to_feather('../train_basic_features.pkl')
test_df.to_feather('../test__basic_features.pkl')
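# note: the files above are written in feather format; the .pkl extension is kept only for naming consistency with the rest of the pipeline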
#######################
### Label Enc
#######################
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
cat_vars = ["user_id", "region", "city", "parent_category_name", "category_name", "user_type", "param_1", "param_2", "param_3"]
for col in cat_vars:
    lbl = LabelEncoder()
lbl.fit(list(train_df[col].values.astype('str')) + list(test_df[col].values.astype('str')))
train_df[col] = lbl.transform(list(train_df[col].values.astype('str')))
test_df[col] = lbl.transform(list(test_df[col].values.astype('str')))
train_df.to_feather('../train_basic_features_lblencCats.pkl')
test_df.to_feather('../test__basic_features_lblencCats.pkl')
#######################
### One hots
#######################
train_df=pd.read_feather('../train_basic_features_lblencCats.pkl')
test_df=pd.read_feather('../test__basic_features_lblencCats.pkl')
from sklearn.externals import joblib
le = OneHotEncoder()
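# fit each one-hot encoder on the stacked train+test label-encoded values so both splits share one category space; the resulting sparse matrices are dumped to disk for later use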
X = le.fit_transform(np.array(train_df.user_id.values.tolist() + test_df.user_id.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_id_onehot.pkl")
X = le.fit_transform(np.array(train_df.region.values.tolist() + test_df.region.values.tolist()).reshape(-1,1))
joblib.dump(X, "../region_onehot.pkl")
X = le.fit_transform(np.array(train_df.city.values.tolist() + test_df.city.values.tolist()).reshape(-1,1))
joblib.dump(X, "../city_onehot.pkl")
X = le.fit_transform(np.array(train_df.parent_category_name.values.tolist() + test_df.parent_category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../parent_category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.category_name.values.tolist() + test_df.category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.user_type.values.tolist() + test_df.user_type.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_type_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_1.values.tolist() + test_df.param_1.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_1_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_2.values.tolist() + test_df.param_2.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_2_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_3.values.tolist() + test_df.param_3.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_3_onehot.pkl")
train_df.drop(cat_vars, inplace = True, axis = 'columns')
test_df.drop(cat_vars, inplace = True, axis = 'columns')
train_df.to_feather('../train_basic_features_woCats.pkl')
test_df.to_feather('../test__basic_features_woCats.pkl')
#######################
### Tfidf
#######################
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from sklearn.externals import joblib
### TFIDF Vectorizer ###
train_df['params'] = train_df['params'].fillna('NA')
test_df['params'] = test_df['params'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000,#min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf.pkl")
### TFIDF Vectorizer ###
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2),max_features = 20000,#,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
### TFIDF Vectorizer ###
train_df['desc_clean'] = train_df['desc_clean'].fillna(' ')
test_df['desc_clean'] = test_df['desc_clean'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2), max_features = 20000, #,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
### TFIDF Vectorizer ###
train_df['get_nouns_title'] = train_df['get_nouns_title'].fillna(' ')
test_df['get_nouns_title'] = test_df['get_nouns_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_title'].values.tolist() + test_df['get_nouns_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_title_tfidf.pkl")
del full_tfidf
print("TDIDF Title Noun..")
### TFIDF Vectorizer ###
train_df['get_nouns_desc'] = train_df['get_nouns_desc'].fillna(' ')
test_df['get_nouns_desc'] = test_df['get_nouns_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_desc'].values.tolist() + test_df['get_nouns_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Noun..")
### TFIDF Vectorizer ###
train_df['get_adj_title'] = train_df['get_adj_title'].fillna(' ')
test_df['get_adj_title'] = test_df['get_adj_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_title'].values.tolist() + test_df['get_adj_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Adj..")
### TFIDF Vectorizer ###
train_df['get_adj_desc'] = train_df['get_adj_desc'].fillna(' ')
test_df['get_adj_desc'] = test_df['get_adj_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_desc'].values.tolist() + test_df['get_adj_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Adj..")
### TFIDF Vectorizer ###
train_df['get_verb_title'] = train_df['get_verb_title'].fillna(' ')
test_df['get_verb_title'] = test_df['get_verb_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_title'].values.tolist() + test_df['get_verb_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Verb..")
### TFIDF Vectorizer ###
train_df['get_verb_desc'] = train_df['get_verb_desc'].fillna(' ')
test_df['get_verb_desc'] = test_df['get_verb_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_desc'].values.tolist() + test_df['get_verb_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Verb..")
###############################
# Sentence to seq
###############################
print('Generate Word Sequences')
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
MAX_NUM_OF_WORDS = 100000
TIT_MAX_SEQUENCE_LENGTH = 100
df = pd.concat((train_df, test_df), axis = 'rows')
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['title'].tolist())
sequences = tokenizer.texts_to_sequences(df['title'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../titleSequences.pkl")
MAX_NUM_OF_WORDS = 10000
TIT_MAX_SEQUENCE_LENGTH = 20
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['params'].tolist())
sequences = tokenizer.texts_to_sequences(df['params'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../paramSequences.pkl")
MAX_NUM_OF_WORDS = 100000
TIT_MAX_SEQUENCE_LENGTH = 100
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['description'].tolist())
sequences = tokenizer.texts_to_sequences(df['description'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../descSequences.pkl")
#######OHC WeekDay
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
le = OneHotEncoder()
X = le.fit_transform(np.array(train_df.activation_weekday.values.tolist() + test_df.activation_weekday.values.tolist()).reshape(-1,1))
################################################
# Cat encoding
################################################
train_df=pd.read_feather('../train_basic_features.pkl')
test_df=pd.read_feather('../test__basic_features.pkl')
def catEncode(train_char, test_char, y, colLst = [], nbag = 10, nfold = 20, minCount = 3, postfix = ''):
train_df = train_char.copy()
test_df = test_char.copy()
if not colLst:
print("Empty ColLst")
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby([c]).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = c + postfix
                    datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
                                            'y_len': ('y_len_' + ind), 'y_median': ('y_median_' + ind),}, inplace = True)
# datax[c+'_medshftenc'] = datax['y_median']-med_y
# datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=[c], how='left').fillna(np.mean(y))
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[ind + str(x) for x in list(set(datax.columns)-set([c]))]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
else:
print("Not Empty ColLst")
data = train_char[colLst].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(colLst).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = '_'.join(colLst) + postfix
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len': ('y_len_' + ind), 'y_median': ('y_median_' + ind),}, inplace = True)
datatst = test_char[colLst].copy()
val_X = val_X.join(datax,on=colLst, how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=colLst, how='left').fillna(np.mean(y))
print(val_X[list(set(datax.columns)-set(colLst))].columns)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set(colLst))]
enc_mat_test += datatst[list(set(datax.columns)-set(colLst))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
        enc_mat.columns=[ind + str(x) for x in list(set(datax.columns)-set(colLst))]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
print(train_df.columns)
print(test_df.columns)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
return train_df, test_df
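# catEncode builds out-of-fold target-encoding features (len/mean/std/median of the target per category),
# bagged over nbag KFold seeds; test rows receive the average of the encodings over all folds and bags.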
catCols = ['user_id', 'region', 'city', 'parent_category_name',
'category_name', 'user_type']
train_df, test_df = catEncode(train_df[catCols].copy(), test_df[catCols].copy(), train_df.deal_probability.values, nbag = 10, nfold = 10, minCount = 0)
train_df.to_feather('../train_cat_targetenc.pkl')
test_df.to_feather('../test_cat_targetenc.pkl')
################################################################
# Tfidf - part 2
################################################################
import os; os.environ['OMP_NUM_THREADS'] = '1'
from sklearn.decomposition import TruncatedSVD
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from nltk.corpus import stopwords
import time
from typing import List, Dict
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer as Tfidf
from sklearn.model_selection import KFold
from sklearn.externals import joblib
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import gc
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
from sklearn import model_selection
english_stemmer = nltk.stem.SnowballStemmer('russian')
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
def stem_tokens(tokens, stemmer):
stemmed = []
for token in tokens:
#stemmed.append(stemmer.lemmatize(token))
stemmed.append(stemmer.stem(token))
return stemmed
def preprocess_data(line,
exclude_stopword=True,
encode_digit=False):
## tokenize
line = clean_text(line)
tokens = [x.lower() for x in nltk.word_tokenize(line)]
## stem
tokens_stemmed = stem_tokens(tokens, english_stemmer)#english_stemmer
if exclude_stopword:
tokens_stemmed = [x for x in tokens_stemmed if x not in stopwords]
return ' '.join(tokens_stemmed)
stopwords = stopwords.words('russian')
train_per=pd.read_csv('../input/train_active.csv', usecols = ['param_1', 'param_2', 'param_3'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['param_1', 'param_2', 'param_3'])#,'title','description'])
train_test = pd.concat((train_per, test_per), axis = 'rows')
del train_per, test_per; gc.collect()
train_test['params'] = train_test['param_1'].fillna('') + ' ' + train_test['param_2'].fillna('') + ' ' + train_test['param_3'].fillna('')
import re
train_test.drop(['param_1', 'param_2', 'param_3'], axis = 'columns', inplace=True)
train_test["params"]= list(train_test[["params"]].apply(lambda x: clean_text(x["params"]), axis=1))
import re
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from sklearn.externals import joblib
### TFIDF Vectorizer ###
train_df['params'] = train_df['params'].fillna('NA')
test_df['params'] = test_df['params'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000,#min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_test['params'].values.tolist() + train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf2.pkl")
tfidf_vec = TfidfVectorizer(ngram_range=(1,1),max_features = 10000,max_df=.4,#min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_test['params'].values.tolist() + train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf3.pkl")
del(train_test); gc.collect()
train_per=pd.read_csv('../input/train_active.csv', usecols = ['title'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['title'])#,'title','description'])
train_test = pd.concat((train_per, test_per), axis = 'rows')
del train_per, test_per; gc.collect()
train_test.fillna('NA', inplace=True)
train_test["title_clean"]= list(train_test[["title"]].apply(lambda x: preprocess_data(x["title"]), axis=1))
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2),max_features = 20000,#,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['title_clean'].values.tolist()+train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf2.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1),max_features = 20000, max_df=.4,#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['title_clean'].values.tolist()+train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf3.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
del(train_test); gc.collect()
###Too slow###
'''
train_per=pd.read_csv('../input/train_active.csv', usecols = ['description'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['description'])#,'title','description'])
train_per.fillna(' ', inplace=True)
test_per.fillna(' ', inplace=True)
train_test["desc_clean"]= list(train_test[["description"]].apply(lambda x: preprocess_data(x["description"]), axis=1))
### TFIDF Vectorizer ###
train_df['desc_clean'] = train_df['desc_clean'].fillna(' ')
test_df['desc_clean'] = test_df['desc_clean'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2), max_features = 20000, stop_words = stopwords#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['desc_clean'].values.tolist()+train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf2.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 20000, max_df=.4,#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['desc_clean'].values.tolist()+train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf3.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
'''
##########################################
# 13. Chargram -- too slow
##########################################
from collections import Counter
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
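# char_ngrams below builds space-separated 3/4/5-character n-grams (with whitespace removed) so that a word-level TF-IDF can be applied to character grams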
def char_ngrams(s):
s = s.lower()
s = s.replace(u' ', '')
result = Counter()
len_s = len(s)
for n in [3, 4, 5]:
result.update(s[i:i+n] for i in range(len_s - n + 1))
return ' '.join(list(result))
data = pd.concat((train_df, test_df), axis = 'rows')
data['param_chargram'] = list(data[['params']].apply(lambda x: char_ngrams(x['params']), axis=1))
data['title_chargram'] = list(data[['title']].apply(lambda x: char_ngrams(x['title']), axis=1))
#data['desc_chargram'] = list(data[['description']].apply(lambda x: char_ngrams(x['description']), axis=1))
#data['count_common_chargram'] = data.apply(lambda x: len(set(str(x['title_chargram']).lower().split()).intersection(set(str(x['desc_chargram']).lower().split()))), axis=1)
train_df = data.loc[data.deal_probability != 10].reset_index(drop = True)
test_df = data.loc[data.deal_probability == 10].reset_index(drop = True)
del(data); gc.collect()
#####Chargram -TFIDF
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['title_chargram'].values.tolist() + test_df['title_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../title_chargram_tfidf.pkl')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['param_chargram'].values.tolist() + test_df['param_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['param_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['param_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../param_chargram_tfidf.pkl')
#######Chargram of Cat and Parent cat
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
train_df = pd.read_feather('../train_basic_features.pkl')
test_df = pd.read_feather('../test__basic_features.pkl')
data = pd.concat([train_df, test_df], axis= 'rows')
data['categories'] = data["parent_category_name"].fillna(' ') + data["category_name"].fillna(' ')
data['cat_chargram'] = list(data[['categories']].apply(lambda x: char_ngrams(x['categories']), axis=1))
train_df = data.loc[data.deal_probability != 10].reset_index(drop = True)
test_df = data.loc[data.deal_probability == 10].reset_index(drop = True)
del(data); gc.collect()
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 1000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['cat_chargram'].values.tolist() + test_df['cat_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['cat_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['cat_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../cat_chargram_tfidf.pkl')
##############################
## New Kaggle Ftr
##############################
import pandas as pd
import gc
used_cols = ['item_id', 'user_id']
train = pd.read_csv('../input/train.csv', usecols=used_cols)
train_active = pd.read_csv('../input/train_active.csv', usecols=used_cols)
test = pd.read_csv('../input/test.csv', usecols=used_cols)
test_active = pd.read_csv('../input/test_active.csv', usecols=used_cols)
train_periods = pd.read_csv('../input/periods_train.csv', parse_dates=['date_from', 'date_to'])
test_periods = pd.read_csv('../input/periods_test.csv', parse_dates=['date_from', 'date_to'])
train.head()
all_samples = pd.concat([
train,
train_active,
test,
test_active
]).reset_index(drop=True)
all_samples.drop_duplicates(['item_id'], inplace=True)
del train_active
del test_active
gc.collect()
all_periods = pd.concat([
train_periods,
test_periods
])
del train_periods
del test_periods
gc.collect()
all_periods.head()
all_periods['days_up'] = (all_periods['date_to'] - all_periods['date_from']).dt.days
gp = all_periods.groupby(['item_id'])[['days_up']]
gp_df = pd.DataFrame()
gp_df['days_up_sum'] = gp.sum()['days_up']
gp_df['times_put_up'] = gp.count()['days_up']
gp_df.reset_index(inplace=True)
gp_df = gp_df.rename(index=str, columns={'index': 'item_id'})
gp_df.head()
all_periods.drop_duplicates(['item_id'], inplace=True)
all_periods = all_periods.merge(gp_df, on='item_id', how='left')
all_periods.head()
del gp
del gp_df
gc.collect()
all_periods = all_periods.merge(all_samples, on='item_id', how='left')
all_periods.head()
gp = all_periods.groupby(['user_id'])[['days_up_sum', 'times_put_up']].mean().reset_index() \
.rename(index=str, columns={
'days_up_sum': 'avg_days_up_user',
'times_put_up': 'avg_times_up_user'
})
gp.head()
n_user_items = all_samples.groupby(['user_id'])[['item_id']].count().reset_index() \
.rename(index=str, columns={
'item_id': 'n_user_items'
})
gp = gp.merge(n_user_items, on='user_id', how='left')
gp.head()
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
# -*- coding: utf-8 -*-
"""
doi of according publication [preprint]:
https://doi.org/10.5194/hess-2021-403
Contact: <EMAIL>
ORCID: 0000-0002-0585-9549
https://github.com/AndreasWunsch/CNN_KarstSpringModeling/
MIT License
large parts of the code from <NAME> (https://github.com/andersonsam/cnn_lstm_era)
see also: Anderson & Radic (2021): Evaluation and interpretation of convolutional-recurrent networks for regional hydrological modelling
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
import pickle
from random import seed
import os
from bayes_opt import BayesianOptimization
from bayes_opt.util import load_logs
from scipy import interpolate
#%% functions
def bayesOpt_function():
# just a placeholder needed to load json logs
return
#%%
def make_heat(model, x_test, y_test, style_dict, timesteps, iters_total, iters_one_pass, verbose, tol, p_channel, batch_size):
"""
Function by <NAME> (2021), slightly modified by <NAME> (2021).
model:
keras model
x_test:
        tf tensor; test set of ERA data, input to model (shape = Ntest x steps_in x height x width x channels)
y_test:
tf tensor; test set of streamflow data, target output of model (shape = Ntest x 1)
style_dict:
dictionary: {'style' : 'RISE' or 'gauss',
'params' : [h,w,p_1] or sigma}
where [h,w,p_1] are the height/width/probability of perturbation of low-res mask (for RISE algorithm); sigma is the gaussian RMS width
timesteps:
        range of timesteps in the test set to perturb (e.g. timesteps = range(0,365) will perturb the first 365 timesteps in the test set)
iters_total:
number of total iterations of perturbation to do for each day in timesteps
iters_one_pass:
number of iterations to do at one time (typically less than iters_total for memory limits)
verbose:
0: print nothing
1: print every 50th day
tol:
relative error threshold (when to stop pertubing model)
batch_size:
batchsize of the model training process
p_channel:
number that defines the channel that will be perturbed (others will be used as is)
"""
#initialize heatmap as 3D numpy array: lat x lon x 1
heat_mean = np.zeros((np.size(x_test[0,0,:,:,0]), 1))
H = np.shape(x_test)[2] #height of input video, in pixels (latitude)
W = np.shape(x_test)[3] #width of input video, in pixels (longitude)
heat_prev = np.zeros((H*W,1)) #initially, the previous heatmap is 0's (for first pass)
heat_curr = np.zeros((H*W,1)) #also initialize the current heatmap as 0's (will fill once calculated at end of first pass)
kk = 0
err = tol+1 #just to enter while loop
while err > tol:
print(kk)
#loop through specified timesteps to generate mean sensitivity
for ts in timesteps: #for each day that we will perturb
#state progress
if verbose:
if np.mod(ts,iters_one_pass)==0:
                    print(' Timestep ' + str(ts) + '/' + str(len(timesteps)))
#number of iterations of perturbations for one forward pass through model
iters = iters_one_pass
#define perturbation: rectangular as from RISE, or gaussian
if style_dict['style'] == 'RISE':
h = style_dict['params'][0]
w = style_dict['params'][1]
p_1 = style_dict['params'][2]
x_int = np.linspace(0,W,w) #low-res x indices
y_int = np.linspace(0,H,h) #low-res y indices
xnew = np.arange(W)
ynew = np.arange(H)
perturb_small = np.random.choice([0,1],size = (iters,1,h,w), p = [1-p_1,p_1]) #binary perturbation on coarse grid
perturb = np.half([interpolate.interp2d(x_int,y_int,perturb_small[iter][0])(xnew,ynew) for iter in range(iters)]) #perturbation is interpolated to finer grid
elif style_dict['style'] == 'gauss':
sigma = style_dict['params']
x_int = np.arange(W)
y_int = np.arange(H)
x_mesh, y_mesh = np.meshgrid(x_int, y_int)
#define gaussian perturbation for each iteration being passed
perturb = np.half([np.exp( - ( (x_mesh - np.random.randint(0,W))**2 + (y_mesh - np.random.randint(0,H))**2 ) / (2*sigma**2) ) for iter in range(iters)])
#copy/expand dimensions of the perturbation to be added to weather video
perturb_2D = np.copy(perturb) #the 2D perturbation for each iteration of this pass
perturb = tf.repeat(tf.expand_dims(tf.convert_to_tensor(perturb),3),nchannels, axis = 3) #expand along channels in one image
perturb = tf.repeat(tf.expand_dims(tf.convert_to_tensor(perturb),1),steps_in, axis = 1) #expand along images in one video
# only perturb one channel
mask = np.zeros((perturb.shape))
mask[:,:,:,:,p_channel] = 1
mask = np.half(mask)
mask = tf.convert_to_tensor(mask)
perturb = perturb*mask
xday = x_test[ts] #current timestep in test set
xday_iters = [xday for val in range(iters)] #repeat for each iteration (e.g. make copy for each perturbation)
factor = np.random.choice([-1,1],p = [0.5,0.5]).astype('float16') #whether to add or subtract perturbation from input video, 50-50 chance of each
perturb = factor*perturb
x1 = perturb
x2 = tf.convert_to_tensor(xday_iters)
xday_iters_perturb = tf.math.add(x1,x2)
x_all = tf.squeeze(tf.concat((tf.expand_dims(xday, axis = 0),xday_iters_perturb), axis = 0)) #'all' refers to original (xday) and perturbed (xday_iters_perturb)
x_all_ds = tf.data.Dataset.from_tensor_slices(x_all).batch(batch_size = batch_size)
y_all = model.predict(x_all_ds)
yday = y_all[0] #first element is unperturbed model prediction
yday_perturb = y_all[1:] #all others are perturbed model predictions for each iteration of perturbation
ydiffs = np.abs(np.reshape(yday - yday_perturb[:iters],(-1,1))) #magnitude difference between perturbed and unperturbed streamflow
delta = np.ones((len(ydiffs),H,W)) * ydiffs[:,None] #get dimensions to match so delta can be multiplied by perturbation
heat_iters = np.asarray(delta[:iters]) * np.asarray(perturb_2D)
heat = np.mean(heat_iters[:iters], axis=0)
heat_mean[:,0] += heat.flatten()
del heat, heat_iters, delta, ydiffs, x_all, xday_iters #delete for memory
n_iters = iters_one_pass*(kk+1)
heat_curr = np.copy(heat_mean) / n_iters
err = np.mean(np.abs(heat_curr - heat_prev)) / np.mean(heat_prev)
heat_prev = np.copy(heat_curr)
kk += 1
heat_mean = heat_mean /(iters_total * len(timesteps))
return heat_mean
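# Example call (sketch only -- the channel index, iteration counts and tolerance below are
# illustrative assumptions, not values prescribed by the study):
# heat_P = make_heat(model, x_test, y_test,
#                    style_dict={'style': 'gauss', 'params': 3},
#                    timesteps=range(x_test.shape[0]),
#                    iters_total=300, iters_one_pass=30,
#                    verbose=1, tol=0.01, p_channel=0, batch_size=batch_size)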
#%% Set directories and load data
dir_data = './data_pickle' #where to save trained model outputs
dir_models = './Results' #where to save trained model outputs
dir_output = './heatmaps'
# os.chdir(dir_output)
# load data, which is already preprocessed and is a pickled dictionary with format:
# 'date': Datetimeindex (No_of_timesteps,)
# 'Variable': list (No_of_timesteps,)
# each line of 'Variable' contains an array with dimensions (X_cells,Y_cells) (grid for each timestep)
# one pickle file for each variable
pickle_in = open(dir_data + '/' + 'TDict.pickle','rb')
tempDict = pickle.load(pickle_in)
pickle_in = open(dir_data + '/' + 'TsinDict.pickle','rb')
tsinDict = pickle.load(pickle_in)
pickle_in = open(dir_data + '/' + 'PDict.pickle','rb')
precDict = pickle.load(pickle_in)
pickle_in = open(dir_data + '/' + 'SMLTDict.pickle','rb')
snowmeltDict = pickle.load(pickle_in)
pickle_in = open(dir_data + '/' + 'EDict.pickle','rb')
EDict = pickle.load(pickle_in)
pickle_in = open(dir_data + '/' + 'SFDict.pickle','rb')
SFDict = pickle.load(pickle_in)
pickle_in = open(dir_data + '/' + 'SWVL1Dict.pickle','rb')
SWVL1Dict = pickle.load(pickle_in)
pickle_in = open(dir_data + '/' + 'SWVL2Dict.pickle','rb')
SWVL2Dict = pickle.load(pickle_in)
pickle_in = open(dir_data + '/' + 'SWVL3Dict.pickle','rb')
SWVL3Dict = pickle.load(pickle_in)
pickle_in = open(dir_data + '/' + 'SWVL4Dict.pickle','rb')
SWVL4Dict = pickle.load(pickle_in)
T = np.asarray(tempDict['T'])
Tsin = np.asarray(tsinDict['Tsin'])
SMLT = np.asarray(snowmeltDict['SMLT'])
P = np.asarray(precDict['P'])
E = np.asarray(EDict['E'])
SF = np.asarray(SFDict['SF'])
SWVL1 = np.asarray(SWVL1Dict['SWVL1'])
SWVL2 = np.asarray(SWVL2Dict['SWVL2'])
SWVL3 = np.asarray(SWVL3Dict['SWVL3'])
SWVL4 = np.asarray(SWVL4Dict['SWVL4'])
# pickle file for Q contains only an array ('Q' time series) and a datetimeindex ('date')
pickle_in = open(dir_data + '/' + 'QDict.pickle','rb')
QDict=pickle.load(pickle_in)
Q = np.asarray(QDict['Q'])
t = QDict['date']
#%% split data
#years/indices of testing/training
#modify accordingly
trainStartYear = 2012
trainFinYear = 2017
valStartYear = 2018
valFinYear = 2018
optStartYear = 2019
optFinYear = 2019
testStartYear = 2020
testFinYear = 2020
trainInds = np.squeeze(np.argwhere((t.year>=trainStartYear) & (t.year<=trainFinYear)))
valInds = np.squeeze(np.argwhere((t.year>=valStartYear) & (t.year<=valFinYear)))
optInds = np.squeeze(np.argwhere((t.year>=optStartYear) & (t.year<=optFinYear)))
testInds = np.squeeze(np.argwhere((t.year>=testStartYear) & (t.year<=testFinYear)))
refInds = np.squeeze(np.argwhere((t.year<testStartYear)))
Ntrain = len(trainInds)
Nval = len(valInds)
Nopt = len(optInds)
Ntest = len(testInds)
#scaling
scaler = StandardScaler()
Tnorm = scaler.fit_transform(T.reshape(-1, T.shape[-1])).reshape(T.shape)
SMLTnorm = scaler.fit_transform(SMLT.reshape(-1, SMLT.shape[-1])).reshape(SMLT.shape)
Pnorm = scaler.fit_transform(P.reshape(-1, P.shape[-1])).reshape(P.shape)
Tsinnorm = scaler.fit_transform(Tsin.reshape(-1, Tsin.shape[-1])).reshape(Tsin.shape)
Enorm = scaler.fit_transform(E.reshape(-1, E.shape[-1])).reshape(E.shape)
SFnorm = scaler.fit_transform(SF.reshape(-1, SF.shape[-1])).reshape(SF.shape)
SWVL1norm = scaler.fit_transform(SWVL1.reshape(-1, SWVL1.shape[-1])).reshape(SWVL1.shape)
SWVL2norm = scaler.fit_transform(SWVL2.reshape(-1, SWVL2.shape[-1])).reshape(SWVL2.shape)
SWVL3norm = scaler.fit_transform(SWVL3.reshape(-1, SWVL3.shape[-1])).reshape(SWVL3.shape)
SWVL4norm = scaler.fit_transform(SWVL4.reshape(-1, SWVL4.shape[-1])).reshape(SWVL4.shape)
Qscaler = StandardScaler()
Qscaler.fit(pd.DataFrame(Q))
Qnorm = Qscaler.transform(pd.DataFrame(Q))
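# Q gets its own scaler so model predictions can later be mapped back to discharge units via Qscaler.inverse_transform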
#%% Define Bayesian Optimization to be able to load from existing logs:
pbounds = {'steps_in': (1,10*4),
'n': (7,7),
'batchsize': (7,7),
'inpTsin': (0,1),
'inpSMLT': (0,1),
'inpE': (0,1),
'inpT': (0,1),
'inpSF': (0,1),
'inpSWVL1': (0,1),
'inpSWVL2': (0,1),
'inpSWVL3': (0,1),
'inpSWVL4': (0,1)}
optimizer = BayesianOptimization(
f= bayesOpt_function,
pbounds=pbounds,
random_state=1,
verbose = 0
)
# #load existing optimizer
log_already_available = 0
if os.path.isfile("./logs.json"):
load_logs(optimizer, logs=["./logs.json"]);
print("\nExisting optimizer is already aware of {} points.".format(len(optimizer.space)))
log_already_available = 1
#get best values from optimizer
n = 2**int(optimizer.max.get("params").get("n"))
steps_in= 6*int(optimizer.max.get("params").get("steps_in"))
batch_size = 2**int(optimizer.max.get("params").get("batchsize"))
inpT = int(round(optimizer.max.get("params").get("inpT")))
inpTsin = int(round(optimizer.max.get("params").get("inpTsin")))
inpSMLT = int(round(optimizer.max.get("params").get("inpSMLT")))
inpE = int(round(optimizer.max.get("params").get("inpE")))
inpSF = int(round(optimizer.max.get("params").get("inpSF")))
inpSWVL1 = int(round(optimizer.max.get("params").get("inpSWVL1")))
inpSWVL2 = int(round(optimizer.max.get("params").get("inpSWVL2")))
inpSWVL3 = int(round(optimizer.max.get("params").get("inpSWVL3")))
inpSWVL4 = int(round(optimizer.max.get("params").get("inpSWVL4")))
# correct and print best values to console
maxDict = optimizer.max
maxDict['params']['n'] = n
maxDict['params']['steps_in'] = steps_in
maxDict['params']['batchsize'] = batch_size
maxDict['params']['steps_in(days)'] = steps_in/24
print("\nBEST:\t{}".format(maxDict))
#%% Compile test data
learning_rate = 1e-3
training_epochs = 100
earlystopping_patience = 12
nchannels = 1 + inpT + inpTsin + inpSMLT + inpE + inpSF + inpSWVL1 + inpSWVL2 + inpSWVL3 + inpSWVL4
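# precipitation (P) is always used as the first input channel; the inp* flags chosen by the optimizer toggle the remaining channels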
y_train = np.squeeze([Qnorm[steps_in:trainInds[-1]+1,]]).T
y_val = np.squeeze([Qnorm[valInds,] ]).T
y_test = np.squeeze([Qnorm[testInds,] ]).T
y_train = y_train.astype(dtype = np.float16)
y_val = y_val.astype(dtype = np.float16)
y_test = y_test.astype(dtype = np.float16)
x_intermediate = np.empty(np.shape(Pnorm) + (nchannels,),dtype='single')
x_intermediate[:,:,:,0] = Pnorm
channel_names = ['P']
idx = 1
if inpT:
x_intermediate[:,:,:,idx] = Tnorm
channel_names.append('T')
idx = idx+1
if inpSMLT:
x_intermediate[:,:,:,idx] = SMLTnorm
channel_names.append('SMLT')
idx = idx+1
if inpTsin:
x_intermediate[:,:,:,idx] = Tsinnorm
channel_names.append('Tsin')
idx = idx+1
if inpE:
x_intermediate[:,:,:,idx] = Enorm
channel_names.append('E')
idx = idx+1
if inpSF:
x_intermediate[:,:,:,idx] = SFnorm
channel_names.append('SF')
idx = idx+1
if inpSWVL1:
x_intermediate[:,:,:,idx] = SWVL1norm
channel_names.append('SWVL1')
idx = idx+1
if inpSWVL2:
x_intermediate[:,:,:,idx] = SWVL2norm
channel_names.append('SWVL2')
idx = idx+1
if inpSWVL3:
x_intermediate[:,:,:,idx] = SWVL3norm
channel_names.append('SWVL3')
idx = idx+1
if inpSWVL4:
x_intermediate[:,:,:,idx] = SWVL4norm
channel_names.append('SWVL4')
idx = idx+1
x_train = np.empty((Ntrain-steps_in, steps_in, ) + np.shape(Tnorm)[1:] + (nchannels,),dtype=np.float16)
x_val = np.empty((Nval, steps_in,) + np.shape(Tnorm)[1:] + (nchannels,), dtype = np.float16)
x_test = np.empty((Ntest, steps_in,) + np.shape(Tnorm)[1:] + (nchannels,),dtype=np.float16)
#training
for ii in range(Ntrain-steps_in):
x_train[ii] = x_intermediate[ii:ii+steps_in]
# #validation
for ii in range(Nval):
x_val[ii] = x_intermediate[ii + Ntrain - steps_in : ii + Ntrain]
# #testing ()
for ii in range(Ntest):
x_test[ii] = x_intermediate[ii + Ntrain + Nval + Nopt - steps_in : ii + Ntrain + Nval + Nopt]
# #convert target arrays to tensors
x_train = tf.convert_to_tensor(x_train)
x_val = tf.convert_to_tensor(x_val)
x_test = tf.convert_to_tensor(x_test)
y_train = tf.convert_to_tensor(y_train)
y_val = tf.convert_to_tensor(y_val)
y_test = tf.convert_to_tensor(y_test)
#%% Load existing Models and calculate heatmaps
with tf.device("/gpu:2"): # adapt to your available device
for c in range(nchannels): #perturb one channel at a time
inimax = 10 # use 10 different trained models
heat = np.zeros((T.shape[1]*T.shape[2],inimax)) # preallocate
print(channel_names[c])
for ini in range(inimax):
fileName = dir_output + '/heatmap_'+channel_names[c]+'_channel_ini'+str(ini)+'.csv'
if os.path.isfile(fileName): # check for previous calculation runs to save time
                temp_load = pd.read_csv(fileName, header=None)
from __future__ import division
from __future__ import print_function
# Preprocessing of Option Quotes
# ==============================
#
# This notebook demonstrates the preprocessing of equity options, in preparation for the estimation of the parameters of a stochastic model.
# A number of preliminary calculations must be performed:
#
# 1. Calculation of implied risk-free rate and dividend yield, and derivation of forward prices
# 2. Calculation of forward at-the-money volatility. There is probably no option struck at the forward price, so this item must be computed by interpolation.
# 3. Calculation of the Black-=Scholes implied bid and ask volatility, given bid and ask option prices.
# 4. Calculation of 'Quick Delta': this is a common measure of moneyness, useful for representing the volatility smile.
#
# Each step is now described.
#
# Calculation of implied dividend yield and risk-free rate
# --------------------------------------------------------
#
# Recall the put-call parity relationship with continuous dividends:
#
# $$
# C_t - P_t = S_t e^{-d (T-t)} - K e^{-r (T-t)}
# $$
#
# where
#
# * $C_t$ price of call at time $t$
# * $P_t$ price of put at time $t$
# * $S_t$ spot price of underlying asset
# * $d$ continuous dividend yield
# * $r$ risk-free rate
# * $T$ Expiry
#
# For each maturity, we estimate the linear regression:
#
# $$
# C_t - P_t = a_0 + a_1 K
# $$
#
# which yields
#
# $$
# r = - \frac{1}{T} \ln (-a_1)
# $$
# $$
# d = \frac{1}{T} \ln \left( \frac{S_t}{a_0} \right)
# $$
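#
# A minimal numpy sketch of this regression (names are illustrative; `strikes`, `call_mid`,
# `put_mid`, `spot` and `T` are assumed to be available):
#
#     slope, intercept = np.polyfit(strikes, call_mid - put_mid, 1)
#     r = -np.log(-slope) / T
#     d = np.log(spot / intercept) / T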
#
# Calculation of forward at-the-money volatility
# ----------------------------------------------
#
# We next want to estimate the implied volatility of an option struck at the forward price. In general, such option is not traded, and the volatility must therefore be estimated. The calculation involves 3 steps, performed separately on calls and puts:
#
# 1. Estimate the bid ($\sigma_b(K)$) and ask ($\sigma_a(K)$) Black-Scholes volatility for each quote.
# 2. Compute a mid-market implied volatility for each quote:
# $$
# \sigma(K) = \frac{\sigma_b(K)+\sigma_a(K)}{2}
# $$
# 3. Let $F$ be the forward price, the corresponding mid-market implied volatility is computed by linear interpolation between the two quotes bracketing $F$.
#
# The forward ATM volatility is the average of the volatilities computed on calls and puts.
#
# Quick Delta
# -----------
#
# Recall that the delta of a European call is defined as $N(d_1)$, where
#
# $$
# d_{1} = \frac{1}{\sigma \sqrt{T}} \left[ \ln \left( \frac{S}{K} \right) + \left( r + \frac{1}{2}\sigma^2 \right)T \right]
# $$
#
# The "Quick Delta" (QD) is a popular measure of moneyness, inspired from the definition of delta:
#
# $$
# QD(K) = N \left( \frac{1}{\sigma \sqrt{T}} \ln \left( \frac{F_T}{K} \right) \right)
# $$
#
# Note that $QD(F_T)=0.5$, for all maturities, while the regular forward delta is a function of time to expiry. This property of Quick Delta makes it convenient for representing the volatility smile.
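#
# In code this is a one-liner (sketch, using `scipy.stats.norm`):
#
#     QD = norm.cdf(np.log(F / K) / (sigma * np.sqrt(T)))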
#
# Data Filters
# ------------
#
# A number of filters may be applied, in an attempt to exclude inconsistent or erroneous data.
#
# 1. Exclusion of maturities shorter than $tMin$
# 2. Exclusion of maturities with less than $nMin$ quotes
# 3. Exclusion of quotes with Quick Delta less than $QDMin$ or higher than $QDMax$
#
# Implementation
# --------------
#
# This logic is implemented in the function `Compute_IV`, presented below. The function takes as argument a `pandas DataFrame` and returns another
# `DataFrame`, with one row per quote and 14 columns:
#
# 1. Type: 'C'/'P'
# 2. Strike
# 3. dtExpiry
# 4. dtTrade
# 5. Spot
# 6. IVBid: Black-Scholes implied volatility (bid)
# 7. IVAsk: Black-Scholes implied volatility (ask)
# 8. QD: Quick Delta
# 9. iRate: risk-free rate (continuously compounded)
# 10. iDiv: dividend yield (continuously compounded)
# 11. Fwd: Forward price
# 12. TTM: Time to maturity, in fraction of years (ACT/365)
# 13. PBid: Premium (bid)
# 14. PAsk: Premium (ask)
# <codecell>
import pandas
import dateutil
import re
import datetime
import numpy as np
from pandas import DataFrame
from scipy.interpolate import interp1d
from scipy.stats import norm
from scipy.linalg import lstsq
import quantlib.pricingengines.blackformula
from quantlib.pricingengines.blackformula import blackFormulaImpliedStdDev
from quantlib.instruments.option import Call, Put
def Compute_IV(optionDataFrame, tMin=0, nMin=0, QDMin=0, QDMax=1, keepOTMData=True):
"""
Pre-processing of a standard European option quote file.
- Calculation of implied risk-free rate and dividend yield
- Calculation of implied volatility
- Estimate ATM volatility for each expiry
- Compute implied volatility and Quick Delta for each quote
Options for filtering the input data set:
- maturities with less than nMin strikes are ignored
- maturities shorter than tMin (ACT/365 daycount) are ignored
- strikes with Quick Delta < qdMin or > qdMax are ignored
"""
grouped = optionDataFrame.groupby('dtExpiry')
isFirst = True
for spec, group in grouped:
print('processing group %s' % spec)
# implied vol for this type/expiry group
indx = group.index
dtTrade = group['dtTrade'][indx[0]]
dtExpiry = group['dtExpiry'][indx[0]]
spot = group['Spot'][indx[0]]
daysToExpiry = (dtExpiry-dtTrade).days
timeToMaturity = daysToExpiry/365.0
# exclude groups with too short time to maturity
if timeToMaturity < tMin:
continue
# exclude groups with too few data points
df_call = group[group['Type'] == 'C']
df_put = group[group['Type'] == 'P']
if (len(df_call) < nMin) | (len(df_put) < nMin):
continue
# calculate forward, implied interest rate and implied div. yield
df_C = DataFrame((df_call['PBid']+df_call['PAsk'])/2,
columns=['PremiumC'])
df_C.index = df_call['Strike']
df_P = DataFrame((df_put['PBid']+df_put['PAsk'])/2,
columns=['PremiumP'])
df_P.index = df_put['Strike']
# use 'inner' join because some strikes are not quoted for C and P
df_all = df_C.join(df_P, how='inner')
df_all['Strike'] = df_all.index
df_all['C-P'] = df_all['PremiumC'] - df_all['PremiumP']
y = np.array(df_all['C-P'])
x = np.array(df_all['Strike'])
A = np.vstack((x, np.ones(x.shape))).T
b = np.linalg.lstsq(A, y, rcond=None)[0]
# intercept is last coef
iRate = -np.log(-b[0])/timeToMaturity
dRate = np.log(spot/b[1])/timeToMaturity
discountFactor = np.exp(-iRate*timeToMaturity)
Fwd = spot * np.exp((iRate-dRate)*timeToMaturity)
print('Fwd: %f int rate: %f div yield: %f' % (Fwd, iRate, dRate))
# mid-market ATM volatility
def impvol(cp, strike, premium):
try:
vol = blackFormulaImpliedStdDev(cp, strike,
forward=Fwd, blackPrice=premium, discount=discountFactor,
TTM=timeToMaturity)
except RuntimeError:
vol = np.nan
return vol/np.sqrt(timeToMaturity)
# implied bid/ask vol for all options
        df_call = df_call.assign(IVBid = [impvol(Call, strike, price) for strike, price
                                          in zip(df_call['Strike'], df_call['PBid'])],
                                 IVAsk = [impvol(Call, strike, price) for strike, price
                                          in zip(df_call['Strike'], df_call['PAsk'])])
df_call = df_call.assign(IVMid = (df_call.IVBid + df_call.IVAsk)/2)
df_put = df_put.assign(IVBid = [impvol(Put, strike, price) for strike, price
in zip(df_put['Strike'], df_put['PBid'])],
IVAsk = [impvol(Put, strike, price) for strike, price
in zip(df_put['Strike'], df_put['PAsk'])])
df_put = df_put.assign(IVMid = (df_put['IVBid'] + df_put['IVAsk'])/2)
f_call = interp1d(df_call['Strike'].values, df_call['IVMid'].values)
f_put = interp1d(df_put['Strike'].values, df_put['IVMid'].values)
atmVol = (f_call(Fwd)+f_put(Fwd))/2
print('ATM vol: %f' % atmVol)
# Quick Delta, computed with ATM vol
rv = norm()
df_call = (df_call.
assign(QuickDelta=
rv.cdf(np.log(Fwd/df_call.Strike.values) / (atmVol*np.sqrt(timeToMaturity)))))
df_put = (df_put.
assign(QuickDelta=
rv.cdf(np.log(Fwd/df_put.Strike.values)/(atmVol*np.sqrt(timeToMaturity)))))
# keep data within QD range
df_call = df_call[(df_call['QuickDelta'] >= QDMin) & \
(df_call['QuickDelta'] <= QDMax) ]
df_put = df_put[ (df_put['QuickDelta'] >= QDMin) & \
(df_put['QuickDelta'] <= QDMax) ]
# final assembly...
df_cp = df_call.append(df_put, ignore_index=True)
df_cp['iRate'] = iRate
df_cp['iDiv'] = dRate
df_cp['ATMVol'] = atmVol
df_cp['Fwd'] = Fwd
df_cp['TTM'] = timeToMaturity
df_cp['CP'] = [1 if t == 'C' else -1 for t in df_cp['Type']]
# keep only OTM data ?
if keepOTMData:
df_cp = df_cp[((df_cp['Strike']>=Fwd) & (df_cp['Type'] == 'C')) |
((df_cp['Strike']<Fwd) & (df_cp['Type'] == 'P'))]
if isFirst:
df_final = df_cp
isFirst = False
else:
df_final = df_final.append(df_cp, ignore_index=True)
return df_final
# <markdowncell>
# Example
# -------
#
# Using the SPX data set found in the data folder, the above procedure generates a `DataFrame` suited for use in a calibration program.
# <codecell>
if __name__ == '__main__':
    option_data_frame = pandas.read_pickle('../data/df_SPX_24jan2011.pkl')
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import inspect
import numpy as np
import pandas as pd
import pyspark
import databricks.koalas as ks
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.indexes import MissingPandasLikeIndex, MissingPandasLikeMultiIndex
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class IndexesTest(ReusedSQLTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0],},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf(self):
return ks.from_pandas(self.pdf)
def test_index(self):
for pdf in [
pd.DataFrame(np.random.randn(10, 5), index=list("abcdefghij")),
pd.DataFrame(
np.random.randn(10, 5), index=pd.date_range("2011-01-01", freq="D", periods=10)
),
pd.DataFrame(np.random.randn(10, 5), columns=list("abcde")).set_index(["a", "b"]),
]:
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index, pdf.index)
def test_index_getattr(self):
kidx = self.kdf.index
item = "databricks"
expected_error_message = "'Index' object has no attribute '{}'".format(item)
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_multi_index_getattr(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
kidx = kdf.index
item = "databricks"
expected_error_message = "'MultiIndex' object has no attribute '{}'".format(item)
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_to_series(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a")))
# With name
pidx.name = "Koalas"
kidx.name = "Koalas"
self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series()))
self.assert_eq(repr(kidx.to_series(name=("x", "a"))), repr(pidx.to_series(name=("x", "a"))))
# With tupled name
pidx.name = ("x", "a")
kidx.name = ("x", "a")
self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series()))
self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a")))
self.assert_eq((kidx + 1).to_series(), (pidx + 1).to_series())
pidx = self.pdf.set_index("b", append=True).index
kidx = self.kdf.set_index("b", append=True).index
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name="a"), pidx.to_series(name="a"))
def test_to_frame(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
pidx.name = "a"
kidx.name = "a"
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `name` argument is added in pandas 0.24.
self.assert_eq(repr(kidx.to_frame(name="x")), repr(pidx.to_frame(name="x")))
self.assert_eq(
repr(kidx.to_frame(index=False, name="x")),
repr(pidx.to_frame(index=False, name="x")),
)
pidx = self.pdf.set_index("b", append=True).index
kidx = self.kdf.set_index("b", append=True).index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `name` argument is added in pandas 0.24.
self.assert_eq(
repr(kidx.to_frame(name=["x", "y"])), repr(pidx.to_frame(name=["x", "y"]))
)
self.assert_eq(
repr(kidx.to_frame(index=False, name=["x", "y"])),
repr(pidx.to_frame(index=False, name=["x", "y"])),
)
def test_index_names(self):
kdf = self.kdf
self.assertIsNone(kdf.index.name)
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
pdf = pd.DataFrame(np.random.randn(10, 5), index=idx, columns=list("abcde"))
kdf = ks.from_pandas(pdf)
pser = pdf.a
kser = kdf.a
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.name = "renamed"
kidx.name = "renamed"
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
self.assertEqual(kser.index.names, pser.index.names)
pidx.name = None
kidx.name = None
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
self.assertEqual(kser.index.names, pser.index.names)
with self.assertRaisesRegex(ValueError, "Names must be a list-like"):
kidx.names = "hi"
expected_error_message = "Length of new names must be {}, got {}".format(
len(kdf._internal.index_map), len(["0", "1"])
)
with self.assertRaisesRegex(ValueError, expected_error_message):
kidx.names = ["0", "1"]
def test_multi_index_names(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.names = ["renamed_number", "renamed_color"]
kidx.names = ["renamed_number", "renamed_color"]
self.assertEqual(kidx.names, pidx.names)
pidx.names = ["renamed_number", None]
kidx.names = ["renamed_number", None]
self.assertEqual(kidx.names, pidx.names)
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
# PySpark < 2.4 does not support struct type with arrow enabled.
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
self.assert_eq(kidx, pidx)
else:
self.assert_eq(kidx, pidx)
with self.assertRaises(PandasNotImplementedError):
kidx.name
with self.assertRaises(PandasNotImplementedError):
kidx.name = "renamed"
def test_index_rename(self):
pdf = pd.DataFrame(
np.random.randn(10, 5), index=pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
)
kdf = ks.from_pandas(pdf)
pidx = pdf.index
kidx = kdf.index
self.assert_eq(kidx.rename("y"), pidx.rename("y"))
self.assert_eq(kdf.index.names, pdf.index.names)
kidx.rename("z", inplace=True)
pidx.rename("z", inplace=True)
self.assert_eq(kidx, pidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kidx.rename(None), pidx.rename(None))
self.assert_eq(kdf.index.names, pdf.index.names)
def test_multi_index_rename(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
pmidx = pdf.index
kmidx = kdf.index
self.assert_eq(kmidx.rename(["n", "c"]), pmidx.rename(["n", "c"]))
self.assert_eq(kdf.index.names, pdf.index.names)
kmidx.rename(["num", "col"], inplace=True)
pmidx.rename(["num", "col"], inplace=True)
self.assert_eq(kmidx, pmidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kmidx.rename([None, None]), pmidx.rename([None, None]))
self.assert_eq(kdf.index.names, pdf.index.names)
self.assertRaises(TypeError, lambda: kmidx.rename("number"))
self.assertRaises(ValueError, lambda: kmidx.rename(["number"]))
def test_multi_index_levshape(self):
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2)])
kidx = ks.from_pandas(pidx)
self.assertEqual(pidx.levshape, kidx.levshape)
def test_index_unique(self):
kidx = self.kdf.index
# here the output is different than pandas in terms of order
expected = [0, 1, 3, 5, 6, 8, 9]
self.assert_eq(expected, sorted(kidx.unique().to_pandas()))
self.assert_eq(expected, sorted(kidx.unique(level=0).to_pandas()))
expected = [1, 2, 4, 6, 7, 9, 10]
self.assert_eq(expected, sorted((kidx + 1).unique().to_pandas()))
with self.assertRaisesRegex(IndexError, "Too many levels*"):
kidx.unique(level=1)
with self.assertRaisesRegex(KeyError, "Requested level (hi)*"):
kidx.unique(level="hi")
def test_multi_index_copy(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index.copy(), pdf.index.copy())
def test_drop_duplicates(self):
pidx = pd.Index([4, 2, 4, 1, 4, 3])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.drop_duplicates().sort_values(), pidx.drop_duplicates().sort_values())
self.assert_eq(
(kidx + 1).drop_duplicates().sort_values(), (pidx + 1).drop_duplicates().sort_values()
)
def test_dropna(self):
pidx = pd.Index([np.nan, 2, 4, 1, np.nan, 3])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.dropna(), pidx.dropna())
self.assert_eq((kidx + 1).dropna(), (pidx + 1).dropna())
def test_index_symmetric_difference(self):
pidx1 = pd.Index([1, 2, 3, 4])
pidx2 = pd.Index([2, 3, 4, 5])
kidx1 = ks.from_pandas(pidx1)
kidx2 = ks.from_pandas(pidx2)
self.assert_eq(
kidx1.symmetric_difference(kidx2).sort_values(),
pidx1.symmetric_difference(pidx2).sort_values(),
)
self.assert_eq(
(kidx1 + 1).symmetric_difference(kidx2).sort_values(),
(pidx1 + 1).symmetric_difference(pidx2).sort_values(),
)
pmidx1 = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
)
pmidx2 = pd.MultiIndex(
[["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
)
kmidx1 = ks.from_pandas(pmidx1)
kmidx2 = ks.from_pandas(pmidx2)
self.assert_eq(
kmidx1.symmetric_difference(kmidx2).sort_values(),
pmidx1.symmetric_difference(pmidx2).sort_values(),
)
idx = ks.Index(["a", "b", "c"])
midx = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
with self.assertRaisesRegex(NotImplementedError, "Doesn't support*"):
idx.symmetric_difference(midx)
def test_multi_index_symmetric_difference(self):
idx = ks.Index(["a", "b", "c"])
midx = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
midx_ = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
self.assert_eq(
midx.symmetric_difference(midx_),
midx.to_pandas().symmetric_difference(midx_.to_pandas()),
)
with self.assertRaisesRegex(NotImplementedError, "Doesn't support*"):
midx.symmetric_difference(idx)
def test_missing(self):
kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
# Index functions
missing_functions = inspect.getmembers(MissingPandasLikeIndex, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index("a").index, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index("a").index, name)()
# MultiIndex functions
missing_functions = inspect.getmembers(MissingPandasLikeMultiIndex, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index(["a", "b"]).index, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index(["a", "b"]).index, name)()
# Index properties
missing_properties = inspect.getmembers(
MissingPandasLikeIndex, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index("a").index, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index("a").index, name)
# MultiIndex properties
missing_properties = inspect.getmembers(
MissingPandasLikeMultiIndex, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index(["a", "b"]).index, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index(["a", "b"]).index, name)
def test_index_has_duplicates(self):
indexes = [("a", "b", "c"), ("a", "a", "c"), (1, 3, 3), (1, 2, 3)]
names = [None, "ks", "ks", None]
has_dup = [False, True, True, False]
for idx, name, expected in zip(indexes, names, has_dup):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(idx, name=name))
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.has_duplicates, expected)
def test_multiindex_has_duplicates(self):
indexes = [
[list("abc"), list("edf")],
[list("aac"), list("edf")],
[list("aac"), list("eef")],
[[1, 4, 4], [4, 6, 6]],
]
has_dup = [False, False, True, True]
for idx, expected in zip(indexes, has_dup):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.has_duplicates, expected)
def test_multi_index_not_supported(self):
kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
with self.assertRaisesRegex(TypeError, "cannot perform any with this index type"):
kdf.set_index(["a", "b"]).index.any()
with self.assertRaisesRegex(TypeError, "cannot perform all with this index type"):
kdf.set_index(["a", "b"]).index.all()
def test_index_nlevels(self):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(["a", "b", "c"]))
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.nlevels, 1)
def test_multiindex_nlevel(self):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=[list("abc"), list("def")])
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.nlevels, 2)
def test_multiindex_from_arrays(self):
arrays = [["a", "a", "b", "b"], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays)
kidx = ks.MultiIndex.from_arrays(arrays)
self.assert_eq(pidx, kidx)
def test_multiindex_swaplevel(self):
pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]], names=["word", "number"])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]], names=["word", None])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.swaplevel(-2, -1), kidx.swaplevel(-2, -1))
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
self.assert_eq(pidx.swaplevel("word", 1), kidx.swaplevel("word", 1))
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(-3, "word")
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(0, 2)
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(0, -3)
with self.assertRaisesRegex(KeyError, "Level work not found"):
kidx.swaplevel(0, "work")
def test_multiindex_droplevel(self):
pidx = pd.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2)], names=["level1", "level2", "level3"]
)
kidx = ks.from_pandas(pidx)
with self.assertRaisesRegex(IndexError, "Too many levels: Index has only 3 levels, not 5"):
kidx.droplevel(4)
with self.assertRaisesRegex(KeyError, "Level level4 not found"):
kidx.droplevel("level4")
with self.assertRaisesRegex(KeyError, "Level.*level3.*level4.*not found"):
kidx.droplevel([("level3", "level4")])
with self.assertRaisesRegex(
ValueError,
"Cannot remove 4 levels from an index with 3 levels: at least one "
"level must be left.",
):
kidx.droplevel([0, 0, 1, 2])
with self.assertRaisesRegex(
ValueError,
"Cannot remove 3 levels from an index with 3 levels: at least one "
"level must be left.",
):
kidx.droplevel([0, 1, 2])
self.assert_eq(pidx.droplevel(0), kidx.droplevel(0))
self.assert_eq(pidx.droplevel([0, 1]), kidx.droplevel([0, 1]))
self.assert_eq(pidx.droplevel([0, "level2"]), kidx.droplevel([0, "level2"]))
def test_index_fillna(self):
pidx = pd.Index([1, 2, None])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.fillna(0), kidx.fillna(0))
self.assert_eq(pidx.rename("name").fillna(0), kidx.rename("name").fillna(0))
with self.assertRaisesRegex(TypeError, "Unsupported type <class 'list'>"):
kidx.fillna([1, 2])
def test_index_drop(self):
pidx = pd.Index([1, 2, 3])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop(1), kidx.drop(1))
self.assert_eq(pidx.drop([1, 2]), kidx.drop([1, 2]))
def test_multiindex_drop(self):
pidx = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z")], names=["level1", "level2"]
)
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop("a"), kidx.drop("a"))
self.assert_eq(pidx.drop(["a", "b"]), kidx.drop(["a", "b"]))
self.assert_eq(pidx.drop(["x", "y"], level=1), kidx.drop(["x", "y"], level=1))
self.assert_eq(pidx.drop(["x", "y"], level="level2"), kidx.drop(["x", "y"], level="level2"))
pidx.names = ["lv1", "lv2"]
kidx.names = ["lv1", "lv2"]
self.assert_eq(pidx.drop(["x", "y"], level="lv2"), kidx.drop(["x", "y"], level="lv2"))
self.assertRaises(IndexError, lambda: kidx.drop(["a", "b"], level=2))
self.assertRaises(KeyError, lambda: kidx.drop(["a", "b"], level="level"))
kidx.names = ["lv", "lv"]
self.assertRaises(ValueError, lambda: kidx.drop(["x", "y"], level="lv"))
def test_sort_values(self):
pidx = pd.Index([-10, -100, 200, 100])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
pidx.name = "koalas"
kidx.name = "koalas"
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.from_pandas(pidx)
pidx.names = ["hello", "koalas", "goodbye"]
kidx.names = ["hello", "koalas", "goodbye"]
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
def test_index_drop_duplicates(self):
pidx = pd.Index([1, 1, 2])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop_duplicates().sort_values(), kidx.drop_duplicates().sort_values())
pidx = pd.MultiIndex.from_tuples([(1, 1), (1, 1), (2, 2)], names=["level1", "level2"])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop_duplicates().sort_values(), kidx.drop_duplicates().sort_values())
def test_index_sort(self):
idx = ks.Index([1, 2, 3, 4, 5])
midx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2)])
with self.assertRaisesRegex(
TypeError, "cannot sort an Index object in-place, use sort_values instead"
):
idx.sort()
with self.assertRaisesRegex(
TypeError, "cannot sort an Index object in-place, use sort_values instead"
):
midx.sort()
def test_multiindex_isna(self):
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
with self.assertRaisesRegex(NotImplementedError, "isna is not defined for MultiIndex"):
kidx.isna()
with self.assertRaisesRegex(NotImplementedError, "isna is not defined for MultiIndex"):
kidx.isnull()
with self.assertRaisesRegex(NotImplementedError, "notna is not defined for MultiIndex"):
kidx.notna()
with self.assertRaisesRegex(NotImplementedError, "notna is not defined for MultiIndex"):
kidx.notnull()
def test_index_nunique(self):
pidx = pd.Index([1, 1, 2, None])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.nunique(), kidx.nunique())
self.assert_eq(pidx.nunique(dropna=True), kidx.nunique(dropna=True))
def test_multiindex_nunique(self):
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
with self.assertRaisesRegex(NotImplementedError, "notna is not defined for MultiIndex"):
kidx.notnull()
def test_multiindex_rename(self):
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.from_pandas(pidx)
pidx = pidx.rename(list("ABC"))
kidx = kidx.rename(list("ABC"))
self.assert_eq(pidx, kidx)
pidx = pidx.rename(["my", "name", "is"])
kidx = kidx.rename(["my", "name", "is"])
self.assert_eq(pidx, kidx)
def test_multiindex_set_names(self):
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.from_pandas(pidx)
pidx = pidx.set_names(["set", "new", "names"])
kidx = kidx.set_names(["set", "new", "names"])
self.assert_eq(pidx, kidx)
pidx.set_names(["set", "new", "names"], inplace=True)
kidx.set_names(["set", "new", "names"], inplace=True)
self.assert_eq(pidx, kidx)
pidx = pidx.set_names("first", level=0)
kidx = kidx.set_names("first", level=0)
self.assert_eq(pidx, kidx)
pidx = pidx.set_names("second", level=1)
kidx = kidx.set_names("second", level=1)
self.assert_eq(pidx, kidx)
pidx = pidx.set_names("third", level=2)
kidx = kidx.set_names("third", level=2)
self.assert_eq(pidx, kidx)
pidx.set_names("first", level=0, inplace=True)
kidx.set_names("first", level=0, inplace=True)
self.assert_eq(pidx, kidx)
pidx.set_names("second", level=1, inplace=True)
kidx.set_names("second", level=1, inplace=True)
self.assert_eq(pidx, kidx)
pidx.set_names("third", level=2, inplace=True)
kidx.set_names("third", level=2, inplace=True)
self.assert_eq(pidx, kidx)
def test_multiindex_from_tuples(self):
tuples = [(1, "red"), (1, "blue"), (2, "red"), (2, "blue")]
pidx = pd.MultiIndex.from_tuples(tuples)
kidx = ks.MultiIndex.from_tuples(tuples)
self.assert_eq(pidx, kidx)
def test_multiindex_from_product(self):
iterables = [[0, 1, 2], ["green", "purple"]]
pidx = pd.MultiIndex.from_product(iterables)
kidx = ks.MultiIndex.from_product(iterables)
self.assert_eq(pidx, kidx)
def test_multiindex_tuple_column_name(self):
column_labels = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=column_labels)
pdf.set_index(("a", "x"), append=True, inplace=True)
kdf = ks.from_pandas(pdf)
self.assert_eq(pdf, kdf)
def test_len(self):
pidx = pd.Index(range(10000))
kidx = ks.from_pandas(pidx)
self.assert_eq(len(pidx), len(kidx))
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
self.assert_eq(len(pidx), len(kidx))
def test_delete(self):
pidx = pd.Index([10, 9, 8, 7, 6, 7, 8, 9, 10])
kidx = ks.Index([10, 9, 8, 7, 6, 7, 8, 9, 10])
self.assert_eq(pidx.delete(5).sort_values(), kidx.delete(5).sort_values())
self.assert_eq(pidx.delete(-5).sort_values(), kidx.delete(-5).sort_values())
if LooseVersion(np.__version__) < LooseVersion("1.19"):
self.assert_eq(
pidx.delete([0, 10000]).sort_values(), kidx.delete([0, 10000]).sort_values()
)
self.assert_eq(
pidx.delete([10000, 20000]).sort_values(), kidx.delete([10000, 20000]).sort_values()
)
else:
self.assert_eq(pidx.delete([0]).sort_values(), kidx.delete([0, 10000]).sort_values())
self.assert_eq(pidx.delete([]).sort_values(), kidx.delete([10000, 20000]).sort_values())
with self.assertRaisesRegex(IndexError, "index 10 is out of bounds for axis 0 with size 9"):
kidx.delete(10)
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
self.assert_eq(pidx.delete(1).sort_values(), kidx.delete(1).sort_values())
self.assert_eq(pidx.delete(-1).sort_values(), kidx.delete(-1).sort_values())
if LooseVersion(np.__version__) < LooseVersion("1.19"):
self.assert_eq(
pidx.delete([0, 10000]).sort_values(), kidx.delete([0, 10000]).sort_values()
)
self.assert_eq(
pidx.delete([10000, 20000]).sort_values(), kidx.delete([10000, 20000]).sort_values()
)
else:
self.assert_eq(pidx.delete([0]).sort_values(), kidx.delete([0, 10000]).sort_values())
self.assert_eq(pidx.delete([]).sort_values(), kidx.delete([10000, 20000]).sort_values())
def test_append(self):
# Index
pidx = pd.Index(range(10000))
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.append(pidx), kidx.append(kidx))
# Index with name
pidx1 = pd.Index(range(10000), name="a")
pidx2 = pd.Index(range(10000), name="b")
kidx1 = ks.from_pandas(pidx1)
kidx2 = ks.from_pandas(pidx2)
self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2))
self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1))
# Index from DataFrame
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["a", "b", "c"])
pdf2 = pd.DataFrame({"a": [7, 8, 9], "d": [10, 11, 12]}, index=["x", "y", "z"])
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
pidx1 = pdf1.set_index("a").index
pidx2 = pdf2.set_index("d").index
kidx1 = kdf1.set_index("a").index
kidx2 = kdf2.set_index("d").index
self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2))
self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1))
# Index from DataFrame with MultiIndex columns
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
pdf2 = pd.DataFrame({"a": [7, 8, 9], "d": [10, 11, 12]})
pdf1.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
pdf2.columns = pd.MultiIndex.from_tuples([("a", "x"), ("d", "y")])
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
pidx1 = pdf1.set_index(("a", "x")).index
pidx2 = pdf2.set_index(("d", "y")).index
kidx1 = kdf1.set_index(("a", "x")).index
kidx2 = kdf2.set_index(("d", "y")).index
self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2))
self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1))
# MultiIndex
pmidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kmidx = ks.from_pandas(pmidx)
self.assert_eq(pmidx.append(pmidx), kmidx.append(kmidx))
# MultiIndex with names
pmidx1 = pd.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2), ("c", "z", 3)], names=["x", "y", "z"]
)
pmidx2 = pd.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2), ("c", "z", 3)], names=["p", "q", "r"]
)
kmidx1 = ks.from_pandas(pmidx1)
kmidx2 = ks.from_pandas(pmidx2)
self.assert_eq(pmidx1.append(pmidx2), kmidx1.append(kmidx2))
self.assert_eq(pmidx2.append(pmidx1), kmidx2.append(kmidx1))
self.assert_eq(pmidx1.append(pmidx2).names, kmidx1.append(kmidx2).names)
self.assert_eq(pmidx1.append(pmidx2).names, kmidx1.append(kmidx2).names)
# Index & MultiIndex currently is not supported
expected_error_message = r"append\(\) between Index & MultiIndex currently is not supported"
with self.assertRaisesRegex(NotImplementedError, expected_error_message):
kidx.append(kmidx)
with self.assertRaisesRegex(NotImplementedError, expected_error_message):
kmidx.append(kidx)
def test_argmin(self):
pidx = pd.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.argmin(), kidx.argmin())
# MultiIndex
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
with self.assertRaisesRegex(
TypeError, "reduction operation 'argmin' not allowed for this dtype"
):
kidx.argmin()
def test_argmax(self):
pidx = pd.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.argmax(), kidx.argmax())
# MultiIndex
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
with self.assertRaisesRegex(
TypeError, "reduction operation 'argmax' not allowed for this dtype"
):
kidx.argmax()
def test_monotonic(self):
# test monotonic_increasing & monotonic_decreasing for MultiIndex.
# Since the Behavior for null value was changed in pandas >= 1.0.0,
# several cases are tested differently.
datas = []
# increasing / decreasing ordered each index level with string
datas.append([("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")])
datas.append([("w", "d"), ("x", "c"), ("y", "b"), ("z", "a")])
datas.append([("z", "a"), ("y", "b"), ("x", "c"), ("w", "d")])
datas.append([("z", "d"), ("y", "c"), ("x", "b"), ("w", "a")])
# mixed order each index level with string
datas.append([("z", "a"), ("x", "b"), ("y", "c"), ("w", "d")])
datas.append([("z", "a"), ("y", "c"), ("x", "b"), ("w", "d")])
# increasing / decreasing ordered each index level with integer
datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (5, 500)])
datas.append([(1, 500), (2, 400), (3, 300), (4, 200), (5, 100)])
datas.append([(5, 100), (4, 200), (3, 300), (2, 400), (1, 500)])
datas.append([(5, 500), (4, 400), (3, 300), (2, 200), (1, 100)])
# mixed order each index level with integer
datas.append([(1, 500), (3, 400), (2, 300), (4, 200), (5, 100)])
datas.append([(1, 100), (2, 300), (3, 200), (4, 400), (5, 500)])
# integer / negative mixed tests
datas.append([("a", -500), ("b", -400), ("c", -300), ("d", -200), ("e", -100)])
datas.append([("e", -500), ("d", -400), ("c", -300), ("b", -200), ("a", -100)])
datas.append([(-5, "a"), (-4, "b"), (-3, "c"), (-2, "d"), (-1, "e")])
datas.append([(-5, "e"), (-4, "d"), (-3, "c"), (-2, "b"), (-1, "a")])
datas.append([(-5, "e"), (-3, "d"), (-2, "c"), (-4, "b"), (-1, "a")])
datas.append([(-5, "e"), (-4, "c"), (-3, "b"), (-2, "d"), (-1, "a")])
# None type tests (None type is treated as the smallest value)
datas.append([(1, 100), (2, 200), (None, 300), (4, 400), (5, 500)])
datas.append([(5, None), (4, 200), (3, 300), (2, 400), (1, 500)])
datas.append([(5, 100), (4, 200), (3, None), (2, 400), (1, 500)])
datas.append([(5, 100), (4, 200), (3, 300), (2, 400), (1, None)])
datas.append([(1, 100), (2, 200), (None, None), (4, 400), (5, 500)])
datas.append([(-5, None), (-4, None), (-3, None), (-2, None), (-1, None)])
datas.append([(None, "e"), (None, "c"), (None, "b"), (None, "d"), (None, "a")])
datas.append([(None, None), (None, None), (None, None), (None, None), (None, None)])
# duplicated index value tests
datas.append([("x", "d"), ("y", "c"), ("y", "b"), ("z", "a")])
datas.append([("x", "d"), ("y", "b"), ("y", "c"), ("z", "a")])
datas.append([("x", "d"), ("y", "c"), ("y", None), ("z", "a")])
datas.append([("x", "d"), ("y", None), ("y", None), ("z", "a")])
datas.append([("x", "d"), ("y", "c"), ("y", "b"), (None, "a")])
datas.append([("x", "d"), ("y", "b"), ("y", "c"), (None, "a")])
# more depth tests
datas.append([("x", "d", "o"), ("y", "c", "p"), ("y", "c", "q"), ("z", "a", "r")])
datas.append([("x", "d", "o"), ("y", "c", "q"), ("y", "c", "p"), ("z", "a", "r")])
datas.append([("x", "d", "o"), ("y", "c", "p"), ("y", "c", None), ("z", "a", "r")])
datas.append([("x", "d", "o"), ("y", "c", None), ("y", "c", None), ("z", "a", "r")])
for data in datas:
with self.subTest(data=data):
pmidx = pd.MultiIndex.from_tuples(data)
kmidx = ks.from_pandas(pmidx)
self.assert_eq(kmidx.is_monotonic_increasing, pmidx.is_monotonic_increasing)
self.assert_eq(kmidx.is_monotonic_decreasing, pmidx.is_monotonic_decreasing)
# The datas below are showing different result depends on pandas version.
# Because the behavior of handling null values is changed in pandas >= 1.0.0.
datas = []
datas.append([(None, 100), (2, 200), (3, 300), (4, 400), (5, 500)])
datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (None, 500)])
datas.append([(None, None), (2, 200), (3, 300), (4, 400), (5, 500)])
datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (None, None)])
datas.append([("x", "d"), ("y", None), ("y", "c"), ("z", "a")])
datas.append([("x", "d", "o"), ("y", "c", None), ("y", "c", "q"), ("z", "a", "r")])
for data in datas:
with self.subTest(data=data):
pmidx = pd.MultiIndex.from_tuples(data)
kmidx = ks.from_pandas(pmidx)
expected_increasing_result = pmidx.is_monotonic_increasing
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
expected_increasing_result = not expected_increasing_result
self.assert_eq(kmidx.is_monotonic_increasing, expected_increasing_result)
self.assert_eq(kmidx.is_monotonic_decreasing, pmidx.is_monotonic_decreasing)
def test_difference(self):
# Index
kidx1 = ks.Index([1, 2, 3, 4], name="koalas")
kidx2 = ks.Index([3, 4, 5, 6], name="koalas")
pidx1 = kidx1.to_pandas()
pidx2 = kidx2.to_pandas()
self.assert_eq(kidx1.difference(kidx2).sort_values(), pidx1.difference(pidx2).sort_values())
self.assert_eq(
kidx1.difference([3, 4, 5, 6]).sort_values(),
pidx1.difference([3, 4, 5, 6]).sort_values(),
)
self.assert_eq(
kidx1.difference((3, 4, 5, 6)).sort_values(),
pidx1.difference((3, 4, 5, 6)).sort_values(),
)
self.assert_eq(
kidx1.difference({3, 4, 5, 6}).sort_values(),
pidx1.difference({3, 4, 5, 6}).sort_values(),
)
self.assert_eq(
kidx1.difference({3: 1, 4: 2, 5: 3, 6: 4}).sort_values(),
pidx1.difference({3: 1, 4: 2, 5: 3, 6: 4}).sort_values(),
)
# Exceptions for Index
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference("1234")
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference(1234)
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference(12.34)
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference(None)
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference(np.nan)
with self.assertRaisesRegex(
ValueError, "The 'sort' keyword only takes the values of None or True; 1 was passed."
):
kidx1.difference(kidx2, sort=1)
# MultiIndex
kidx1 = ks.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2), ("c", "z", 3)], names=["hello", "koalas", "world"]
)
kidx2 = ks.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "z", 2), ("k", "z", 3)], names=["hello", "koalas", "world"]
)
pidx1 = kidx1.to_pandas()
pidx2 = kidx2.to_pandas()
self.assert_eq(kidx1.difference(kidx2).sort_values(), pidx1.difference(pidx2).sort_values())
self.assert_eq(
kidx1.difference({("a", "x", 1)}).sort_values(),
pidx1.difference({("a", "x", 1)}).sort_values(),
)
self.assert_eq(
kidx1.difference({("a", "x", 1): [1, 2, 3]}).sort_values(),
pidx1.difference({("a", "x", 1): [1, 2, 3]}).sort_values(),
)
# Exceptions for MultiIndex
with self.assertRaisesRegex(TypeError, "other must be a MultiIndex or a list of tuples"):
kidx1.difference(["b", "z", "2"])
def test_repeat(self):
pidx = pd.Index(["a", "b", "c"])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.repeat(3).sort_values(), pidx.repeat(3).sort_values())
self.assert_eq(kidx.repeat(0).sort_values(), pidx.repeat(0).sort_values())
self.assert_eq((kidx + "x").repeat(3).sort_values(), (pidx + "x").repeat(3).sort_values())
self.assertRaises(ValueError, lambda: kidx.repeat(-1))
self.assertRaises(ValueError, lambda: kidx.repeat("abc"))
pmidx = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
kmidx = ks.from_pandas(pmidx)
self.assert_eq(kmidx.repeat(3).sort_values(), pmidx.repeat(3).sort_values())
self.assert_eq(kmidx.repeat(0).sort_values(), pmidx.repeat(0).sort_values())
self.assertRaises(ValueError, lambda: kmidx.repeat(-1))
self.assertRaises(ValueError, lambda: kmidx.repeat("abc"))
def test_unique(self):
        pidx = pd.Index(["a", "b", "a"])
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
#Custom Transformer Class
class NewFeatureTransformer(BaseEstimator, TransformerMixin):
def fit(self, x, y=None):
return self
def transform(self, x):
x['ratio'] = x['thalach']/x['trestbps']
        x = pd.DataFrame(x.loc[:, 'ratio'])
        return x
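# Minimal usage sketch (not part of the original snippet): drop the transformer into a
# scikit-learn Pipeline. The toy DataFrame with 'thalach' and 'trestbps' columns is an
# illustrative assumption.
if __name__ == '__main__':
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler

    toy = pd.DataFrame({'thalach': [150, 170, 125], 'trestbps': [130, 120, 140]})
    pipe = Pipeline([('ratio', NewFeatureTransformer()), ('scale', StandardScaler())])
    print(pipe.fit_transform(toy))  # one standardized 'ratio' column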
import torch
from lib import utils
from lib.dataloaders.dataloader import Dataset
from lib.metrics import metrics_torch, metrics_np
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from model.pytorch import supervisor
from model.pytorch.engine import Evaluator
from model.pytorch.gwnet_model import gwnet
from model.pytorch.lstm_model import LSTMNet
import torch.nn.functional as F
from model.pytorch.supervisor import Supervisor
def main(args):
torch.manual_seed(args.seed)
np.random.seed(args.seed)
device = torch.device(args.device)
sensor_ids, sensor_id_to_ind, adj_mx = utils.load_adj(args.adjdata, args.adjtype)
ds = Dataset(args.data)
ds.load_category('test', args.batch_size)
dataloader = ds.data
scaler = dataloader['scaler']
# Model loading
print(args)
sv = Supervisor(adj_mx, args)
# Testing
yhat, realy = sv.show_multiple_horizon(scaler, dataloader['test_loader'])
if args.plotheatmap == "True":
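        # learned adaptive adjacency from the model's node embeddings: softmax(relu(nodevec1 @ nodevec2), dim=1)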
adp = F.softmax(F.relu(torch.mm(sv.model.nodevec1, sv.model.nodevec2)), dim=1)
device = torch.device('cpu')
adp.to(device)
adp = adp.cpu().detach().numpy()
adp = adp*(1/np.max(adp))
df = pd.DataFrame(adp)
sns.heatmap(df, cmap="RdYlBu")
ds.experiment_save_plot(plt, 'viz/gwnet_emb.pdf')
y12 = realy[:,99,11].cpu().detach().numpy()
yhat12 = scaler.inverse_transform(yhat[:,99,11]).cpu().detach().numpy()
y3 = realy[:,99,2].cpu().detach().numpy()
yhat3 = scaler.inverse_transform(yhat[:,99,2]).cpu().detach().numpy()
    df2 = pd.DataFrame({'real12': y12, 'pred12': yhat12, 'real3': y3, 'pred3': yhat3})
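# Hedged command-line entry point (the original argument parsing is not shown above):
# only the flags referenced in main() are defined, the default values are illustrative
# assumptions, and Supervisor may expect additional arguments in the full project.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('--data', type=str, default='data/METR-LA')
    parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl')
    parser.add_argument('--adjtype', type=str, default='doubletransition')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--seed', type=int, default=99)
    parser.add_argument('--plotheatmap', type=str, default='True')
    main(parser.parse_args())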
import pandas as pd
import numpy as np
import sandy
from sandy.core.endf6 import _FormattedFile
__author__ = "<NAME>"
__all__ = [
"Errorr",
]
pd.options.display.float_format = '{:.5e}'.format
class Errorr(_FormattedFile):
"""
Container for ERRORR file text grouped by MAT, MF and MT numbers.
"""
def get_energy_grid(self, **kwargs):
"""
Obtaining the energy grid.
Parameters
----------
mat : `int`, optional
MAT number. The default is None.
Returns
-------
`np.array`
The energy grid of the `sandy.Errorr` object.
Examples
--------
>>> endf6_2 = sandy.get_endf6_file("jeff_33", "xs", 942410)
>>> err = endf6_2.get_errorr(ek_errorr=sandy.energy_grids.CASMO12, err=1, ek_groupr=sandy.energy_grids.CASMO12)
>>> err.get_energy_grid()
array([1.0000e-05, 3.0000e-02, 5.8000e-02, 1.4000e-01, 2.8000e-01,
3.5000e-01, 6.2500e-01, 4.0000e+00, 4.8052e+01, 5.5300e+03,
8.2100e+05, 2.2310e+06, 1.0000e+07])
>>> err.get_energy_grid(mat=9443)
array([1.0000e-05, 3.0000e-02, 5.8000e-02, 1.4000e-01, 2.8000e-01,
3.5000e-01, 6.2500e-01, 4.0000e+00, 4.8052e+01, 5.5300e+03,
8.2100e+05, 2.2310e+06, 1.0000e+07])
"""
mat_ = kwargs.get('mat', self.mat[0])
mf1 = read_mf1(self, mat_)
return mf1["EG"]
def get_xs(self, **kwargs):
"""
Obtain the xs values across the energy grid.
Returns
-------
xs : `pd.Series`
For a given mat and mt, the xs values in the energy grid.
Examples
--------
>>> endf6 = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> err = endf6.get_errorr(ek_errorr=sandy.energy_grids.CASMO12, err=1)
>>> err.get_xs()
MAT 125
MT 1 2 102
E
(1e-05, 0.03] 2.10540e+01 2.04363e+01 6.17622e-01
(0.03, 0.058] 2.06986e+01 2.04363e+01 2.62307e-01
(0.058, 0.14] 2.06134e+01 2.04363e+01 1.77108e-01
(0.14, 0.28] 2.05574e+01 2.04363e+01 1.21068e-01
(0.28, 0.35] 2.05377e+01 2.04363e+01 1.01449e-01
(0.35, 0.625] 2.05156e+01 2.04363e+01 7.93598e-02
(0.625, 4.0] 2.04756e+01 2.04360e+01 3.95521e-02
(4.0, 48.052] 2.04452e+01 2.04328e+01 1.23376e-02
(48.052, 5530.0] 2.00727e+01 2.00714e+01 1.31829e-03
(5530.0, 821000.0] 8.05810e+00 8.05804e+00 6.41679e-05
(821000.0, 2231000.0] 3.48867e+00 3.48863e+00 3.54246e-05
(2231000.0, 10000000.0] 1.52409e+00 1.52406e+00 3.44005e-05
>>> err.get_xs(mt=[1, 2])
MAT 125
MT 1 2
E
(1e-05, 0.03] 2.10540e+01 2.04363e+01
(0.03, 0.058] 2.06986e+01 2.04363e+01
(0.058, 0.14] 2.06134e+01 2.04363e+01
(0.14, 0.28] 2.05574e+01 2.04363e+01
(0.28, 0.35] 2.05377e+01 2.04363e+01
(0.35, 0.625] 2.05156e+01 2.04363e+01
(0.625, 4.0] 2.04756e+01 2.04360e+01
(4.0, 48.052] 2.04452e+01 2.04328e+01
(48.052, 5530.0] 2.00727e+01 2.00714e+01
(5530.0, 821000.0] 8.05810e+00 8.05804e+00
(821000.0, 2231000.0] 3.48867e+00 3.48863e+00
(2231000.0, 10000000.0] 1.52409e+00 1.52406e+00
>>> err.get_xs(mt=1)
MAT 125
MT 1
E
(1e-05, 0.03] 2.10540e+01
(0.03, 0.058] 2.06986e+01
(0.058, 0.14] 2.06134e+01
(0.14, 0.28] 2.05574e+01
(0.28, 0.35] 2.05377e+01
(0.35, 0.625] 2.05156e+01
(0.625, 4.0] 2.04756e+01
(4.0, 48.052] 2.04452e+01
(48.052, 5530.0] 2.00727e+01
(5530.0, 821000.0] 8.05810e+00
(821000.0, 2231000.0] 3.48867e+00
(2231000.0, 10000000.0] 1.52409e+00
"""
data = []
listmt_ = kwargs.get('mt', range(1, 10000))
listmt_ = [listmt_] if isinstance(listmt_, int) else listmt_
listmat_ = kwargs.get('mat', range(1, 10000))
listmat_ = [listmat_] if isinstance(listmat_, int) else listmat_
for mat, mf, mt in self.filter_by(listmf=[3],
listmt=listmt_,
listmat=listmat_).data:
mf1 = sandy.errorr.read_mf1(self, mat)
egn = pd.IntervalIndex.from_breaks(mf1["EG"])
mf3 = sandy.errorr.read_mf3(self, mat, mt)
columns = pd.MultiIndex.from_tuples([(mat, mt)],
names=["MAT", "MT"])
index = pd.Index(egn, name="E")
data.append(pd.DataFrame(mf3["XS"], index=index, columns=columns))
data = pd.concat(data, axis=1).fillna(0)
return sandy.Xs(data)
def get_cov(self, multigroup=True):
"""
Extract cross section/nubar covariance from `Errorr` instance.
Returns
-------
data : `sandy CategoryCov`
xs/nubar covariance matrix for all cross section/nubar
MAT/MT in ERRORR file.
Examples
--------
>>> endf6 = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> err = endf6.get_errorr(ek_errorr=[1e-2, 1e1, 2e7], err=1)
>>> err.get_cov().data
MAT1 125
MT1 1 2 102
E1 (0.01, 10.0] (10.0, 20000000.0] (0.01, 10.0] (10.0, 20000000.0] (0.01, 10.0] (10.0, 20000000.0]
MAT MT E
125 1 (0.01, 10.0] 8.74838e-06 4.62556e-05 8.76101e-06 4.62566e-05 1.07035e-06 5.58627e-07
(10.0, 20000000.0] 4.62556e-05 2.47644e-04 4.63317e-05 2.47650e-04 7.58742e-09 1.49541e-06
2 (0.01, 10.0] 8.76101e-06 4.63317e-05 8.77542e-06 4.63327e-05 0.00000e+00 0.00000e+00
(10.0, 20000000.0] 4.62566e-05 2.47650e-04 4.63327e-05 2.47655e-04 0.00000e+00 0.00000e+00
102 (0.01, 10.0] 1.07035e-06 7.58742e-09 0.00000e+00 0.00000e+00 6.51764e-04 3.40163e-04
(10.0, 20000000.0] 5.58627e-07 1.49541e-06 0.00000e+00 0.00000e+00 3.40163e-04 6.70431e-02
>>> err.get_cov(multigroup=False).data
MAT1 125
MT1 1 2 102
E1 1.00000e-02 1.00000e+01 2.00000e+07 1.00000e-02 1.00000e+01 2.00000e+07 1.00000e-02 1.00000e+01 2.00000e+07
MAT MT E
125 1 1.00000e-02 8.74838e-06 4.62556e-05 0.00000e+00 8.76101e-06 4.62566e-05 0.00000e+00 1.07035e-06 5.58627e-07 0.00000e+00
1.00000e+01 4.62556e-05 2.47644e-04 0.00000e+00 4.63317e-05 2.47650e-04 0.00000e+00 7.58742e-09 1.49541e-06 0.00000e+00
2.00000e+07 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00
2 1.00000e-02 8.76101e-06 4.63317e-05 0.00000e+00 8.77542e-06 4.63327e-05 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00
1.00000e+01 4.62566e-05 2.47650e-04 0.00000e+00 4.63327e-05 2.47655e-04 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00
2.00000e+07 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00
102 1.00000e-02 1.07035e-06 7.58742e-09 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 6.51764e-04 3.40163e-04 0.00000e+00
1.00000e+01 5.58627e-07 1.49541e-06 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 3.40163e-04 6.70431e-02 0.00000e+00
2.00000e+07 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00
"""
eg = self.get_energy_grid()
if multigroup:
            eg = pd.IntervalIndex.from_breaks(eg)
import pandas as pd
import glob
import os
import re
import phyphy
from ete3 import Tree
import numpy as np
absrel = glob.glob("families_absrel/logs/*.ABSREL.log")
family_list = []
branch_list = []
pvalue_list = []
for file in absrel:
with open(file) as myfile:
for line in myfile:
if re.search(r'^\* \w.+ p-value', line):
family, branch, pvalue = file[21:].split(".")[0], line[2:].split(",")[0], line.split()[-1]
family_list.append(family)
branch_list.append(branch)
pvalue_list.append(pvalue)
ps = pd.DataFrame({"family": family_list, 'branch': branch_list, 'p-value': pvalue_list})
# ps: dataframe that includes all families, branches and pvalues of the ABSREL analysis
absrel_json = glob.glob("families_absrel/codon_alns/*.ABSREL.json")
tree_list = []
family_list = []
for file in absrel_json:
if os.stat(file).st_size > 0:
tree, family = phyphy.Extractor(file).extract_input_tree(), file[27:].split(".")[0]
tree_list.append(tree)
family_list.append(family)
tdf = pd.DataFrame({"tree": tree_list, "family": family_list})
# tdf: dataframe that includes all trees from ABSREL positives and their family
ps = ps.merge(tdf)
children_list = []
for i in range(0, len(ps)):
t = Tree(ps.tree[i], format=1)
node = t.search_nodes(name=ps.branch[i])[0]
children_list.append(node.get_leaf_names())
ps["children"] = children_list
# children list includes all children from the nodes under selection
lst_col = 'children'
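# "Explode" the children lists: repeat each row's scalar columns once per child, then
# attach the flattened children so the result has one row per (family, branch, child).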
r = pd.DataFrame({
col: np.repeat(ps[col].values, ps[lst_col].str.len())
for col in ps.columns.drop(lst_col)}
).assign(**{lst_col: np.concatenate(ps[lst_col].values)})[ps.columns]
r_dd = r.drop_duplicates(subset=['family', 'children'])
files = os.listdir("samples/")
taxa = []
for file in files:
if file.endswith(".faa"):
taxa.append(file.split(".")[0])
spec_name_list = []
for t in taxa:
with open("samples/" + t + ".faa",'r') as file:
for line in file:
if line.startswith(">"):
spec_name_list.append([line[1:].strip(), t])
spec_name = pd.DataFrame(spec_name_list, columns=["index", "species"]).set_index("index")
spec_name["children"] = spec_name.index
spec_name.replace(r"\.", "_", regex=True, inplace=True)
spec_name.replace(r"-", "_", regex=True, inplace=True)
spec_name["ps"] = spec_name["children"].isin(r_dd["children"])
spec_name.to_csv(snakemake.output[0], index=False)
proteome = glob.glob("samples/*.tsv")
col_names = ["protein_accession", "md5", "length", "analysis", "signature_accession",
"signature_description", "start", "stop", "score", "status", "date",
"ip_accession", "ip_description", "go", "pathway"]
annotations = []
for infile in proteome:
    data = pd.read_csv(infile, sep='\t', names=col_names)
    annotations.append(data)  # assumed continuation: collect each per-sample InterProScan table
# Import python modules
import os, sys
# data handling libraries
import pandas as pd
import numpy as np
import pickle
import json
import dask
from multiprocessing import Pool
# graphical control libraries
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
# shape and layer libraries
import fiona
from shapely.geometry import MultiPolygon, shape, point, box
from descartes import PolygonPatch
from matplotlib.collections import PatchCollection
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable
import geopandas as gpd
# data wrangling libraries
import ftplib, urllib.request, wget, bz2
from bs4 import BeautifulSoup as bs
class ogh_meta:
"""
The json file that describes the Gridded climate data products
"""
def __init__(self):
self.__meta_data = dict(json.load(open('ogh_meta.json','rb')))
# key-value retrieval
def __getitem__(self, key):
return(self.__meta_data[key])
# key list
def keys(self):
return(self.__meta_data.keys())
# value list
def values(self):
return(self.__meta_data.values())
# print('Version '+datetime.fromtimestamp(os.path.getmtime('ogh.py')).strftime('%Y-%m-%d %H:%M:%S')+' jp')
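# Usage sketch for ogh_meta (kept as comments so importing this module stays side-effect
# free; assumes 'ogh_meta.json' is available in the working directory):
# meta = ogh_meta()
# list(meta.keys())           # names of the gridded climate products described in the json
# meta[list(meta.keys())[0]]  # metadata dictionary for the first listed product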
def saveDictOfDf(outfilename, dictionaryObject):
# write a dictionary of dataframes to a json file using pickle
with open(outfilename, 'wb') as f:
pickle.dump(dictionaryObject, f)
f.close()
def readDictOfDf(infilename):
# read a dictionary of dataframes from a json file using pickle
with open(infilename, 'rb') as f:
dictionaryObject = pickle.load(f)
f.close()
return(dictionaryObject)
def reprojShapefile(sourcepath, newprojdictionary={'proj':'longlat', 'ellps':'WGS84', 'datum':'WGS84'}, outpath=None):
"""
sourcepath: (dir) the path to the .shp file
newprojdictionary: (dict) the new projection definition in the form of a dictionary (default provided)
outpath: (dir) the output path for the new shapefile
"""
# if outpath is none, treat the reprojection as a file replacement
if isinstance(outpath, type(None)):
outpath = sourcepath
shpfile = gpd.GeoDataFrame.from_file(sourcepath)
shpfile = shpfile.to_crs(newprojdictionary)
shpfile.to_file(outpath)
def getFullShape(shapefile):
"""
Generate a MultiPolygon to represent each shape/polygon within the shapefile
shapefile: (dir) a path to the ESRI .shp shapefile
"""
shp = fiona.open(shapefile)
mp = [shape(pol['geometry']) for pol in shp]
mp = MultiPolygon(mp)
shp.close()
return(mp)
def getShapeBbox(polygon):
"""
Generate a geometric box to represent the bounding box for the polygon, shapefile connection, or MultiPolygon
polygon: (geometry) a geometric polygon, MultiPolygon, or shapefile connection
"""
# identify the cardinal bounds
minx, miny, maxx, maxy = polygon.bounds
bbox = box(minx, miny, maxx, maxy, ccw=True)
return(bbox)
def readShapefileTable(shapefile):
"""
read in the datatable captured within the shapefile properties
shapefile: (dir) a path to the ESRI .shp shapefile
"""
#cent_df = gpd.read_file(shapefile)
shp = fiona.open(shapefile)
centroid = [eachpol['properties'] for eachpol in shp]
cent_df = pd.DataFrame.from_dict(centroid, orient='columns')
shp.close()
return(cent_df)
def filterPointsinShape(shape, points_lat, points_lon, points_elev=None, buffer_distance=0.06, buffer_resolution=16,
labels=['LAT', 'LONG_', 'ELEV']):
"""
filter for datafiles that can be used
shape: (geometry) a geometric polygon or MultiPolygon
points_lat: (series) a series of latitude points in WGS84 projection
points_lon: (series) a series of longitude points in WGS84 projection
points_elev: (series) a series of elevation points in meters; optional - default is None
buffer_distance: (float64) a numerical multiplier to increase the geodetic boundary area
buffer_resolution: (float64) the increments between geodetic longlat degrees
labels: (list) a list of preferred labels for latitude, longitude, and elevation
"""
# add buffer region
region = shape.buffer(buffer_distance, resolution=buffer_resolution)
# construct points_elev if null
if isinstance(points_elev, type(None)):
points_elev=np.repeat(np.nan, len(points_lon))
# Intersection each coordinate with the region
limited_list = []
for lon, lat, elev in zip(points_lon, points_lat, points_elev):
gpoint = point.Point(lon, lat)
if gpoint.intersects(region):
limited_list.append([lat, lon, elev])
maptable = pd.DataFrame.from_records(limited_list, columns=labels)
## dask approach ##
#intersection=[]
#for lon, lat, elev in zip(points_lon, points_lat, points_elev):
# gpoint = point.Point(lon, lat)
# intersection.append(dask.delayed(gpoint.intersects(region)))
# limited_list.append([intersection, lat, lon, elev])
# convert to dataframe
#maptable = pd.DataFrame({labels[0]:points_lat, labels[1]:points_lon, labels[2]:points_elev}
# .loc[dask.compute(intersection)[0],:]
# .reset_index(drop=True)
return(maptable)
def scrapeurl(url, startswith=None, hasKeyword=None):
"""
scrape the gridded datafiles from a url of interest
url: (str) the web folder path to be scraped for hyperlink references
startswith: (str) the starting keywords for a webpage element; default is None
hasKeyword: (str) keywords represented in a webpage element; default is None
"""
# grab the html of the url, and prettify the html structure
    page = urllib.request.urlopen(url).read()
page_soup = bs(page, 'lxml')
page_soup.prettify()
# loop through and filter the hyperlinked lines
if pd.isnull(startswith):
temp = [anchor['href'] for anchor in page_soup.findAll('a', href=True) if hasKeyword in anchor['href']]
else:
temp = [anchor['href'] for anchor in page_soup.findAll('a', href=True) if anchor['href'].startswith(startswith)]
# convert to dataframe then separate the lon and lat as float coordinate values
temp = pd.DataFrame(temp, columns = ['filenames'])
return(temp)
def treatgeoself(shapefile, NAmer, folder_path=os.getcwd(), outfilename='mappingfile.csv', buffer_distance=0.06):
"""
TreatGeoSelf to some [data] lovin'!
shapefile: (dir) the path to an ESRI shapefile for the region of interest
Namer: (dir) the path to an ESRI shapefile, which has each 1/16th coordinate and elevation information from a DEM
folder_path: (dir) the destination folder path for the mappingfile output; default is the current working directory
outfilename: (str) the name of the output file; default name is 'mappingfile.csv'
buffer_distance: (float64) the multiplier to be applied for increasing the geodetic boundary area; default is 0.06
"""
# conform projections to longlat values in WGS84
reprojShapefile(shapefile, newprojdictionary={'proj':'longlat', 'ellps':'WGS84', 'datum':'WGS84'}, outpath=None)
# read shapefile into a multipolygon shape-object
shape_mp = getFullShape(shapefile)
# read in the North American continental DEM points for the station elevations
NAmer_datapoints = readShapefileTable(NAmer).rename(columns={'Lat':'LAT','Long':'LONG_','Elev':'ELEV'})
# generate maptable
maptable = filterPointsinShape(shape_mp,
points_lat=NAmer_datapoints.LAT,
points_lon=NAmer_datapoints.LONG_,
points_elev=NAmer_datapoints.ELEV,
buffer_distance=buffer_distance, buffer_resolution=16, labels=['LAT', 'LONG_', 'ELEV'])
maptable.reset_index(inplace=True)
maptable = maptable.rename(columns={"index":"FID"})
print(maptable.shape)
print(maptable.tail())
# print the mappingfile
mappingfile=os.path.join(folder_path, outfilename)
maptable.to_csv(mappingfile, sep=',', header=True, index=False)
return(mappingfile)
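# Usage sketch (the shapefile paths below are placeholders, not files shipped with this module):
# mappingfile = treatgeoself(shapefile='watershed.shp',
#                            NAmer='NAmer_dem_points.shp',
#                            folder_path=os.getcwd(),
#                            outfilename='mappingfile.csv',
#                            buffer_distance=0.06)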
def mapContentFolder(resid):
"""
map the content folder within HydroShare
resid: (str) a string hash that represents the hydroshare resource that has been migrated
"""
path = os.path.join('/home/jovyan/work/notebooks/data', str(resid), str(resid), 'data/contents')
return(path)
# ### CIG (DHSVM)-oriented functions
def compile_bc_Livneh2013_locations(maptable):
"""
compile a list of file URLs for bias corrected Livneh et al. 2013 (CIG)
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/Livneh/bcLivneh_WWA_2013/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
def compile_Livneh2013_locations(maptable):
"""
compile a list of file URLs for Livneh et al. 2013 (CIG)
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://www.cses.washington.edu/rocinante/Livneh/Livneh_WWA_2013/forcs_dhsvm/',basename]
locations.append(''.join(url))
return(locations)
### VIC-oriented functions
def compile_VICASCII_Livneh2015_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2015 VIC.ASCII outputs
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Fluxes_Livneh_NAmerExt_15Oct2014', str(row['LAT']), str(row['LONG_'])])
url=["ftp://192.168.127.12/pub/dcp/archive/OBS/livneh2014.1_16deg/VIC.ASCII/latitude.",str(row['LAT']),'/',loci,'.bz2']
locations.append(''.join(url))
return(locations)
def compile_VICASCII_Livneh2013_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2013 VIC.ASCII outputs for the USA
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
# identify the subfolder blocks
blocks = scrape_domain(domain='livnehpublicstorage.colorado.edu',
subdomain='/public/Livneh.2013.CONUS.Dataset/Fluxes.asc.v.1.2.1915.2011.bz2/',
startswith='fluxes')
# map each coordinate to the subfolder
maptable = mapToBlock(maptable, blocks)
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['VIC_fluxes_Livneh_CONUSExt_v.1.2_2013', str(row['LAT']), str(row['LONG_'])])
url='/'.join(["ftp://livnehpublicstorage.colorado.edu/public/Livneh.2013.CONUS.Dataset/Fluxes.asc.v.1.2.1915.2011.bz2", str(row['blocks']), loci+".bz2"])
locations.append(url)
return(locations)
### Climate (Meteorological observations)-oriented functions
def canadabox_bc():
"""
Establish the Canadian (north of the US bounding boxes) Columbia river basin bounding box
"""
# left, bottom, right top
return(box(-138.0, 49.0, -114.0, 53.0))
def scrape_domain(domain, subdomain, startswith=None):
"""
scrape the gridded datafiles from a url of interest
domain: (str) the web folder path
subdomain: (str) the subfolder path to be scraped for hyperlink references
startswith: (str) the starting keywords for a webpage element; default is None
"""
# connect to domain
ftp = ftplib.FTP(domain)
ftp.login()
ftp.cwd(subdomain)
# scrape for data directories
tmp = [dirname for dirname in ftp.nlst() if dirname.startswith(startswith)]
geodf = pd.DataFrame(tmp, columns=['dirname'])
# conform to bounding box format
tmp = geodf['dirname'].apply(lambda x: x.split('.')[1:])
tmp = tmp.apply(lambda x: list(map(float,x)) if len(x)>2 else x)
# assemble the boxes
geodf['bbox']=tmp.apply(lambda x: box(x[0]*-1, x[2]-1, x[1]*-1, x[3]) if len(x)>2 else canadabox_bc())
return(geodf)
def mapToBlock(df_points, df_regions):
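    """
    Assign each point to the scraped data-directory block whose bounding box contains it
    df_points: (dataframe) point table with 'LAT' and 'LONG_' columns
    df_regions: (dataframe) block table with 'dirname' and 'bbox' columns, e.g. from scrape_domain
    """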
for index, eachblock in df_regions.iterrows():
for ind, row in df_points.iterrows():
if point.Point(row['LONG_'], row['LAT']).intersects(eachblock['bbox']):
df_points.loc[ind, 'blocks'] = str(eachblock['dirname'])
return(df_points)
def compile_dailyMET_Livneh2013_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2013 Daily Meteorology data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
# identify the subfolder blocks
blocks = scrape_domain(domain='livnehpublicstorage.colorado.edu',
subdomain='/public/Livneh.2013.CONUS.Dataset/Meteorology.asc.v.1.2.1915.2011.bz2/',
startswith='data')
# map each coordinate to the subfolder
maptable = mapToBlock(maptable, blocks)
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Meteorology_Livneh_CONUSExt_v.1.2_2013', str(row['LAT']), str(row['LONG_'])])
url='/'.join(["ftp://livnehpublicstorage.colorado.edu/public/Livneh.2013.CONUS.Dataset/Meteorology.asc.v.1.2.1915.2011.bz2", str(row['blocks']), loci+".bz2"])
locations.append(url)
return(locations)
def compile_dailyMET_Livneh2015_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2015 Daily Meteorology data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Meteorology_Livneh_NAmerExt_15Oct2014', str(row['LAT']), str(row['LONG_'])])
url=["ftp://192.168.127.12/pub/dcp/archive/OBS/livneh2014.1_16deg/ascii/daily/latitude.", str(row['LAT']),"/",loci,".bz2"]
locations.append(''.join(url))
return(locations)
# ### WRF-oriented functions
def compile_wrfnnrp_raw_Salathe2014_locations(maptable):
"""
compile a list of file URLs for Salathe et al., 2014 raw WRF NNRP data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/WRF/NNRP/vic_16d/WWA_1950_2010/raw/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
def compile_wrfnnrp_bc_Salathe2014_locations(maptable):
"""
compile a list of file URLs for the Salathe et al., 2014 bias corrected WRF NNRP data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/WRF/NNRP/vic_16d/WWA_1950_2010/bc/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
# ## Data file migration functions
def ensure_dir(f):
"""
check if the destination folder directory exists; if not, create it and set it as the working directory
f: (dir) the directory to create and/or set as working directory
"""
if not os.path.exists(f):
os.makedirs(f)
os.chdir(f)
def wget_download(listofinterest):
"""
Download files from an http domain
listofinterest: (list) a list of urls to request
"""
# check and download each location point, if it doesn't already exist in the download directory
for fileurl in listofinterest:
basename = os.path.basename(fileurl)
try:
ping = urllib.request.urlopen(fileurl)
if ping.getcode()!=404:
wget.download(fileurl)
print('downloaded: ' + basename)
except:
print('File does not exist at this URL: ' + basename)
# Download the files to the subdirectory
def wget_download_one(fileurl):
"""
Download a file from an http domain
fileurl: (url) a url to request
"""
# check and download each location point, if it doesn't already exist in the download directory
basename=os.path.basename(fileurl)
# if it exists, remove for new download (overwrite mode)
if os.path.isfile(basename):
os.remove(basename)
try:
ping = urllib.request.urlopen(fileurl)
if ping.getcode()!=404:
wget.download(fileurl)
print('downloaded: ' + basename)
except:
print('File does not exist at this URL: ' + basename)
def wget_download_p(listofinterest, nworkers=20):
"""
Download files from an http domain in parallel
listofinterest: (list) a list of urls to request
    nworkers: (int) the number of processors to distribute tasks; default is 20
"""
pool = Pool(int(nworkers))
pool.map(wget_download_one, listofinterest)
pool.close()
pool.terminate()
def ftp_download(listofinterest):
"""
Download and decompress files from an ftp domain
listofinterest: (list) a list of urls to request
"""
for loci in listofinterest:
# establish path info
fileurl=loci.replace('ftp://','') # loci is already the url with the domain already appended
ipaddress=fileurl.split('/',1)[0] # ip address
path=os.path.dirname(fileurl.split('/',1)[1]) # folder path
filename=os.path.basename(fileurl) # filename
# download the file from the ftp server
ftp=ftplib.FTP(ipaddress)
ftp.login()
ftp.cwd(path)
try:
ftp.retrbinary("RETR " + filename ,open(filename, 'wb').write)
ftp.close()
# decompress the file
decompbz2(filename)
except:
os.remove(filename)
print('File does not exist at this URL: '+fileurl)
def ftp_download_one(loci):
"""
Download and decompress a file from an ftp domain
loci: (url) a url to request
"""
# establish path info
fileurl=loci.replace('ftp://','') # loci is already the url with the domain already appended
ipaddress=fileurl.split('/',1)[0] # ip address
path=os.path.dirname(fileurl.split('/',1)[1]) # folder path
filename=os.path.basename(fileurl) # filename
# download the file from the ftp server
ftp=ftplib.FTP(ipaddress)
ftp.login()
ftp.cwd(path)
try:
ftp.retrbinary("RETR " + filename ,open(filename, 'wb').write)
ftp.close()
# decompress the file
decompbz2(filename)
except:
os.remove(filename)
print('File does not exist at this URL: '+fileurl)
def ftp_download_p(listofinterest, nworkers=5):
"""
Download and decompress files from an ftp domain in parallel
listofinterest: (list) a list of urls to request
nworkers: (int) the number of processors to distribute tasks; default is 5
"""
pool = Pool(int(nworkers))
pool.map(ftp_download_one, listofinterest)
pool.close()
pool.terminate()
def decompbz2(filename):
"""
Extract a file from a bz2 file of the same name, then remove the bz2 file
filename: (dir) the file path for a bz2 compressed file
"""
    with open(filename.split(".bz2",1)[0], 'wb') as new_file, open(filename, 'rb') as zipfile:
        decompressor = bz2.BZ2Decompressor()
        for data in iter(lambda: zipfile.read(100 * 1024), b''):
            new_file.write(decompressor.decompress(data))
    # the with-statement closes both file handles; remove the compressed archive afterwards
    os.remove(filename)
    print(os.path.splitext(filename)[0] + ' unzipped')
def catalogfiles(folderpath):
"""
make a catalog of the gridded files within a folderpath
folderpath: (dir) the folder of files to be catalogged, which have LAT and LONG_ as the last two filename features
"""
# read in downloaded files
    temp = [eachfile for eachfile in os.listdir(folderpath)
            if not os.path.isdir(os.path.join(folderpath, eachfile))]
if len(temp)==0:
# no files were available; setting default catalog output structure
catalog = pd.DataFrame([], columns=['filenames','LAT','LONG_'])
else:
# create the catalog dataframe and extract the filename components
catalog = pd.DataFrame(temp, columns=['filenames'])
catalog[['LAT','LONG_']] = catalog['filenames'].apply(lambda x: pd.Series(str(x).rsplit('_',2))[1:3]).astype(float)
# convert the filenames column to a filepath
catalog['filenames'] = catalog['filenames'].apply(lambda x: os.path.join(folderpath, x))
return(catalog)
def addCatalogToMap(outfilepath, maptable, folderpath, catalog_label):
"""
Update the mappingfile with a new column, a vector of filepaths for the downloaded files
outfilepath: (dir) the path for the output file
maptable: (dataframe) a dataframe containing the FID, LAT, LONG_, and ELEV information
folderpath: (dir) the folder of files to be catalogged, which have LAT and LONG_ as the last two filename features
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# assert catalog_label as a string-object
catalog_label = str(catalog_label)
# catalog the folder directory
catalog = catalogfiles(folderpath).rename(columns={'filenames':catalog_label})
# drop existing column
if catalog_label in maptable.columns:
maptable = maptable.drop(labels=catalog_label, axis=1)
# update with a vector for the catalog of files
maptable = maptable.merge(catalog, on=['LAT','LONG_'], how='left')
# remove blocks, if they were needed
if 'blocks' in maptable.columns:
maptable = maptable.drop(labels=['blocks'], axis=1)
# write the updated mappingfile
maptable.to_csv(outfilepath, header=True, index=False)
# Wrapper scripts
def getDailyMET_livneh2013(homedir, mappingfile, subdir='livneh2013/Daily_MET_1915_2011/raw', catalog_label='dailymet_livneh2013'):
"""
    Get the Livneh et al., 2013 Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate DailyMET livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_dailyMET_Livneh2013_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyMET_livneh2015(homedir, mappingfile, subdir='livneh2015/Daily_MET_1950_2013/raw', catalog_label='dailymet_livneh2015'):
"""
    Get the Livneh et al., 2015 Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate Daily MET livneh 2015 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_dailyMET_Livneh2015_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyMET_bcLivneh2013(homedir, mappingfile, subdir='livneh2013/Daily_MET_1915_2011/bc', catalog_label='dailymet_bclivneh2013'):
"""
    Get the Livneh et al., 2013 bias corrected Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate baseline_corrected livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_bc_Livneh2013_locations(maptable)
# download the files
wget_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyVIC_livneh2013(homedir, mappingfile, subdir='livneh2013/Daily_VIC_1915_2011', catalog_label='dailyvic_livneh2013'):
"""
    Get the Livneh et al., 2013 Daily VIC files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# FIRST RUN
# check and generate VIC_ASCII Flux model livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points for USA
locations = compile_VICASCII_Livneh2013_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyVIC_livneh2015(homedir, mappingfile, subdir='livneh2015/Daily_VIC_1950_2013', catalog_label='dailyvic_livneh2015'):
"""
    Get the Livneh et al., 2015 Daily VIC files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate Daily VIC.ASCII Flux model livneh 2015 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_VICASCII_Livneh2015_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyWRF_salathe2014(homedir, mappingfile, subdir='salathe2014/WWA_1950_2010/raw', catalog_label='dailywrf_salathe2014'):
"""
    Get the Salathe et al., 2014 raw Daily WRF files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate the Daily Meteorology raw WRF Salathe 2014 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# read in the longitude and latitude points from the reference mapping file
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_wrfnnrp_raw_Salathe2014_locations(maptable)
# download the data
wget_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyWRF_bcsalathe2014(homedir, mappingfile, subdir='salathe2014/WWA_1950_2010/bc', catalog_label='dailywrf_bcsalathe2014'):
"""
    Get the Salathe et al., 2014 bias corrected Daily WRF files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate the Daily Meteorology bias corrected WRF Salathe 2014 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# read in the longitude and latitude points from the reference mapping file
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_wrfnnrp_bc_Salathe2014_locations(maptable)
# download the data
wget_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
# # Data Processing libraries
def filesWithPath(folderpath):
"""
Create a list of filepaths for the files
folderpath: (dir) the folder of interest
"""
    files = [os.path.join(folderpath, eachfile) for eachfile in os.listdir(folderpath)
             if not eachfile.startswith('.')
             and not os.path.isdir(os.path.join(folderpath, eachfile))]  # exclude hidden files and subdirectories
return(files)
def compareonvar(map_df, colvar='all'):
"""
subsetting a dataframe based on some columns of interest
map_df: (dataframe) the dataframe of the mappingfile table
colvar: (str or list) the column(s) to use for subsetting; 'None' will return an outerjoin, 'all' will return an innerjoin
"""
# apply row-wise inclusion based on a subset of columns
if isinstance(colvar, type(None)):
return(map_df)
    if colvar == 'all':
# compare on all columns except the station info
return(map_df.dropna())
else:
# compare on only the listed columns
return(map_df.dropna(subset=colvar))
def mappingfileToDF(mappingfile, colvar='all'):
"""
read in a dataframe and subset based on columns of interest
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
colvar: (str or list) the column(s) to use for subsetting; 'None' will return an outerjoin, 'all' will return an innerjoin
"""
# Read in the mappingfile as a data frame
map_df = pd.read_csv(mappingfile)
# select rows (datafiles) based on the colvar(s) chosen, default is
map_df = compareonvar(map_df=map_df, colvar=colvar)
# compile summaries
print(map_df.head())
print('Number of gridded data files:'+ str(len(map_df)))
print('Minimum elevation: ' + str(np.min(map_df.ELEV))+ 'm')
print('Mean elevation: '+ str(np.mean(map_df.ELEV))+ 'm')
print('Maximum elevation: '+ str(np.max(map_df.ELEV))+ 'm')
return(map_df, len(map_df))
def read_in_all_files(map_df, dataset, metadata, file_start_date, file_end_date, file_time_step, file_colnames, file_delimiter, subset_start_date, subset_end_date):
"""
Read in files based on dataset label
map_df: (dataframe) the mappingfile clipped to the subset that will be read-in
dataset: (str) the name of the dataset catalogged into map_df
    metadata: (dict) the dictionary that contains the metadata explanations; default is None
file_colnames: (list) the list of shorthand variables; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_time_step: (str) the timedelta code that represents the difference between time points; default is 'D' (daily)
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
"""
# extract metadata if the information are not provided
if pd.notnull(metadata):
if isinstance(file_start_date, type(None)):
file_start_date = metadata[dataset]['start_date']
if isinstance(file_end_date, type(None)):
file_end_date = metadata[dataset]['end_date']
if isinstance(file_time_step, type(None)):
file_time_step = metadata[dataset]['temporal_resolution']
if isinstance(file_colnames, type(None)):
file_colnames = metadata[dataset]['variable_list']
if isinstance(file_delimiter, type(None)):
file_delimiter = metadata[dataset]['delimiter']
#initialize dictionary and time sequence
df_dict=dict()
met_daily_dates=pd.date_range(file_start_date, file_end_date, freq=file_time_step) # daily
# import data for all climate stations
for ind, row in map_df.iterrows():
tmp = pd.read_table(row[dataset], header=None, delimiter=file_delimiter, names=file_colnames)
tmp.set_index(met_daily_dates, inplace=True)
# subset to the date range of interest (default is file date range)
tmp = tmp.iloc[(met_daily_dates>=subset_start_date) & (met_daily_dates<=subset_end_date),:]
# set row indices
df_dict[tuple(row[['FID','LAT','LONG_']].tolist())] = tmp
return(df_dict)
def read_files_to_vardf(map_df, df_dict, gridclimname, dataset, metadata,
file_start_date, file_end_date, file_delimiter, file_time_step, file_colnames,
subset_start_date, subset_end_date, min_elev, max_elev):
"""
# reads in the files to generate variables dataframes
map_df: (dataframe) the mappingfile clipped to the subset that will be read-in
df_dict: (dict) an existing dictionary where new computations will be stored
gridclimname: (str) the suffix for the dataset to be named; if None is provided, default to the dataset name
dataset: (str) the name of the dataset catalogged into map_df
    metadata: (dict) the dictionary that contains the metadata explanations; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_delimiter: (str) a file parsing character to be used for file reading
file_time_step: (str) the timedelta code that represents the difference between time points; default is 'D' (daily)
file_colnames: (list) the list of shorthand variables; default is None
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
"""
# start time
starttime = pd.datetime.now()
# date range from ogh_meta file
met_daily_dates=pd.date_range(file_start_date, file_end_date, freq=file_time_step)
met_daily_subdates=pd.date_range(subset_start_date, subset_end_date, freq=file_time_step)
# omit null entries or missing data file
map_df = map_df.loc[pd.notnull(map_df[dataset]),:]
print('Number of data files within elevation range ('+str(min_elev)+':'+str(max_elev)+'): '+str(len(map_df)))
# iterate through each data file
for eachvar in metadata[dataset]['variable_list']:
# identify the variable column index
usecols = [metadata[dataset]['variable_list'].index(eachvar)]
# initiate df as a list
df_list=[]
# loop through each file
for ind, row in map_df.iterrows():
# consider rewriting the params to just select one column by index at a time
var_series = dask.delayed(pd.read_table)(filepath_or_buffer=row[dataset],
delimiter=file_delimiter,header=None,usecols=usecols,
names=[tuple(row[['FID','LAT','LONG_']])])
# append the series into the list of series
df_list.append(var_series)
# concatenate list of series (axis=1 is column-wise) into a dataframe
df1 = dask.delayed(pd.concat)(df_list, axis=1)
# set and subset date_range index
df2 = df1.set_index(met_daily_dates, inplace=False).loc[met_daily_subdates]
# end of variable table
print(eachvar+ ' dataframe reading to start: ' + str(pd.datetime.now()-starttime))
# assign dataframe to dictionary object
df_dict['_'.join([eachvar, gridclimname])] = dask.compute(df2)[0]
print(eachvar+ ' dataframe complete:' + str(pd.datetime.now()-starttime))
return(df_dict)
def read_daily_streamflow(file_name, drainage_area_m2, file_colnames=None, delimiter='\t', header='infer'):
# read in a daily streamflow data set
# if file_colnames are supplied, use header=None
if file_colnames is not None:
header=None
# read in the data
daily_data=pd.read_table(file_name, delimiter=delimiter, header=header)
# set columns, if header=None
if file_colnames is not None:
daily_data.columns=file_colnames
else:
file_colnames=list(daily_data.columns)
# calculate cfs to cms conversion, or vice versa
if 'flow_cfs' in daily_data.columns:
flow_cfs=daily_data['flow_cfs']
flow_cms=flow_cfs/(3.28084**3)
flow_mmday=flow_cms*1000*3600*24/drainage_area_m2
elif 'flow_cms' in daily_data.columns:
flow_cms=daily_data['flow_cms']
flow_cfs=flow_cms*(3.28084**3)
flow_mmday=flow_cms*1000*3600*24/drainage_area_m2
# determine the datetime
date_index=[file_colnames.index(each) for each in ['year','month','day']]
row_dates=pd.to_datetime(daily_data[date_index])
# generate the daily_flow and set the datetime as row indices
daily_flow=pd.concat([flow_cfs, flow_cms, flow_mmday],axis=1)
daily_flow.set_index(row_dates, inplace=True)
daily_flow.columns=['flow_cfs', 'flow_cms', 'flow_mmday']
return(daily_flow)
def read_daily_precip(file_name, file_colnames=None, header='infer', delimiter=r'\s+'):
# read in a daily precipitation data set
# if file_colnames are supplied, use header=None
    if pd.notnull(file_colnames):
header=None
# read in the data
daily_data=pd.read_table(file_name, delimiter=delimiter, header=header)
# set columns, if header=None
if pd.notnull(file_colnames):
daily_data.columns=file_colnames
else:
file_colnames=list(daily_data.columns)
    # convert precipitation from m to mm
if 'precip_m' in daily_data.columns:
precip_m=daily_data['precip_m']
precip_mm=precip_m*1000
# determine the datetime
date_index=[file_colnames.index(each) for each in ['year','month','day']]
row_dates=pd.to_datetime(daily_data[date_index])
# generate the daily_flow and set the datetime as row indices
daily_precip=pd.concat([precip_m, precip_mm],axis=1)
daily_precip.set_index(row_dates, inplace=True)
daily_precip.columns=['precip_m', 'precip_mm']
return(daily_precip)
def read_daily_snotel(file_name, file_colnames=None, usecols=None, delimiter=',', header='infer'):
# read in a daily SNOTEL observation data set
# if file_colnames are supplied, use header=None
if file_colnames is not None:
header=None
# read in the data
daily_data=pd.read_table(file_name, usecols=usecols, header=header, delimiter=delimiter)
# reset the colnames
daily_data.columns=['Date', 'Tmax_C', 'Tmin_C', 'Tavg_C', 'Precip_mm']
# transform the data
daily_data['Tmax_C']=(daily_data['Tmax_C'] -32)/1.8
daily_data['Tmin_C']=(daily_data['Tmin_C'] -32)/1.8
daily_data['Tavg_C']=(daily_data['Tavg_C'] -32)/1.8
daily_data['Precip_mm']=daily_data['Precip_mm'] *25.4
# determine the datetime
row_dates=pd.to_datetime(daily_data.Date)
# generate the daily_flow and set the datetime as row indices
daily_snotel=daily_data[['Tmax_C', 'Tmin_C', 'Tavg_C', 'Precip_mm']]
daily_snotel.set_index(row_dates, inplace=True)
return(daily_snotel)
def read_daily_coop(file_name, file_colnames=None, usecols=None, delimiter=',', header='infer'):
# read in a daily COOP observation data set
# if file_colnames are supplied, use header=None
if file_colnames is not None:
header=None
# read in the data
daily_data=pd.read_table(file_name, usecols=usecols, header=header, delimiter=delimiter,
date_parser=lambda x: pd.datetime.strptime(x, '%Y%m%d'),
parse_dates=[0],
na_values=-9999)
# reset the colnames
daily_data.columns=['Date', 'Precip_mm','Tmax_C', 'Tmin_C', 'Tavg_C']
# transform the data
daily_data['Tmax_C']=(daily_data['Tmax_C'] -32)/1.8
daily_data['Tmin_C']=(daily_data['Tmin_C'] -32)/1.8
daily_data['Tavg_C']=(daily_data['Tavg_C'] -32)/1.8
daily_data['Precip_mm']=daily_data['Precip_mm'] *25.4
# determine the datetime
row_dates=pd.to_datetime(daily_data.Date)
# generate the daily_flow and set the datetime as row indices
daily_coop=daily_data[['Precip_mm','Tmax_C', 'Tmin_C', 'Tavg_C']]
daily_coop.set_index(row_dates, inplace=True)
return(daily_coop)
# ### Data Processing functions
def generateVarTables(file_dict, gridclimname, dataset, metadata, df_dict=None):
"""
Slice the files by their common variable
    file_dict: (dict) a dictionary of dataframes for each tabular datafile
dataset: (str) the name of the dataset
metadata (dict) the dictionary that contains the metadata explanations; default is None
"""
    # combine the files into a pandas Panel (note: pd.Panel has since been removed
    # from pandas, so this function requires an older pandas release)
    panel = pd.Panel.from_dict(file_dict)
# initiate output dictionary
if pd.isnull(df_dict):
df_dict = dict()
# slice the panel for each variable in list
for eachvar in metadata[dataset]['variable_list']:
df_dict['_'.join([eachvar, gridclimname])] = panel.xs(key=eachvar, axis=2)
return(df_dict)
# compare two date sets for the start and end of the overlapping dates
def overlappingDates(date_set1, date_set2):
# find recent date
if date_set1[0] > date_set2[0]:
start_date = date_set1[0]
else:
start_date = date_set2[0]
# find older date
if date_set1[-1] < date_set2[-1]:
end_date = date_set1[-1]
else:
end_date = date_set2[-1]
return(start_date, end_date)
# Calculate means by 8 different methods
def multigroupMeans(VarTable, n_stations, start_date, end_date):
Var_daily = VarTable.loc[start_date:end_date, range(0,n_stations)]
# e.g., Mean monthly temperature at each station
month_daily=Var_daily.groupby(Var_daily.index.month).mean() # average monthly minimum temperature at each station
# e.g., Mean monthly temperature averaged for all stations in analysis
meanmonth_daily=month_daily.mean(axis=1)
    # e.g., Mean monthly temperature for minimum and maximum elevation stations
    # (note: analysis_elev_max_station and analysis_elev_min_station are expected to be
    # defined in the calling scope; they are not arguments of this function)
    meanmonth_min_maxelev_daily=Var_daily.loc[:,analysis_elev_max_station].groupby(Var_daily.index.month).mean()
    meanmonth_min_minelev_daily=Var_daily.loc[:,analysis_elev_min_station].groupby(Var_daily.index.month).mean()
# e.g., Mean annual temperature
year_daily=Var_daily.groupby(Var_daily.index.year).mean()
# e.g., mean annual temperature each year for all stations
meanyear_daily=year_daily.mean(axis=1)
# e.g., mean annual min temperature for all years, for all stations
meanallyear_daily=np.nanmean(meanyear_daily)
    # e.g., anomaly per year compared to average
anom_year_daily=meanyear_daily-meanallyear_daily
return(month_daily,
meanmonth_daily,
meanmonth_min_maxelev_daily,
meanmonth_min_minelev_daily,
year_daily,
meanyear_daily,
meanallyear_daily,
anom_year_daily)
def specialTavgMeans(VarTable):
    # note: start_date, end_date, and n_stations are expected to be defined in the
    # calling scope; they are not passed in as arguments
    Var_daily = VarTable.loc[start_date:end_date, range(0,n_stations)]
# Average temperature for each month at each station
permonth_daily=Var_daily.groupby(pd.TimeGrouper("M")).mean()
# Average temperature each month averaged at all stations
meanpermonth_daily=permonth_daily.mean(axis=1)
# Average monthly temperature for all stations
meanallpermonth_daily=meanpermonth_daily.mean(axis=0)
    # anomaly per year compared to average
anom_month_daily=(meanpermonth_daily-meanallpermonth_daily)/1000
return(permonth_daily,
meanpermonth_daily,
meanallpermonth_daily,
anom_month_daily)
def aggregate_space_time_average(VarTable, df_dict, suffix, start_date, end_date):
"""
VarTable: (dataframe) a dataframe with date ranges as the index
df_dict: (dict) a dictionary to which computed outputs will be stored
suffix: (str) a string representing the name of the original table
start_date: (date) the start of the date range within the original table
end_date: (date) the end of the date range within the original table
"""
starttime = pd.datetime.now()
# subset dataframe to the date range of interest
Var_daily = VarTable.loc[start_date:end_date,:]
# Mean monthly temperature at each station
df_dict['month_'+suffix] = Var_daily.groupby(Var_daily.index.month).mean()
# Mean monthly temperature averaged for all stations in analysis
df_dict['meanmonth_'+suffix] = Var_daily.groupby(Var_daily.index.month).mean().mean(axis=1)
# Mean annual temperature
df_dict['year_'+suffix] = Var_daily.groupby(Var_daily.index.year).mean()
# mean annual temperature each year for all stations
df_dict['meanyear_'+suffix] = Var_daily.groupby(Var_daily.index.year).mean().mean(axis=1)
# mean annual temperature for all years, for all stations
df_dict['meanallyear_'+suffix] = Var_daily.mean(axis=1).mean(axis=0)
# anomaly per year compared to average
df_dict['anom_year_'+suffix] = df_dict['meanyear_'+suffix] - df_dict['meanallyear_'+suffix]
print(suffix+ ' calculations completed in ' + str(pd.datetime.now()-starttime))
return(df_dict)
def aggregate_space_time_sum(VarTable, n_stations, start_date, end_date):
Var_daily = VarTable.loc[start_date:end_date, range(0,n_stations)]
# Average precipitation per month at each station
permonth_daily=Var_daily.groupby(pd.TimeGrouper("M")).sum()
# Average precipitation per month averaged at all stations
meanpermonth_daily=permonth_daily.mean(axis=1)
# Average monthly precipitation averaged at all stations
meanmonth_daily= meanpermonth_daily.groupby(meanpermonth_daily.index.month).mean()
return(Var_daily,
permonth_daily,
meanpermonth_daily,
meanmonth_daily)
def plotTavg(dictionary, loc_name, start_date, end_date):
    # Plot 1: Monthly temperature analysis of Livneh data
    # skip plotting if neither the Livneh nor the WRF monthly temperature summaries exist
    if ('meanmonth_temp_avg_liv2013_met_daily' not in dictionary.keys()
            and 'meanmonth_temp_avg_wrf2014_met_daily' not in dictionary.keys()):
        return
# generate month indices
wy_index=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
wy_numbers=[10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9]
month_strings=[ 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept']
# initiate the plot object
fig, ax=plt.subplots(1,1,figsize=(10, 6))
if 'meanmonth_temp_avg_liv2013_met_daily' in dictionary.keys():
# Liv2013
plt.plot(wy_index, dictionary['meanmonth_maxelev_temp_avg_liv2013_met_daily'][wy_numbers],'r*--',linewidth=1, label='Liv Tavg- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_temp_avg_liv2013_met_daily'][wy_numbers],'r-', linewidth=1, label='Liv Tavg- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_temp_avg_liv2013_met_daily'][wy_numbers],'rX--',linewidth=1, label='Liv Tavg- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
if 'meanmonth_temp_avg_wrf2014_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_maxelev_temp_avg_wrf2014_met_daily'][wy_numbers],'b^--',linewidth=1, label='WRF Tavg- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_temp_avg_wrf2014_met_daily'][wy_numbers],'b-',linewidth=1, label='WRF Tavg- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_temp_avg_wrf2014_met_daily'][wy_numbers],'bo--',linewidth=1, label='WRF Tavg- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
if 'meanmonth_temp_avg_livneh2013_wrf2014bc_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_maxelev_temp_avg_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g^--',linewidth=1, label='WRFbc Tavg- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_temp_avg_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g-',linewidth=1, label='WRFbc Tavg- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_temp_avg_livneh2013_wrf2014bc_met_daily'][wy_numbers],'go--',linewidth=1, label='WRFbc Tavg- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
# add reference line at y=0
plt.plot([1, 12],[0, 0], 'k-',linewidth=1)
plt.ylabel('Temperature (deg C)',fontsize=14)
plt.xlabel('Month',fontsize=14)
plt.xlim(1,12);
plt.xticks(wy_index, month_strings);
plt.tick_params(labelsize=12)
plt.legend(loc='best')
plt.grid(which='both')
plt.title(str(loc_name)+'\nAverage Temperature\n Years: '+str(start_date.year)+'-'+str(end_date.year)+'; Elevation: '+str(dictionary['analysis_elev_min'])+'-'+str(dictionary['analysis_elev_max'])+'m', fontsize=16)
plt.savefig('avg_monthly_temp'+str(loc_name)+'.png')
plt.show()
def plotPavg(dictionary, loc_name, start_date, end_date):
    # Plot 2: Monthly precipitation analysis of Livneh data
    # skip plotting if neither the Livneh nor the WRF monthly precipitation summaries exist
    if ('meanmonth_precip_liv2013_met_daily' not in dictionary.keys()
            and 'meanmonth_precip_wrf2014_met_daily' not in dictionary.keys()):
        return
# generate month indices
wy_index=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
wy_numbers=[10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9]
month_strings=[ 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept']
# initiate the plot object
fig, ax=plt.subplots(1,1,figsize=(10, 6))
if 'meanmonth_precip_liv2013_met_daily' in dictionary.keys():
# Liv2013
plt.plot(wy_index, dictionary['meanmonth_maxelev_precip_liv2013_met_daily'][wy_numbers],'r^--',linewidth=1, label='Liv Precip- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_precip_liv2013_met_daily'][wy_numbers],'r-', linewidth=1, label='Liv Precip- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_precip_liv2013_met_daily'][wy_numbers],'ro--',linewidth=1, label='Liv Precip- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
    if 'meanmonth_precip_wrf2014_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_maxelev_precip_wrf2014_met_daily'][wy_numbers],'b^--',linewidth=1, label='WRF Precip- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_precip_wrf2014_met_daily'][wy_numbers],'b-',linewidth=1, label='WRF Precip- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_precip_wrf2014_met_daily'][wy_numbers],'bo--',linewidth=1, label='WRF Precip- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
    if 'meanmonth_precip_livneh2013_wrf2014bc_met_daily' in dictionary.keys():
        # WRF2014 bias-corrected
plt.plot(wy_index, dictionary['meanmonth_maxelev_precip_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g^--',linewidth=1, label='WRFbc Precip- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_precip_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g-',linewidth=1, label='WRFbc Precip- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_precip_livneh2013_wrf2014bc_met_daily'][wy_numbers],'go--',linewidth=1, label='WRFbc Precip- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
# add reference line at y=0
plt.plot([1, 12],[0, 0], 'k-',linewidth=1)
plt.ylabel('Precip (mm)',fontsize=14)
plt.xlabel('Month',fontsize=14)
plt.xlim(1,12);
plt.xticks(wy_index, month_strings);
plt.tick_params(labelsize=12)
plt.legend(loc='best')
plt.grid(which='both')
plt.title(str(loc_name)+'\nAverage Precipitation\n Years: '+str(start_date.year)+'-'+str(end_date.year)+'; Elevation: '+str(dictionary['analysis_elev_min'])+'-'+str(dictionary['analysis_elev_max'])+'m', fontsize=16)
plt.savefig('avg_monthly_precip'+str(loc_name)+'.png')
plt.show()
def gridclim_dict(mappingfile, dataset, gridclimname=None, metadata=None, min_elev=None, max_elev=None,
file_start_date=None, file_end_date=None, file_time_step=None,
file_colnames=None, file_delimiter=None,
subset_start_date=None, subset_end_date=None, df_dict=None, colvar='all'):
"""
# pipelined operation for assimilating data, processing it, and standardizing the plotting
mappingfile: (dir) the path directory to the mappingfile
dataset: (str) the name of the dataset within mappingfile to use
gridclimname: (str) the suffix for the dataset to be named; if None is provided, default to the dataset name
    metadata: (dict) the dictionary that contains the metadata explanations; default is None
min_elev: (float) the minimum elevation criteria; default is None
max_elev: (float) the maximum elevation criteria; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_time_step: (str) the timedelta code that represents the difference between time points; default is 'D' (daily)
file_colnames: (list) the list of shorthand variables; default is None
file_delimiter: (str) a file parsing character to be used for file reading
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
df_dict: (dict) an existing dictionary where new computations will be stored
"""
# generate the climate locations and n_stations
locations_df, n_stations = mappingfileToDF(mappingfile, colvar=colvar)
# generate the climate station info
if pd.isnull(min_elev):
min_elev = locations_df.ELEV.min()
if pd.isnull(max_elev):
max_elev = locations_df.ELEV.max()
# extract metadata if the information are not provided
if not isinstance(metadata, type(None)):
if isinstance(file_start_date, type(None)):
file_start_date = metadata[dataset]['start_date']
if isinstance(file_end_date, type(None)):
file_end_date = metadata[dataset]['end_date']
if isinstance(file_time_step, type(None)):
file_time_step = metadata[dataset]['temporal_resolution']
if isinstance(file_colnames, type(None)):
file_colnames = metadata[dataset]['variable_list']
if isinstance(file_delimiter, type(None)):
file_delimiter = metadata[dataset]['delimiter']
# take all defaults if subset references are null
if pd.isnull(subset_start_date):
subset_start_date = file_start_date
if pd.isnull(subset_end_date):
subset_end_date = file_end_date
    # initiate the output dictionary if df_dict was null
if pd.isnull(df_dict):
df_dict = dict()
if pd.isnull(gridclimname):
        if pd.notnull(dataset):
            gridclimname = dataset
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from scipy.interpolate import interp1d
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
# -------------------------------------------------------------------------------- 5.1 Approximation Demand and Supply
# ---------- Demand and Supply Functions ----------
def demand(p):
"""Vectorized Function to determine *demand*.
Args:
p (np.array): Price vector for demand.
Raises:
        TypeError: Argument p has to be an array.
AssertionError: Type of q and p has to be identical.
Returns:
np.array: Returns demand quantity.
"""
if not isinstance(p, np.ndarray):
raise TypeError("Price vector has to be an array!")
r = np.random.rand() * 2
n = abs(np.random.randn()) * 2
q = (
40 / (p + n)
+ 1 / (1 + np.exp(p - 75 + r))
+ 2 / (1 + np.exp(p - 50 + r))
+ 3 / (1 + np.exp(p - 25 + r))
)
q[q > 20] = np.nan
assert type(q) == type(p), "Type of output does not equal type of input!"
return q
def supply(p):
"""Vectorized Function to determine *supply.*
Args:
p (np.array): Price vector for supply.
Raises:
        TypeError: Argument p has to be an array.
AssertionError: Type of q and p has to be identical.
Returns:
np.array: Returns supply quantity.
"""
if not isinstance(p, np.ndarray):
raise TypeError("Price vector has to be an array!")
q = np.zeros(p.shape)
for i, c in enumerate(p):
if (c > 0) and (c < 10):
q[i] = 1.0
elif (c >= 10) and (c < 20):
q[i] = 1.5
elif (c >= 20) and (c < 25):
q[i] = 3.0
elif (c >= 25) and (c < 35):
q[i] = 3.6
elif (c >= 35) and (c < 45):
q[i] = 4.2
elif (c >= 45) and (c < 60):
q[i] = 5.0
elif (c >= 60) and (c < 75):
q[i] = 8.0
elif (c >= 75) and (c < 85):
q[i] = 12.0
elif (c >= 85) and (c < 90):
q[i] = 16.5
elif (c >= 90) and (c < 95):
q[i] = 18.5
elif c >= 95:
q[i] = 20.0
assert type(q) == type(p), "Type of output does not equals type of input!"
return q
# ---------- Approximation using scipy ----------
class PolynomialDS:
"""Object that approximates supply and demand functions using sicpy
interpolate method.
Args:
a (int): Lower bound of prices.
b (int): Upper bound of prices.
nodes (int): Interpolation nodes for demand and supply.
        demand (function): Benchmark demand function.
        supply (function): Benchmark supply function.
Raises:
AssertionError: Price must be non-negative.
AssertionError: By Assumption: price cannot exceed 100.
"""
def __init__(self, a, b, nodes, demand, supply):
"""Constructor method.
"""
self.a = a
self.b = b
assert a >= 0, "Price cannot be negative!"
assert (b > a) and (b <= 100), "By Assumption: Price cannot exceed 100!"
self.nodes = nodes
self.demand = demand
self.supply = supply
self.p = np.linspace(a, b, nodes)
self.qd = demand(self.p)
self.qs = supply(self.p)
def __len__(self):
"""Returns number of interpolation nodes.
Returns:
int: Number of known prices.
"""
return len(self.p)
def __repr__(self):
"""String representation of object.
"""
p = np.around(self.p, decimals=2)
qd = np.around(self.qd, decimals=2)
qs = np.around(self.qs, decimals=2)
return f"{len(self)} known values for Demand and Supply:\n\nPrices={p} \n\nDemand={qd} \nSupply={qs}"
def __call__(self, p):
"""Returns true and approximated value of demand and supply for a
given price.
Args:
p (np.array): Price vector.
Returns:
            str: Comparison of true and approximated demand and supply at p.
"""
self.apprx_qd = interp1d(self.p, self.qd)
self.apprx_qs = interp1d(self.p, self.qs)
return f"-- Real value -- at price {p}: \n\nDemand = {self.demand(p)} \nSupply = {self.supply(p)} \n\n-- Approximated value -- at price {p}: \n\nDemand = {self.apprx_qd(p)} \nSupply = {self.apprx_qs(p)}"
@staticmethod
def __name__():
"""Returns the name of the object.
"""
return "Demand and Supply Interpolator"
def plt_approx(self, fs=(14, 7), num1=16.1, num2=16.2, num3=16.3, num4=16.4):
"""Plots Approximation and true supply as well as demand.
Args:
fs (tuple, optional): Figuresize. Defaults to (14, 7).
num1 (float, optional): Number first figure. Defaults to 16.1.
num2 (float, optional): Number second figure. Defaults to 16.2.
num3 (float, optional): Number third figure. Defaults to 16.3.
num4 (float, optional): Number fourth figure. Defaults to 16.4.
"""
prices = np.linspace(self.a, self.b, self.nodes * 150)
apprx_qd = self.apprx_qd(prices)
apprx_qs = self.apprx_qs(prices)
qd = self.demand(prices)
qs = self.supply(prices)
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=fs)
ax1[0].plot(self.qd, self.p, "o", label="Nodes Demand", color="#4B045D")
ax1[0].plot(
apprx_qd, prices, label="Interpolation Demand", ls="--", color="#8E0C08"
)
ax1[0].plot(qd, prices, label="Real Demand", alpha=0.7, color="#D98D08")
ax1[0].set_title(f"Figure {num1}: Approximation of Demand")
ax1[0].legend(loc="center right")
ax1[0].grid()
ax1[1].plot(self.qs, self.p, "o", label="Nodes Supply", color="#4B045D")
ax1[1].plot(
apprx_qs, prices, label="Interpolation Supply", ls="--", color="#0C5BCD"
)
ax1[1].plot(qs, prices, label="Real Supply", alpha=0.7, color="#67853E")
ax1[1].set_title(f"Figure {num2}: Approximation of Supply")
ax1[1].legend(loc="center right")
ax1[1].grid()
ax2[0].plot(
apprx_qd, prices, label="Interpolation Demand", ls="--", color="#8E0C08"
)
ax2[0].plot(
apprx_qs, prices, label="Interpolation Supply", ls="--", color="#0C5BCD"
)
ax2[0].set_title(f"Figure {num3}: Approximated Demand and Supply")
ax2[0].legend(loc="center right")
ax2[0].grid()
ax2[1].plot(qd, prices, label="Real Demand", color="#D98D08")
ax2[1].plot(qs, prices, label="Real Supply", color="#67853E")
ax2[1].set_title(f"Figure {num4}: True Demand and Supply")
ax2[1].legend(loc="center right")
ax2[1].grid()
plt.show()
abs_error_qd = np.array(abs(qd - apprx_qd))
abs_error_qd = abs_error_qd[~np.isnan(abs_error_qd)]
abs_error_qs = np.array(abs(qs - apprx_qs))
print(
f"Mean Absolute Error: \n\nDemand = {abs_error_qd.mean():.4f} \nSupply = {abs_error_qs.mean():.4f}"
)
def close_intersection(self, nodes=1000000):
"""Returns true and approximated market equilibrium.
Args:
nodes (int, optional): Number of interpolation nodes. Defaults to 1000000.
"""
prices = np.linspace(self.a, self.b, nodes)
f = lambda p: self.demand(p) - self.supply(p)
abs_sd = f(prices)
abs_sd = abs_sd[~np.isnan(abs_sd)]
argmin = abs(abs_sd).argmin()
pe = prices[argmin]
        qe_demand = np.around(self.demand(np.array([pe])), decimals=3)
        qe_supply = np.around(self.supply(np.array([pe])), decimals=3)
        g = lambda p: self.apprx_qd(p) - self.apprx_qs(p)
        abs_asd = g(prices)
abs_asd = abs_asd[~np.isnan(abs_asd)]
argmin_a = abs(abs_asd).argmin()
pea = prices[argmin_a]
aqe_demand = np.around(self.apprx_qd(np.array([pea])), decimals=3)
aqe_supply = np.around(self.apprx_qs(np.array([pea])), decimals=3)
print(
f"Equilibrium True (Quantity, Price) \n*** *** *** *** \nDemand: {(qe_demand[0], np.around(pe, decimals=3))} \nSupply: {(qe_supply[0], np.around(pe, decimals=3))}\n"
)
print(
f"Equilibrium Approximation (Quantity, Price) \n*** *** *** *** \nDemand: {(aqe_demand[0], np.around(pea, decimals=3))} \nSupply: {(aqe_supply[0], np.around(pea, decimals=3))}"
)
# ---------- Approximation using ML ----------
class AISupplyDemandApprox:
"""Object that approximates supply and demand using various ML methods.
Args:
nodes (int): Number of known nodes.
supply (function): Unknown supply function.
demand (function): Unknown demand function.
a (int, optional): Lower bound of prices. Defaults to 0.
b (int, optional): Upper bound of prices. Defaults to 100.
ts (float, optional): Size of testing data. Defaults to 0.4.
rs (int, optional): Random state. Defaults to 42.
Raises:
AssertionError: Price must be non-negative.
AssertionError: Training data includes nan values.
AssertionError: Testing data includes nan values.
"""
def __init__(self, nodes, supply, demand, a=0, b=100, ts=0.4, rs=42):
"""Constructor method.
"""
assert a >= 0, "Price must be Non Negative!"
p = np.linspace(a, b, nodes)
q = supply(p)
qd = demand(p)
p_train, p_test, q_train, q_test = train_test_split(
p, q, test_size=ts, random_state=rs
)
pd_train, pd_test, qd_train, qd_test = train_test_split(
p, qd, test_size=ts, random_state=rs
)
self.p_train = p_train.reshape(-1, 1) # reshape data
self.p_test = p_test.reshape(-1, 1) # reshape data
self.q_train = q_train.reshape(-1, 1) # reshape data
self.q_test = q_test.reshape(-1, 1) # reshape data
nan_ind = np.argwhere(np.isnan(qd_train)) # select index of nan values
qd_train_mod = np.delete(qd_train, nan_ind) # delete nan index value
pd_train_mod = np.delete(pd_train, nan_ind)
self.pd_train = pd_train_mod.reshape(-1, 1)
self.pd_test = pd_test.reshape(-1, 1)
self.qd_train = qd_train_mod.reshape(-1, 1)
self.qd_test = qd_test.reshape(-1, 1)
assert np.isnan(self.pd_train).all() == False, "There are nan Values!"
assert np.isnan(self.pd_test).all() == False, "There are nan Values!"
@staticmethod
def __name__():
"""Returns name of AISupplyDemandApprox object.
"""
return "Modern-ML Demand and Supply Interpolator"
def plots(
self,
colors=["teal", "yellowgreen", "gold"],
label=["Training Values", "Testing Values"] * 2,
markers=["x", "*", "v"],
n_neighbors=4,
degrees=[3, 6],
weight="distance",
fs=(15, 10),
num1=17.1,
num2=17.2,
num3=17.3,
num4=17.4,
):
"""Plots approximation results as well as training and testing data.
Args:
colors (list, optional): Colors of approximation results. Defaults
to ["teal", "yellowgreen", "gold"].
label (list, optional): Labels of training and testing data.
Defaults to ["Training Values", "Testing Values"]*2.
markers (list, optional): Markers of approximation. Defaults
to ["x", "*", "v"].
n_neighbors (int, optional): Number of k-nearest neighbors. Defaults to 4.
degrees (list, optional): Number of degrees for Linear Regression.
Defaults to [3, 6].
weight (str, optional): Weight of KNN Regression. Defaults to "distance".
fs (tuple, optional): Figuresize. Defaults to (15, 10)
num1 (float, optional): Number of first Figure. Defaults to 17.1.
num2 (float, optional): Number of second Figure. Defaults to 17.2.
num3 (float, optional): Number of third Figure. Defaults to 17.3.
num4 (float, optional): Number of fourth Figure. Defaults to 17.4.
Raises:
AssertionError: Length of degrees is out of range.
"""
self.degrees = degrees
assert len(degrees) == 2, "List out of range!"
qsup, psup = [self.q_train, self.q_test], [self.p_train, self.p_test]
qdem, pdem = [self.qd_train, self.qd_test], [self.pd_train, self.pd_test]
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=fs)
        # avoid naming the loop variable "pd", which would shadow the pandas import
        for i, (qs, ps, qd, pdm) in enumerate(zip(qsup, psup, qdem, pdem)):
            for ax in [ax1[0], ax1[1]]:
                ax.plot(qs, ps, "o", ms=4, label=label[i])
            for ax in [ax2[0], ax2[1]]:
                ax.plot(qd, pdm, "o", ms=4, label=label[i])
self.maes, self.maed = [], []
self.mses, self.msed = [], []
self.evss, self.evsd = [], []
self.r2s, self.r2d = [], []
for i, ax in enumerate([ax1, ax2]):
for j, d in enumerate(degrees):
model = make_pipeline(PolynomialFeatures(d), LinearRegression())
if i == 0:
model.fit(self.p_train, self.q_train)
pred = model.predict(self.p_test)
ax[i].plot(
pred,
self.p_test,
markers[j],
color=colors[j],
ms=5,
label=f"Approximation Degree {d}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.p_test.ravel()[indexs_to_order_by]
ax[i].plot(pred_ordered, ptest_ordered, color=colors[j], alpha=0.5)
ax[i].set_title(
f"Figure {num1}: Linear Regression Approximation Supply"
)
ax[i].grid(True)
ax[i].legend(loc="center right")
self.maes.append(mean_absolute_error(pred, self.q_test))
self.mses.append(mean_squared_error(pred, self.q_test))
self.evss.append(explained_variance_score(pred, self.q_test))
self.r2s.append(r2_score(pred, self.q_test))
elif i == 1:
model.fit(self.pd_train, self.qd_train)
pred = model.predict(self.pd_test)
ax[i - 1].plot(
pred,
self.pd_test,
markers[j],
color=colors[j],
ms=5,
label=f"Approximation Degree {d}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.pd_test.ravel()[indexs_to_order_by]
ax[i - 1].plot(
pred_ordered, ptest_ordered, color=colors[j], alpha=0.5
)
ax[i - 1].set_title(
f"Figure {num3}: Linear Regression Approximation Demand"
)
ax[i - 1].grid(True)
ax[i - 1].legend(loc="center right")
self.maed.append(mean_absolute_error(pred, self.qd_test))
self.msed.append(mean_squared_error(pred, self.qd_test))
self.evsd.append(explained_variance_score(pred, self.qd_test))
self.r2d.append(r2_score(pred, self.qd_test))
methods = ["KNN", "DecisionTree"]
knn = KNeighborsRegressor(n_neighbors, weights=weight)
tree = DecisionTreeRegressor()
for i, ax in enumerate([ax1, ax2]):
for j, m in enumerate([knn, tree]):
if i == 0:
m.fit(self.p_train, self.q_train)
pred = m.predict(self.p_test)
ax[i + 1].plot(
pred,
self.p_test,
markers[j],
color=colors[j],
ms=4,
label=f"Approximation using {methods[j]}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.pd_test.ravel()[indexs_to_order_by]
ax[i + 1].plot(
pred_ordered, ptest_ordered, color=colors[j], alpha=0.5
)
ax[i + 1].set_title(
f"Figure {num2}: KNN and DT Approximation Supply"
)
ax[i + 1].grid(True)
ax[i + 1].legend(loc="center right")
self.maes.append(mean_absolute_error(pred, self.q_test))
self.mses.append(mean_squared_error(pred, self.q_test))
self.evss.append(explained_variance_score(pred, self.q_test))
self.r2s.append(r2_score(pred, self.q_test))
elif i == 1:
m.fit(self.pd_train, self.qd_train)
pred = m.predict(self.pd_test)
ax[i].plot(
pred,
self.pd_test,
markers[j],
color=colors[j],
ms=4,
label=f"Approximation using {methods[j]}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.pd_test.ravel()[indexs_to_order_by]
ax[i].plot(pred_ordered, ptest_ordered, color=colors[j], alpha=0.5)
ax[i].set_title(f"Figure {num4}: KNN and DT Approximation Demand")
ax[i].grid(True)
ax[i].legend(loc="center right")
self.maed.append(mean_absolute_error(pred, self.qd_test))
self.msed.append(mean_squared_error(pred, self.qd_test))
self.evsd.append(explained_variance_score(pred, self.qd_test))
self.r2d.append(r2_score(pred, self.qd_test))
plt.show()
def reslts_as_frame(self, num=14):
"""Returns accuracy of approximation using ML.
Args:
            num (int, float, optional): Number of the dataframe. Defaults to 14.
Returns:
pd.DataFrame: Accuracy of approximation.
"""
d1, d2 = self.degrees[0], self.degrees[1]
index_as_array_sup = [
np.array(["Supply"] * 4),
np.array(["Linear Regression"] * 2 + ["KNN Regression", "DTR"]),
np.array([f"{d1} Degrees", f"{d2} Degrees", "", ""]),
]
index_as_array_dem = [
np.array(["Demand"] * 4),
np.array(["Linear Regression"] * 2 + ["KNN Regression", "DTR"]),
np.array([f"{d1} Degrees", f"{d2} Degrees", "", ""]),
]
col = [
"Mean Absolute Error",
"Mean Squared Error",
"Explained Variance Score",
"$R^2$-Score",
]
data_supply = pd.concat(
[
| pd.DataFrame(self.maes, index=index_as_array_sup) | pandas.DataFrame |
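# Illustrative aside (not part of the dataset row above): passing a list of
# equal-length arrays as `index` gives a frame a hierarchical (MultiIndex) row
# index, which is what the accuracy summary being assembled above relies on.
# The metric values below are made up for illustration only.
import numpy as np
import pandas as pd

rows = [
    np.array(["Supply"] * 4),
    np.array(["Linear Regression", "Linear Regression", "KNN Regression", "DTR"]),
]
metrics = pd.DataFrame(
    {"Mean Absolute Error": np.random.rand(4), "Mean Squared Error": np.random.rand(4)},
    index=rows,
)
print(metrics)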
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
ser = pd.Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": pd.Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
expected = pd.Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestFrameArithmeticUnsorted:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.permutation(len(half)))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_operators_none_as_na(self, op):
df = DataFrame(
{"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
)
# since filling converts dtypes from object, changed expected to be
# object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
tm.assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
result = getattr(float_frame, op)("foo")
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product(
[list("abc"), ["one", "two", "three"], [1, 2, 3]],
names=["first", "second", "third"],
)
df = DataFrame(
np.arange(27 * 3).reshape(27, 3),
index=index,
columns=["value1", "value2", "value3"],
).sort_index()
idx = pd.IndexSlice
for op in ["add", "sub", "mul", "div", "truediv"]:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
[opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
tm.assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ["two", "three"])
result = getattr(df, op)(x, level="second", axis=0)
expected = (
pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
tm.assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
s = pd.Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
s2 = s.copy()
s2.index.name = "lvl1"
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level="lvl1")
res6 = df2.mul(s2, axis=1, level="lvl1")
exp = DataFrame(
np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
)
for res in [res1, res2]:
tm.assert_frame_equal(res, exp)
exp.columns.names = ["lvl0", "lvl1"]
for res in [res3, res4, res5, res6]:
tm.assert_frame_equal(res, exp)
def test_add_with_dti_mismatched_tzs(self):
base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
idx1 = base.tz_convert("Asia/Tokyo")[:2]
idx2 = base.tz_convert("US/Eastern")[1:]
df1 = DataFrame({"A": [1, 2]}, index=idx1)
df2 = DataFrame({"A": [1, 1]}, index=idx2)
exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
tm.assert_frame_equal(df1 + df2, exp)
def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
frame_copy["C"][:5] = np.nan
added = float_frame + frame_copy
indexer = added["A"].dropna().index
exp = (float_frame["A"] * 2).copy()
tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added["A"], exp.loc[added["A"].index])
assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added["D"]).all()
self_added = float_frame + float_frame
tm.assert_index_equal(self_added.index, float_frame.index)
added_rev = frame_copy + float_frame
assert np.isnan(added["D"]).all()
assert np.isnan(added_rev["D"]).all()
# corner cases
# empty
plus_empty = float_frame + DataFrame()
assert np.isnan(plus_empty.values).all()
empty_plus = DataFrame() + float_frame
assert np.isnan(empty_plus.values).all()
empty_empty = DataFrame() + DataFrame()
assert empty_empty.empty
# out of order
reverse = float_frame.reindex(columns=float_frame.columns[::-1])
tm.assert_frame_equal(reverse + float_frame, float_frame * 2)
# mix vs float64, upcast
added = float_frame + mixed_float_frame
_check_mixed_float(added, dtype="float64")
added = mixed_float_frame + float_frame
_check_mixed_float(added, dtype="float64")
# mix vs mix
added = mixed_float_frame + mixed_float_frame
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")
def test_combine_series(
self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
):
# Series
series = float_frame.xs(float_frame.index[0])
added = float_frame + series
for key, s in added.items():
tm.assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
larger_series["E"] = 1
larger_series = Series(larger_series)
larger_added = float_frame + larger_series
for key, s in float_frame.items():
tm.assert_series_equal(larger_added[key], s + series[key])
assert "E" in larger_added
assert np.isnan(larger_added["E"]).all()
# no upcast needed
added = mixed_float_frame + series
assert np.all(added.dtypes == series.dtype)
# vs mix (upcast) as needed
added = mixed_float_frame + series.astype("float32")
_check_mixed_float(added, dtype=dict(C=None))
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype=dict(C=None))
# FIXME: don't leave commented-out
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
# added = mixed_int_frame + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = mixed_int_frame + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = datetime_frame["A"]
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
added = datetime_frame.add(ts, axis="index")
for key, col in datetime_frame.items():
result = col + ts
tm.assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == "A"
else:
assert result.name is None
smaller_frame = datetime_frame[:-5]
smaller_added = smaller_frame.add(ts, axis="index")
tm.assert_index_equal(smaller_added.index, datetime_frame.index)
smaller_ts = ts[:-5]
smaller_added2 = datetime_frame.add(smaller_ts, axis="index")
tm.assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = datetime_frame.add(ts[:0], axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# Frame is all-nan
result = datetime_frame[:0].add(ts, axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# empty but with non-empty index
frame = datetime_frame[:1].reindex(columns=[])
result = frame.mul(ts, axis="index")
assert len(result) == len(ts)
def test_combineFunc(self, float_frame, mixed_float_frame):
result = float_frame * 2
tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
# vs mix
result = mixed_float_frame * 2
for c, s in result.items():
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = DataFrame() * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
def test_comparisons(self, simple_frame, float_frame):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = simple_frame.xs("a")
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))
msg = (
"Unable to coerce to Series/DataFrame, "
"dimension must be <= 2: (30, 4, 1, 1, 1)"
)
with pytest.raises(ValueError, match=re.escape(msg)):
func(df1, ndim_5)
result2 = func(simple_frame, row)
tm.assert_numpy_array_equal(
result2.values, func(simple_frame.values, row.values)
)
result3 = func(float_frame, 0)
tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))
msg = "Can only compare identically-labeled DataFrame"
with pytest.raises(ValueError, match=msg):
func(simple_frame, simple_frame[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):
# GH 11565
df = DataFrame(
{x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}
)
f = getattr(operator, compare_operators_no_eq_ne)
msg = "'[<>]=?' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
f(df, 0)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]["A"] = np.nan
with np.errstate(invalid="ignore"):
expected = missing_df.values < 0
with np.errstate(invalid="raise"):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
lst = [2, 2, 2]
tup = tuple(lst)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
tm.assert_frame_equal(result, expected)
result = df.values > b
tm.assert_numpy_array_equal(result, expected.values)
msg1d = "Unable to coerce to Series, length must be 2: given 3"
msg2d = "Unable to coerce to DataFrame, shape must be"
msg2db = "operands could not be broadcast together with shapes"
with pytest.raises(ValueError, match=msg1d):
# wrong shape
df > lst
with pytest.raises(ValueError, match=msg1d):
# wrong shape
result = df > tup
# broadcasts like ndarray (GH#23000)
result = df > b_r
tm.assert_frame_equal(result, expected)
result = df.values > b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df > b_c
with pytest.raises(ValueError, match=msg2db):
df.values > b_c
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
tm.assert_frame_equal(result, expected)
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
# broadcasts like ndarray (GH#23000)
result = df == b_r
tm.assert_frame_equal(result, expected)
result = df.values == b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df == b_c
assert df.values.shape != b_c.shape
# with alignment
df = DataFrame(
np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")
)
expected.index = df.index
expected.columns = df.columns
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list("abcdefg")
X_orig = DataFrame(
np.arange(10 * len(columns)).reshape(-1, len(columns)),
columns=columns,
index=range(10),
)
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list("bedcf")
subs = list("bcdef")
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._mgr is s2._mgr
df = df_orig.copy()
df2 = df
df += 1
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._mgr is df2._mgr
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
| tm.assert_series_equal(s, s2) | pandas._testing.assert_series_equal |
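# Illustrative aside (not part of the dataset row above): pandas._testing.assert_series_equal
# returns None when two Series match and raises AssertionError when they differ,
# which is why the tests above can call it as a bare statement.
import pandas as pd
import pandas._testing as tm

left = pd.Series([1.0, 2.0, 3.0])
right = left.copy()
tm.assert_series_equal(left, right)  # passes silently
try:
    tm.assert_series_equal(left, right + 1)  # values differ -> raises
except AssertionError as err:
    print("Series differ:", err)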
import pandas as pd
import numpy as np
import unittest
import decipy.executors as exe
import decipy.normalizers as norm
import decipy.weigtings as wgt
matrix = np.array([
[4, 3, 2, 4],
[5, 4, 3, 7],
[6, 5, 5, 3],
])
alts = ['A1', 'A2', 'A3']
crits = ['C1', 'C2', 'C3', 'C4']
beneficial = [True, True, True, True]
weights = [0.10, 0.20, 0.30, 0.40]
xij = | pd.DataFrame(matrix, index=alts, columns=crits) | pandas.DataFrame |
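# Illustrative aside reading on from the row above (xij, weights and crits are the
# names defined there): the decision matrix is a plain 3x4 DataFrame, so a
# column-wise weighting can be sketched by broadcasting a Series of weights.
# This is only a sketch, not decipy's own weighting API.
weighted = xij * pd.Series(weights, index=crits)
print(weighted.round(2))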
import librosa
import numpy as np
import pandas as pd
from os import listdir
from os.path import isfile, join
from audioread import NoBackendError
def extract_features(path, label, emotionId, startid):
"""
    Extract features of the audio files under `path`, using the librosa library.
    :param path: file path
    :param label: emotion label
    :param emotionId: emotion id
    :param startid: starting sequence number
    :return: feature matrix, pandas.DataFrame
"""
    id = startid  # sequence number
    feature_set = pd.DataFrame()  # feature matrix
    # individual feature vectors
labels = pd.Series()
emotion_vector = pd.Series()
songname_vector = pd.Series()
tempo_vector = pd.Series()
total_beats = pd.Series()
average_beats = pd.Series()
chroma_stft_mean = pd.Series()
# chroma_stft_std = pd.Series()
chroma_stft_var = pd.Series()
# chroma_cq_mean = pd.Series()
# chroma_cq_std = pd.Series()
# chroma_cq_var = pd.Series()
# chroma_cens_mean = pd.Series()
# chroma_cens_std = pd.Series()
# chroma_cens_var = pd.Series()
mel_mean = pd.Series()
# mel_std = pd.Series()
mel_var = pd.Series()
mfcc_mean = pd.Series()
# mfcc_std = pd.Series()
mfcc_var = pd.Series()
mfcc_delta_mean = pd.Series()
# mfcc_delta_std = pd.Series()
mfcc_delta_var = pd.Series()
rmse_mean = pd.Series()
# rmse_std = pd.Series()
rmse_var = pd.Series()
cent_mean = | pd.Series() | pandas.Series |
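# Illustrative sketch (not part of the dataset row above): a minimal per-file
# extraction in the spirit of extract_features, using the librosa, numpy and
# pandas imports above. The exact set of features kept here is an assumption
# made only for illustration.
def extract_one(path_to_wav):
    y, sr = librosa.load(path_to_wav)
    tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
    mfcc = librosa.feature.mfcc(y=y, sr=sr)
    return pd.Series(
        {
            "tempo": tempo,
            "total_beats": len(beats),
            "mfcc_mean": float(np.mean(mfcc)),
            "mfcc_var": float(np.var(mfcc)),
        }
    )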
import os
import pprint as pp
from collections import OrderedDict, defaultdict
import diff_viewer
import pandas as pd
import streamlit as st
from datasets import load_from_disk
DATASET_DIR_PATH_BEFORE_CLEAN_SELECT = os.getenv("DATASET_DIR_PATH_BEFORE_CLEAN_SELECT")
OPERATION_TYPES = [
"Applied filter",
"Applied deduplication function",
"Applied map function",
]
MAX_LEN_DS_CHECKS = os.getenv("MAX_LEN_DS_CHECKS")
def get_ds(ds_path):
ds = load_from_disk(ds_path)
return ds
def next_idx(idx: int):
idx += 1
return idx % len(st.session_state["ds"])
def previous_idx(idx: int):
idx -= 1
return idx % len(st.session_state["ds"])
def on_click_next():
st.session_state["idx_1"] = next_idx(st.session_state["idx_1"])
st.session_state["idx_2"] = next_idx(st.session_state["idx_2"])
def on_click_previous():
st.session_state["idx_1"] = previous_idx(st.session_state["idx_1"])
st.session_state["idx_2"] = previous_idx(st.session_state["idx_2"])
def on_ds_change(ds_path):
st.session_state["ds"] = get_ds(ds_path)
st.session_state["idx_1"] = 0
st.session_state["idx_2"] = 1 if len(st.session_state["ds"]) > 1 else 0
st.session_state["ds_name"] = ds_path
def get_log_stats_df(raw_log):
data = OrderedDict(
{
"Order": [],
"Name": [],
"Initial number of samples": [],
"Final number of samples": [],
"Initial size in bytes": [],
"Final size in bytes": [],
}
)
metric_dict = defaultdict(lambda: {})
order = 0
for line in raw_log.split("\n"):
for metric_name in list(data.keys()) + OPERATION_TYPES:
if metric_name == "Name" or metric_name == "Order":
continue
if metric_name not in line:
continue
if (
metric_name == "Removed percentage"
and "Removed percentage in bytes" in line
):
continue
if (
metric_name == "Deduplicated percentage"
and "Deduplicated percentage in bytes" in line
):
continue
value = line.split(metric_name)[1].split(" ")[1]
if metric_name in OPERATION_TYPES:
operation_name = value
metric_dict[operation_name]["Order"] = order
order += 1
continue
assert (
metric_name not in metric_dict[operation_name]
), f"operation_name: {operation_name}\n\nvalue: {value}\n\nmetric_dict: {pp.pformat(metric_dict)} \n\nmetric_name: {metric_name} \n\nline: {line}"
metric_dict[operation_name][metric_name] = value
for name, data_dict in metric_dict.items():
for metric_name in data.keys():
if metric_name == "Name":
data[metric_name].append(name)
continue
data[metric_name].append(data_dict[metric_name])
df = | pd.DataFrame(data) | pandas.DataFrame |
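# Illustrative aside (not part of the dataset row above): once the per-operation
# log statistics are collected into a DataFrame, sorting by the "Order" column
# recovers the order in which the operations appeared in the log. The numbers
# below are made-up stand-ins.
example = pd.DataFrame(
    {
        "Order": [1, 0],
        "Name": ["dedup", "filter_short_docs"],
        "Initial number of samples": [900, 1000],
        "Final number of samples": [850, 900],
    }
)
print(example.sort_values("Order").set_index("Name"))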
# Procurement Charts - chart data
# -*- coding: latin-1 -*-
# A set of functions to calculate the chart data for procurement
# dashboards
import pandas as pd
import numpy as np
import sys
import settings
def generate_overview(df):
"""
Generate an overview of the whole dataset.
:param df:
Pandas dataframe
:type df:
Dataframe
:returns:
List labels and values
:example:
[
{
'label': 'Total procurement procedures',
'value': 2105446
},
{
'label': 'Total amount spent',
'value': 210544616548924
},
...
]
"""
total_contracts = df['contract_id'].nunique()
total_spent = (int(df['contract_value_amount'].sum()) / 1000000)
overview = [
{
'label': 'Total contracts',
'value': '{:,}'.format(total_contracts)
},
{
'label': 'Total amount contracted',
'value': '$ ' + '{:,}'.format(total_spent) + ' mm (' + settings.desired_currency + ')'
},
{
'label': 'Contract start dates between',
'value': df[settings.main_date_contract].min().strftime("%d-%m-%Y") + ' and ' + df[settings.main_date_contract].max().strftime("%d-%m-%Y")
},
{
'label': 'Most active supplier',
'value': df['award_suppliers_0_name'].value_counts().index[0]
},
{
'label': 'Most active buyer',
'value': df['buyer_name'].value_counts().index[0]
}
]
return overview
def contracts_time(df):
"""
Generate chart data for total amount of contracts per month
Receives sliced data
:param df:
Pandas dataframe
:type df:
Dataframe
:returns:
Object with the domains and data
:example:
{
            'domain': { 'x': ['2015-02-01', '2015-03-01', '2015-04-01'], 'y': [0, 100] },
'data': [ { 'date': '2015-02-01', 'value': 100 }, { 'date': '2015-03-01', 'value': 200 } ]
}
"""
chart_data = {
'domain': {
'x': [],
'y': []
},
'data': []
}
# Prep the dataframe
chart_df = df.loc[:,['contract_period_startDate','contract_id']].set_index('contract_period_startDate')
chart_df = chart_df.groupby(pd.Grouper(level='contract_period_startDate',freq='1M')).contract_id.nunique()
# Calculate the data
# Improve this. Shouldn't have to use iterrows
for ind, val in chart_df.iteritems():
formatted_date = ind.strftime('%Y-%m-%d')
if np.isnan(val):
val = None
else:
val = int(val)
chart_data['data'].append({'date': formatted_date, 'value': val})
# Calculate the domains of this slice
chart_data['domain']['x'] = [dt.strftime("%Y-%m-%d") for dt in chart_df.index.tolist()]
chart_data['domain']['y'] = [0, int(chart_df.max())]
return chart_data
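# Illustrative sketch (not part of the original module): the monthly bucketing used
# in contracts_time, on a tiny synthetic frame. Column names mirror the ones used
# above; the dates and ids are made up.
def _example_monthly_contract_counts():
    demo = pd.DataFrame(
        {
            "contract_period_startDate": pd.to_datetime(
                ["2015-01-03", "2015-01-20", "2015-02-11"]
            ),
            "contract_id": ["a", "b", "b"],
        }
    ).set_index("contract_period_startDate")
    # One bucket per calendar month, counting distinct contracts in each bucket.
    grouped = demo.groupby(pd.Grouper(level="contract_period_startDate", freq="1M"))
    return grouped.contract_id.nunique()  # -> Jan: 2, Feb: 1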
def amount_time(df):
"""
Generate chart data for total amount of money spent per month
Receives sliced data
:param df:
Pandas dataframe
:type df:
Dataframe
:returns:
Object with the domains and data
:example:
{
            'domain': { 'x': ['2015-02-01', '2015-03-01', '2015-04-01'], 'y': [0, 100] },
'data': [ { 'date': '2015-02-01', 'value': 100 }, { 'date': '2015-03-01', 'value': 200 } ]
}
"""
chart_data = {
'domain': {
'x': [],
'y': []
},
'data': []
}
# Prep the dataframe
chart_df = df.loc[:,['contract_period_startDate','contract_value_amount']].set_index('contract_period_startDate')
xdf = chart_df.groupby(pd.Grouper(level='contract_period_startDate',freq='1M')).sum()
# Calculate the data
# Improve this. Shouldn't have to use iterrows
for ind, row in xdf.iterrows():
formatted_date = ind.strftime('%Y-%m-%d')
if np.isnan(row[0]):
val = None
else:
val = int(row[0])
chart_data['data'].append({'date': formatted_date, 'value': val})
# Calculate the domains of this slice
chart_data['domain']['x'] = [dt.strftime("%Y-%m-%d") for dt in xdf.index.tolist()]
chart_data['domain']['y'] = [0, int(xdf.max())]
return chart_data
def average_timeline(df):
"""
Generate chart data for the average timeline
Receives sliced data
:param df:
Pandas dataframe
:type df:
Dataframe
:returns:
Object with the domains and data
:example:
{
'data': [ 100, 200, 300 ]
}
"""
chart_data = {
'data': []
}
# Prep the dataframe
# To calculate a correct mean, we convert the timedelta to hours
p1 = (df['tender_tenderPeriod_startDate'] - df['tender_publicationDate']).astype('timedelta64[h]')
p2 = (df['award_date'] - df['tender_tenderPeriod_startDate']).astype('timedelta64[h]')
p3 = (df['contract_period_startDate'] - df['award_date']).astype('timedelta64[h]')
# Calculate the data
chart_data['data'].append(int(p1.mean() / 24))
chart_data['data'].append(int(p2.mean() / 24))
chart_data['data'].append(int(p3.mean() / 24))
return chart_data
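# Illustrative sketch (not part of the original module): the hour-based averaging
# used in average_timeline, on two made-up records. Under the older pandas
# semantics relied on above, astype('timedelta64[h]') yields float hours, so the
# mean can then be reported in whole days.
def _example_average_phase_length():
    published = pd.to_datetime(pd.Series(["2015-01-01", "2015-01-05"]))
    tender_start = pd.to_datetime(pd.Series(["2015-01-03", "2015-01-11"]))
    phase_hours = (tender_start - published).astype("timedelta64[h]")
    return int(phase_hours.mean() / 24)  # (48h + 144h) / 2 = 96h -> 4 days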
def price_variation(df):
"""
Generate chart data for a box and whisker plot about price variation
Receives sliced data
:param df:
Pandas dataframe
:type df:
Dataframe
:returns:
(Dict) with the chart data
:example:
        { 'data': { 'min': 590, 'max': 8090, 'whisker1': 590, 'q1': 1090, ... }, 'domain': {...} }
"""
chart_data = {
'domain': {
'x': [],
'y': []
},
'data': []
}
# Prep the dataframe
s = df.loc[:,['contract_value_amount']]
# Calculate the data
qr1 = int(s.quantile(0.25))
median = int(s.median(numeric_only=True))
qr3 = int(s.quantile(0.75))
iqr = qr3 - qr1
# Outlier = less than Q1 or greater than Q3 by more than 1.5 the IQR
outlier_min = qr1 - (iqr * 1.5)
outlier_max = qr3 + (iqr * 1.5)
ol_series = s[(s > outlier_min) & (s < outlier_max)]
boxplot = {}
boxplot['min'] = int(s.min(numeric_only=True))
boxplot['max'] = int(s.max(numeric_only=True))
boxplot['whisker1'] = int(ol_series.min())
boxplot['q1'] = qr1
boxplot['median'] = median
boxplot['q3'] = qr3
boxplot['whisker2'] = int(ol_series.max())
# Calculate the domains of this slice
chart_data['data'] = boxplot
chart_data['domain']['x'] = [int(ol_series.min()), int(ol_series.max())]
chart_data['domain']['y'] = None
return chart_data
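# Illustrative sketch (not part of the original module): the whisker rule used in
# price_variation, on a tiny made-up sample. Values beyond 1.5 * IQR from the
# quartiles count as outliers, so the whiskers stop at the most extreme inliers.
def _example_whiskers():
    s = pd.Series([1, 2, 3, 4, 100])  # 100 is an outlier
    qr1, qr3 = s.quantile(0.25), s.quantile(0.75)  # 2.0 and 4.0
    iqr = qr3 - qr1
    inliers = s[(s > qr1 - 1.5 * iqr) & (s < qr3 + 1.5 * iqr)]
    return int(inliers.min()), int(inliers.max())  # -> (1, 4)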
def price_distribution(df):
"""
:param df:
Pandas dataframe, the full dataset or a slice of it
:type df:
Dataframe
    :returns:
        (Dict) with the domains and the binned distribution data
:example:
"""
chart_data = {
'domain': {
'x': [],
'y': []
},
'data': []
}
# Prep the dataframe
# Cut off data above 95 percentile
df_perc = df[(df['contract_value_amount'] <= df['contract_value_amount'].quantile(.95))]
maxcontr = df_perc['contract_value_amount'].max()
# Determine bins
# Equally spaced bins
bin_limits = np.arange(0, int(maxcontr) +1, int(maxcontr / 10))
# Generate the chart data
binned = pd.cut(df_perc['contract_value_amount'], bin_limits, labels=False)
dist = | pd.value_counts(binned) | pandas.value_counts |
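# Illustrative aside (not part of the dataset row above): pd.cut followed by
# pd.value_counts is the binning idiom used in price_distribution. The amounts
# below are made up.
import numpy as np
import pandas as pd

amounts = pd.Series([5, 12, 18, 40, 44, 91])
bin_limits = np.arange(0, 101, 25)                  # [0, 25, 50, 75, 100]
binned = pd.cut(amounts, bin_limits, labels=False)  # bin index for each amount
print(pd.value_counts(binned).sort_index())         # occupied bins: {0: 3, 1: 2, 3: 1}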
import pandas as pd
import numpy as np
# Mainly for time-series momentum and hp6-8, which cannot be grouped
adjust_price=pd.read_csv("../adjust_price/adjust_price.csv")
adjust_price=adjust_price.set_index('date')
cat_list=pd.read_csv("../data_extraction/cat_list.csv",header=None)
cat_list=pd.Series(cat_list[0])
# Create a new DataFrame to record position information
port=pd.DataFrame(index=adjust_price.index,columns=adjust_price.columns)
# Read in the screening conditions
check_vol=pd.read_csv("../check_vol/check_vol.csv")
check_vol=check_vol.set_index('date')
check_vol=check_vol[check_vol==1]
def set_port(df):
df_return= | pd.DataFrame(index=check_vol.index,columns=check_vol.columns) | pandas.DataFrame |
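# Illustrative aside (not part of the dataset row above): one common way a screen
# like check_vol is applied is to keep a price only where the screen equals 1 and
# leave NaN elsewhere. The frames below are made-up stand-ins.
def _example_apply_screen():
    prices = pd.DataFrame({"A": [10.0, 11.0], "B": [20.0, 21.0]})
    screen = pd.DataFrame({"A": [1, np.nan], "B": [np.nan, 1]})
    return prices.where(screen == 1)  # A: [10.0, NaN], B: [NaN, 21.0]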
from unittest import TestCase
from unittest.mock import ANY, Mock, call, patch
import pandas as pd
from mlblocks import MLPipeline
from orion import benchmark
from orion.evaluation import CONTEXTUAL_METRICS as METRICS
from orion.evaluation import contextual_confusion_matrix
def test__sort_leaderboard_rank():
rank = 'f1'
metrics = METRICS
score = pd.DataFrame({
'pipeline': range(5),
'f1': range(5),
})
expected_return = pd.DataFrame({
'pipeline': range(5)[::-1],
'rank': range(1, 6),
'f1': range(5)[::-1],
})
returned = benchmark._sort_leaderboard(score, rank, metrics)
| pd.testing.assert_frame_equal(returned, expected_return) | pandas.testing.assert_frame_equal |
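# Illustrative aside (not part of the dataset row above): a minimal stand-in for
# the leaderboard sorting being tested: order pipelines by the rank metric
# (descending) and number them from 1. A sketch only, not Orion's implementation.
import pandas as pd

score = pd.DataFrame({"pipeline": ["a", "b", "c"], "f1": [0.2, 0.9, 0.5]})
leaderboard = score.sort_values("f1", ascending=False).reset_index(drop=True)
leaderboard.insert(1, "rank", range(1, len(leaderboard) + 1))
print(leaderboard)  # pipelines b, c, a with ranks 1, 2, 3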
from collections import OrderedDict
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import Index, MultiIndex, date_range
import pandas.util.testing as tm
def test_constructor_single_level():
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
codes=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels():
msg = "non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=[], codes=[])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=[])
with pytest.raises(TypeError, match=msg):
MultiIndex(codes=[])
def test_constructor_nonhashable_names():
# GH 20527
levels = [[1, 2], ['one', 'two']]
codes = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = (['foo'], ['bar'])
msg = r"MultiIndex\.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=levels, codes=codes, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
with pytest.raises(TypeError, match=msg):
mi.rename(names=renamed)
# With .set_names()
with pytest.raises(TypeError, match=msg):
mi.set_names(names=renamed)
def test_constructor_mismatched_codes_levels(idx):
codes = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
msg = "Length of levels and codes must be the same"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=levels, codes=codes)
length_error = (r"On level 0, code max \(3\) >= length of level \(1\)\."
" NOTE: this index is in an inconsistent state")
label_error = r"Unequal code lengths: \[4, 2\]"
code_value_error = r"On level 0, code value \(-2\) < -1"
# important to check that it's looking at the right thing.
with pytest.raises(ValueError, match=length_error):
MultiIndex(levels=[['a'], ['b']],
codes=[[0, 1, 2, 3], [0, 3, 4, 1]])
with pytest.raises(ValueError, match=label_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, 0, 0, 0], [0, 0]])
# external API
with pytest.raises(ValueError, match=length_error):
idx.copy().set_levels([['a'], ['b']])
with pytest.raises(ValueError, match=label_error):
idx.copy().set_codes([[0, 0, 0, 0], [0, 0]])
# test set_codes with verify_integrity=False
# the setting should not raise any value error
idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]],
verify_integrity=False)
# code value smaller than -1
with pytest.raises(ValueError, match=code_value_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, -2], [0, 0]])
def test_na_levels():
# GH26408
# test if codes are re-assigned value -1 for levels
    # with missing values (NaN, NaT, None)
result = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[-1, -1, -1, -1, 3, 4]])
tm.assert_index_equal(result, expected)
result = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[-1, -1, 1, -1, 3, -1]])
tm.assert_index_equal(result, expected)
# verify set_levels and set_codes
result = MultiIndex(
levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]]).set_levels(
[[np.nan, 's', pd.NaT, 128, None]])
tm.assert_index_equal(result, expected)
result = MultiIndex(
levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[1, 2, 2, 2, 2, 2]]).set_codes(
[[0, -1, 1, 2, 3, 4]])
tm.assert_index_equal(result, expected)
def test_labels_deprecated(idx):
# GH23752
with tm.assert_produces_warning(FutureWarning):
MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
with tm.assert_produces_warning(FutureWarning):
idx.labels
def test_copy_in_constructor():
levels = np.array(["a", "b", "c"])
codes = np.array([1, 1, 2, 0, 0, 1, 1])
val = codes[0]
mi = MultiIndex(levels=[levels, levels], codes=[codes, codes],
copy=True)
assert mi.codes[0][0] == val
codes[0] = 15
assert mi.codes[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
# ----------------------------------------------------------------------------
# from_arrays
# ----------------------------------------------------------------------------
def test_from_arrays(idx):
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(idx):
# GH 18434
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=idx.names)
tm.assert_index_equal(result, idx)
# invalid iterator input
msg = "Input must be a list / sequence of array-likes."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(0)
def test_from_arrays_tuples(idx):
arrays = tuple(tuple(np.asarray(lev).take(level_codes))
for lev, level_codes in zip(idx.levels, idx.codes))
# tuple of tuples as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
def test_from_arrays_index_series_datetimetz():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta():
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period():
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical():
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, codes=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('invalid_sequence_of_arrays', [
1, [1], [1, 2], [[1], 2], [1, [2]], 'a', ['a'], ['a', 'b'], [['a'], 'b'],
(1,), (1, 2), ([1], 2), (1, [2]), 'a', ('a',), ('a', 'b'), (['a'], 'b'),
[(1,), 2], [1, (2,)], [('a',), 'b'],
((1,), 2), (1, (2,)), (('a',), 'b')
])
def test_from_arrays_invalid_input(invalid_sequence_of_arrays):
msg = "Input must be a list / sequence of array-likes"
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays)
@pytest.mark.parametrize('idx1, idx2', [
([1, 2, 3], ['a', 'b']),
([], ['a', 'b']),
([1, 2, 3], [])
])
def test_from_arrays_different_lengths(idx1, idx2):
# see gh-13599
msg = '^all arrays must be same length$'
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays([idx1, idx2])
# ----------------------------------------------------------------------------
# from_tuples
# ----------------------------------------------------------------------------
def test_from_tuples():
msg = 'Cannot infer number of levels from empty list'
with pytest.raises(TypeError, match=msg):
MultiIndex.from_tuples([])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
codes=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator():
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
codes=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
msg = 'Input must be a list / sequence of tuple-likes.'
with pytest.raises(TypeError, match=msg):
MultiIndex.from_tuples(0)
def test_from_tuples_empty():
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_index_values(idx):
result = MultiIndex.from_tuples(idx)
assert (result.values == idx.values).all()
def test_tuples_with_name_string():
# GH 15110 and GH 14848
li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)]
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
pd.Index(li, name='abc')
with pytest.raises(ValueError, match=msg):
pd.Index(li, name='a')
def test_from_tuples_with_tuple_label():
# GH 15457
expected = pd.DataFrame([[2, 1, 2], [4, (1, 2), 3]],
columns=['a', 'b', 'c']).set_index(['a', 'b'])
idx = pd.MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=('a', 'b'))
result = pd.DataFrame([2, 3], columns=['c'], index=idx)
tm.assert_frame_equal(expected, result)
# ----------------------------------------------------------------------------
# from_product
# ----------------------------------------------------------------------------
def test_from_product_empty_zero_levels():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_product([])
def test_from_product_empty_one_level():
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
@pytest.mark.parametrize('first, second', [
([], []),
(['foo', 'bar', 'baz'], []),
([], ['a', 'b', 'c']),
])
def test_from_product_empty_two_levels(first, second):
names = ['A', 'B']
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
codes=[[], []], names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('N', list(range(4)))
def test_from_product_empty_three_levels(N):
# GH12258
names = ['A', 'B', 'C']
lvl2 = list(range(N))
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
codes=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('invalid_input', [
1,
[1],
[1, 2],
[[1], 2],
'a',
['a'],
['a', 'b'],
[['a'], 'b'],
])
def test_from_product_invalid_input(invalid_input):
msg = (r"Input must be a list / sequence of iterables|"
"Input must be list-like")
with pytest.raises(TypeError, match=msg):
MultiIndex.from_product(iterables=invalid_input)
def test_from_product_datetimeindex():
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([
(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02')),
])
tm.assert_numpy_array_equal(mi.values, etalon)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('f', [
lambda x: x,
lambda x: pd.Series(x),
lambda x: x.values
])
def test_from_product_index_series_categorical(ordered, f):
# GH13743
first = ['foo', 'bar']
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
result = pd.MultiIndex.from_product([first, f(idx)])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_from_product():
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator():
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
msg = "Input must be a list / sequence of iterables."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_product(0)
def test_create_index_existing_name(idx):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
index = idx
index.names = ['foo', 'bar']
result = pd.Index(index)
expected = Index(
Index([
('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'
),
names=['foo', 'bar']
)
tm.assert_index_equal(result, expected)
result = pd.Index(index, names=['A', 'B'])
expected = Index(
Index([
('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'
),
names=['A', 'B']
)
tm.assert_index_equal(result, expected)
# ----------------------------------------------------------------------------
# from_frame
# ----------------------------------------------------------------------------
def test_from_frame():
# GH 22420
df = pd.DataFrame([['a', 'a'], ['a', 'b'], ['b', 'a'], ['b', 'b']],
columns=['L1', 'L2'])
expected = pd.MultiIndex.from_tuples([('a', 'a'), ('a', 'b'),
('b', 'a'), ('b', 'b')],
names=['L1', 'L2'])
result = pd.MultiIndex.from_frame(df)
tm.assert_index_equal(expected, result)
@pytest.mark.parametrize('non_frame', [
pd.Series([1, 2, 3, 4]),
[1, 2, 3, 4],
[[1, 2], [3, 4], [5, 6]],
pd.Index([1, 2, 3, 4]),
np.array([[1, 2], [3, 4], [5, 6]]),
27
])
def test_from_frame_error(non_frame):
# GH 22420
with pytest.raises(TypeError, match='Input must be a DataFrame'):
pd.MultiIndex.from_frame(non_frame)
def test_from_frame_dtype_fidelity():
# GH 22420
df = pd.DataFrame(OrderedDict([
('dates', pd.date_range('19910905', periods=6, tz='US/Eastern')),
('a', [1, 1, 1, 2, 2, 2]),
('b', pd.Categorical(['a', 'a', 'b', 'b', 'c', 'c'], ordered=True)),
('c', ['x', 'x', 'y', 'z', 'x', 'y'])
]))
original_dtypes = df.dtypes.to_dict()
expected_mi = pd.MultiIndex.from_arrays([
pd.date_range('19910905', periods=6, tz='US/Eastern'),
[1, 1, 1, 2, 2, 2],
pd.Categorical(['a', 'a', 'b', 'b', 'c', 'c'], ordered=True),
['x', 'x', 'y', 'z', 'x', 'y']
], names=['dates', 'a', 'b', 'c'])
mi = pd.MultiIndex.from_frame(df)
mi_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)}
tm.assert_index_equal(expected_mi, mi)
assert original_dtypes == mi_dtypes
@pytest.mark.parametrize('names_in,names_out', [
(None, [('L1', 'x'), ('L2', 'y')]),
(['x', 'y'], ['x', 'y']),
])
def test_from_frame_valid_names(names_in, names_out):
# GH 22420
df = pd.DataFrame([['a', 'a'], ['a', 'b'], ['b', 'a'], ['b', 'b']],
columns=pd.MultiIndex.from_tuples([('L1', 'x'),
('L2', 'y')]))
    mi = pd.MultiIndex.from_frame(df, names=names_in)
"""
Contains all functions needed in intermediate steps to obtain certain tables
and figures of the thesis.
"""
import os
import pickle
import numpy as np
import pandas as pd
import scipy.io
from ruspy.estimation.estimation import estimate
from ruspy.estimation.estimation_transitions import create_transition_matrix
from ruspy.model_code.cost_functions import calc_obs_costs
if os.environ.get("TRAVIS"):
pass
else:
from ruspy.model_code.demand_function import get_demand
from ruspy.model_code.fix_point_alg import calc_fixp
from ruspy.simulation.simulation import simulate
def get_iskhakov_results(
discount_factor,
approach,
starting_cost_params,
starting_expected_value_fun,
number_runs,
number_buses,
number_periods,
number_states,
number_cost_params,
):
"""
Run the Monte Carlo Simulation to replicate Iskhakov et al. (2016)
Parameters
----------
discount_factor : list
beta vector for which to run the simulation.
approach : list
run with NFXP and/or MPEC.
starting_cost_params : numpy.array
contains the starting values for the cost parameters.
starting_expected_value_fun : numpy.array
contains the starting values of the expected values for MPEC.
    number_runs : int
number of runs per beta and starting vector combination.
number_buses : int
number of buses per data set.
number_periods : int
number of months per data set.
number_states : int
number of grid points in which the mileage state is discretized.
number_cost_params : int
number of cost parameters.
Returns
-------
results : pd.DataFrame
contains the estimates for the structural parameters per run.
"""
# Initialize the set up for the nested fixed point algorithm
stopping_crit_fixed_point = 1e-13
switch_tolerance_fixed_point = 1e-2
# Initialize the set up for MPEC
lower_bound = np.concatenate(
(np.full(number_states, -np.inf), np.full(number_cost_params, 0.0))
)
upper_bound = np.concatenate(
(np.full(number_states, 50.0), np.full(number_cost_params, np.inf))
)
rel_ipopt_stopping_tolerance = 1e-6
init_dict_nfxp = {
"model_specifications": {
"number_states": number_states,
"maint_cost_func": "linear",
"cost_scale": 1e-3,
},
"optimizer": {
"approach": "NFXP",
"algorithm": "estimagic_bhhh",
# implies that we use analytical first order derivatives as opposed
# to numerical ones
"gradient": "Yes",
},
"alg_details": {
"threshold": stopping_crit_fixed_point,
"switch_tol": switch_tolerance_fixed_point,
},
}
init_dict_mpec = {
"model_specifications": {
"number_states": number_states,
"maint_cost_func": "linear",
"cost_scale": 1e-3,
},
"optimizer": {
"approach": "MPEC",
"algorithm": "ipopt",
# implies that we use analytical first order derivatives as opposed
# to numerical ones
"gradient": "Yes",
"tol": rel_ipopt_stopping_tolerance,
"set_lower_bounds": lower_bound,
"set_upper_bounds": upper_bound,
},
}
# Initialize DataFrame to store the results of each run of the Monte Carlo simulation
index = pd.MultiIndex.from_product(
[
discount_factor,
range(number_runs),
range(starting_cost_params.shape[1]),
approach,
],
names=["Discount Factor", "Run", "Start", "Approach"],
)
columns = [
"RC",
"theta_11",
"theta_30",
"theta_31",
"theta_32",
"theta_33",
"CPU Time",
"Converged",
"# of Major Iter.",
"# of Func. Eval.",
"# of Bellm. Iter.",
"# of N-K Iter.",
]
results = pd.DataFrame(index=index, columns=columns)
# Main loop to calculate the results for each run
for factor in discount_factor:
# load simulated data
mat = scipy.io.loadmat(
"data/RustBusTableXSimDataMC250_beta" + str(int(100000 * factor))
)
for run in range(number_runs):
if run in np.arange(10, number_runs, 10):
results.to_pickle("data/intermediate/results_" + str(factor))
data = process_data(mat, run, number_buses, number_periods)
for start in range(starting_cost_params.shape[1]):
                # Adapt the initialization dictionary of NFXP for this run
init_dict_nfxp["model_specifications"]["discount_factor"] = factor
init_dict_nfxp["optimizer"]["params"] = pd.DataFrame(
starting_cost_params[:, start], columns=["value"]
)
# Run NFXP using ruspy
transition_result_nfxp, cost_result_nfxp = estimate(
init_dict_nfxp, data
)
# store the results of this run
results.loc[factor, run, start, "NFXP"] = process_result_iskhakov(
"NFXP", transition_result_nfxp, cost_result_nfxp, number_states
)
                # Adapt the initialization dictionary of MPEC for this run
init_dict_mpec["model_specifications"]["discount_factor"] = factor
init_dict_mpec["optimizer"]["params"] = np.concatenate(
(starting_expected_value_fun, starting_cost_params[:, start])
)
# Run MPEC using ruspy
transition_result_mpec, cost_result_mpec = estimate(
init_dict_mpec, data
)
# store the results of this run
results.loc[factor, run, start, "MPEC"].loc[
~results.columns.isin(["# of Bellm. Iter.", "# of N-K Iter."])
] = process_result_iskhakov(
"MPEC", transition_result_mpec, cost_result_mpec, number_states
)
return results
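# Hypothetical usage sketch (all argument values below are illustrative
# assumptions and not taken from the thesis code itself):
#
#   results = get_iskhakov_results(
#       discount_factor=[0.975, 0.985],
#       approach=["NFXP", "MPEC"],
#       starting_cost_params=np.vstack((np.arange(4, 9), np.arange(1, 6))),
#       starting_expected_value_fun=np.zeros(175),
#       number_runs=250,
#       number_buses=50,
#       number_periods=120,
#       number_states=175,
#       number_cost_params=2,
#   )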
def process_data(df, run, number_buses, number_periods):
"""
prepare the raw data set from matlab for the Monte Carlo simulation in
``get_iskhakov_results``.
Parameters
----------
df : pd.DataFrame
contains the raw data of Iskhakov et al. created with their original
matlab code.
run : int
indicates the run in the Monte Carlo simulation.
number_buses : int
number of buses per data set.
number_periods : int
number of months per data set.
Returns
-------
data : pd.DataFrame
the processed data set that can be used in the ruspy estimate function.
"""
state = df["MC_xt"][:, :, run] - 1
decision = df["MC_dt"][:, :, run]
usage = df["MC_dx"][:-1, :, run] - 1
first_usage = np.full((1, usage.shape[1]), np.nan)
usage = np.vstack((first_usage, usage))
data = pd.DataFrame()
state_new = state[:, 0]
decision_new = decision[:, 0]
usage_new = usage[:, 0]
for i in range(0, len(state[0, :]) - 1):
state_new = np.hstack((state_new, state[:, i + 1]))
decision_new = np.hstack((decision_new, decision[:, i + 1]))
usage_new = np.hstack((usage_new, usage[:, i + 1]))
data["state"] = state_new
data["decision"] = decision_new
data["usage"] = usage_new
iterables = [range(number_buses), range(number_periods)]
index = pd.MultiIndex.from_product(iterables, names=["Bus_ID", "period"])
data.set_index(index, inplace=True)
return data
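# Hypothetical usage sketch (the file name pattern follows the loop in
# get_iskhakov_results above; the bus and period counts are illustrative
# assumptions):
#
#   mat = scipy.io.loadmat("data/RustBusTableXSimDataMC250_beta97500")
#   data = process_data(mat, run=0, number_buses=50, number_periods=120)
#   # data is indexed by (Bus_ID, period) and can be passed to ruspy's estimate.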
def process_result_iskhakov(approach, transition_result, cost_result, number_states):
"""
process the raw results from a Monte Carlo simulation run in the
``get_iskhakov_results`` function.
Parameters
----------
approach : string
indicates whether the raw results were created from the NFXP or MPEC.
transition_result : dict
        the result dictionary of ruspy for the transition parameters.
    cost_result : dict
        the result dictionary of ruspy for the cost parameters.
number_states : int
number of grid points in which the mileage state is discretized.
Returns
-------
result : numpy.array
contains the transformed results of a Monte Carlo simulation run.
"""
if approach == "NFXP":
result = np.concatenate((cost_result["x"], transition_result["x"][:4]))
for name in [
"time",
"status",
"n_iterations",
"n_evaluations",
"n_contraction_steps",
"n_newt_kant_steps",
]:
result = np.concatenate((result, np.array([cost_result[name]])))
else:
result = np.concatenate(
(cost_result["x"][number_states:], transition_result["x"][:4])
)
for name in ["time", "status", "n_iterations", "n_evaluations"]:
result = np.concatenate((result, np.array([cost_result[name]])))
return result
def simulate_figure_3_and_4(results, beta, number_states, number_buses):
"""
Get the implied demand function for certain parameter estimates for Figure
3 and 4.
Parameters
----------
results : pd.DataFrame
the results of the Monte Carlo simulation in Iskhakov et al. (2016)
created by ``get_iskhakov_results``.
beta : float
        indicates the discount factor beta for which the demand function is
        derived.
number_states : int
number of grid points in which the mileage state is discretized.
number_buses : int
number of buses per data set.
Returns
-------
demand : pd.DataFrame
contains the demand over a range of replacement costs depending on
the estimated structural parameters of a Monte Carlo run.
rc_range : np.array
range over which the demand is calculated.
true_demand : pd.DataFrame
contains the demand derived from true structural parameters.
correlation : pd.DataFrame
the correlation matrix of the estimated structural parameters across
all Monte Carlo runs.
"""
init_dict = {
"model_specifications": {
"discount_factor": beta,
"number_states": number_states,
"maint_cost_func": "linear",
"cost_scale": 1e-3,
},
"optimizer": {
"approach": "NFXP",
"algorithm": "estimagic_bhhh",
"gradient": "Yes",
},
"alg_details": {"threshold": 1e-13, "switch_tol": 1e-2},
}
demand_dict = {
"RC_lower_bound": 2,
"RC_upper_bound": 13,
"demand_evaluations": 100,
"tolerance": 1e-10,
"num_periods": 12,
"num_buses": number_buses,
}
# get true demand function
true_params = np.array([0.0937, 0.4475, 0.4459, 0.0127, 0.0002, 11.7257, 2.4569])
true_demand = get_demand(init_dict, demand_dict, true_params)[0]["demand"].astype(
float
)
# setup loop for demand calculation
results_beta = (
results.loc[
(beta, slice(None), 0, "MPEC"),
("RC", "theta_11", "theta_30", "theta_31", "theta_32", "theta_33"),
]
.astype(float)
.to_numpy()
)
rc_range = np.linspace(
demand_dict["RC_lower_bound"],
demand_dict["RC_upper_bound"],
demand_dict["demand_evaluations"],
)
demand = pd.DataFrame(index=rc_range)
demand.index.name = "RC"
for j in range(len(results_beta)):
trans_params = results_beta[j, 2:]
trans_params = np.append(trans_params, 1 - sum(trans_params))
params = np.concatenate((trans_params, results_beta[j, :2]))
demand[str(j)] = get_demand(init_dict, demand_dict, params)[0]["demand"]
# save the data
demand.to_pickle("data/demand.pickle")
# get correlation
results_beta = results.loc[
(beta, slice(None), 0, "MPEC"),
("RC", "theta_11", "theta_30", "theta_31", "theta_32", "theta_33"),
].astype(float)
correlation = results_beta.corr()
values = [demand, rc_range, true_demand, correlation]
names = ["demand", "rc_range", "true_demand", "correlation"]
for value, name in zip(values, names):
pd.to_pickle(value, f"data/{name}.pickle")
return demand, rc_range, true_demand, correlation
def simulate_data(
seed,
disc_fac,
num_buses,
num_periods,
num_states,
cost_params,
trans_params,
cost_func,
scale,
):
"""
simulates a single data set with a given specification using the ``simulate``
function of ruspy.
Parameters
----------
seed : int
seed for the simulation function.
disc_fac : float
the discount factor in the Rust Model.
num_buses : int
the amount of buses that should be simulated.
num_periods : int
The number of periods that should be simulated for each bus.
num_states : int
the number of states for the which the mileage state is discretized.
cost_params : np.array
the cost parameters for which the data is simulated.
trans_params : np.array
        the transition parameters for which the data is simulated.
cost_func : callable
the cost function that underlies the data generating process.
scale : float
the scale of the cost function.
Returns
-------
df : pd.DataFrame
Simulated data set for the given data generating process.
"""
init_dict = {
"simulation": {
"discount_factor": disc_fac,
"periods": num_periods,
"seed": seed,
"buses": num_buses,
},
}
costs = calc_obs_costs(num_states, cost_func, cost_params, scale)
trans_mat = create_transition_matrix(num_states, trans_params)
ev = calc_fixp(trans_mat, costs, disc_fac)[0]
df = simulate(init_dict["simulation"], ev, costs, trans_mat)
return df
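# Hypothetical usage sketch (parameter values follow the "true" parameters used
# in simulate_figure_3_and_4 above; the import assumes that ruspy exposes a
# linear cost function lin_cost in ruspy.model_code.cost_functions, and the
# grid size is an illustrative assumption):
#
#   from ruspy.model_code.cost_functions import lin_cost
#   df = simulate_data(
#       seed=123, disc_fac=0.975, num_buses=50, num_periods=120, num_states=200,
#       cost_params=np.array([11.7257, 2.4569]),
#       trans_params=np.array([0.0937, 0.4475, 0.4459, 0.0127, 0.0002]),
#       cost_func=lin_cost, scale=1e-3,
#   )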
def transform_grid(column):
"""
    transforms the mileage state column to a grid with half the number of grid points.
Parameters
----------
column : pd.Series
column that contains the discretized mileage of a data set.
Returns
-------
column : pd.Series
        transformed column for the state corresponding to half of the grid size.
"""
if column.name == "state":
column = np.floor(column / 2)
return column
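# Hypothetical usage sketch: transform_grid is designed to be applied
# column-wise, so that only the "state" column is coarsened while "decision"
# and "usage" pass through unchanged, e.g.
#
#   data_coarse = data.apply(transform_grid)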
def process_result(approach, cost_result, alg_nfxp):
"""
process the raw results from a Monte Carlo simulation run in the
``sensitivity_simulation`` function.
Parameters
----------
approach : string
indicates whether the raw results were created from the NFXP or MPEC.
    cost_result : dict
        the result dictionary of ruspy for the cost parameters.
    alg_nfxp : string
        the algorithm used for the NFXP; determines how the convergence status
        is encoded.
Returns
-------
result : numpy.array
contains the transformed results of a Monte Carlo simulation run.
"""
if approach == "NFXP":
result = np.array([])
for name in [
"fun",
"time",
"status",
"n_iterations",
"n_evaluations",
"n_contraction_steps",
"n_newt_kant_steps",
]:
result = np.concatenate((result, np.array([cost_result[name]])))
if alg_nfxp == "scipy_L-BFGS-B":
if result[2] == "success":
result[2] = 1
else:
result[2] = 0
else:
result = np.array([])
for name in [
"fun",
"time",
"status",
"n_iterations",
"n_evaluations",
"n_evaluations_total",
]:
result = np.concatenate((result, np.array([cost_result[name]])))
return result
def get_qoi(init_dict, params):
"""
    calculates the quantity of interest for a given estimated parameter vector
in a certain specification.
Parameters
----------
init_dict : dict
        dictionary needed for the estimation procedure which gives info about
the model specification to the demand function calculation.
params : np.array
contains the estimated transition and cost parameters.
Returns
-------
demand : float
        the resulting quantity of interest.
"""
demand_dict = {
"RC_lower_bound": 11,
"RC_upper_bound": 11,
"demand_evaluations": 1,
"tolerance": 1e-10,
"num_periods": 12,
"num_buses": 50,
}
demand = get_demand(init_dict, demand_dict, params)
demand = demand["demand"].astype(float).to_numpy()[0]
return demand
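# Hypothetical usage sketch (mirrors the calls in sensitivity_simulation below,
# where the estimated transition and cost parameters are concatenated in that
# order before being handed to the demand calculation):
#
#   params = np.concatenate((transition_result["x"], cost_result["x"]))
#   implied_demand = get_qoi(init_dict_nfxp, params)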
def sensitivity_simulation(
specification, number_runs, alg_nfxp, tolerance=None, max_cont=20, max_nk=20
):
"""
performs a certain number of estimations with certain specifications
on simulated data.
Parameters
----------
specification : tuple
contains the information about which discount factor, cost function,
grid size, derivative and approach is used for the estimation.
number_runs : int
number of runs per specification.
alg_nfxp : string
the algorithm used for the NFXP.
tolerance : dict
specifies the stopping tolerance for the optimizer of the NFXP.
max_cont : int
maximum number of contraction steps for the NFXP.
max_nk : int
maximum number of Newton-Kantorovich steps for the NFXP.
Returns
-------
results : pd.DataFrame
contains results such as likelihood, estimated parameters etc per run.
"""
# set default tolerance
if tolerance is None:
if alg_nfxp == "estimagic_bhhh":
tolerance = {"tol": {"abs": 1e-05, "rel": 1e-08}}
elif alg_nfxp == "scipy_L-BFGS-B":
tolerance = {"gtol": 1e-05}
# Initialize the set up for the nested fixed point algorithm
stopping_crit_fixed_point = 1e-13
switch_tolerance_fixed_point = 1e-2
# Initialize the set up for MPEC
rel_ipopt_stopping_tolerance = 1e-6
# get specifications in order
index_names = [
"Discount Factor",
"Cost Function",
"Grid Size",
"Analytical Gradient",
"Approach",
]
identifier = specification[1]
indexer = list(specification[0])
indexer[1] = list(indexer[1])[0]
specification = dict(zip(index_names, specification[0]))
# load data
data_sets = pickle.load(
open("data/simulated_data_" + str(specification["Grid Size"]) + ".pickle", "rb")
)
# set up empty dataframe for results
index = pd.MultiIndex.from_product(
[*[[element] for element in indexer], range(number_runs)],
names=[*index_names, "Run"],
)
columns = [
"RC",
"theta_11",
"theta_12",
"theta_13",
"theta_30",
"theta_31",
"theta_32",
"theta_33",
"theta_34",
"theta_35",
"theta_36",
"theta_37",
"theta_38",
"theta_39",
"theta_310",
"Likelihood",
"Demand",
"CPU Time",
"Converged",
"# of Major Iter.",
"# of Func. Eval.",
"# of Func. Eval. (Total)",
"# of Bellm. Iter.",
"# of N-K Iter.",
]
results = pd.DataFrame(index=index, columns=columns)
if specification["Approach"] == "NFXP":
init_dict_nfxp = {
"model_specifications": {
"discount_factor": specification["Discount Factor"],
"number_states": specification["Grid Size"],
"maint_cost_func": specification["Cost Function"][0],
"cost_scale": specification["Cost Function"][1],
},
"optimizer": {
"approach": "NFXP",
"algorithm": alg_nfxp,
"gradient": specification["Analytical Gradient"],
"algo_options": tolerance,
},
"alg_details": {
"threshold": stopping_crit_fixed_point,
"switch_tol": switch_tolerance_fixed_point,
"max_contr_steps": max_cont,
"max_newt_kant_steps": max_nk,
},
}
column_slicer_nfxp = [
"Likelihood",
"CPU Time",
"Converged",
"# of Major Iter.",
"# of Func. Eval.",
"# of Bellm. Iter.",
"# of N-K Iter.",
]
for run in np.arange(number_runs):
print(specification, run)
# Run estimation
data = data_sets[run]
try:
transition_result_nfxp, cost_result_nfxp = estimate(
init_dict_nfxp, data
)
results.loc[(*indexer, run), (slice("RC", "theta_13"))][
: len(cost_result_nfxp["x"])
] = cost_result_nfxp["x"]
results.loc[(*indexer, run), (slice("theta_30", "theta_310"))][
: len(transition_result_nfxp["x"])
] = transition_result_nfxp["x"]
results.loc[(*indexer, run), column_slicer_nfxp] = process_result(
specification["Approach"], cost_result_nfxp, alg_nfxp
)
results.loc[(*indexer, run), "Demand"] = get_qoi(
init_dict_nfxp,
np.concatenate(
(transition_result_nfxp["x"], cost_result_nfxp["x"])
),
)
            # the N-K step sometimes cannot be found due to a LinAlgError;
            # estimagic cannot handle this and translate it into nonconvergence
            # but instead raises a ValueError. Below, this is manually
            # translated into nonconvergence.
except ValueError:
results.loc[(*indexer, run), :] = results.shape[1] * np.nan
results.loc[(*indexer, run), "Converged"] = 0
results.to_pickle(
"data/sensitivity/sensitivity_specification_"
+ alg_nfxp
+ str(identifier)
+ ".pickle"
)
elif specification["Approach"] == "MPEC":
if specification["Cost Function"][0] in ["linear", "square root", "hyperbolic"]:
num_cost_params = 2
elif specification["Cost Function"][0] == "quadratic":
num_cost_params = 3
else:
num_cost_params = 4
init_dict_mpec = {
"model_specifications": {
"discount_factor": specification["Discount Factor"],
"number_states": specification["Grid Size"],
"maint_cost_func": specification["Cost Function"][0],
"cost_scale": specification["Cost Function"][1],
},
"optimizer": {
"approach": "MPEC",
"algorithm": "ipopt",
"gradient": specification["Analytical Gradient"],
"tol": rel_ipopt_stopping_tolerance,
"set_lower_bounds": np.concatenate(
(
np.full(specification["Grid Size"], -np.inf),
np.full(num_cost_params, 0.0),
)
),
"set_upper_bounds": np.concatenate(
(
np.full(specification["Grid Size"], 50.0),
np.full(num_cost_params, np.inf),
)
),
},
}
column_slicer_mpec = [
"Likelihood",
"CPU Time",
"Converged",
"# of Major Iter.",
"# of Func. Eval.",
"# of Func. Eval. (Total)",
]
for run in np.arange(number_runs):
# Run estimation
data = data_sets[run]
transition_result_mpec, cost_result_mpec = estimate(init_dict_mpec, data)
results.loc[(*indexer, run), (slice("RC", "theta_13"))][
: len(cost_result_mpec["x"][specification["Grid Size"] :])
] = cost_result_mpec["x"][specification["Grid Size"] :]
results.loc[(*indexer, run), (slice("theta_30", "theta_310"))][
: len(transition_result_mpec["x"])
] = transition_result_mpec["x"]
results.loc[(*indexer, run), column_slicer_mpec] = process_result(
specification["Approach"], cost_result_mpec, alg_nfxp
)
results.loc[(*indexer, run), "Demand"] = get_qoi(
init_dict_mpec,
np.concatenate(
(
transition_result_mpec["x"],
cost_result_mpec["x"][specification["Grid Size"] :],
)
),
)
results.to_pickle(
"data/sensitivity/sensitivity_specification_"
+ str(identifier)
+ ".pickle"
)
return results
def partial_sensitivity(sensitivity_results, axis, axis_name):
"""
creates a table with mean and standard deviation of the statistics in the
    sensitivity_results table when changing only one part of the specification,
namely the axis.
Parameters
----------
sensitivity_results : pd.DataFrame
table with all runs of the sensitivity simulation.
axis : list
the values of one ingredient of the specification.
axis_name : string
the name of the specification part that is supposed to be changed.
Returns
-------
table : pd.DataFrame
table that contains the mean and standard deviation of some variables
across NFXP and MPEC when changing one part of the specifications.
"""
table_temp = (
sensitivity_results.loc[sensitivity_results["Converged"] == 1]
.astype(float)
.groupby(level=[axis_name, "Approach"])
)
approaches = ["NFXP", "MPEC"]
statistics = ["Mean", "Standard Deviation"]
index = pd.MultiIndex.from_product(
[axis, approaches, statistics], names=[axis_name, "Approach", "Statistic"]
)
table = pd.DataFrame(index=index, columns=sensitivity_results.columns)
table.loc(axis=0)[:, :, "Mean"] = table_temp.mean()
table.loc(axis=0)[:, :, "Standard Deviation"] = table_temp.std()
table_temp = (
sensitivity_results["Converged"]
.astype(float)
.groupby(level=[axis_name, "Approach"])
)
table.loc[(slice(None), slice(None), "Mean"), "Converged"] = table_temp.mean()
table.loc[
(slice(None), slice(None), "Standard Deviation"), "Converged"
] = table_temp.std()
table = table.astype(float)
return table
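# Hypothetical usage sketch (axis values are illustrative assumptions):
#
#   table_beta = partial_sensitivity(
#       sensitivity_results, axis=[0.975, 0.985], axis_name="Discount Factor"
#   )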
def get_difference_approach(sensitivity_results):
"""
obtain the averages and standard deviations across all specifications and runs
for MPEC and NFXP, respectively.
Parameters
----------
sensitivity_results : pd.DataFrame
table with all runs of the sensitivity simulation.
Returns
-------
table : pd.DataFrame
table that contains the means and standard deviations.
"""
table_temp = (
sensitivity_results.loc[sensitivity_results["Converged"] == 1]
.astype(float)
.groupby(level=["Approach"])
)
approaches = ["NFXP", "MPEC"]
statistics = ["Mean", "Standard Deviation"]
index = pd.MultiIndex.from_product(
[approaches, statistics], names=["Approach", "Statistic"]
)
table = pd.DataFrame(index=index, columns=sensitivity_results.columns)
table.loc(axis=0)["MPEC", "Mean"] = table_temp.mean().loc["MPEC"]
table.loc(axis=0)["NFXP", "Mean"] = table_temp.mean().loc["NFXP"]
table.loc(axis=0)["MPEC", "Standard Deviation"] = table_temp.std().loc["MPEC"]
table.loc(axis=0)["NFXP", "Standard Deviation"] = table_temp.std().loc["NFXP"]
table_temp = (
sensitivity_results["Converged"].astype(float).groupby(level=["Approach"])
)
table.loc[("NFXP", "Mean"), "Converged"] = table_temp.mean().loc["NFXP"]
table.loc[("NFXP", "Standard Deviation"), "Converged"] = table_temp.std().loc[
"NFXP"
]
table.loc[("MPEC", "Mean"), "Converged"] = table_temp.mean().loc["MPEC"]
table.loc[("MPEC", "Standard Deviation"), "Converged"] = table_temp.std().loc[
"MPEC"
]
table = table.astype(float)
return table
def get_specific_sensitivity(sensitivity_results, specifications):
"""
    get means and standard deviations for small perturbations in model
specification and numerical approach.
Parameters
----------
sensitivity_results : pd.DataFrame
table with all runs of the sensitivity simulation.
specifications : list
contains the model specifications for which the means and standard
deviations are calculated.
Returns
-------
sensitivity_results_new : pd.DataFrame
contains the results per run for the given specifications.
table : pd.DataFrame
        contains the means and standard deviations across different variables.
"""
indexes = []
original_index = sensitivity_results.index
for index in original_index:
if list(index[:4]) in specifications:
indexes.append(index)
sensitivity_results_new = sensitivity_results.loc[indexes, :]
index_table = []
for spec in np.arange(int(len(indexes) / 250)):
temp_index = list(indexes[250 * spec][:5])
for statistic in [["Mean"], ["Standard Deviation"]]:
temp = temp_index.copy()
temp.extend(statistic)
index_table.append(tuple(temp))
index_table = pd.MultiIndex.from_tuples(index_table)
    table = pd.DataFrame(index=index_table, columns=sensitivity_results.columns)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'CT_Viewer.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
import os
from moviepy.editor import ImageSequenceClip
from PyQt5 import QtCore, QtGui, QtWidgets
import pylidc as pl
from matplotlib.patches import Circle
from PIL import Image
import glob
import settings
import helpers
import sys
import random
import pandas
import ntpath
import cv2
import numpy
from typing import List, Tuple
from keras.optimizers import Adam, SGD
from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, merge, Convolution3D, MaxPooling3D, UpSampling3D, LeakyReLU, BatchNormalization, Flatten, Dense, Dropout, ZeroPadding3D, AveragePooling3D, Activation
from keras.models import Model, load_model, model_from_json
from keras.metrics import binary_accuracy, binary_crossentropy, mean_squared_error, mean_absolute_error
from keras import backend as K
from keras.callbacks import ModelCheckpoint, Callback, LearningRateScheduler
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
import math
from step3_predict_nodules import *
# limit memory usage..
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
import step2_train_nodule_detector
import shutil
import SimpleITK # conda install -c https://conda.anaconda.org/simpleitk SimpleITK
# from bs4 import BeautifulSoup  # conda install beautifulsoup4, conda install lxml (needed by load_lidc_xml below)
from plotting_functions import *
random.seed(1321)
numpy.random.seed(1321)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1681, 1056)
MainWindow.setStyleSheet("background-color: rgb(100, 100,100)")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(310, -10, 240, 51))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(110, 40, 631, 591))
self.label_2.setText("")
self.label_2.setObjectName("label_2")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(400, 700, 121, 61))
self.pushButton_2.setStyleSheet("background-color :rgb(80,80,80)")
self.pushButton_2.setObjectName("pushButton_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(1000, 40, 631, 591))
self.label_3.setText("")
self.label_3.setObjectName("label_3")
self.tableView = QtWidgets.QTableWidget(self.centralwidget)
self.tableView.setGeometry(QtCore.QRect(680, 720, 350, 140))
self.tableView.setObjectName("tableView")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(1140, -10, 240, 51))
self.label_4.setObjectName("label_4")
self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_4.setGeometry(QtCore.QRect(1350, 700, 121, 61))
self.pushButton_4.setStyleSheet("background-color :rgb(80,80,80)")
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(1240, 900, 191, 71))
self.pushButton.setStyleSheet("background-color :rgb(80,80,80)")
self.pushButton.setObjectName("pushButton")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(350, 660, 101, 31))
self.label_5.setText("")
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(1300, 660, 101, 31))
self.label_6.setText("")
self.label_6.setObjectName("label_6")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1681, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(280, 700, 121, 61))
self.pushButton_3.setStyleSheet("background-color :rgb(80,80,80)")
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_5.setGeometry(QtCore.QRect(1230, 700, 121, 61))
self.pushButton_5.setStyleSheet("background-color :rgb(80,80,80)")
self.pushButton_5.setObjectName("pushButton_5")
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.pushButton.clicked.connect(self.browse_data1)
self.pushButton_4.clicked.connect(self.plot_next_gif_real)
self.pushButton_2.clicked.connect(self.plot_next_gif_pred)
self.pushButton_3.clicked.connect(self.plot_next_frame_pred)
self.pushButton_5.clicked.connect(self.plot_next_frame_real)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", " Predicted Scan Nodules"))
self.label.setFont(QtGui.QFont("Arial", 10))
palette = self.label.palette()
palette.setColor(self.label.foregroundRole(), QtGui.QColor(255,255,255))
self.label.setPalette(palette)
self.pushButton_2.setText(_translate("MainWindow", "Next Nodule"))
self.pushButton_2.setStyleSheet('QPushButton {color: white;}')
self.label_4.setText(_translate("MainWindow", " Real Scan Nodules"))
self.label_4.setPalette(palette)
self.label_4.setFont(QtGui.QFont("Arial", 10))
self.pushButton_4.setText(_translate("MainWindow", "Next Nodule"))
self.pushButton_4.setStyleSheet('QPushButton {color: white;}')
self.pushButton_3.setText(_translate("MainWindow", "Next Frame"))
self.pushButton_5.setText(_translate("MainWindow", "Next Frame"))
self.pushButton_5.setStyleSheet('QPushButton {color: white;}')
self.pushButton_3.setStyleSheet('QPushButton {color: white;}')
self.pushButton.setText(_translate("MainWindow", "Browse/Process CT"))
self.pushButton.setStyleSheet('QPushButton {color: white;}')
def browse_data1(self):
data_path , _=QtWidgets.QFileDialog.getOpenFileName(None,'Open File',r"C:\Users\Ahmed\Desktop\CT_Gui")
self.data_path = data_path
if not os.path.exists('Predicted/{}'.format(self.data_path.split('/')[-1][:-4])):
os.mkdir('Predicted/{}'.format(self.data_path.split('/')[-1][:-4]))
os.mkdir('Real/{}'.format(self.data_path.split('/')[-1][:-4]))
#for magnification in [1, 1.5, 2]:
if True:
#for magnification in [1]:
version = 2
holdout = 0
CONTINUE_JOB = True
only_patient_id = None # "ebd601d40a18634b100c92e7db39f585"
magnification = 1
# predict_cubes("models/model_luna_posnegndsb_v" + str(version) + "__fs_h" + str(holdout) + "_end.hd5", CONTINUE_JOB, only_patient_id=only_patient_id, magnification=magnification, flip=False, train_data=True, holdout_no=holdout, ext_name="luna_posnegndsb_v" + str(version), fold_count=2)
# if holdout == 0:
self.process_images(self.data_path)
predict_cubes("models/model_luna_posnegndsb_v" + str(version) + "__fs_h" + str(holdout) + "_end.hd5", CONTINUE_JOB, only_patient_id=only_patient_id, magnification=magnification, flip=False, train_data=False, holdout_no=holdout, ext_name="luna_posnegndsb_v" + str(version), fold_count=2)
#os.copy(self.data_path, 'Luna/luna16_extracted_images')
# if True:
#for magnification in [1]: #
#predict_cubes("models/model_luna16_full__fs_best.hd5", CONTINUE_JOB, only_patient_id=only_patient_id, magnification=magnification, flip=False, train_data=True, holdout_no=None, ext_name="luna16_fs")
# predict_cubes("models/model_luna16_full__fs_best.hd5", CONTINUE_JOB, only_patient_id=only_patient_id, magnification=magnification, flip=False, train_data=False, holdout_no=None, ext_name="luna16_fs")
#self.predict_cubes(self.data_path[:-4],"models/model_luna16_full__fs_best.hd5", magnification=magnification, holdout_no=None, ext_name="luna16_fs")
radii , centroids,chances,image_array = get_nodules(self.data_path.split('/')[-1])
paths, self.sub_dfs = plot(radii,centroids,image_array,self.data_path.split('/')[-1][:-4],chances = chances)
#print("Done")
self.sub_dfs_gen_pred = (df_ for df_ in self.sub_dfs)
frames = []
self.n_pred = len(paths)
self.current_pred = 0
print('Predicted are',self.n_pred)
for i in range(len(paths)):
frames = []
for path in paths[i]:
frames.append(cv2.imread(path))
print(self.data_path)
print(paths[i])
gif('Predicted/{}/{}_{}.gif'.format(self.data_path.split('/')[-1][:-4],paths[i][0].split('\\')[-1].split('.')[0],paths[i][-1].split('\\')[-1].split('.')[0]),np.array(frames))
self.gif_ls_pred = glob.iglob("Predicted\\{}\\*.gif".format(self.data_path.split('/')[-1][:-4]))
self.plot_next_gif_pred()
# Query for all CT scans with desired traits.
scan = pl.query(pl.Scan).filter(pl.Scan.series_instance_uid == self.data_path.split('/')[-1][:-4]).first()
nods = scan.cluster_annotations()
print("%s has %d nodules." % (scan, len(nods)))
nnods = len(nods)
centroids = [np.array([a.centroid for a in group]).mean(0)
for group in nods]
radii = [np.mean([a.diameter/2 for a in group])
for group in nods]
itk_img = SimpleITK.ReadImage(self.data_path)
img_array = SimpleITK.GetArrayFromImage(itk_img) # indexes are z,y,x (notice the ordering)
paths_real,self.sub_dfs_real = plot(radii,centroids,img_array,self.data_path.split('/')[-1][:-4],real = True)
self.sub_dfs_gen_real = (df_ for df_ in self.sub_dfs_real)
self.n_real = len(paths_real)
self.current_real = 0
for i in range(len(paths_real)):
frames = []
for path in paths_real[i]:
frames.append(cv2.imread(path))
gif('Real\\{}\\{}_{}.gif'.format(self.data_path.split('/')[-1][:-4],paths[i][0].split('\\')[-1].split('.')[0],paths[i][-1].split('\\')[-1].split('.')[0]),np.array(frames))
self.gif_ls_real = glob.iglob("Real\\{}\\*.gif".format(self.data_path.split('/')[-1][:-4]))
self.plot_next_gif_real()
def plot_next_frame_pred(self):
try:
img_path = next(self.frames_pred)
except:
self.frames_pred = glob.iglob("Real\\{}\\{}\\*".format(self.data_path.split('/')[-1][:-4],str(self.current_real)))
img_path = next(self.frames_pred)
print(img_path)
pixmap = QtGui.QPixmap(img_path)
#self.Pic_Label.setPixmap(pixmap)
#image = QtGui.QImage(img, img.shape[1], img.shape[0],img.strides[0], QtGui.QImage.Format_RGB888)
self.label_2.setPixmap(pixmap.scaled(631, 591))
def plot_next_frame_real(self):
try:
img_path = next(self.frames_real)
except:
self.frames_real = glob.iglob("Real\\{}\\{}\\*".format(self.data_path.split('/')[-1][:-4],str(self.current_real)))
img_path = next(self.frames_real)
print(img_path)
pixmap = QtGui.QPixmap(img_path)
#self.Pic_Label.setPixmap(pixmap)
#image = QtGui.QImage(img, img.shape[1], img.shape[0],img.strides[0], QtGui.QImage.Format_RGB888)
self.label_3.setPixmap(pixmap.scaled(631, 591))
def plot_next_gif_pred(self):
try:
movie = QtGui.QMovie(next(self.gif_ls_pred))
self.current_pred += 1
self.frames_pred = glob.iglob("Predicted\\{}\\{}\\*".format(self.data_path.split('/')[-1][:-4],str(self.current_pred)))
print(self.frames_pred)
except:
self.gif_ls_pred = glob.iglob("Predicted\\{}\\*.gif".format(self.data_path.split('/')[-1][:-4]))
self.sub_dfs_gen_pred = (df_ for df_ in self.sub_dfs)
movie = QtGui.QMovie(next(self.gif_ls_pred))
self.current_pred = 1
self.frames_pred = glob.iglob("Predicted\\{}\\{}\\*".format(self.data_path.split('/')[-1][:-4],str(self.current_pred)))
movie.setScaledSize(QtCore.QSize(631, 591))
self.label_2.setMovie(movie)
movie.start()
df_tmp = next(self.sub_dfs_gen_pred)
chance = df_tmp['chances'].values[0]
radius = df_tmp['rad'].values[0]
self.label_5.setText('{}/{}'.format(self.current_pred,self.n_pred))
self.label_5.setFont(QtGui.QFont("Arial", 10))
palette = self.label_5.palette()
palette.setColor(self.label_5.foregroundRole(), QtGui.QColor(255,255,255))
self.label_5.setPalette(palette)
self.tableView.setRowCount(2)
fg = QtGui.QColor(255, 255, 255)
# set column count
self.tableView.setColumnCount(5)
self.tableView.setItem(0,0, QtWidgets.QTableWidgetItem("X"))
self.tableView.setItem(0,1, QtWidgets.QTableWidgetItem("Y"))
self.tableView.setItem(0,2, QtWidgets.QTableWidgetItem("Z"))
self.tableView.setItem(0,3, QtWidgets.QTableWidgetItem("rad"))
self.tableView.setItem(0,4, QtWidgets.QTableWidgetItem("chance"))
self.tableView.setItem(1,0, QtWidgets.QTableWidgetItem(str(round(df_tmp['X'].values[0],2)) + 'mm'))
self.tableView.setItem(1,1, QtWidgets.QTableWidgetItem(str(round(df_tmp['Y'].values[0],2)) + 'mm'))
self.tableView.setItem(1,2, QtWidgets.QTableWidgetItem(str(round(df_tmp['Z'].values[0],2)) + 'mm'))
self.tableView.setItem(1,3, QtWidgets.QTableWidgetItem(str(round(df_tmp['rad'].values[0],2)) + 'mm'))
self.tableView.setItem(1,4, QtWidgets.QTableWidgetItem(str(round(100*df_tmp['chances'].values[0])) +'%'))
self.tableView.resizeRowsToContents()
self.tableView.resizeColumnsToContents()
for i in range(2):
for j in range(5):
self.tableView.item(i,j).setForeground(fg)
#self.textBrowser.setText('This nodule has a probability of {} to be Malignant, and its radius is {}'.format(chance,radius))
def plot_next_gif_real(self):
try:
movie = QtGui.QMovie(next(self.gif_ls_real))
self.current_real += 1
self.frames_real = glob.iglob("Real\\{}\\{}\\*".format(self.data_path.split('/')[-1][:-4],str(self.current_real)))
print(self.frames_real)
except:
self.gif_ls_real = glob.iglob("Real\\{}\\*.gif".format(self.data_path.split('/')[-1][:-4]))
self.sub_dfs_gen_real = (df_ for df_ in self.sub_dfs_real)
movie = QtGui.QMovie(next(self.gif_ls_real))
self.current_real = 1
self.frames_real = glob.iglob("Real\\{}\\{}\\*".format(self.data_path.split('/')[-1][:-4],str(self.current_real)))
movie.setScaledSize(QtCore.QSize(631, 591))
self.label_3.setMovie(movie)
movie.start()
df_tmp = next(self.sub_dfs_gen_real)
chance = df_tmp['chances'].values[0]
radius = df_tmp['rad'].values[0]
print(df_tmp)
self.label_6.setText('{}/{}'.format(self.current_real,self.n_real))
self.label_6.setFont(QtGui.QFont("Arial", 10))
palette = self.label_6.palette()
palette.setColor(self.label_6.foregroundRole(), QtGui.QColor(255,255,255))
self.label_6.setPalette(palette)
def prepare_image_for_net3D(self,img):
img = img.astype(numpy.float32)
img -= MEAN_PIXEL_VALUE
img /= 255.
img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2], 1)
return img
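    # Hypothetical shape sketch (CUBE_SIZE is assumed to come from
    # step3_predict_nodules; a value of 32 is assumed here for illustration):
    # a (32, 32, 32) uint8 cube becomes a float32 batch of shape
    # (1, 32, 32, 32, 1), ready for model.predict, e.g.
    #
    #   batch = self.prepare_image_for_net3D(cube_img)
    #   p = model.predict(batch, batch_size=1)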
def filter_patient_nodules_predictions(self,df_nodule_predictions: pandas.DataFrame, patient_id, view_size):
src_dir = ''
patient_mask = helpers.load_patient_images('Luna\\luna16_extracted_images\\' + patient_id + '\\', src_dir, "*_m.png")
delete_indices = []
for index, row in df_nodule_predictions.iterrows():
z_perc = row["coord_z"]
y_perc = row["coord_y"]
center_x = int(round(row["coord_x"] * patient_mask.shape[2]))
center_y = int(round(y_perc * patient_mask.shape[1]))
center_z = int(round(z_perc * patient_mask.shape[0]))
mal_score = row["diameter_mm"]
start_y = center_y - view_size / 2
start_x = center_x - view_size / 2
nodule_in_mask = False
for z_index in [-1, 0, 1]:
img = patient_mask[z_index + center_z]
start_x = int(start_x)
start_y = int(start_y)
view_size = int(view_size)
img_roi = img[start_y:start_y+view_size, start_x:start_x + view_size]
if img_roi.sum() > 255: # more than 1 pixel of mask.
nodule_in_mask = True
if not nodule_in_mask:
print("Nodule not in mask: ", (center_x, center_y, center_z))
if mal_score > 0:
mal_score *= -1
df_nodule_predictions.loc[index, "diameter_mm"] = mal_score
else:
if center_z < 30:
print("Z < 30: ", patient_id, " center z:", center_z, " y_perc: ", y_perc)
if mal_score > 0:
mal_score *= -1
df_nodule_predictions.loc[index, "diameter_mm"] = mal_score
if (z_perc > 0.75 or z_perc < 0.25) and y_perc > 0.85:
print("SUSPICIOUS FALSEPOSITIVE: ", patient_id, " center z:", center_z, " y_perc: ", y_perc)
if center_z < 50 and y_perc < 0.30:
print("SUSPICIOUS FALSEPOSITIVE OUT OF RANGE: ", patient_id, " center z:", center_z, " y_perc: ", y_perc)
df_nodule_predictions.drop(df_nodule_predictions.index[delete_indices], inplace=True)
return df_nodule_predictions
def filter_nodule_predictions(self,only_patient_id=None):
src_dir = settings.LUNA_NODULE_DETECTION_DIR
for csv_index, csv_path in enumerate(glob.glob(src_dir + "*.csv")):
file_name = ntpath.basename(csv_path)
patient_id = file_name.replace(".csv", "")
print(csv_index, ": ", patient_id)
if only_patient_id is not None and patient_id != only_patient_id:
continue
df_nodule_predictions = pandas.read_csv(csv_path)
self.filter_patient_nodules_predictions(df_nodule_predictions, patient_id, CUBE_SIZE)
df_nodule_predictions.to_csv(csv_path, index=False)
    def make_negative_train_data_based_on_predicted_luna_nodules(self):
src_dir = settings.LUNA_NODULE_DETECTION_DIR
pos_labels_dir = settings.LUNA_NODULE_LABELS_DIR
keep_dist = CUBE_SIZE + CUBE_SIZE / 2
total_false_pos = 0
for csv_index, csv_path in enumerate(glob.glob(src_dir + "*.csv")):
file_name = ntpath.basename(csv_path)
patient_id = file_name.replace(".csv", "")
# if not "273525289046256012743471155680" in patient_id:
# continue
df_nodule_predictions = pandas.read_csv(csv_path)
pos_annos_manual = None
manual_path = settings.MANUAL_ANNOTATIONS_LABELS_DIR + patient_id + ".csv"
if os.path.exists(manual_path):
pos_annos_manual = pandas.read_csv(manual_path)
            self.filter_patient_nodules_predictions(df_nodule_predictions, patient_id, CUBE_SIZE)
pos_labels = pandas.read_csv(pos_labels_dir + patient_id + "_annos_pos_lidc.csv")
print(csv_index, ": ", patient_id, ", pos", len(pos_labels))
patient_imgs = helpers.load_patient_images(patient_id, settings.LUNA16_EXTRACTED_IMAGE_DIR, "*_m.png")
for nod_pred_index, nod_pred_row in df_nodule_predictions.iterrows():
if nod_pred_row["diameter_mm"] < 0:
continue
nx, ny, nz = helpers.percentage_to_pixels(nod_pred_row["coord_x"], nod_pred_row["coord_y"], nod_pred_row["coord_z"], patient_imgs)
diam_mm = nod_pred_row["diameter_mm"]
for label_index, label_row in pos_labels.iterrows():
px, py, pz = helpers.percentage_to_pixels(label_row["coord_x"], label_row["coord_y"], label_row["coord_z"], patient_imgs)
dist = math.sqrt(math.pow(nx - px, 2) + math.pow(ny - py, 2) + math.pow(nz- pz, 2))
if dist < keep_dist:
if diam_mm >= 0:
diam_mm *= -1
df_nodule_predictions.loc[nod_pred_index, "diameter_mm"] = diam_mm
break
if pos_annos_manual is not None:
for index, label_row in pos_annos_manual.iterrows():
px, py, pz = helpers.percentage_to_pixels(label_row["x"], label_row["y"], label_row["z"], patient_imgs)
diameter = label_row["d"] * patient_imgs[0].shape[1]
# print((pos_coord_x, pos_coord_y, pos_coord_z))
# print(center_float_rescaled)
dist = math.sqrt(math.pow(px - nx, 2) + math.pow(py - ny, 2) + math.pow(pz - nz, 2))
if dist < (diameter + 72): # make sure we have a big margin
if diam_mm >= 0:
diam_mm *= -1
df_nodule_predictions.loc[nod_pred_index, "diameter_mm"] = diam_mm
print("#Too close", (nx, ny, nz))
break
df_nodule_predictions.to_csv(csv_path, index=False)
df_nodule_predictions = df_nodule_predictions[df_nodule_predictions["diameter_mm"] >= 0]
df_nodule_predictions.to_csv(pos_labels_dir + patient_id + "_candidates_falsepos.csv", index=False)
total_false_pos += len(df_nodule_predictions)
print("Total false pos:", total_false_pos)
def predict_cubes(self,path,model_path, magnification=1, holdout_no=-1, ext_name="", fold_count=2):
dst_dir = settings.LUNA_NODULE_DETECTION_DIR
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
holdout_ext = ""
dst_dir += "predictions" + str(int(magnification * 10)) + holdout_ext + "_" + ext_name + "/"
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
sw = helpers.Stopwatch.start_new()
model = step2_train_nodule_detector.get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=model_path)
patient_id = path
all_predictions_csv = []
if holdout_no is not None:
patient_fold = helpers.get_patient_fold(patient_id)
patient_fold %= fold_count
print( ": ", patient_id)
csv_target_path = dst_dir + patient_id.split('/')[-1] + ".csv"
print(patient_id)
try:
patient_img = helpers.load_patient_images('Luna\\luna16_extracted_images\\' + patient_id + '\\', '', "*_i.png", [])
except:
print('Please Re-Process the dicom file again')
if magnification != 1:
patient_img = helpers.rescale_patient_images(patient_img, (1, 1, 1), magnification)
patient_mask = helpers.load_patient_images('Luna\\luna16_extracted_images\\' + patient_id + '\\','', "*_m.png", [])
if magnification != 1:
patient_mask = helpers.rescale_patient_images(patient_mask, (1, 1, 1), magnification, is_mask_image=True)
# patient_img = patient_img[:, ::-1, :]
# patient_mask = patient_mask[:, ::-1, :]
step = PREDICT_STEP
CROP_SIZE = CUBE_SIZE
# CROP_SIZE = 48
predict_volume_shape_list = [0, 0, 0]
for dim in range(3):
dim_indent = 0
while dim_indent + CROP_SIZE < patient_img.shape[dim]:
predict_volume_shape_list[dim] += 1
dim_indent += step
predict_volume_shape = (predict_volume_shape_list[0], predict_volume_shape_list[1], predict_volume_shape_list[2])
predict_volume = numpy.zeros(shape=predict_volume_shape, dtype=float)
print("Predict volume shape: ", predict_volume.shape)
done_count = 0
skipped_count = 0
batch_size = 128
batch_list = []
batch_list_coords = []
patient_predictions_csv = []
cube_img = None
annotation_index = 0
for z in range(0, predict_volume_shape[0]):
for y in range(0, predict_volume_shape[1]):
for x in range(0, predict_volume_shape[2]):
#if cube_img is None:
cube_img = patient_img[z * step:z * step+CROP_SIZE, y * step:y * step + CROP_SIZE, x * step:x * step+CROP_SIZE]
cube_mask = patient_mask[z * step:z * step+CROP_SIZE, y * step:y * step + CROP_SIZE, x * step:x * step+CROP_SIZE]
                    if cube_mask.sum() < 2000:
                        # Hardly any lung voxels in this crop: count it as skipped and move on.
                        skipped_count += 1
                        continue
if CROP_SIZE != CUBE_SIZE:
cube_img = helpers.rescale_patient_images2(cube_img, (CUBE_SIZE, CUBE_SIZE, CUBE_SIZE))
# helpers.save_cube_img("c:/tmp/cube.png", cube_img, 8, 4)
# cube_mask = helpers.rescale_patient_images2(cube_mask, (CUBE_SIZE, CUBE_SIZE, CUBE_SIZE))
img_prep = self.prepare_image_for_net3D(cube_img)
batch_list.append(img_prep)
batch_list_coords.append((z, y, x))
if len(batch_list) % batch_size == 0:
batch_data = numpy.vstack(batch_list)
p = model.predict(batch_data, batch_size=batch_size)
for i in range(len(p[0])):
p_z = batch_list_coords[i][0]
p_y = batch_list_coords[i][1]
p_x = batch_list_coords[i][2]
nodule_chance = p[0][i][0]
predict_volume[p_z, p_y, p_x] = nodule_chance
if nodule_chance > P_TH:
p_z = p_z * step + CROP_SIZE / 2
p_y = p_y * step + CROP_SIZE / 2
p_x = p_x * step + CROP_SIZE / 2
p_z_perc = round(p_z / patient_img.shape[0], 4)
p_y_perc = round(p_y / patient_img.shape[1], 4)
p_x_perc = round(p_x / patient_img.shape[2], 4)
diameter_mm = round(p[1][i][0], 4)
# diameter_perc = round(2 * step / patient_img.shape[2], 4)
                                diameter_perc = round(diameter_mm / patient_img.shape[2], 4)
nodule_chance = round(nodule_chance, 4)
patient_predictions_csv_line = [annotation_index, p_x_perc, p_y_perc, p_z_perc, diameter_perc, nodule_chance, diameter_mm]
patient_predictions_csv.append(patient_predictions_csv_line)
all_predictions_csv.append([patient_id] + patient_predictions_csv_line)
annotation_index += 1
batch_list = []
batch_list_coords = []
done_count += 1
if done_count % 10000 == 0:
print("Done: ", done_count, " skipped:", skipped_count)
df = pandas.DataFrame(patient_predictions_csv, columns=["anno_index", "coord_x", "coord_y", "coord_z", "diameter", "nodule_chance", "diameter_mm"])
print("Started Filtering")
print(all_predictions_csv)
#print(batch_data)
self.filter_patient_nodules_predictions(df, patient_id, CROP_SIZE * magnification)
df.to_csv(csv_target_path, index=False)
print(predict_volume.mean())
print("Done in : ", sw.get_elapsed_seconds(), " seconds")
def find_mhd_file(self,patient_id):
for subject_no in range(settings.LUNA_SUBSET_START_INDEX, 10):
src_dir = settings.LUNA16_RAW_SRC_DIR + "subset" + str(subject_no) + "/"
for src_path in glob.glob(src_dir + "*.mhd"):
if patient_id in src_path:
return src_path
return None
def load_lidc_xml(self,xml_path, agreement_threshold=0, only_patient=None, save_nodules=False):
pos_lines = []
neg_lines = []
extended_lines = []
with open(xml_path, 'r') as xml_file:
markup = xml_file.read()
xml = BeautifulSoup(markup, features="xml")
if xml.LidcReadMessage is None:
return None, None, None
patient_id = xml.LidcReadMessage.ResponseHeader.SeriesInstanceUid.text
if only_patient is not None:
if only_patient != patient_id:
return None, None, None
src_path = self.find_mhd_file(patient_id)
if src_path is None:
return None, None, None
print(patient_id)
itk_img = SimpleITK.ReadImage(src_path)
img_array = SimpleITK.GetArrayFromImage(itk_img)
num_z, height, width = img_array.shape #heightXwidth constitute the transverse plane
origin = numpy.array(itk_img.GetOrigin()) # x,y,z Origin in world coordinates (mm)
spacing = numpy.array(itk_img.GetSpacing()) # spacing of voxels in world coor. (mm)
rescale = spacing / settings.TARGET_VOXEL_MM
reading_sessions = xml.LidcReadMessage.find_all("readingSession")
for reading_session in reading_sessions:
# print("Sesion")
nodules = reading_session.find_all("unblindedReadNodule")
for nodule in nodules:
nodule_id = nodule.noduleID.text
# print(" ", nodule.noduleID)
rois = nodule.find_all("roi")
x_min = y_min = z_min = 999999
x_max = y_max = z_max = -999999
if len(rois) < 2:
continue
for roi in rois:
z_pos = float(roi.imageZposition.text)
z_min = min(z_min, z_pos)
z_max = max(z_max, z_pos)
edge_maps = roi.find_all("edgeMap")
for edge_map in edge_maps:
x = int(edge_map.xCoord.text)
y = int(edge_map.yCoord.text)
x_min = min(x_min, x)
y_min = min(y_min, y)
x_max = max(x_max, x)
y_max = max(y_max, y)
if x_max == x_min:
continue
if y_max == y_min:
continue
x_diameter = x_max - x_min
x_center = x_min + x_diameter / 2
y_diameter = y_max - y_min
y_center = y_min + y_diameter / 2
z_diameter = z_max - z_min
z_center = z_min + z_diameter / 2
z_center -= origin[2]
z_center /= spacing[2]
x_center_perc = round(x_center / img_array.shape[2], 4)
y_center_perc = round(y_center / img_array.shape[1], 4)
z_center_perc = round(z_center / img_array.shape[0], 4)
diameter = max(x_diameter , y_diameter)
diameter_perc = round(max(x_diameter / img_array.shape[2], y_diameter / img_array.shape[1]), 4)
if nodule.characteristics is None:
print("!!!!Nodule:", nodule_id, " has no charecteristics")
continue
if nodule.characteristics.malignancy is None:
print("!!!!Nodule:", nodule_id, " has no malignacy")
continue
malignacy = nodule.characteristics.malignancy.text
sphericiy = nodule.characteristics.sphericity.text
margin = nodule.characteristics.margin.text
spiculation = nodule.characteristics.spiculation.text
texture = nodule.characteristics.texture.text
calcification = nodule.characteristics.calcification.text
internal_structure = nodule.characteristics.internalStructure.text
lobulation = nodule.characteristics.lobulation.text
subtlety = nodule.characteristics.subtlety.text
line = [nodule_id, x_center_perc, y_center_perc, z_center_perc, diameter_perc, malignacy]
extended_line = [patient_id, nodule_id, x_center_perc, y_center_perc, z_center_perc, diameter_perc, malignacy, sphericiy, margin, spiculation, texture, calcification, internal_structure, lobulation, subtlety ]
pos_lines.append(line)
extended_lines.append(extended_line)
nonNodules = reading_session.find_all("nonNodule")
for nonNodule in nonNodules:
z_center = float(nonNodule.imageZposition.text)
z_center -= origin[2]
z_center /= spacing[2]
x_center = int(nonNodule.locus.xCoord.text)
y_center = int(nonNodule.locus.yCoord.text)
nodule_id = nonNodule.nonNoduleID.text
x_center_perc = round(x_center / img_array.shape[2], 4)
y_center_perc = round(y_center / img_array.shape[1], 4)
z_center_perc = round(z_center / img_array.shape[0], 4)
diameter_perc = round(max(6 / img_array.shape[2], 6 / img_array.shape[1]), 4)
# print("Non nodule!", z_center)
line = [nodule_id, x_center_perc, y_center_perc, z_center_perc, diameter_perc, 0]
neg_lines.append(line)
if agreement_threshold > 1:
filtered_lines = []
for pos_line1 in pos_lines:
id1 = pos_line1[0]
x1 = pos_line1[1]
y1 = pos_line1[2]
z1 = pos_line1[3]
d1 = pos_line1[4]
overlaps = 0
for pos_line2 in pos_lines:
id2 = pos_line2[0]
if id1 == id2:
continue
x2 = pos_line2[1]
y2 = pos_line2[2]
z2 = pos_line2[3]
                d2 = pos_line2[4]
dist = math.sqrt(math.pow(x1 - x2, 2) + math.pow(y1 - y2, 2) + math.pow(z1 - z2, 2))
if dist < d1 or dist < d2:
overlaps += 1
if overlaps >= agreement_threshold:
filtered_lines.append(pos_line1)
# else:
# print("Too few overlaps")
pos_lines = filtered_lines
df_annos = pandas.DataFrame(pos_lines, columns=["anno_index", "coord_x", "coord_y", "coord_z", "diameter", "malscore"])
df_annos.to_csv(settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/" + patient_id + "_annos_pos_lidc.csv", index=False)
df_neg_annos = pandas.DataFrame(neg_lines, columns=["anno_index", "coord_x", "coord_y", "coord_z", "diameter", "malscore"])
df_neg_annos.to_csv(settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/" + patient_id + "_annos_neg_lidc.csv", index=False)
# return [patient_id, spacing[0], spacing[1], spacing[2]]
return pos_lines, neg_lines, extended_lines
def normalize(self,image):
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
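        # Rescale Hounsfield units from the [-1000, 400] window (air to bone) onto [0, 1]; values outside are clipped below.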
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image > 1] = 1.
image[image < 0] = 0.
return image
def process_image(self,src_path):
patient_id = ntpath.basename(src_path).replace(".mhd", "")
print("Patient: ", patient_id)
dst_dir = 'Luna\\luna16_extracted_images\\' + patient_id + "\\"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
itk_img = SimpleITK.ReadImage(src_path)
img_array = SimpleITK.GetArrayFromImage(itk_img)
print("Img array: ", img_array.shape)
origin = numpy.array(itk_img.GetOrigin()) # x,y,z Origin in world coordinates (mm)
print("Origin (x,y,z): ", origin)
direction = numpy.array(itk_img.GetDirection()) # x,y,z Origin in world coordinates (mm)
print("Direction: ", direction)
spacing = numpy.array(itk_img.GetSpacing()) # spacing of voxels in world coor. (mm)
print("Spacing (x,y,z): ", spacing)
rescale = spacing / settings.TARGET_VOXEL_MM
print("Rescale: ", rescale)
img_array = helpers.rescale_patient_images(img_array, spacing, settings.TARGET_VOXEL_MM)
img_list = []
for i in range(img_array.shape[0]):
img = img_array[i]
seg_img, mask = helpers.get_segmented_lungs(img.copy())
img_list.append(seg_img)
img = self.normalize(img)
cv2.imwrite(dst_dir + "img_" + str(i).rjust(4, '0') + "_i.png", img * 255)
cv2.imwrite(dst_dir + "img_" + str(i).rjust(4, '0') + "_m.png", mask * 255)
def process_pos_annotations_patient(src_path, patient_id):
df_node = pandas.read_csv("resources/luna16_annotations/annotations.csv")
dst_dir = settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
dst_dir = dst_dir + patient_id + "/"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
itk_img = SimpleITK.ReadImage(src_path)
img_array = SimpleITK.GetArrayFromImage(itk_img)
print("Img array: ", img_array.shape)
df_patient = df_node[df_node["seriesuid"] == patient_id]
print("Annos: ", len(df_patient))
num_z, height, width = img_array.shape #heightXwidth constitute the transverse plane
origin = numpy.array(itk_img.GetOrigin()) # x,y,z Origin in world coordinates (mm)
print("Origin (x,y,z): ", origin)
spacing = numpy.array(itk_img.GetSpacing()) # spacing of voxels in world coor. (mm)
print("Spacing (x,y,z): ", spacing)
rescale = spacing /settings.TARGET_VOXEL_MM
print("Rescale: ", rescale)
direction = numpy.array(itk_img.GetDirection()) # x,y,z Origin in world coordinates (mm)
print("Direction: ", direction)
flip_direction_x = False
flip_direction_y = False
if round(direction[0]) == -1:
origin[0] *= -1
direction[0] = 1
flip_direction_x = True
print("Swappint x origin")
if round(direction[4]) == -1:
origin[1] *= -1
direction[4] = 1
flip_direction_y = True
print("Swappint y origin")
print("Direction: ", direction)
assert abs(sum(direction) - 3) < 0.01
patient_imgs = helpers.load_patient_images(patient_id, settings.LUNA16_EXTRACTED_IMAGE_DIR, "*_i.png")
pos_annos = []
df_patient = df_node[df_node["seriesuid"] == patient_id]
anno_index = 0
for index, annotation in df_patient.iterrows():
node_x = annotation["coordX"]
if flip_direction_x:
node_x *= -1
node_y = annotation["coordY"]
if flip_direction_y:
node_y *= -1
node_z = annotation["coordZ"]
diam_mm = annotation["diameter_mm"]
print("Node org (x,y,z,diam): ", (round(node_x, 2), round(node_y, 2), round(node_z, 2), round(diam_mm, 2)))
center_float = numpy.array([node_x, node_y, node_z])
center_int = numpy.rint((center_float-origin) / spacing)
# center_int = numpy.rint((center_float - origin) )
print("Node tra (x,y,z,diam): ", (center_int[0], center_int[1], center_int[2]))
# center_int_rescaled = numpy.rint(((center_float-origin) / spacing) * rescale)
center_float_rescaled = (center_float - origin) / settings.TARGET_VOXEL_MM
center_float_percent = center_float_rescaled / patient_imgs.swapaxes(0, 2).shape
# center_int = numpy.rint((center_float - origin) )
print("Node sca (x,y,z,diam): ", (center_float_rescaled[0], center_float_rescaled[1], center_float_rescaled[2]))
diameter_pixels = diam_mm / settings.TARGET_VOXEL_MM
diameter_percent = diameter_pixels / float(patient_imgs.shape[1])
pos_annos.append([anno_index, round(center_float_percent[0], 4), round(center_float_percent[1], 4), round(center_float_percent[2], 4), round(diameter_percent, 4), 1])
anno_index += 1
df_annos = pandas.DataFrame(pos_annos, columns=["anno_index", "coord_x", "coord_y", "coord_z", "diameter", "malscore"])
df_annos.to_csv(settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/" + patient_id + "_annos_pos.csv", index=False)
return [patient_id, spacing[0], spacing[1], spacing[2]]
def process_excluded_annotations_patient(src_path, patient_id):
df_node = pandas.read_csv("resources/luna16_annotations/annotations_excluded.csv")
dst_dir = settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
dst_dir = dst_dir + patient_id + "/"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
# pos_annos_df = pandas.read_csv(TRAIN_DIR + "metadata/" + patient_id + "_annos_pos_lidc.csv")
pos_annos_df = pandas.read_csv(settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/" + patient_id + "_annos_pos.csv")
pos_annos_manual = None
manual_path = settings.EXTRA_DATA_DIR + "luna16_manual_labels/" + patient_id + ".csv"
if os.path.exists(manual_path):
pos_annos_manual = pandas.read_csv(manual_path)
dmm = pos_annos_manual["dmm"] # check
itk_img = SimpleITK.ReadImage(src_path)
img_array = SimpleITK.GetArrayFromImage(itk_img)
print("Img array: ", img_array.shape)
df_patient = df_node[df_node["seriesuid"] == patient_id]
print("Annos: ", len(df_patient))
num_z, height, width = img_array.shape #heightXwidth constitute the transverse plane
origin = numpy.array(itk_img.GetOrigin()) # x,y,z Origin in world coordinates (mm)
print("Origin (x,y,z): ", origin)
spacing = numpy.array(itk_img.GetSpacing()) # spacing of voxels in world coor. (mm)
print("Spacing (x,y,z): ", spacing)
rescale = spacing / settings.TARGET_VOXEL_MM
print("Rescale: ", rescale)
direction = numpy.array(itk_img.GetDirection()) # x,y,z Origin in world coordinates (mm)
print("Direction: ", direction)
flip_direction_x = False
flip_direction_y = False
if round(direction[0]) == -1:
origin[0] *= -1
direction[0] = 1
flip_direction_x = True
print("Swappint x origin")
if round(direction[4]) == -1:
origin[1] *= -1
direction[4] = 1
flip_direction_y = True
print("Swappint y origin")
print("Direction: ", direction)
assert abs(sum(direction) - 3) < 0.01
patient_imgs = helpers.load_patient_images(patient_id, settings.LUNA16_EXTRACTED_IMAGE_DIR, "*_i.png")
neg_annos = []
df_patient = df_node[df_node["seriesuid"] == patient_id]
anno_index = 0
for index, annotation in df_patient.iterrows():
node_x = annotation["coordX"]
if flip_direction_x:
node_x *= -1
node_y = annotation["coordY"]
if flip_direction_y:
node_y *= -1
node_z = annotation["coordZ"]
center_float = numpy.array([node_x, node_y, node_z])
center_int = numpy.rint((center_float-origin) / spacing)
center_float_rescaled = (center_float - origin) / settings.TARGET_VOXEL_MM
center_float_percent = center_float_rescaled / patient_imgs.swapaxes(0, 2).shape
# center_int = numpy.rint((center_float - origin) )
# print("Node sca (x,y,z,diam): ", (center_float_rescaled[0], center_float_rescaled[1], center_float_rescaled[2]))
diameter_pixels = 6 / settings.TARGET_VOXEL_MM
diameter_percent = diameter_pixels / float(patient_imgs.shape[1])
ok = True
for index, row in pos_annos_df.iterrows():
pos_coord_x = row["coord_x"] * patient_imgs.shape[2]
pos_coord_y = row["coord_y"] * patient_imgs.shape[1]
pos_coord_z = row["coord_z"] * patient_imgs.shape[0]
diameter = row["diameter"] * patient_imgs.shape[2]
print((pos_coord_x, pos_coord_y, pos_coord_z))
print(center_float_rescaled)
dist = math.sqrt(math.pow(pos_coord_x - center_float_rescaled[0], 2) + math.pow(pos_coord_y - center_float_rescaled[1], 2) + math.pow(pos_coord_z - center_float_rescaled[2], 2))
if dist < (diameter + 64): # make sure we have a big margin
ok = False
print("################### Too close", center_float_rescaled)
break
if pos_annos_manual is not None and ok:
for index, row in pos_annos_manual.iterrows():
pos_coord_x = row["x"] * patient_imgs.shape[2]
pos_coord_y = row["y"] * patient_imgs.shape[1]
pos_coord_z = row["z"] * patient_imgs.shape[0]
diameter = row["d"] * patient_imgs.shape[2]
print((pos_coord_x, pos_coord_y, pos_coord_z))
print(center_float_rescaled)
dist = math.sqrt(math.pow(pos_coord_x - center_float_rescaled[0], 2) + math.pow(pos_coord_y - center_float_rescaled[1], 2) + math.pow(pos_coord_z - center_float_rescaled[2], 2))
if dist < (diameter + 72): # make sure we have a big margin
ok = False
print("################### Too close", center_float_rescaled)
break
if not ok:
continue
neg_annos.append([anno_index, round(center_float_percent[0], 4), round(center_float_percent[1], 4), round(center_float_percent[2], 4), round(diameter_percent, 4), 1])
anno_index += 1
df_annos = pandas.DataFrame(neg_annos, columns=["anno_index", "coord_x", "coord_y", "coord_z", "diameter", "malscore"])
df_annos.to_csv(settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/" + patient_id + "_annos_excluded.csv", index=False)
return [patient_id, spacing[0], spacing[1], spacing[2]]
def process_luna_candidates_patient(src_path, patient_id):
dst_dir = settings.LUNA16_EXTRACTED_IMAGE_DIR + "/_labels/"
img_dir = dst_dir + patient_id + "/"
df_pos_annos = | pandas.read_csv(dst_dir + patient_id + "_annos_pos_lidc.csv") | pandas.read_csv |
import pandas as pd
def find_ms(df):
subset_index = df[['BMI', 'Systolic', 'Diastolic',
'Triglyceride', 'HDL-C', 'Glucose',
'Total Cholesterol', 'Gender']].dropna().index
    df = df.loc[subset_index]
    df_bmi_lo = df.loc[df['BMI'] < 25.0, :].copy()
    df_bmi_hi = df.loc[df['BMI'] >= 25.0, :].copy()
df_bmi_hi['TG-s'] = df_bmi_hi['Triglyceride']>=150.0
df_bmi_hi['Gluc-s'] = df_bmi_hi['Glucose'] >= 100.0
df_bmi_hi['Systolic-s'] = df_bmi_hi['Systolic']>=130.0
df_bmi_hi['Diastolic-s'] = df_bmi_hi['Diastolic'] >=85.0
df_bmi_hi['TG-s'] = df_bmi_hi['TG-s'].astype(int)
df_bmi_hi['Gluc-s'] = df_bmi_hi['Gluc-s'].astype(int)
df_bmi_hi['BP-s'] = df_bmi_hi['Systolic-s'] | df_bmi_hi['Diastolic-s']
df_bmi_hi['BP-s'] = df_bmi_hi['BP-s'].astype(int)
    male_df_bmi_hi = df_bmi_hi[df_bmi_hi['Gender'] == 1].copy()
    female_df_bmi_hi = df_bmi_hi[df_bmi_hi['Gender'] == 2].copy()
male_df_bmi_hi['HDL-C-s'] = male_df_bmi_hi['HDL-C'] < 40.0
female_df_bmi_hi['HDL-C-s'] = female_df_bmi_hi['HDL-C'] < 50.0
female_df_bmi_hi['HDL-C-s'] = female_df_bmi_hi['HDL-C-s'].astype(int)
male_df_bmi_hi['HDL-C-s'] = male_df_bmi_hi['HDL-C-s'].astype(int)
male_df_bmi_hi['MS'] = male_df_bmi_hi[['BP-s', 'Gluc-s',
'TG-s', 'HDL-C-s']].sum(axis=1)
female_df_bmi_hi['MS'] = female_df_bmi_hi[['BP-s', 'Gluc-s',
'TG-s', 'HDL-C-s']].sum(axis=1)
df_bmi_lo['MS'] = 0
return | pd.concat([df_bmi_lo, male_df_bmi_hi, female_df_bmi_hi]) | pandas.concat |
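# Minimal runnable sketch exercising find_ms above. The two-row frame is synthetic and
# its values are invented purely to illustrate the column names the function expects.
if __name__ == "__main__":
    demo = pd.DataFrame({
        'BMI': [22.0, 31.5], 'Systolic': [118.0, 142.0], 'Diastolic': [76.0, 91.0],
        'Triglyceride': [90.0, 180.0], 'HDL-C': [55.0, 35.0], 'Glucose': [88.0, 110.0],
        'Total Cholesterol': [170.0, 220.0], 'Gender': [2, 1],
    })
    print(find_ms(demo)[['BMI', 'MS']])  # MS counts how many of the four criteria are met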
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : Data Mining #
# File : \mymain.py #
# Python : 3.9.1 #
# --------------------------------------------------------------------------- #
# Author : <NAME> #
# Company : nov8.ai #
# Email : <EMAIL> #
# URL : https://github.com/john-james-sf/Data-Mining/ #
# --------------------------------------------------------------------------- #
# Created : Tuesday, March 9th 2021, 12:24:24 am #
# Last Modified : Tuesday, March 9th 2021, 12:24:24 am #
# Modified By : <NAME> (<EMAIL>) #
# --------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2021 nov8.ai #
# =========================================================================== #
# =========================================================================== #
# 1. LIBRARIES #
# =========================================================================== #
#%%
# System and python libraries
from abc import ABC, abstractmethod
import datetime
import glob
import itertools
from joblib import dump, load
import os
import pickle
import time
import uuid
# Manipulating, analyzing and processing data
from collections import OrderedDict
import numpy as np
import pandas as pd
import scipy as sp
from scipy.stats.stats import pearsonr, f_oneway
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer, SimpleImputer
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from sklearn.preprocessing import OneHotEncoder, PowerTransformer
from category_encoders import TargetEncoder, LeaveOneOutEncoder
# Feature and model selection and evaluation
from sklearn.feature_selection import RFECV, SelectKBest
from sklearn.feature_selection import VarianceThreshold, f_regression
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline, Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
# Regression based estimators
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
# Tree-based estimators
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
# Visualizing data
import seaborn as sns
import matplotlib.pyplot as plt
from tabulate import tabulate
# Utilities
from utils import notify, PersistEstimator, comment, print_dict, print_dict_keys
# Data Source
from data import AmesData
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
| pd.set_option('display.width', None) | pandas.set_option |
"""
The SamplesFrame class is an extended Pandas DataFrame, offering additional methods
for validation of hydrochemical data, calculation of relevant ratios and classifications.
"""
import logging
import numpy as np
import pandas as pd
from phreeqpython import PhreeqPython
from hgc.constants import constants
from hgc.constants.constants import mw
@pd.api.extensions.register_dataframe_accessor("hgc")
class SamplesFrame(object):
"""
DataFrame with additional hydrochemistry-specific methods.
All HGC methods and attributes defined in this class are available
in the namespace 'hgc' of the Dataframe.
Examples
--------
To use HGC methods, we always start from a Pandas DataFrame::
import pandas as pd
import hgc
# We start off with an ordinary DataFrame
df = pd.DataFrame({'Cl': [1,2,3], 'Mg': [11,12,13]})
# Since we imported hgc, the HGC-methods become available
# on the DataFrame. This allows for instance to use HGC's
# validation function
df.hgc.is_valid
False
df.hgc.make_valid()
"""
def __init__(self, pandas_obj):
self._obj = pandas_obj
self._pp = PhreeqPython() # bind 1 phreeqpython instance to the dataframe
self._valid_atoms = constants.atoms
self._valid_ions = constants.ions
self._valid_properties = constants.properties
@staticmethod
def _clean_up_phreeqpython_solutions(solutions):
"""
This is a convenience function that removes all
the phreeqpython solution in `solutions` from
memory.
Parameters
----------
solutions : list
python list containing of phreeqpython solutions
"""
_ = [s.forget() for s in solutions]
def _check_validity(self, verbose=True):
"""
Check if the dataframe is a valid HGC dataframe
Notes
-----
Checks are:
1. Are there any columns names in the recognized parameter set?
2. Are there no strings in the recognized columns (except '<' and '>')?
3. Are there negative concentrations in the recognized columns?
"""
obj = self._obj
if verbose:
logging.info("Checking validity of DataFrame for HGC...")
# Define allowed columns that contain concentration values
allowed_concentration_columns = (list(constants.atoms.keys()) +
list(constants.ions.keys()))
hgc_cols = self.hgc_cols
neg_conc_cols = []
invalid_str_cols = []
# Check the columns for (in)valid values
for col in hgc_cols:
# check for only numeric values
if obj[col].dtype in ('object', 'str'):
if not all(obj[col].str.isnumeric()):
invalid_str_cols.append(col)
# check for non-negative concentrations
elif (col in allowed_concentration_columns) and (any(obj[col] < 0)):
neg_conc_cols.append(col)
is_valid = ((len(hgc_cols) > 0) and (len(neg_conc_cols) == 0) and (len(invalid_str_cols) == 0))
if verbose:
logging.info(f"DataFrame contains {len(hgc_cols)} HGC-columns")
if len(hgc_cols) > 0:
logging.info(f"Recognized HGC columns are: {','.join(hgc_cols)}")
logging.info(f'These columns of the dataframe are not used by HGC: {set(obj.columns)-set(hgc_cols)}')
logging.info(f"DataFrame contains {len(neg_conc_cols)} HGC-columns with negative concentrations")
if len(neg_conc_cols) > 0:
logging.info(f"Columns with negative concentrations are: {','.join(neg_conc_cols)}")
logging.info(f"DataFrame contains {len(invalid_str_cols)} HGC-columns with invalid values")
if len(invalid_str_cols) > 0:
logging.info(f"Columns with invalid strings are: {','.join(invalid_str_cols)}. Only '<' and '>' and numeric values are allowed.")
if is_valid:
logging.info("DataFrame is valid")
else:
logging.info("DataFrame is not HGC valid. Use the 'make_valid' method to automatically resolve issues")
return is_valid
@property
def allowed_hgc_columns(self):
""" Returns allowed columns of the hgc SamplesFrame"""
return (list(constants.atoms.keys()) +
list(constants.ions.keys()) +
list(constants.properties.keys()))
@property
def hgc_cols(self):
""" Return the columns that are used by hgc """
return [item for item in self.allowed_hgc_columns if item in self._obj.columns]
@property
def is_valid(self):
""" returns a boolean indicating that the columns used by hgc have
valid values """
is_valid = self._check_validity(verbose=False)
return is_valid
def _make_input_df(self, cols_req):
"""
Make input DataFrame for calculations. This DataFrame contains columns for each required parameter,
which is 0 in case the parameter is not present in original HGC frame. It also
replaces all NaN with 0.
"""
if not self.is_valid:
raise ValueError("Method can only be used on validated HGC frames, use 'make_valid' to validate")
df_in = pd.DataFrame(columns=cols_req)
for col_req in cols_req:
if col_req in self._obj:
df_in[col_req] = self._obj[col_req]
else:
logging.info(f"Column {col_req} is not present in DataFrame, assuming concentration 0 for this compound for now.")
df_in = df_in.fillna(0.0)
return df_in
def _replace_detection_lim(self, rule="half"):
"""
Substitute detection limits according to one of the available
rules. Cells that contain for example '<0.3' or '> 0.3' will be replaced
with 0.15 and 0.45 respectively (in case of rule "half").
Parameters
----------
rule : str, default 'half'
Can be any of "half" or "at"... Rule "half" replaces cells with detection limit for half of the value.
Rule "at" replaces detection limit cells with the exact value of the detection limit.
"""
for col in self.hgc_cols:
if self._obj[col].dtype in ('object', 'str'):
is_below_dl = self._obj[col].str.contains(pat=r'^[<]\s*\d').fillna(False)
is_above_dl = self._obj[col].str.contains(pat=r'^[>]\s*\d').fillna(False)
if rule == 'half':
self._obj.loc[is_below_dl, col] = self._obj.loc[is_below_dl, col].str.extract(r'(\d+)').astype(np.float64) / 2
self._obj.loc[is_above_dl, col] = self._obj.loc[is_above_dl, col].str.extract(r'(\d+)').astype(np.float64) + \
(self._obj.loc[is_above_dl, col].str.extract(r'(\d+)').astype(np.float64) / 2)
elif rule == 'on':
                    self._obj[col] = self._obj[col].str.extract(r'(\d+)').astype(np.float64)
def _replace_negative_concentrations(self):
"""
Replace any negative concentrations with 0.
"""
# Get all columns that represent chemical compounds
# Replace negatives with 0
for col in self.hgc_cols:
self._obj.loc[self._obj[col] < 0, col] = 0
def _cast_datatypes(self):
"""
Convert all HGC-columns to their correct data type.
"""
for col in self.hgc_cols:
if self._obj[col].dtype in ('object', 'str'):
self._obj[col] = pd.to_numeric(self._obj[col], errors='coerce')
def consolidate(self, use_ph='field', use_ec='lab', use_so4='ic', use_o2='field',
use_temp='field', use_alkalinity='alkalinity',
merge_on_na=False, inplace=True):
"""
Consolidate parameters measured with different methods to one single parameter.
Parameters such as EC and pH are frequently measured both in the lab and field,
and SO4 and PO4 are frequently measured both by IC and ICP-OES. Normally we prefer the
field data for EC and pH, but ill calibrated sensors or tough field circumstances may
prevent these readings to be superior to the lab measurement. This method allows for quick
selection of the preferred measurement method for each parameter and select that for further analysis.
For each consolidated parameter HGC adds a new column that is either filled with the lab measurements or the field
measurements. It is also possible to fill it with the preferred method, and fill remaining NaN's with
measurements gathered with the other possible method.
Parameters
----------
use_ph : {'lab', 'field', None}, default 'field'
Which pH to use? Ignored if None.
use_ec : {'lab', 'field', None}, default 'lab'
Which EC to use?
use_so4 : {'ic', 'field', None}, default 'ic'
Which SO4 to use?
use_o2 : {'lab', 'field', None}, default 'field'
Which O2 to use?
use_alkalinity: str, default 'alkalinity'
name of the column to use for alkalinity
merge_on_na : bool, default False
Fill NaN's from one measurement method with measurements from other method.
inplace : bool, default True
Modify SamplesFrame in place. inplace=False is not allowed
Raises
------
ValueError: if one of the `use_` parameters is set to a column that is not in the dataframe
*or* if one of the default parameters is not in the dataframe while it is not
set to None.
"""
if not self.is_valid:
raise ValueError("Method can only be used on validated HGC frames, use 'make_valid' to validate")
if inplace is False:
raise NotImplementedError('inplace=False is not (yet) implemented.')
param_mapping = {
'ph': use_ph,
'ec': use_ec,
'SO4': use_so4,
'O2': use_o2,
'temp': use_temp,
}
if not (use_alkalinity in ['alkalinity', None]):
try:
self._obj['alkalinity'] = self._obj[use_alkalinity]
self._obj.drop(columns=[use_alkalinity], inplace=True)
except KeyError:
raise ValueError(f"Invalid value for argument 'use_alkalinity': " +
f"{use_alkalinity}. It is not a column name of " +
f"the dataframe")
for param, method in param_mapping.items():
if not method:
# user did not specify source, ignore
continue
if not isinstance(method, str):
raise ValueError(f"Invalid method {method} for parameter {param}. Arg should be a string.")
if param in self._obj.columns:
logging.info(f"Parameter {param} already present in DataFrame, ignoring. Remove column manually to enable consolidation.")
continue
source = f"{param}_{method}"
if source in self._obj.columns:
source_val = self._obj[source]
if any(np.isnan(source_val)):
                    raise ValueError(f'NaN value for column {source}')
self._obj[param] = np.NaN
self._obj[param].fillna(source_val, inplace=True)
if merge_on_na:
raise NotImplementedError('merge_on_na is True is not implemented (yet).')
# Drop source columns
suffixes = ('_field', '_lab', '_ic')
cols = [param + suffix for suffix in suffixes]
self._obj.drop(columns=cols, inplace=True, errors='ignore')
else:
raise ValueError(f"Column {source} not present in DataFrame. Use " +
f"use_{param.lower()}=None to explicitly ignore consolidating " +
f"this column.")
def get_bex(self, watertype="G", inplace=True):
"""
Get Base Exchange Index (meq/L). By default this is the BEX without dolomite.
Parameters
----------
watertype : {'G', 'P'}, default 'G'
Watertype (Groundwater or Precipitation)
Returns
-------
pandas.Series
            Series with the Base Exchange Index (meq/L) for each row in the original SamplesFrame.
"""
cols_req = ('Na', 'K', 'Mg', 'Cl')
df = self._make_input_df(cols_req)
df_out = pd.DataFrame()
#TODO: calculate alphas on the fly from SMOW constants
alpha_na = 0.556425145165362 # ratio of Na to Cl in SMOW
alpha_k = 0.0206 # ratio of K to Cl in SMOW
alpha_mg = 0.0667508204998738 # ratio of Mg to Cl in SMOW
only_p_and_t = True
if watertype == "P" and only_p_and_t:
df_out['Na_nonmarine'] = df['Na'] - 1.7972 * alpha_na*df['Na']
df_out['K_nonmarine'] = df['K'] - 1.7972 * alpha_k*df['Na']
df_out['Mg_nonmarine'] = df['Mg'] - 1.7972 * alpha_mg*df['Na']
else:
df_out['Na_nonmarine'] = df['Na'] - alpha_na*df['Cl']
df_out['K_nonmarine'] = df['K'] - alpha_k*df['Cl']
df_out['Mg_nonmarine'] = df['Mg'] - alpha_mg*df['Cl']
df_out['bex'] = df_out['Na_nonmarine']/22.99 + df_out['K_nonmarine']/39.098 + df_out['Mg_nonmarine']/12.153
if inplace:
self._obj['bex'] = df_out['bex']
else:
return df_out['bex']
def get_ratios(self, inplace=True):
"""
Calculate common hydrochemical ratios, will ignore any ratios
in case their constituents are not present in the SamplesFrame.
It is assumed that only HCO<sub>3</sub><sup>-</sup> contributes to
the alkalinity.
Notes
-----
HGC will attempt to calculate the following ratios:
* Cl/Br
* Cl/Na
* Cl/Mg
* Ca/Sr
* Fe/Mn
* HCO3/Ca
* 2H/18O
* SUVA: UVA254/DOC
* HCO3/Sum of anions
* HCO3/Sum of Ca and Mg
* MONC
* COD/DOC
Returns
-------
pandas.DataFrame
DataFrame with computed ratios.
"""
if not self.is_valid:
raise ValueError("Method can only be used on validated HGC frames, use 'make_valid' to validate")
df_ratios = pd.DataFrame()
ratios = {
'cl_to_br': ['Cl', 'Br'],
'cl_to_na': ['Cl', 'Na'],
            'cl_to_mg': ['Cl', 'Mg'],
'ca_to_sr': ['Ca', 'Sr'],
'fe_to_mn': ['Fe', 'Mn'],
'hco3_to_ca': ['alkalinity', 'Ca'],
'2h_to_18o': ['2H', '18O'],
'suva': ['uva254', 'doc'],
'hco3_to_sum_anions': ['alkalinity', 'sum_anions'],
'hco3_to_ca_and_mg': ['alkalinity', 'Ca', 'Mg'],
'monc': ['cod', 'Fe', 'NO2', 'doc'],
'cod_to_doc': ['cod', 'Fe', 'NO2', 'doc']
}
for ratio, constituents in ratios.items():
has_cols = [const in self._obj.columns for const in constituents]
if all(has_cols):
if ratio == 'hco3_to_sum_anions':
df_ratios[ratio] = self._obj['alkalinity'] / self.get_sum_anions(inplace=False)
elif ratio == 'hco3_to_ca_and_mg':
df_ratios[ratio] = self._obj['alkalinity'] / (self._obj['Ca'] + self._obj['Mg'])
elif ratio == 'monc':
df_ratios[ratio] = 4 - 1.5 * (self._obj['cod'] - 0.143 * self._obj['Fe'] - 0.348 * self._obj['NO2']) / (3.95 * self._obj['doc'])
elif ratio == 'cod_to_doc':
df_ratios[ratio] = ((0.2532 * self._obj['cod'] - 0.143 * self._obj['Fe'] - 0.348 * self._obj['NO2']) / 32) / (self._obj['doc'] / 12)
else:
df_ratios[ratio] = self._obj[constituents[0]] / self._obj[constituents[1]]
else:
missing_cols = [i for (i, v) in zip(constituents, has_cols) if not v]
logging.info(f"Cannot calculate ratio {ratio} since columns {','.join(missing_cols)} are not present.")
if inplace:
self._obj[df_ratios.columns] = df_ratios
else:
return df_ratios
def get_stuyfzand_water_type(self, inplace=True):
"""
Get Stuyfzand water type. This water type classification contains
5 components: Salinity, Alkalinity, Dominant Cation, Dominant Anion and Base Exchange Index.
This results in a classification such as for example 'F3CaMix+'.
It is assumed that only HCO<sub>3</sub><sup>-</sup> contributes to
the alkalinity.
Returns
-------
pandas.Series
Series with Stuyfzand water type of each row in original SamplesFrame.
"""
if not self.is_valid:
raise ValueError("Method can only be used on validated HGC frames, use 'make_valid' to validate")
# Create input dataframe containing all required columns
# Inherit column values from HGC frame, assume 0 if column
# is not present
cols_req = ('Al', 'Ba', 'Br', 'Ca', 'Cl', 'Co', 'Cu', 'doc', 'F', 'Fe', 'alkalinity', 'K', 'Li', 'Mg', 'Mn', 'Na', 'Ni', 'NH4', 'NO2', 'NO3', 'Pb', 'PO4', 'ph', 'SO4', 'Sr', 'Zn')
df_in = self._make_input_df(cols_req)
df_out = pd.DataFrame(index=df_in.index)
# Salinity
df_out['swt_s'] = 'G'
df_out.loc[df_in['Cl'] > 5, 'swt_s'] = 'g'
df_out.loc[df_in['Cl'] > 30, 'swt_s'] = 'F'
df_out.loc[df_in['Cl'] > 150, 'swt_s'] = 'f'
df_out.loc[df_in['Cl'] > 300, 'swt_s'] = 'B'
df_out.loc[df_in['Cl'] > 1000, 'swt_s'] = 'b'
df_out.loc[df_in['Cl'] > 10000, 'swt_s'] = 'S'
df_out.loc[df_in['Cl'] > 20000, 'swt_s'] = 'H'
#Alkalinity
df_out['swt_a'] = '*'
df_out.loc[df_in['alkalinity'] > 31, 'swt_a'] = '0'
df_out.loc[df_in['alkalinity'] > 61, 'swt_a'] = '1'
df_out.loc[df_in['alkalinity'] > 122, 'swt_a'] = '2'
df_out.loc[df_in['alkalinity'] > 244, 'swt_a'] = '3'
df_out.loc[df_in['alkalinity'] > 488, 'swt_a'] = '4'
df_out.loc[df_in['alkalinity'] > 976, 'swt_a'] = '5'
df_out.loc[df_in['alkalinity'] > 1953, 'swt_a'] = '6'
df_out.loc[df_in['alkalinity'] > 3905, 'swt_a'] = '7'
#Dominant cation
s_sum_cations = self.get_sum_cations(inplace=False)
df_out['swt_domcat'] = self._get_dominant_anions_of_df(df_in)
# Dominant anion
s_sum_anions = self.get_sum_anions(inplace=False)
cl_mmol = df_in.Cl/mw('Cl')
hco3_mmol = df_in.alkalinity/(mw('H') + mw('C') + 3*mw('O'))
no3_mmol = df_in.NO3/(mw('N') + 3*mw('O'))
so4_mmol = df_in.SO4/(mw('S') + 4*mw('O'))
# TODO: consider renaming doman to dom_an or dom_anion
is_doman_cl = (cl_mmol > s_sum_anions/2)
df_out.loc[is_doman_cl, 'swt_doman'] = "Cl"
is_doman_hco3 = ~is_doman_cl & (hco3_mmol > s_sum_anions/2)
df_out.loc[is_doman_hco3, 'swt_doman'] = "HCO3"
is_doman_so4_or_no3 = ~is_doman_cl & ~is_doman_hco3 & (2*so4_mmol + no3_mmol > s_sum_anions/2)
is_doman_so4 = (2*so4_mmol > no3_mmol)
df_out.loc[is_doman_so4_or_no3 & is_doman_so4, 'swt_doman'] = "SO4"
df_out.loc[is_doman_so4_or_no3 & ~is_doman_so4, 'swt_doman'] = "NO3"
is_mix = ~is_doman_cl & ~is_doman_hco3 & ~is_doman_so4_or_no3
df_out.loc[is_mix, 'swt_doman'] = "Mix"
# Base Exchange Index
s_bex = self.get_bex(inplace=False)
threshold1 = 0.5 + 0.02*cl_mmol
threshold2 = -0.5-0.02*cl_mmol
is_plus = (s_bex > threshold1) & (s_bex > 1.5*(s_sum_cations-s_sum_anions))
is_minus = ~is_plus & (s_bex < threshold2) & (s_bex < 1.5*(s_sum_cations-s_sum_anions))
is_neutral = (~is_plus & ~is_minus &
(s_bex > threshold2) & (s_bex < threshold1) &
((s_sum_cations == s_sum_anions) |
((abs(s_bex + threshold1*(s_sum_cations-s_sum_anions))/abs(s_sum_cations-s_sum_anions))
> abs(1.5*(s_sum_cations-s_sum_anions)))
)
)
is_none = ~is_plus & ~is_minus & ~is_neutral
df_out.loc[is_plus, 'swt_bex'] = '+'
df_out.loc[is_minus, 'swt_bex'] = '-'
df_out.loc[is_neutral, 'swt_bex'] = 'o'
df_out.loc[is_none, 'swt_bex'] = ''
#Putting it all together
df_out['swt'] = df_out['swt_s'].str.cat(df_out[['swt_a', 'swt_domcat', 'swt_doman', 'swt_bex']])
if inplace:
self._obj['water_type'] = df_out['swt']
else:
return df_out['swt']
def _get_dominant_anions_of_df(self, df_in):
""" calculates the dominant anions of the dataframe df_in """
s_sum_cations = self.get_sum_cations(inplace=False)
cols_req = ('ph', 'Na', 'K', 'Ca', 'Mg', 'Fe', 'Mn', 'NH4', 'Al', 'Ba', 'Co', 'Cu', 'Li', 'Ni', 'Pb', 'Sr', 'Zn')
df_in = df_in.hgc._make_input_df(cols_req)
na_mmol = df_in.Na/mw('Na')
k_mmol = df_in.K/mw('K')
nh4_mmol = df_in.NH4/(mw('N')+4*mw('H'))
ca_mmol = df_in.Ca/mw('Ca')
mg_mmol = df_in.Mg/mw('Mg')
fe_mmol = df_in.Fe/mw('Fe')
mn_mmol = df_in.Mn/mw('Mn')
h_mmol = (10**-df_in.ph) / 1000 # ph -> mol/L -> mmol/L
al_mmol = 1000. * df_in.Al/mw('Al') # ug/L ->mg/L -> mmol/L
# - Na, K, NH4
# select rows that do not have Na, K or NH4 as dominant cation
is_no_domcat_na_nh4_k = (na_mmol + k_mmol + nh4_mmol) < (s_sum_cations/2)
is_domcat_nh4 = ~is_no_domcat_na_nh4_k & (nh4_mmol > (na_mmol + k_mmol))
is_domcat_na = ~is_no_domcat_na_nh4_k & ~is_domcat_nh4 & (na_mmol > k_mmol)
is_domcat_k = ~is_no_domcat_na_nh4_k & ~is_domcat_nh4 & ~is_domcat_na
# abbreviation
is_domcat_na_nh4_k = is_domcat_na | is_domcat_nh4 | is_domcat_k
# - Ca, Mg
is_domcat_ca_mg = (
# not na or nh4 or k dominant
~is_domcat_na_nh4_k & (
# should be any of Ca or Mg available
((ca_mmol > 0) | (mg_mmol > 0)) |
# should be more of Ca or Mg then sum of H, Fe, Al, Mn
# (compensated for charge)
(2*ca_mmol+2*mg_mmol < h_mmol+3*al_mmol+2*fe_mmol+2*mn_mmol)))
is_domcat_ca = is_domcat_ca_mg & (ca_mmol >= mg_mmol)
is_domcat_mg = is_domcat_ca_mg & (ca_mmol < mg_mmol)
# - H, Al, Fe, Mn
# IF(IF(h_mmol+3*IF(al_mmol)>2*(fe_mol+mn_mol),IF(h_mmol>3*al_mmol,"H","Al"),IF(fe_mol>mn_mol,"Fe","Mn")))
is_domcat_fe_mn_al_h = (
# not na, nh4, k, ca or Mg dominant
~is_domcat_na_nh4_k & ~is_domcat_ca & ~is_domcat_mg & (
# should be any of Fe, Mn, Al or H available
(fe_mmol > 0) | (mn_mmol > 0) | (h_mmol > 0) | (al_mmol > 0) # |
# # should be more of Ca or Mg then sum of H, Fe, Al, Mn
# # (compensated for charge)
# (2*ca_mmol+2*mg_mmol < h_mmol+3*al_mmol+2*fe_mmol+2*mn_mmol)
)
)
is_domcat_h_al=is_domcat_fe_mn_al_h & ((h_mmol + 3*al_mmol) > (2*fe_mmol + 2*mn_mmol))
is_domcat_h = is_domcat_h_al & (h_mmol > al_mmol)
is_domcat_al = is_domcat_h_al & (al_mmol > h_mmol)
is_domcat_fe_mn = is_domcat_fe_mn_al_h & ~is_domcat_h_al
is_domcat_fe = is_domcat_fe_mn & (fe_mmol > mn_mmol)
is_domcat_mn = is_domcat_fe_mn & (mn_mmol > fe_mmol)
sr_out = pd.Series(index=df_in.index, dtype='object')
sr_out[:] = ""
sr_out[is_domcat_nh4] = "NH4"
sr_out[is_domcat_na] = "Na"
sr_out[is_domcat_k] = "K"
sr_out[is_domcat_ca] = 'Ca'
sr_out[is_domcat_mg] = 'Mg'
sr_out[is_domcat_fe] = 'Fe'
sr_out[is_domcat_mn] = 'Mn'
sr_out[is_domcat_al] = 'Al'
sr_out[is_domcat_h] = 'H'
return sr_out
def get_dominant_anions(self, inplace=True):
""" returns a series with the dominant anions."""
if inplace:
self._obj['dominant_anion'] = self._get_dominant_anions_of_df(self._obj)
else:
return self._get_dominant_anions_of_df(self._obj)
def fillna_concentrations(self, how="phreeqc"):
"""
Calculate missing concentrations based on the charge balance.
Parameters
----------
how : {'phreeqc', 'analytic'}, default 'phreeqc'
Method to compute missing concentrations.
"""
raise NotImplementedError()
def fillna_ec(self, use_phreeqc=True):
"""
Calculate missing Electrical Conductivity measurements using
known anions and cations.
"""
if use_phreeqc:
# use get_specific_conductance method on
# all N/A rows of EC columns
raise NotImplementedError()
else:
raise NotImplementedError()
def make_valid(self):
"""
Try to convert the DataFrame into a valid HGC-SamplesFrame.
"""
# Conduct conversions here. If they fail, raise error (e.g. when not a single valid column is present)
# Important: order is important, first convert strings to double, then replace negative concentrations
self._replace_detection_lim()
self._cast_datatypes()
self._replace_negative_concentrations()
self._check_validity(verbose=True)
def get_sum_anions(self, inplace=True):
"""
Calculate sum of anions according to the Stuyfzand method.
It is assumed that only HCO<sub>3</sub><sup>-</sup> contributes to
the alkalinity.
Returns
-------
pandas.Series
            Series with sum of anions for each row in SamplesFrame.
"""
cols_req = ('Br', 'Cl', 'doc', 'F', 'alkalinity', 'NO2', 'NO3', 'PO4', 'SO4', 'ph')
df_in = self._make_input_df(cols_req)
s_sum_anions = | pd.Series(index=df_in.index,dtype='float64') | pandas.Series |
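# --- Illustrative aside (standalone usage sketch) ---------------------------------
# Importing this module registers the 'hgc' accessor on DataFrames. The concentrations
# below (mg/L) are invented for illustration, and the sketch assumes the remaining
# SamplesFrame methods referenced above (e.g. get_sum_cations) are available.
if __name__ == "__main__":
    demo = pd.DataFrame({'Cl': [55.0], 'Na': [32.0], 'Ca': [80.0], 'alkalinity': [180.0],
                         'SO4': [25.0], 'NO3': [2.0], 'ph': [7.3]})
    demo.hgc.make_valid()
    print(demo.hgc.get_ratios(inplace=False))                 # e.g. cl_to_na, hco3_to_ca
    print(demo.hgc.get_stuyfzand_water_type(inplace=False))   # codes like 'F3CaMix+'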
import tensorflow as tf
import tensorflow_graphics.geometry.transformation as tfg
import numpy as np
import pandas as pd
import random
import datetime
from tensorflow.keras import Input
from typing import Generator, Tuple, Dict
from pandas import DataFrame
from string import Template
from loguru import logger
from app.resources.constants import (
POS_NET_LOSS,
POS_NET_OPTIMIZER,
POS_NET_METRICS
)
from app.core.config import (
POS_NET_EPOCHS,
POS_NET_LEARNING_RATE,
SEED
)
from app.nn_models.nn_position import build_position
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)
class PosTrainer(object):
def __init__(self, building_num: int, df: DataFrame, is_reduced: bool = False):
"""Constructor for PosTrainer
Args:
building_num (int): the building number
            df (DataFrame): a data frame containing the data points.
is_reduced (bool, optional): used to do training on a reduced sample. Defaults to False.
"""
self.building_num = building_num
if is_reduced:
length = int(df.shape[0] / 12)
self.df = df[:length]
else:
self.df = df
self._preprocess_data(self.df)
def _preprocess_data(self, df: DataFrame):
"""_preprocess_data will take data frame and apply quaternion
rotations to the accelerometer and gyroscope data.
Args:
df (DataFrame): a data frame containing the orientation and position data.
"""
acc = tf.convert_to_tensor(df[["iphoneAccX", "iphoneAccY", "iphoneAccZ"]])
gyro = tf.convert_to_tensor(df[["iphoneGyroX", "iphoneGyroY", "iphoneGyroZ"]])
quats = tf.convert_to_tensor(df[["orientX", "orientY", "orientZ", "orientW"]])
#print(acc[:5,:])
new_acc = tfg.quaternion.rotate(acc, quats).numpy()
new_gyro = tfg.quaternion.rotate(gyro, quats).numpy()
#print(new_acc[:5,:])
new_df_acc = | pd.DataFrame(data=new_acc, columns=["iphoneAccX", "iphoneAccY", "iphoneAccZ"]) | pandas.DataFrame |
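# --- Illustrative aside (standalone) -----------------------------------------------
# Quick check of the rotation step used in _preprocess_data above: rotating a vector
# by the identity quaternion (x, y, z, w) = (0, 0, 0, 1) must return it unchanged.
import tensorflow as tf
import tensorflow_graphics.geometry.transformation as tfg

vec = tf.constant([[1.0, 2.0, 3.0]])
identity_q = tf.constant([[0.0, 0.0, 0.0, 1.0]])
print(tfg.quaternion.rotate(vec, identity_q).numpy())   # ~[[1. 2. 3.]]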
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: rz
@email:
"""
#%% imports
import itertools, time, copy
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
import Levenshtein as Lev
from sklearn import metrics
from .etdata import ETData
from .utils import convertToOneHot
#%% setup parameters
#%% code
def calc_k(gt, pr):
'''
Handles error if all samples are from the same class
'''
k = 1. if (gt == pr).all() else metrics.cohen_kappa_score(gt, pr)
return k
def calc_f1(gt, pr):
f1 = 1. if (gt == pr).all() else metrics.f1_score(gt, pr)
return f1
def calc_KE(etdata_gt, etdata_pr):
#calculate event level matches
gt_evt_index = [ind for i, n in enumerate(np.diff(etdata_gt.evt[['s', 'e']]).squeeze()) for ind in itertools.repeat(i, n)]
pr_evt_index = [ind for i, n in enumerate(np.diff(etdata_pr.evt[['s', 'e']]).squeeze()) for ind in itertools.repeat(i, n)]
overlap = np.vstack((gt_evt_index, pr_evt_index)).T
overlap_matrix = [_k + [len(list(_g)), False, False] for _k, _g in itertools.groupby(overlap.tolist())]
overlap_matrix = pd.DataFrame(overlap_matrix, columns=['gt', 'pr', 'l', 'matched', 'selected'])
overlap_matrix['gt_evt'] = etdata_gt.evt.loc[overlap_matrix['gt'], 'evt'].values
overlap_matrix['pr_evt'] = etdata_pr.evt.loc[overlap_matrix['pr'], 'evt'].values
while not(overlap_matrix['matched'].all()):
#select longest overlap
ind = overlap_matrix.loc[~overlap_matrix['matched'], 'l'].argmax()
overlap_matrix.loc[ind, ['selected']]=True
mask_matched = (overlap_matrix['gt']==overlap_matrix.loc[ind, 'gt']).values |\
(overlap_matrix['pr']==overlap_matrix.loc[ind, 'pr']).values
overlap_matrix.loc[mask_matched, 'matched'] = True
overlap_events = overlap_matrix.loc[overlap_matrix['selected'], ['gt', 'pr', 'gt_evt', 'pr_evt']]
#sanity check
evt_gt = etdata_gt.evt.loc[overlap_events['gt'], 'evt']
evt_pr = etdata_pr.evt.loc[overlap_events['pr'], 'evt']
#assert (evt_gt.values == evt_pr.values).all()
#add not matched events
set_gt = set(etdata_gt.evt.index.values) - set(evt_gt.index.values)
set_pr = set(etdata_pr.evt.index.values) - set(evt_pr.index.values)
evt_gt = | pd.concat((evt_gt, etdata_gt.evt.loc[set_gt, 'evt'])) | pandas.concat |
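# --- Illustrative aside (standalone) -----------------------------------------------
# Why calc_k and calc_f1 above short-circuit identical label vectors to 1.0:
# cohen_kappa_score is undefined (zero chance-corrected denominator) when both
# raters use only a single class.
if __name__ == "__main__":
    gt = np.array([1, 1, 1, 1])
    pr = np.array([1, 1, 1, 1])
    print(calc_k(gt, pr), calc_f1(gt, pr))   # -> 1.0 1.0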
#!/usr/bin/env python
# coding: utf-8
import argparse
from fastai.vision import *
from tqdm import tqdm
from pathlib import Path
import pandas as pd
import os
import sys
from fastai.callbacks import CSVLogger
# suppress annoying and irrelevant warning, see https://forums.fast.ai/t/warnings-when-trying-to-make-an-imagedatabunch/56323/9
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="torch.nn.functional")
parser = argparse.ArgumentParser()
parser.add_argument('--size', dest='size', help='scale images to size', default=256, type=int)
parser.add_argument('--bs', dest='bs', help='batch size', default=32, type=int)
parser.add_argument('--cuda_device', dest='cuda_device', help='cuda device index', default=0, type=int)
parser.add_argument('--confidence', dest='confidence', help='confidence cutoff in percent', default=10, type=int)
parser.add_argument('--model', dest='model', help='model, one of resnet34, resnet50, vgg16', default='resnet34', type=str)
parser.add_argument('--tfms', dest='tfms', help='transformations, one of the presets no, normal, extreme', default='normal', type=str)
parser.add_argument('--loss', dest='loss', help='loss function, one of the presets ce, focal, softdice', default='ce', type=str)
args = parser.parse_args()
our_models = {"resnet34": models.resnet34, "resnet50": models.resnet50, "vgg16": models.vgg16_bn}
our_tfms = {
"no": None,
"normal": get_transforms(do_flip=False,max_rotate=20,max_lighting=.4,max_zoom=1.2),
"extreme": get_transforms(do_flip=True,max_rotate=90,max_lighting=.4,max_zoom=1.2)
}
if args.loss not in ["ce", "focal", "softdice"]:
sys.exit("Unknown loss function")
size = args.size
bs = args.bs
cuda_device = args.cuda_device
confidence_cutoff = args.confidence/100
model = our_models[args.model]
tfms = our_tfms[args.tfms]
name = "noPretrain_{}_{}percent_size{}_{}Tfms_{}Loss".format(args.model,args.confidence,size,args.tfms,args.loss)
torch.cuda.set_device(cuda_device)
os.mkdir(name)
get_y_fn = lambda x: str(x).replace("images", "masks_2class")
imgList = pd.read_csv("nifti/image_list_filtered_score.tsv", sep="\t")
filteredList = imgList[imgList.score<=confidence_cutoff]
src = (SegmentationItemList.from_df(filteredList,path="nifti",cols="file")
.split_from_df(col='is_val')
.label_from_func(get_y_fn, classes=np.array(["background","left_ventricle","myocardium"])))
data = (src.transform(tfms,size=size,padding_mode="zeros",resize_method=ResizeMethod.PAD,tfm_y=True)
.databunch(bs=bs)
.normalize(imagenet_stats))
def acc_seg(input, target):
target = target.squeeze(1)
return (input.argmax(dim=1)==target).float().mean()
def multi_dice(input:Tensor, targs:Tensor, class_id=0, inverse=False)->Rank0Tensor:
n = targs.shape[0]
input = input.argmax(dim=1).view(n,-1)
# replace all with class_id with 1 all else with 0 to have binary case
output = (input == class_id).float()
# same for targs
targs = (targs.view(n,-1) == class_id).float()
if inverse:
output = 1 - output
targs = 1 - targs
intersect = (output * targs).sum(dim=1).float()
union = (output+targs).sum(dim=1).float()
res = 2. * intersect / union
res[torch.isnan(res)] = 1
return res.mean()
dice0inv = partial(multi_dice, class_id=0, inverse=True)
dice1 = partial(multi_dice, class_id=1)
dice2 = partial(multi_dice, class_id=2)
dice0inv.__name__ = 'diceComb'
dice1.__name__ = 'diceLV'
dice2.__name__ = 'diceMY'
class SoftDiceLoss(nn.Module):
'''
WARNING: this implementation does not work in our case, assumes one hot and channel last - need to restructure or re-write
Soft dice loss calculation for arbitrary batch size, number of classes, and number of spatial dimensions.
Assumes the `channels_last` format.
# Arguments
targets: b x X x Y( x Z...) x c One hot encoding of ground truth
inputs: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax)
epsilon: Used for numerical stability to avoid divide by zero errors
# References
https://www.jeremyjordan.me/semantic-segmentation/ (https://gist.github.com/jeremyjordan/9ea3032a32909f71dd2ab35fe3bacc08#file-soft_dice_loss-py)
V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation
https://arxiv.org/abs/1606.04797
More details on Dice loss formulation
https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72)
Adapted from https://github.com/Lasagne/Recipes/issues/99#issuecomment-347775022
'''
def __init__(self, epsilon=1e-8):
super().__init__()
self.epsilon = epsilon
def forward(self, inputs, targets):
# skip the batch and class axis for calculating Dice score
print(inputs.shape)
print(inputs)
print(targets.shape)
print(targets)
axes = tuple(range(1, len(inputs.shape)-1))
numerator = 2. * np.sum(inputs * targets, axes)
denominator = np.sum(np.square(inputs) + np.square(targets), axes)
return 1 - np.mean(numerator / (denominator + self.epsilon)) # average over classes and batch
# adjusted from https://forums.fast.ai/t/loss-function-of-unet-learner-flattenedloss-of-crossentropyloss/51605
class FocalLoss(nn.Module):
def __init__(self, gamma=2., reduction='mean'):
super().__init__()
self.gamma = gamma
self.reduction = reduction
def forward(self, inputs, targets):
CE_loss = CrossEntropyFlat(axis=1,reduction='none')(inputs, targets)
pt = torch.exp(-CE_loss)
F_loss = ((1 - pt)**self.gamma) * CE_loss
if self.reduction == 'sum':
return F_loss.sum()
elif self.reduction == 'mean':
return F_loss.mean()
learn = unet_learner(data, model, pretrained=False, metrics=[acc_seg,dice0inv,dice1,dice2], callback_fns=[partial(CSVLogger, append=True, filename="train_log")], path=name)
if args.loss == "focal":
learn.loss_func = FocalLoss()
if args.loss == "softdice":
learn.loss_func = SoftDiceLoss()
learn.unfreeze()
lr_find(learn)
fig = learn.recorder.plot(return_fig=True)
fig.savefig(name+"/lrfind_unfreeze.png")
lr=1e-5
learn.fit_one_cycle(5, lr)
learn.save(name+'-unfreeze-5')
fig = learn.recorder.plot_losses(return_fig=True)
fig.savefig(name+"/unfreeze-5.png")
learn.fit_one_cycle(10, lr)
learn.save(name+'-unfreeze-15')
fig = learn.recorder.plot_losses(return_fig=True)
fig.savefig(name+"/unfreeze-15.png")
learn.fit_one_cycle(15, lr)
learn.save(name+'-unfreeze-30')
fig = learn.recorder.plot_losses(return_fig=True)
fig.savefig(name+"/unfreeze-30.png")
learn.export('model.pkl')
# Make Predictions
fullImgList = | pd.read_csv("nifti/image_list.tsv", sep="\t", header=None, names=["pid","file"]) | pandas.read_csv |
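# --- Illustrative aside (standalone) -----------------------------------------------
# Sanity check of multi_dice above: a prediction whose argmax matches the target
# exactly must score a Dice of 1.0 for every class. Tensor values are made up.
if __name__ == "__main__":
    logits = torch.tensor([[[[10., -10.], [-10., 10.]],     # class-0 score map
                            [[-10., 10.], [10., -10.]]]])   # class-1 score map
    target = torch.tensor([[[0, 1], [1, 0]]])
    print(multi_dice(logits, target, class_id=1))           # -> tensor(1.)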
import pandas as pd
import os.path
import datetime
data_path=os.path.dirname(__file__)+'/'
#combined_df = pd.read_csv(data_path+"combined_duty-b2nb_nb2b.csv")
combined_df = pd.read_csv(data_path+"combined_duty-b2b.csv")
#combined_df=combined_df.head(91)
j=1
final_df=pd.DataFrame()
combined_df["pairID"]=""
toggle=True
hour_8=datetime.timedelta(hours=+8)
temp1=""
combined_df['OrgUTC'] = | pd.to_datetime(combined_df['OrgUTC']) | pandas.to_datetime |
import pymongo
from PyQt5 import QtCore
import pandas as pd
import time
from bson.objectid import ObjectId
from nspyre.utils import get_mongo_client
import traceback
class DropEvent():
"""Represents a drop of a collection in a certain database"""
def __init__(self, db, col):
self.db, self.col = db, col
def modify_df(df, change):
# print(change)
if change['operationType'] == 'drop':
return DropEvent(change['ns']['db'], change['ns']['coll']), None
key = change['documentKey']['_id']
if change['operationType'] == 'update':
for k, val in change['updateDescription']['updatedFields'].items():
ks = k.split('.')
if len(ks) == 1:
df.loc[key, k] = val
elif len(ks) == 2:
# TODO: Figure out a more reliable way of doing this
if ks[1].isdigit():
# Assume an array here... Will see if we can get away with this
df.loc[key,ks[0]][int(ks[1])] = val
else:
df.loc[key,ks[0]][ks[1]] = val
else:
raise NotImplementedError('Cannot use a dept of more then 2 in the documents')
elif change['operationType'] == 'insert':
doc = change['fullDocument']
_id = doc.pop('_id')
s = pd.Series(doc, name=_id)
df = df.append(s)
else:
raise NotImplementedError('Cannot modify df with operationType: {}'.format(change['operationType']))
return df, df.loc[key]
class Mongo_Listenner(QtCore.QThread):
"""Qt Thread which monitors for changes to qither a collection or a database and emits a signal when something happens"""
updated = QtCore.pyqtSignal(object)
def __init__(self, db_name, col_name=None, mongodb_addr=None):
super().__init__()
self.db_name = db_name
self.col_name = col_name
self.mongodb_addr = mongodb_addr
self.exit_flag = False
def run(self):
self.exit_flag = False
# Connect
client = get_mongo_client(self.mongodb_addr)
mongo_obj = client[self.db_name]
if not self.col_name is None:
mongo_obj = mongo_obj[self.col_name]
with mongo_obj.watch() as stream:
while stream.alive:
doc = stream.try_next()
if doc is not None:
self.updated.emit(doc)
if self.exit_flag:
return
if not self.exit_flag:
self.run() #This takes care of the invalidate event which stops the change_stream cursor
class Synched_Mongo_Collection(QtCore.QObject):
updated_row = QtCore.pyqtSignal(object) # Emit the updated row
# mutex = QtCore.QMutex()
def __init__(self, db_name, col_name, mongodb_addr=None):
super().__init__()
self.watcher = Mongo_Listenner(db_name, col_name=col_name, mongodb_addr=mongodb_addr)
self.col = get_mongo_client(mongodb_addr)[db_name][col_name]
self.refresh_all()
self.watcher.start()
self.watcher.updated.connect(self._update_df)
def refresh_all(self):
col = list(self.col.find())
if col == []:
self.df = None
else:
self.df = pd.DataFrame(col)
self.df.set_index('_id', inplace=True)
def get_df(self):
# self.mutex.lock()
return self.df
@QtCore.pyqtSlot(object)
def _update_df(self, change):
        if self.df is None:
self.refresh_all()
# print(change)
try:
self.df, row = modify_df(self.df, change)
self.updated_row.emit(row)
except:
traceback.print_exc()
print('Refreshing the entire collection')
self.refresh_all()
# self.refresh_all() #I will make this a little more efficient later on
def __del__(self):
self.watcher.exit_flag = True
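def _synched_collection_example():
    # Illustrative sketch only (the database/collection names are hypothetical):
    # mirror a collection into a DataFrame and react to row updates. Assumes a
    # reachable MongoDB replica set (change streams require one) and a running
    # Qt event loop so the listener thread's signals get delivered.
    synched = Synched_Mongo_Collection('my_database', 'my_collection')
    synched.updated_row.connect(lambda row: print('row changed:', row.name))
    return synched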
class Synched_Mongo_Database(QtCore.QObject):
updated_row = QtCore.pyqtSignal(object, object) # Emit the updated row in the format (col_name, row)
col_added = QtCore.pyqtSignal(object) # Emit the name of the collection which was added
col_dropped = QtCore.pyqtSignal(object) # Emit the name of the collection which was dropped
db_dropped = QtCore.pyqtSignal() #Emitted when the database is dropped
def __init__(self, db_name, mongodb_addr=None):
super().__init__()
self.watcher = Mongo_Listenner(db_name, col_name=None, mongodb_addr=mongodb_addr)
self.db = get_mongo_client(mongodb_addr)[db_name]
self.refresh_all()
self.watcher.start()
self.watcher.updated.connect(self._update)
def refresh_all(self):
self.dfs = dict()
for col in self.db.list_collection_names():
col_data = list(self.db[col].find())
if not col_data == []:
self.dfs[col] = pd.DataFrame(col_data)
self.dfs[col].set_index('_id', inplace=True)
    def get_df(self, col_name, timeout=0.1):
        # Wait up to `timeout` seconds for the collection to be synched
        # locally, then return its DataFrame (KeyError if it never appears).
        t = time.time()
        while time.time() - t < timeout:
            if col_name in self.dfs:
                return self.dfs[col_name]
        return self.dfs[col_name]
@QtCore.pyqtSlot(object)
def _update(self, change):
# print(change)
try:
if change['operationType'] == 'dropDatabase':
self.dfs = dict()
self.db_dropped.emit()
return
elif change['operationType'] == 'invalidate':
return
col = change['ns']['coll']
if col in self.dfs:
df, row = modify_df(self.dfs[col], change)
if isinstance(df, DropEvent):
self.dfs.pop(col)
self.col_dropped.emit(col)
return
self.dfs[col] = df
self.updated_row.emit(col, row)
else:
doc = change['fullDocument']
row = | pd.Series(doc) | pandas.Series |
# -*- coding: utf-8 -*-
"""Primary wepy simulation database driver and access API using the
HDF5 format.
The HDF5 Format Specification
=============================
As part of the wepy framework this module provides a fully-featured
API for creating and accessing data generated in weighted ensemble
simulations run with wepy.
The need for a special purpose format is many-fold but primarily it is
the nonlinear branching structure of walker trajectories coupled with
weights.
That is for standard simulations data is organized as independent
linear trajectories of frames each related linearly to the one before
it and after it.
In weighted ensemble due to the resampling (i.e. cloning and merging)
of walkers, a single frame may have multiple 'child' frames.
This is the primary motivation for this format.
However, in practice it solves several other issues and itself is a
more general and flexible format than for just weighted ensemble
simulations.
Concretely the WepyHDF5 format is simply an informally described
schema that is commensurable with the HDF5 constructs of hierarchical
groups (similar to unix filesystem directories) arranged as a tree
with datasets as the leaves.
The hierarchy is fairly deep and so we will progress downwards from
the top and describe each broad section in turn breaking it down when
necessary.
Header
------
The items right under the root of the tree are:
- runs
- topology
- _settings
The first item 'runs' is itself a group that contains all of the
primary data from simulations. In WepyHDF5 the run is the unit
dataset. All data internal to a run is self contained. That is for
multiple dependent trajectories (e.g. from cloning and merging) all
exist within a single run.
This excludes metadata-like things that may be needed for interpreting
this data, such as the molecular topology that imposes structure over
a frame of atom positions. This information is placed in the
'topology' item.
The topology field has no specified internal structure at this
time. However, with the current implementation of the WepyHDF5Reporter
(which is the principal implementation of generating a WepyHDF5
object/file from simulations) this is simply a string dataset. This
string dataset should be a JSON compliant string. The format of which
is specified elsewhere and was borrowed from the mdtraj library.
Warning! this format and specification for the topology is subject to
change in the future and will likely be kept unspecified indefinitely.
For most intents and purposes (which we assume to be for molecular or
molecular-like simulations) the 'topology' item (and perhaps any other
item at the top level other than those preceded by an underscore,
such as in the '_settings' item) is merely useful metadata that
applies to ALL runs and is not dynamical.
In the language of the orchestration module all data in 'runs' uses
the same 'apparatus' which is the function that takes in the initial
conditions for walkers and produces new walkers. The apparatus may
differ in the specific values of parameters but not in kind. This is
to facilitate runs that are continuations of other runs. For these
kinds of simulations the state of the resampler, boundary conditions,
etc. will not be as they were initially but are the same in kind or
type.
All of the necessary type information of data in runs is kept in the
'_settings' group. This is used to serialize information about the
data types, shapes, run to run continuations etc. This allows for the
initialization of an empty (no runs) WepyHDF5 database at one time and
filling of data at another time. Otherwise types of datasets would
have to be inferred from the data itself, which may not exist yet.
As a convention, items which are preceded by an underscore (following
the python convention) are to be considered hidden and mechanical to
the proper functioning of various WepyHDF5 API features, such as
sparse trajectory fields.
The '_settings' is specified as a simple key-value structure, however
values may be arbitrarily complex.
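
For orientation, the top level can be inspected directly with h5py
(the file name here is just an example)::

    import h5py

    with h5py.File('results.wepy.h5', 'r') as f:
        json_top = f['topology'][()]                    # JSON topology string
        run_names = list(f['runs'])                     # e.g. ['0', '1']
        sparse = list(f['_settings/sparse_fields'][:])
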
Runs
----
The meat of the format is contained within the runs group:
- runs
- 0
- 1
- 2
- ...
Under the runs group are a series of groups for each run. Runs are
named according to the order in which they were added to the database.
Within a run (say '0' from above) we have a number of items:
- 0
- init_walkers
- trajectories
- decision
- resampling
- resampler
- warping
- progress
- boundary_conditions
Trajectories
^^^^^^^^^^^^
The 'trajectories' group is where the data for the frames of the
walker trajectories is stored.
Even though the tree-like trajectories of weighted ensemble data may
be well suited to having a tree-like storage topology we have opted to
use something more familiar to the field, and have used a collection
of linear "trajectories".
This way of breaking up the trajectory data coupled with proper
records of resampling (see below) allows for the imposition of a tree
structure without committing to that as the data storage topology.
This allows the WepyHDF5 format to be easily used as a container
format for collections of linear trajectories. While this is not
supported in any real capacity it is one small step to convergence. We
feel that a format that contains multiple trajectories is important
for situations like weighted ensemble where trajectories are
interdependent. The transition to a storage format like HDF5 however
opens up many possibilities for new features for trajectories that
have not occurred despite several attempts to forge new formats based
on HDF5 (TODO: get references right; see work in mdtraj and MDHDF5).
Perhaps these formats have not caught on because the existing formats
(e.g. XTC, DCD) for simple linear trajectories are good enough and
there is little motivation to migrate.
However, the WepyHDF5 format (and the related sub-formats described
below, e.g. record groups and the trajectory format) both covers new
use cases which can't be achieved with the old formats and handles the
old ones with ease.
Once users see the power of using a format like HDF5 from using wepy
they may continue to use it for simpler simulations.
In any case the 'trajectories' in the group for weighted ensemble
simulations should be thought of only as containers and not literally
as trajectories. That is frame 4 does not necessarily follow from
frame 3. So one may think of them more as "lanes" or "slots" for
trajectory data that needs to be stitched together with the
appropriate resampling records.
The routines and methods for generating contiguous trajectories from
the data in WepyHDF5 are given through the 'analysis' module, which
generates "traces" through the dataset.
With this in mind we will describe the sub-format of a trajectory now.
The 'trajectories' group is similar to the 'runs' group in that it has
sub-groups whose names are numbers. These numbers however are not the
order in which they are created but an index of that trajectory which
are typically laid out all at once.
For a wepy simulation with a constant number of walkers you will only
ever need as many trajectories/slots as there are walkers. So if you
have 8 walkers then you will have trajectories 0 through 7. Concretely:
- runs
- 0
- trajectories
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
If we look at trajectory 0 we might see the following groups within:
- positions
- box_vectors
- velocities
- weights
Which is what you would expect for a constant pressure molecular
dynamics simulation where you have the positions of the atoms, the box
size, and velocities of the atoms.
The particulars for what "fields" a trajectory in general has are not
important but this important use-case is directly supported in the
WepyHDF5 format.
In any such simulation, however, the 'weights' field will appear since
this is the weight of the walker of this frame and is a value
important to weighted ensemble and not the underlying dynamics.
The naive approach to these fields is that each is a dataset of
dimension (n_frames, feature_vector_shape[0], ...) where the first dimension
is the cycle_idx and the rest of the dimensions are determined by the
atomic feature vector for each field for a single frame.
For example, the positions for a molecular simulation with 100 atoms
with x, y, and z coordinates that ran for 1000 cycles would be a
dataset of the shape (1000, 100, 3). Similarly the box vectors would
be (1000, 3, 3) and the weights would be (1000, 1).
This uniformity vastly simplifies accessing and adding new variables
and requires that individual state values in walkers always be arrays
with shapes, even when they are single values (e.g. energy). The
exception being the weight which is handled separately.
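
As a rough illustration with plain h5py (the sizes follow the example
above and are not prescribed by the format)::

    with h5py.File('results.wepy.h5', 'r') as f:
        traj = f['runs/0/trajectories/0']
        traj['positions'].shape    # e.g. (1000, 100, 3)
        traj['box_vectors'].shape  # e.g. (1000, 3, 3)
        traj['weights'].shape      # e.g. (1000, 1)
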
However, this situation is actually more complex to allow for special
features.
First of all is the presence of compound fields which allow nesting of
multiple groups.
The above "trajectory fields" would have identifiers such as the
literal strings 'positions' and 'box_vectors', while a compound field
would have an identifier 'observables/rmsd' or 'alt_reps/binding_site'.
Using the '/' path separator in a trajectory field name automatically
makes the field a group, with the last element of the name becoming
the dataset. So for the observables example we might have:
- 0
- observables
- rmsd
- sasa
Where the rmsd would be accessed as a trajectory field of trajectory 0
as 'observables/rmsd' and the solvent accessible surface area as
'observables/sasa'.
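
With h5py a compound field is simply a nested path (continuing the
illustrative snippet above)::

    with h5py.File('results.wepy.h5', 'r') as f:
        rmsds = f['runs/0/trajectories/0/observables/rmsd'][:]
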
This example introduces how the WepyHDF5 format is not only useful for
storing data produced by simulation but also in the analysis of that
data and computation of by-frame quantities.
The 'observables' compound group key prefix is special and will be
used in the 'compute_observables' method.
The other special compound group key prefix is 'alt_reps' which is
used for particle simulations to store "alternate representation" of
the positions. This is useful in cooperation with the next feature of
wepy trajectory fields to allow for more economical storage of data.
The next feature (and complication of the format) is the allowance for
sparse fields. As the fields were introduced we said that they should
have as many feature vectors as there are frames for the
simulation. In the example however, you will notice that storing both
the full atomic positions and velocities for a long simulation
requires a heavy storage burden.
So perhaps you only want to store the velocities (or forces) every 100
frames so that you are able to restart a simulation from midway
through the simulation. This is achieved through sparse fields.
A sparse field is no longer a dataset but a group with two items:
- _sparse_idxs
- data
The '_sparse_idxs' are simply a dataset of integers that assign each
element of the 'data' dataset to a frame index. Using the above
example we run a simulation for 1000 frames with 100 atoms and we save
the velocities every 100 frames we would have a 'velocities/data'
dataset of shape (100, 100, 3) which is 10 times less data than if it
were saved every frame.
While this complicates the storage format use of the proper API
methods should be transparent whether you are returning a sparse field
or not.
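
Read directly with h5py a sparse field looks like this (assuming here
that 'velocities' was declared sparse; the wepy API hides this
indirection)::

    with h5py.File('results.wepy.h5', 'r') as f:
        vel = f['runs/0/trajectories/0/velocities']
        saved_cycles = vel['_sparse_idxs'][:]   # e.g. [0, 100, 200, ...]
        values = vel['data'][:]                 # shape (n_saved, n_atoms, 3)
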
As alluded to above the use of sparse fields can be used for more than
just accessory fields. In many simulations, such as those with full
atomistic simulations of proteins in solvent we often don't care about
the dynamics of most of the atoms in the simulation and so would like
to not have to save them.
The 'alt_reps' compound field is meant to solve this. For example, the
WepyHDF5Reporter supports a special option to save only a subset of
the atoms in the main 'positions' field but also to save the full
atomic system as an alternate representation, which is the field name
'alt_reps/all_atoms'. So that you can still save the full system every
once in a while but be economical in what positions you save every
single frame.
Note that there really isn't a way to achieve this with other
formats. You either make a completely new trajectory with only the
atoms of interest and now you are duplicating those in two places, or
you duplicate and then filter your full systems trajectory file and
rely on some sort of index to always live with it in the filesystem,
which is a very precarious scenario. The situation is particularly
hopeless for weighted ensemble trajectories.
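
With h5py such an alternate representation is just another sparse
compound field (again an illustrative path, not a requirement)::

    with h5py.File('results.wepy.h5', 'r') as f:
        all_atoms = f['runs/0/trajectories/0/alt_reps/all_atoms']
        full_frames = all_atoms['data'][:]          # full-system positions
        full_cycles = all_atoms['_sparse_idxs'][:]  # cycles they belong to
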
Init Walkers
^^^^^^^^^^^^
The data stored in the 'trajectories' section is the data that is
returned after running dynamics in a cycle. Since we view the WepyHDF5
as a completely self-contained format for simulations it seems
negligent to rely on outside sources (such as the filesystem) for the
initial structures that seeded the simulations. These states (and
weights) can be stored in this group.
The format of this group is identical to the one for trajectories
except that there is only one frame for each slot and so the shape of
the datasets for each field is just the shape of the feature vector.
Record Groups
^^^^^^^^^^^^^
TODO: add reference to reference groups
The last five items are what are called 'record groups' and all follow
the same format.
Each record group contains itself a number of datasets, where the
names of the datasets correspond to the 'field names' from the record
group specification. So each record group is simply a key-value store
where the values must be datasets.
For instance the fields in the 'resampling' (which is particularly
important as it encodes the branching structure) record group for a
WExplore resampler simulation are:
- step_idx
- walker_idx
- decision_id
- target_idxs
- region_assignment
Where the 'step_idx' is an integer specifying which step of resampling
within the cycle the resampling action took place (the cycle index is
metadata for the group). The 'walker_idx' is the index of the walker
that this action was assigned to. The 'decision_id' is an integer that
is related to an enumeration of decision types that encodes which
discrete action is to be taken for this resampling event (the
enumeration is in the 'decision' item of the run groups). The
'target_idxs' is a variable length 1-D array of integers which assigns
the results of the action to specific target 'slots' (which was
discussed for the 'trajectories' run group). And the
'region_assignment' is specific to WExplore which reports on which
region the walker was in at that time, and is a variable length 1-D
array of integers.
Additionally, record groups are broken into two types:
- continual
- sporadic
Continual records occur once per cycle and so there is no extra
indexing necessary.
Sporadic records can happen multiple or zero times per cycle and so
require a special index for them which is contained in the extra
dataset '_cycle_idxs'.
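
For instance, the sporadic 'resampling' records can be read with plain
h5py (the field names are the WExplore ones listed above)::

    with h5py.File('results.wepy.h5', 'r') as f:
        res = f['runs/0/resampling']
        cycle_idxs = res['_cycle_idxs'][:]   # cycle each record belongs to
        decisions = res['decision_id'][:]    # one feature vector per record
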
It is worth noting that the underlying methods for each record group
are general. So while these are the official wepy record groups that
are supported if there is a use-case that demands a new record group
it is a fairly straightforward task from a developers perspective.
"""
import os.path as osp
from collections import Sequence, namedtuple, defaultdict, Counter
import itertools as it
import json
from warnings import warn
from copy import copy
import logging
import gc
import numpy as np
import h5py
import networkx as nx
from wepy.analysis.parents import resampling_panel
from wepy.util.mdtraj import mdtraj_to_json_topology, json_to_mdtraj_topology, \
traj_fields_to_mdtraj
from wepy.util.util import traj_box_vectors_to_lengths_angles
from wepy.util.json_top import json_top_subset, json_top_atom_count
# optional dependencies
try:
import mdtraj as mdj
except ModuleNotFoundError:
warn("mdtraj is not installed and that functionality will not work", RuntimeWarning)
try:
import pandas as pd
except ModuleNotFoundError:
warn("pandas is not installed and that functionality will not work", RuntimeWarning)
## h5py settings
# we set the libver to always be the latest (which should be 1.10) so
# that we know we can always use SWMR and the newest features. We
# don't care about backwards compatibility with HDF5 1.8. Just update
# in a new virtualenv if this is a problem for you
H5PY_LIBVER = 'latest'
## Header and settings keywords
TOPOLOGY = 'topology'
"""Default header apparatus dataset. The molecular topology dataset."""
SETTINGS = '_settings'
"""Name of the settings group in the header group."""
RUNS = 'runs'
"""The group name for runs."""
## metadata fields
RUN_IDX = 'run_idx'
"""Metadata field for run groups for the run index within this file."""
RUN_START_SNAPSHOT_HASH = 'start_snapshot_hash'
"""Metadata field for a run that corresponds to the hash of the
starting simulation snapshot in orchestration."""
RUN_END_SNAPSHOT_HASH = 'end_snapshot_hash'
"""Metadata field for a run that corresponds to the hash of the
ending simulation snapshot in orchestration."""
TRAJ_IDX = 'traj_idx'
"""Metadata field for trajectory groups for the trajectory index in that run."""
## Misc. Names
CYCLE_IDX = 'cycle_idx'
"""String for setting the names of cycle indices in records and
miscellaneous situations."""
## Settings field names
SPARSE_FIELDS = 'sparse_fields'
"""Settings field name for sparse field trajectory field flags."""
N_ATOMS = 'n_atoms'
"""Settings field name group for the number of atoms in the default positions field."""
N_DIMS_STR = 'n_dims'
"""Settings field name for positions field spatial dimensions."""
MAIN_REP_IDXS = 'main_rep_idxs'
"""Settings field name for the indices of the full apparatus topology in
the default positions trajectory field."""
ALT_REPS_IDXS = 'alt_reps_idxs'
"""Settings field name for the different 'alt_reps'. The indices of
the atoms from the full apparatus topology for each."""
FIELD_FEATURE_SHAPES_STR = 'field_feature_shapes'
"""Settings field name for the trajectory field shapes."""
FIELD_FEATURE_DTYPES_STR = 'field_feature_dtypes'
"""Settings field name for the trajectory field data types."""
UNITS = 'units'
"""Settings field name for the units of the trajectory fields."""
RECORD_FIELDS = 'record_fields'
"""Settings field name for the record fields that are to be included
in the truncated listing of record group fields."""
CONTINUATIONS = 'continuations'
"""Settings field name for the continuations relationships between runs."""
## Run Fields Names
TRAJECTORIES = 'trajectories'
"""Run field name for the trajectories group."""
INIT_WALKERS = 'init_walkers'
"""Run field name for the initial walkers group."""
DECISION = 'decision'
"""Run field name for the decision enumeration group."""
## Record Groups Names
RESAMPLING = 'resampling'
"""Record group run field name for the resampling records """
RESAMPLER = 'resampler'
"""Record group run field name for the resampler records """
WARPING = 'warping'
"""Record group run field name for the warping records """
PROGRESS = 'progress'
"""Record group run field name for the progress records """
BC = 'boundary_conditions'
"""Record group run field name for the boundary conditions records """
## Record groups constants
# special datatypes strings
NONE_STR = 'None'
"""String signifying a field of unspecified shape. Used for
serializing the None python object."""
CYCLE_IDXS = '_cycle_idxs'
"""Group name for the cycle indices of sporadic records."""
# records can be sporadic or continual. Continual records are
# generated every cycle and are saved every cycle and are for all
# walkers. Sporadic records are generated conditional on specific
# events taking place and thus may or may not be produced each
# cycle. There also is not a single record for each (cycle, step) like
# there would be for continual ones because they can occur for single
# walkers, boundary conditions, or resamplers.
SPORADIC_RECORDS = (RESAMPLER, WARPING, RESAMPLING, BC)
"""Enumeration of the record groups that are sporadic."""
## Trajectories Group
# Default Trajectory Constants
N_DIMS = 3
"""Number of dimensions for the default positions."""
# Required Trajectory Fields
WEIGHTS = 'weights'
"""The field name for the frame weights."""
# default fields for trajectories
POSITIONS = 'positions'
"""The field name for the default positions."""
BOX_VECTORS = 'box_vectors'
"""The field name for the default box vectors."""
VELOCITIES = 'velocities'
"""The field name for the default velocities."""
FORCES = 'forces'
"""The field name for the default forces."""
TIME = 'time'
"""The field name for the default time."""
KINETIC_ENERGY = 'kinetic_energy'
"""The field name for the default kinetic energy."""
POTENTIAL_ENERGY = 'potential_energy'
"""The field name for the default potential energy."""
BOX_VOLUME = 'box_volume'
"""The field name for the default box volume."""
PARAMETERS = 'parameters'
"""The field name for the default parameters."""
PARAMETER_DERIVATIVES = 'parameter_derivatives'
"""The field name for the default parameter derivatives."""
ALT_REPS = 'alt_reps'
"""The field name for the default compound field observables."""
OBSERVABLES = 'observables'
"""The field name for the default compound field observables."""
## Trajectory Field Constants
WEIGHT_SHAPE = (1,)
"""Weights feature vector shape."""
WEIGHT_DTYPE = np.float
"""Weights feature vector data type."""
# Default Trajectory Field Constants
FIELD_FEATURE_SHAPES = ((TIME, (1,)),
(BOX_VECTORS, (3,3)),
(BOX_VOLUME, (1,)),
(KINETIC_ENERGY, (1,)),
(POTENTIAL_ENERGY, (1,)),
)
"""Default shapes for the default fields."""
FIELD_FEATURE_DTYPES = ((POSITIONS, np.float),
(VELOCITIES, np.float),
(FORCES, np.float),
(TIME, np.float),
(BOX_VECTORS, np.float),
(BOX_VOLUME, np.float),
(KINETIC_ENERGY, np.float),
(POTENTIAL_ENERGY, np.float),
)
"""Default data types for the default fields."""
# Positions (and thus velocities and forces) are determined by the
# N_DIMS (which can be customized) and more importantly the number of
# particles which is always different. All the others are always wacky
# and different.
POSITIONS_LIKE_FIELDS = (VELOCITIES, FORCES)
"""Default trajectory fields which are the same shape as the main positions field."""
## Trajectory field features keys
# sparse trajectory fields
DATA = 'data'
"""Name of the dataset in sparse trajectory fields."""
SPARSE_IDXS = '_sparse_idxs'
"""Name of the dataset that indexes sparse trajectory fields."""
# utility for paths
def _iter_field_paths(grp):
"""Return all subgroup field name paths from a group.
Useful for compound fields. For example if you have the group
observables with multiple subfields:
- observables
- rmsd
- sasa
Passing the h5py group 'observables' will return the full field
names for each subfield:
- 'observables/rmsd'
- 'observables/sasa'
Parameters
----------
grp : h5py.Group
The group to enumerate subfield names for.
Returns
-------
subfield_names : list of str
The full names for the subfields of the group.
"""
field_paths = []
for field_name in grp:
if isinstance(grp[field_name], h5py.Group):
for subfield in grp[field_name]:
# if it is a sparse field don't do the subfields since
# they will be _sparse_idxs and data which are not
# what we want here
if field_name not in grp.file['_settings/sparse_fields']:
field_paths.append(field_name + '/' + subfield)
else:
field_paths.append(field_name)
return field_paths
class WepyHDF5(object):
"""Wrapper for h5py interface to an HDF5 file object for creation and
access of WepyHDF5 data.
This is the primary implementation of the API for creating,
accessing, and modifying data in an HDF5 file that conforms to the
WepyHDF5 specification.
"""
MODES = ('r', 'r+', 'w', 'w-', 'x', 'a')
"""The recognized modes for opening the WepyHDF5 file."""
WRITE_MODES = ('r+', 'w', 'w-', 'x', 'a')
#### dunder methods
def __init__(self, filename, mode='x',
topology=None,
units=None,
sparse_fields=None,
feature_shapes=None, feature_dtypes=None,
n_dims=None,
alt_reps=None, main_rep_idxs=None,
swmr_mode=False,
expert_mode=False
):
"""Constructor for the WepyHDF5 class.
Initialize a new Wepy HDF5 file. This will create an h5py.File
object.
The File will be closed after construction by default.
mode:
r Readonly, file must exist
r+ Read/write, file must exist
w Create file, truncate if exists
x or w- Create file, fail if exists
a Read/write if exists, create otherwise
Parameters
----------
filename : str
File path
mode : str
Mode specification for opening the HDF5 file.
topology : str
JSON string representing topology of system being simulated.
units : dict of str : str, optional
Mapping of trajectory field names to string specs
for units.
sparse_fields : list of str, optional
List of trajectory fields that should be initialized as sparse.
feature_shapes : dict of str : shape_spec, optional
Mapping of trajectory fields to their shape spec for initialization.
feature_dtypes : dict of str : dtype_spec, optional
Mapping of trajectory fields to their shape spec for initialization.
n_dims : int, default: 3
Set the number of spatial dimensions for the default
positions trajectory field.
alt_reps : dict of str : list of int, optional
Specifies that there will be 'alt_reps' of positions each
named by the keys of this mapping and containing the
indices in each value list.
main_rep_idxs : list of int, optional
The indices of atom positions to save as the main 'positions'
trajectory field. Defaults to all atoms.
expert_mode : bool
If True no initialization is performed other than the
setting of the filename. Useful mainly for debugging.
Raises
------
AssertionError
If the mode is not one of the supported mode specs.
AssertionError
If a topology is not given for a creation mode.
Warns
-----
If initialization data was given but the file was opened in a read mode.
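        Examples
        --------
        A minimal read-only usage sketch (the file name is hypothetical;
        the 'h5' attribute exposes the underlying h5py.File object)::

            wepy_h5 = WepyHDF5('results.wepy.h5', mode='r')
            with wepy_h5:
                n_runs = len(wepy_h5.h5['runs'])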
"""
self._filename = filename
self._swmr_mode = swmr_mode
if expert_mode is True:
self._h5 = None
self._wepy_mode = None
self._h5py_mode = None
self.closed = None
# terminate the constructor here
return None
assert mode in self.MODES, \
"mode must be either one of: {}".format(', '.join(self.MODES))
# the top level mode enforced by wepy.hdf5
self._wepy_mode = mode
        # the lower level h5py mode. This was originally different to
        # accommodate different modes at the wepy level for
# concatenation. I will leave these separate because this is
# used elsewhere and could be a feature in the future.
self._h5py_mode = mode
# Temporary metadata: used to initialize the object but not
# used after that
self._topology = topology
self._units = units
self._n_dims = n_dims
self._n_coords = None
# set hidden feature shapes and dtype, which are only
# referenced if needed when trajectories are created. These
# will be saved in the settings section in the actual HDF5
# file
self._field_feature_shapes_kwarg = feature_shapes
self._field_feature_dtypes_kwarg = feature_dtypes
self._field_feature_dtypes = None
self._field_feature_shapes = None
# save the sparse fields as a private variable for use in the
# create constructor
if sparse_fields is None:
self._sparse_fields = []
else:
self._sparse_fields = sparse_fields
# if we specify an atom subset of the main POSITIONS field
# we must save them
self._main_rep_idxs = main_rep_idxs
# a dictionary specifying other alt_reps to be saved
if alt_reps is not None:
self._alt_reps = alt_reps
# all alt_reps are sparse
alt_rep_keys = ['{}/{}'.format(ALT_REPS, key) for key in self._alt_reps.keys()]
self._sparse_fields.extend(alt_rep_keys)
else:
self._alt_reps = {}
# open the file and then run the different constructors based
# on the mode
with h5py.File(filename, mode=self._h5py_mode,
libver=H5PY_LIBVER, swmr=self._swmr_mode) as h5:
self._h5 = h5
# set SWMR mode if asked for if we are in write mode also
if self._swmr_mode is True and mode in self.WRITE_MODES:
self._h5.swmr_mode = swmr_mode
# create file mode: 'w' will create a new file or overwrite,
# 'w-' and 'x' will not overwrite but will create a new file
if self._wepy_mode in ['w', 'w-', 'x']:
self._create_init()
# read/write mode: in this mode we do not completely overwrite
# the old file and start again but rather write over top of
# values if requested
elif self._wepy_mode in ['r+']:
self._read_write_init()
# add mode: read/write create if doesn't exist
elif self._wepy_mode in ['a']:
if osp.exists(self._filename):
self._read_write_init()
else:
self._create_init()
# read only mode
elif self._wepy_mode == 'r':
# if any data was given, warn the user
if any([kwarg is not None for kwarg in
[topology, units, sparse_fields,
feature_shapes, feature_dtypes,
n_dims, alt_reps, main_rep_idxs]]):
warn("Data was given but opening in read-only mode", RuntimeWarning)
# then run the initialization process
self._read_init()
# flush the buffers
self._h5.flush()
# set the h5py mode to the value in the actual h5py.File
# object after creation
self._h5py_mode = self._h5.mode
# get rid of the temporary variables
del self._topology
del self._units
del self._n_dims
del self._n_coords
del self._field_feature_shapes_kwarg
del self._field_feature_dtypes_kwarg
del self._field_feature_shapes
del self._field_feature_dtypes
del self._sparse_fields
del self._main_rep_idxs
del self._alt_reps
# variable to reflect if it is closed or not, should be closed
# after initialization
self.closed = True
# end of the constructor
return None
# TODO is this right? shouldn't we actually delete the data then close
def __del__(self):
self.close()
# context manager methods
def __enter__(self):
self.open()
# self._h5 = h5py.File(self._filename,
# libver=H5PY_LIBVER, swmr=self._swmr_mode)
# self.closed = False
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
@property
def swmr_mode(self):
return self._swmr_mode
@swmr_mode.setter
def swmr_mode(self, val):
self._swmr_mode = val
# TODO custom deepcopy to avoid copying the actual HDF5 object
#### hidden methods (_method_name)
### constructors
def _create_init(self):
"""Creation mode constructor.
Completely overwrite the data in the file. Reinitialize the values
and set with the new ones if given.
"""
assert self._topology is not None, \
"Topology must be given for a creation constructor"
# initialize the runs group
runs_grp = self._h5.create_group(RUNS)
# initialize the settings group
settings_grp = self._h5.create_group(SETTINGS)
# create the topology dataset
self._h5.create_dataset(TOPOLOGY, data=self._topology)
# sparse fields
if self._sparse_fields is not None:
# make a dataset for the sparse fields allowed. this requires
# a 'special' datatype for variable length strings. This is
# supported by HDF5 but not numpy.
vlen_str_dt = h5py.special_dtype(vlen=str)
# create the dataset with empty values for the length of the
# sparse fields given
sparse_fields_ds = settings_grp.create_dataset(SPARSE_FIELDS,
(len(self._sparse_fields),),
dtype=vlen_str_dt,
maxshape=(None,))
# set the flags
for i, sparse_field in enumerate(self._sparse_fields):
sparse_fields_ds[i] = sparse_field
# field feature shapes and dtypes
# initialize to the defaults, this gives values to
# self._n_coords, and self.field_feature_dtypes, and
# self.field_feature_shapes
self._set_default_init_field_attributes(n_dims=self._n_dims)
# save the number of dimensions and number of atoms in settings
settings_grp.create_dataset(N_DIMS_STR, data=np.array(self._n_dims))
settings_grp.create_dataset(N_ATOMS, data=np.array(self._n_coords))
# the main rep atom idxs
settings_grp.create_dataset(MAIN_REP_IDXS, data=self._main_rep_idxs, dtype=np.int)
# alt_reps settings
alt_reps_idxs_grp = settings_grp.create_group(ALT_REPS_IDXS)
for alt_rep_name, idxs in self._alt_reps.items():
alt_reps_idxs_grp.create_dataset(alt_rep_name, data=idxs, dtype=np.int)
# if both feature shapes and dtypes were specified overwrite
# (or initialize if not set by defaults) the defaults
if (self._field_feature_shapes_kwarg is not None) and\
(self._field_feature_dtypes_kwarg is not None):
self._field_feature_shapes.update(self._field_feature_shapes_kwarg)
self._field_feature_dtypes.update(self._field_feature_dtypes_kwarg)
# any sparse field with unspecified shape and dtype must be
# set to None so that it will be set at runtime
for sparse_field in self.sparse_fields:
if (not sparse_field in self._field_feature_shapes) or \
(not sparse_field in self._field_feature_dtypes):
self._field_feature_shapes[sparse_field] = None
self._field_feature_dtypes[sparse_field] = None
# save the field feature shapes and dtypes in the settings group
shapes_grp = settings_grp.create_group(FIELD_FEATURE_SHAPES_STR)
for field_path, field_shape in self._field_feature_shapes.items():
if field_shape is None:
# set it as a dimensionless array of NaN
field_shape = np.array(np.nan)
shapes_grp.create_dataset(field_path, data=field_shape)
dtypes_grp = settings_grp.create_group(FIELD_FEATURE_DTYPES_STR)
for field_path, field_dtype in self._field_feature_dtypes.items():
if field_dtype is None:
dt_str = NONE_STR
else:
# make a json string of the datatype that can be read
# in again, we call np.dtype again because there is no
# np.float.descr attribute
dt_str = json.dumps(np.dtype(field_dtype).descr)
dtypes_grp.create_dataset(field_path, data=dt_str)
# initialize the units group
unit_grp = self._h5.create_group(UNITS)
# if units were not given set them all to None
if self._units is None:
self._units = {}
for field_path in self._field_feature_shapes.keys():
self._units[field_path] = None
# set the units
for field_path, unit_value in self._units.items():
# ignore the field if not given
if unit_value is None:
continue
unit_path = '{}/{}'.format(UNITS, field_path)
unit_grp.create_dataset(unit_path, data=unit_value)
# create the group for the run data records
records_grp = settings_grp.create_group(RECORD_FIELDS)
# create a dataset for the continuation run tuples
# (continuation_run, base_run), where the first element
# of the new run that is continuing the run in the second
# position
self._init_continuations()
def _read_write_init(self):
"""Read-write mode constructor."""
self._read_init()
def _add_init(self):
"""The addition mode constructor.
Create the dataset if it doesn't exist and put it in r+ mode,
otherwise, just open in r+ mode.
"""
if not any(self._exist_flags):
self._create_init()
else:
self._read_write_init()
def _read_init(self):
"""Read mode constructor."""
pass
def _set_default_init_field_attributes(self, n_dims=None):
"""Sets the feature_shapes and feature_dtypes to be the default for
this module. These will be used to initialize field datasets when no
given during construction (i.e. for sparse values)
Parameters
----------
n_dims : int
"""
# we use the module defaults for the datasets to initialize them
field_feature_shapes = dict(FIELD_FEATURE_SHAPES)
field_feature_dtypes = dict(FIELD_FEATURE_DTYPES)
# get the number of coordinates of positions. If there is a
# main_reps then we have to set the number of atoms to that,
# if not we count the number of atoms in the topology
if self._main_rep_idxs is None:
self._n_coords = json_top_atom_count(self.topology)
self._main_rep_idxs = list(range(self._n_coords))
else:
self._n_coords = len(self._main_rep_idxs)
# get the number of dimensions as a default
if n_dims is None:
self._n_dims = N_DIMS
# feature shapes for positions and positions-like fields are
# not known at the module level due to different number of
# coordinates (number of atoms) and number of dimensions
# (default 3 spatial). We set them now that we know this
# information.
        # add the positions shape
field_feature_shapes[POSITIONS] = (self._n_coords, self._n_dims)
# add the positions-like field shapes (velocities and forces) as the same
for poslike_field in POSITIONS_LIKE_FIELDS:
field_feature_shapes[poslike_field] = (self._n_coords, self._n_dims)
# set the attributes
self._field_feature_shapes = field_feature_shapes
self._field_feature_dtypes = field_feature_dtypes
def _get_field_path_grp(self, run_idx, traj_idx, field_path):
"""Given a field path for the trajectory returns the group the field's
dataset goes in and the key for the field name in that group.
The field path for a simple field is just the name of the
field and for a compound field it is the compound field group
name with the subfield separated by a '/' like
'observables/observable1' where 'observables' is the compound
field group and 'observable1' is the subfield name.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Returns
-------
group : h5py.Group
field_name : str
"""
# check if it is compound
if '/' in field_path:
# split it
grp_name, field_name = field_path.split('/')
# get the hdf5 group
grp = self.h5['{}/{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx, grp_name)]
        # it's simple so just return the root group and the original path
else:
grp = self.h5
field_name = field_path
return grp, field_name
def _init_continuations(self):
"""This will either create a dataset in the settings for the
continuations or if continuations already exist it will reinitialize
them and delete the data that exists there.
Returns
-------
continuation_dset : h5py.Dataset
"""
# if the continuations dset already exists we reinitialize the
# data
if CONTINUATIONS in self.settings_grp:
cont_dset = self.settings_grp[CONTINUATIONS]
cont_dset.resize( (0,2) )
# otherwise we just create the data
else:
cont_dset = self.settings_grp.create_dataset(CONTINUATIONS, shape=(0,2), dtype=np.int,
maxshape=(None, 2))
return cont_dset
def _add_run_init(self, run_idx, continue_run=None):
"""Routines for creating a run includes updating and setting object
global variables, increasing the counter for the number of runs.
Parameters
----------
run_idx : int
continue_run : int
Index of the run to continue.
"""
# add the run idx as metadata in the run group
self._h5['{}/{}'.format(RUNS, run_idx)].attrs[RUN_IDX] = run_idx
# if this is continuing another run add the tuple (this_run,
        # continues_run) to the continuations settings
if continue_run is not None:
self.add_continuation(run_idx, continue_run)
def _add_init_walkers(self, init_walkers_grp, init_walkers):
"""Adds the run field group for the initial walkers.
Parameters
----------
init_walkers_grp : h5py.Group
The group to add the walker data to.
init_walkers : list of objects implementing the Walker interface
The walkers to save in the group
"""
# add the initial walkers to the group by essentially making
# new trajectories here that will only have one frame
for walker_idx, walker in enumerate(init_walkers):
walker_grp = init_walkers_grp.create_group(str(walker_idx))
# weights
# get the weight from the walker and make a feature array of it
weights = np.array([[walker.weight]])
# then create the dataset and set it
walker_grp.create_dataset(WEIGHTS, data=weights)
# state fields data
for field_key, field_value in walker.state.dict().items():
# values may be None, just ignore them
if field_value is not None:
# just create the dataset by making it a feature array
# (wrapping it in another list)
walker_grp.create_dataset(field_key, data=np.array([field_value]))
def _init_run_sporadic_record_grp(self, run_idx, run_record_key, fields):
"""Initialize a sporadic record group for a run.
Parameters
----------
run_idx : int
run_record_key : str
The record group name.
fields : list of field specs
Each field spec is a 3-tuple of
(field_name : str, field_shape : shape_spec, field_dtype : dtype_spec)
Returns
-------
record_group : h5py.Group
"""
# create the group
run_grp = self.run(run_idx)
record_grp = run_grp.create_group(run_record_key)
# initialize the cycles dataset that maps when the records
# were recorded
record_grp.create_dataset(CYCLE_IDXS, (0,), dtype=np.int,
maxshape=(None,))
# for each field simply create the dataset
for field_name, field_shape, field_dtype in fields:
# initialize this field
self._init_run_records_field(run_idx, run_record_key,
field_name, field_shape, field_dtype)
return record_grp
def _init_run_continual_record_grp(self, run_idx, run_record_key, fields):
"""Initialize a continual record group for a run.
Parameters
----------
run_idx : int
run_record_key : str
The record group name.
fields : list of field specs
Each field spec is a 3-tuple of
(field_name : str, field_shape : shape_spec, field_dtype : dtype_spec)
Returns
-------
record_group : h5py.Group
"""
# create the group
run_grp = self.run(run_idx)
record_grp = run_grp.create_group(run_record_key)
# for each field simply create the dataset
for field_name, field_shape, field_dtype in fields:
self._init_run_records_field(run_idx, run_record_key,
field_name, field_shape, field_dtype)
return record_grp
def _init_run_records_field(self, run_idx, run_record_key,
field_name, field_shape, field_dtype):
"""Initialize a single field for a run record group.
Parameters
----------
run_idx : int
run_record_key : str
The name of the record group.
field_name : str
The name of the field in the record group.
field_shape : tuple of int
The shape of the dataset for the field.
field_dtype : dtype_spec
An h5py recognized data type.
Returns
-------
dataset : h5py.Dataset
"""
record_grp = self.run(run_idx)[run_record_key]
# check if it is variable length
if field_shape is Ellipsis:
# make a special dtype that allows it to be
# variable length
vlen_dt = h5py.special_dtype(vlen=field_dtype)
# this is only allowed to be a single dimension
# since no real shape was given
dset = record_grp.create_dataset(field_name, (0,), dtype=vlen_dt,
maxshape=(None,))
        # if it's not variable length, just make it normally
else:
# create the group
dset = record_grp.create_dataset(field_name, (0, *field_shape), dtype=field_dtype,
maxshape=(None, *field_shape))
return dset
def _is_sporadic_records(self, run_record_key):
"""Tests whether a record group is sporadic or not.
Parameters
----------
run_record_key : str
Record group name.
Returns
-------
is_sporadic : bool
True if the record group is sporadic False if not.
"""
# assume it is continual and check if it is in the sporadic groups
if run_record_key in SPORADIC_RECORDS:
return True
else:
return False
def _init_traj_field(self, run_idx, traj_idx, field_path, feature_shape, dtype):
"""Initialize a trajectory field.
Initialize a data field in the trajectory to be empty but
resizeable.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Field name specification.
feature_shape : shape_spec
Specification of shape of a feature vector of the field.
dtype : dtype_spec
Specification of the feature vector datatype.
"""
# check whether this is a sparse field and create it
# appropriately
if field_path in self.sparse_fields:
# it is a sparse field
self._init_sparse_traj_field(run_idx, traj_idx, field_path, feature_shape, dtype)
else:
# it is not a sparse field (AKA simple)
self._init_contiguous_traj_field(run_idx, traj_idx, field_path, feature_shape, dtype)
def _init_contiguous_traj_field(self, run_idx, traj_idx, field_path, shape, dtype):
"""Initialize a contiguous (non-sparse) trajectory field.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Field name specification.
feature_shape : tuple of int
Shape of the feature vector of the field.
dtype : dtype_spec
H5py recognized datatype
"""
traj_grp = self._h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]
# create the empty dataset in the correct group, setting
# maxshape so it can be resized for new feature vectors to be added
traj_grp.create_dataset(field_path, (0, *[0 for i in shape]), dtype=dtype,
maxshape=(None, *shape))
def _init_sparse_traj_field(self, run_idx, traj_idx, field_path, shape, dtype):
"""
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Field name specification.
feature_shape : shape_spec
Specification for the shape of the feature.
dtype : dtype_spec
Specification for the dtype of the feature.
"""
traj_grp = self._h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]
# check to see that neither the shape and dtype are
# None which indicates it is a runtime defined value and
# should be ignored here
if (shape is None) or (dtype is None):
# do nothing
pass
else:
# only create the group if you are going to add the
# datasets so the extend function can know if it has been
# properly initialized easier
sparse_grp = traj_grp.create_group(field_path)
# create the dataset for the feature data
sparse_grp.create_dataset(DATA, (0, *[0 for i in shape]), dtype=dtype,
maxshape=(None, *shape))
# create the dataset for the sparse indices
sparse_grp.create_dataset(SPARSE_IDXS, (0,), dtype=np.int, maxshape=(None,))
def _init_traj_fields(self, run_idx, traj_idx,
field_paths, field_feature_shapes, field_feature_dtypes):
"""Initialize a number of fields for a trajectory.
Parameters
----------
run_idx : int
traj_idx : int
field_paths : list of str
List of field names.
field_feature_shapes : list of shape_specs
field_feature_dtypes : list of dtype_specs
"""
for i, field_path in enumerate(field_paths):
self._init_traj_field(run_idx, traj_idx,
field_path, field_feature_shapes[i], field_feature_dtypes[i])
def _add_traj_field_data(self,
run_idx,
traj_idx,
field_path,
field_data,
sparse_idxs=None,
):
"""Add a trajectory field to a trajectory.
If the sparse indices are given the field will be created as a
sparse field otherwise a normal one.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Field name.
field_data : numpy.array
The data array to set for the field.
sparse_idxs : arraylike of int of shape (1,)
List of cycle indices that the data corresponds to.
"""
# get the traj group
traj_grp = self._h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]
# if it is a sparse dataset we need to add the data and add
# the idxs in a group
if sparse_idxs is None:
# first require that the dataset exist and is exactly the
# same as the one that already exists (if indeed it
# does). If it doesn't raise a specific error letting the
# user know that they will have to delete the dataset if
# they want to change it to something else
try:
dset = traj_grp.require_dataset(field_path, shape=field_data.shape, dtype=field_data.dtype,
exact=True,
maxshape=(None, *field_data.shape[1:]))
except TypeError:
raise TypeError("For changing the contents of a trajectory field it must be the same shape and dtype.")
# if that succeeds then go ahead and set the data to the
# dataset (overwriting if it is still there)
dset[...] = field_data
else:
sparse_grp = traj_grp.create_group(field_path)
# add the data to this group
sparse_grp.create_dataset(DATA, data=field_data,
maxshape=(None, *field_data.shape[1:]))
# add the sparse idxs
sparse_grp.create_dataset(SPARSE_IDXS, data=sparse_idxs,
maxshape=(None,))
def _extend_contiguous_traj_field(self, run_idx, traj_idx, field_path, field_data):
"""Add multiple new frames worth of data to the end of an existing
contiguous (non-sparse)trajectory field.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Field name
field_data : numpy.array
The frames of data to add.
"""
traj_grp = self.h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]
field = traj_grp[field_path]
# make sure this is a feature vector
assert len(field_data.shape) > 1, \
"field_data must be a feature vector with the same number of dimensions as the number"
# of datase new frames
n_new_frames = field_data.shape[0]
# check the field to make sure it is not empty
if all([i == 0 for i in field.shape]):
# check the feature shape against the maxshape which gives
# the feature dimensions for an empty dataset
assert field_data.shape[1:] == field.maxshape[1:], \
"field feature dimensions must be the same, i.e. all but the first dimension"
# if it is empty resize it to make an array the size of
# the new field_data with the maxshape for the feature
# dimensions
feature_dims = field.maxshape[1:]
field.resize( (n_new_frames, *feature_dims) )
# set the new data to this
field[0:, ...] = field_data
else:
# make sure the new data has the right dimensions against
# the shape it already has
assert field_data.shape[1:] == field.shape[1:], \
"field feature dimensions must be the same, i.e. all but the first dimension"
# append to the dataset on the first dimension, keeping the
# others the same, these must be feature vectors and therefore
# must exist
field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )
# add the new data
field[-n_new_frames:, ...] = field_data
def _extend_sparse_traj_field(self, run_idx, traj_idx, field_path, values, sparse_idxs):
"""Add multiple new frames worth of data to the end of an existing
contiguous (non-sparse)trajectory field.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Field name
values : numpy.array
The frames of data to add.
sparse_idxs : list of int
The cycle indices the values correspond to.
"""
field = self.h5['{}/{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx, field_path)]
field_data = field[DATA]
field_sparse_idxs = field[SPARSE_IDXS]
# number of new frames
n_new_frames = values.shape[0]
# if this sparse_field has been initialized empty we need to resize
if all([i == 0 for i in field_data.shape]):
# check the feature shape against the maxshape which gives
# the feature dimensions for an empty dataset
assert values.shape[1:] == field_data.maxshape[1:], \
"input value features have shape {}, expected {}".format(
values.shape[1:], field_data.maxshape[1:])
# if it is empty resize it to make an array the size of
# the new values with the maxshape for the feature
# dimensions
feature_dims = field_data.maxshape[1:]
field_data.resize( (n_new_frames, *feature_dims) )
# set the new data to this
field_data[0:, ...] = values
else:
# make sure the new data has the right dimensions
assert values.shape[1:] == field_data.shape[1:], \
"field feature dimensions must be the same, i.e. all but the first dimension"
# append to the dataset on the first dimension, keeping the
# others the same, these must be feature vectors and therefore
# must exist
field_data.resize( (field_data.shape[0] + n_new_frames, *field_data.shape[1:]) )
# add the new data
field_data[-n_new_frames:, ...] = values
# add the sparse idxs in the same way
field_sparse_idxs.resize( (field_sparse_idxs.shape[0] + n_new_frames,
*field_sparse_idxs.shape[1:]) )
# add the new data
field_sparse_idxs[-n_new_frames:, ...] = sparse_idxs
def _add_sparse_field_flag(self, field_path):
"""Register a trajectory field as sparse in the header settings.
Parameters
----------
field_path : str
Name of the trajectory field you want to flag as sparse
"""
sparse_fields_ds = self._h5['{}/{}'.format(SETTINGS, SPARSE_FIELDS)]
# make sure it isn't already in the sparse_fields
if field_path in sparse_fields_ds[:]:
warn("sparse field {} already a sparse field, ignoring".format(field_path))
sparse_fields_ds.resize( (sparse_fields_ds.shape[0] + 1,) )
sparse_fields_ds[sparse_fields_ds.shape[0] - 1] = field_path
def _add_field_feature_shape(self, field_path, field_feature_shape):
"""Add the shape to the header settings for a trajectory field.
Parameters
----------
field_path : str
The name of the trajectory field you want to set for.
field_feature_shape : shape_spec
The shape spec to serialize as a dataset.
"""
shapes_grp = self._h5['{}/{}'.format(SETTINGS, FIELD_FEATURE_SHAPES_STR)]
shapes_grp.create_dataset(field_path, data=np.array(field_feature_shape))
def _add_field_feature_dtype(self, field_path, field_feature_dtype):
"""Add the data type to the header settings for a trajectory field.
Parameters
----------
field_path : str
The name of the trajectory field you want to set for.
field_feature_dtype : dtype_spec
The dtype spec to serialize as a dataset.
"""
feature_dtype_str = json.dumps(field_feature_dtype.descr)
dtypes_grp = self._h5['{}/{}'.format(SETTINGS, FIELD_FEATURE_DTYPES_STR)]
dtypes_grp.create_dataset(field_path, data=feature_dtype_str)
def _set_field_feature_shape(self, field_path, field_feature_shape):
"""Add the trajectory field shape to header settings or set the value.
Parameters
----------
field_path : str
The name of the trajectory field you want to set for.
field_feature_shape : shape_spec
The shape spec to serialize as a dataset.
"""
# check if the field_feature_shape is already set
if field_path in self.field_feature_shapes:
# check that the shape was previously saved as "None" as we
# won't overwrite anything else
if self.field_feature_shapes[field_path] is None:
full_path = '{}/{}/{}'.format(SETTINGS, FIELD_FEATURE_SHAPES_STR, field_path)
# we have to delete the old data and set new data
del self.h5[full_path]
self.h5.create_dataset(full_path, data=field_feature_shape)
else:
raise AttributeError(
"Cannot overwrite feature shape for {} with {} because it is {} not {}".format(
field_path, field_feature_shape, self.field_feature_shapes[field_path],
NONE_STR))
# it was not previously set so we must create then save it
else:
self._add_field_feature_shape(field_path, field_feature_shape)
def _set_field_feature_dtype(self, field_path, field_feature_dtype):
"""Add the trajectory field dtype to header settings or set the value.
Parameters
----------
field_path : str
The name of the trajectory field you want to set for.
field_feature_dtype : dtype_spec
The dtype spec to serialize as a dataset.
"""
feature_dtype_str = json.dumps(field_feature_dtype.descr)
# check if the field_feature_dtype is already set
if field_path in self.field_feature_dtypes:
# check that the dtype was previously saved as "None" as we
# won't overwrite anything else
if self.field_feature_dtypes[field_path] is None:
full_path = '{}/{}/{}'.format(SETTINGS, FIELD_FEATURE_DTYPES_STR, field_path)
# we have to delete the old data and set new data
del self.h5[full_path]
self.h5.create_dataset(full_path, data=feature_dtype_str)
else:
raise AttributeError(
"Cannot overwrite feature dtype for {} with {} because it is {} not ".format(
field_path, field_feature_dtype, self.field_feature_dtypes[field_path],
NONE_STR))
# it was not previously set so we must create then save it
else:
self._add_field_feature_dtype(field_path, field_feature_dtype)
def _extend_run_record_data_field(self, run_idx, run_record_key,
field_name, field_data):
"""Primitive record append method.
Adds data for a single field dataset in a run records group. This
is done without paying attention to whether it is sporadic or
continual and is supposed to be only the data write method.
Parameters
----------
run_idx : int
run_record_key : str
Name of the record group.
field_name : str
Name of the field in the record group to add to.
field_data : arraylike
The data to add to the field.
"""
records_grp = self.h5['{}/{}/{}'.format(RUNS, run_idx, run_record_key)]
field = records_grp[field_name]
# make sure this is a feature vector
assert len(field_data.shape) > 1, \
"field_data must be a feature vector with the same number of dimensions as the number"
# of datase new frames
n_new_frames = field_data.shape[0]
# check whether it is a variable length record, by getting the
# record dataset dtype and using the checker to see if it is
# the vlen special type in h5py
if h5py.check_dtype(vlen=field.dtype) is not None:
# if it is we have to treat it differently, since it
# cannot be multidimensional
# if the dataset has no data in it we need to reshape it
if all([i == 0 for i in field.shape]):
# initialize this array
# if it is empty resize it to make an array the size of
# the new field_data with the maxshape for the feature
# dimensions
field.resize( (n_new_frames,) )
# set the new data to this
for i, row in enumerate(field_data):
field[i] = row
# otherwise just add the data
else:
                # resize the array, but it is only rank 1 because
                # of the variable length data
field.resize( (field.shape[0] + n_new_frames, ) )
# add each row to the newly made space
for i, row in enumerate(field_data):
field[(field.shape[0] - n_new_frames) + i] = row
# if it is not variable length we don't have to treat it
# differently
else:
# if this is empty we need to reshape the dataset to accommodate data
if all([i == 0 for i in field.shape]):
# check the feature shape against the maxshape which gives
# the feature dimensions for an empty dataset
assert field_data.shape[1:] == field.maxshape[1:], \
"field feature dimensions must be the same, i.e. all but the first dimension"
# if it is empty resize it to make an array the size of
# the new field_data with the maxshape for the feature
# dimensions
feature_dims = field.maxshape[1:]
field.resize( (n_new_frames, *feature_dims) )
# set the new data to this
field[0:, ...] = field_data
# otherwise just add the data
else:
# append to the dataset on the first dimension, keeping the
# others the same, these must be feature vectors and therefore
# must exist
field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )
# add the new data
field[-n_new_frames:, ...] = field_data
def _run_record_namedtuple(self, run_record_key):
"""Generate a namedtuple record type for a record group.
The class name will be formatted like '{}_Record' where the {}
will be replaced with the name of the record group.
Parameters
----------
run_record_key : str
Name of the record group
Returns
-------
RecordType : namedtuple
The record type to generate records for this record group.
"""
Record = namedtuple('{}_Record'.format(run_record_key),
[CYCLE_IDX] + self.record_fields[run_record_key])
return Record
def _convert_record_field_to_table_column(self, run_idx, run_record_key, record_field):
"""Converts a dataset of feature vectors to more palatable values for
use in external datasets.
For single value feature vectors it unwraps them into single
values.
For 1-D feature vectors it casts them as tuples.
Anything of higher rank will raise an error.
Parameters
----------
run_idx : int
run_record_key : str
Name of the record group
record_field : str
Name of the field of the record group
Returns
-------
record_dset : list
Table-ified values
Raises
------
TypeError
If the field feature vector shape rank is greater than 1.
"""
# get the field dataset
rec_grp = self.records_grp(run_idx, run_record_key)
dset = rec_grp[record_field]
# if it is variable length or if it has more than one element
# cast all elements to tuples
if h5py.check_dtype(vlen=dset.dtype) is not None:
rec_dset = [tuple(value) for value in dset[:]]
# if it is not variable length make sure it is not more than a
# 1D feature vector
elif len(dset.shape) > 2:
raise TypeError(
"cannot convert fields with feature vectors more than 1 dimension,"
" was given {} for {}/{}".format(
dset.shape[1:], run_record_key, record_field))
# if it is only a rank 1 feature vector and it has more than
# one element make a tuple out of it
elif dset.shape[1] > 1:
rec_dset = [tuple(value) for value in dset[:]]
# otherwise just get the single value instead of keeping it as
# a single valued feature vector
else:
rec_dset = [value[0] for value in dset[:]]
return rec_dset
def _convert_record_fields_to_table_columns(self, run_idx, run_record_key):
"""Convert record group data to truncated namedtuple records.
This uses the specified record fields from the header settings
to choose which record group fields to apply this to.
Does no checking to make sure the fields are
"table-ifiable". If a field is not it will raise a TypeError.
Parameters
----------
run_idx : int
run_record_key : str
The name of the record group
Returns
-------
table_fields : dict of str : list
Mapping of the record group field to the table-ified values.
"""
fields = {}
for record_field in self.record_fields[run_record_key]:
fields[record_field] = self._convert_record_field_to_table_column(
run_idx, run_record_key, record_field)
return fields
def _make_records(self, run_record_key, cycle_idxs, fields):
"""Generate a list of proper (nametuple) records for a record group.
Parameters
----------
run_record_key : str
Name of the record group
cycle_idxs : list of int
The cycle indices you want to get records for.
fields : list of str
The fields to make record entries for.
Returns
-------
records : list of namedtuple objects
"""
Record = self._run_record_namedtuple(run_record_key)
# for each record we make a tuple and yield it
records = []
for record_idx in range(len(cycle_idxs)):
# make a record for this cycle
record_d = {CYCLE_IDX : cycle_idxs[record_idx]}
for record_field, column in fields.items():
datum = column[record_idx]
record_d[record_field] = datum
record = Record(*(record_d[key] for key in Record._fields))
records.append(record)
return records
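# Usage sketch (not part of the original source): a hypothetical illustration of
# how the record helpers above fit together, assuming `self` is a WepyHDF5
# instance with records for the WARPING group in run 0:
#
#     fields = self._convert_record_fields_to_table_columns(0, WARPING)
#     cycle_idxs = self.records_grp(0, WARPING)[CYCLE_IDXS][:]
#     records = self._make_records(WARPING, cycle_idxs, fields)
#     # each element of `records` is a '<group>_Record' namedtuple with a
#     # cycle_idx field plus one field per record field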
def _run_records_sporadic(self, run_idxs, run_record_key):
"""Generate records for a sporadic record group for a multi-run
contig.
If multiple run indices are given assumes that these are a
contig (e.g. the second run index is a continuation of the
first and so on). This method is considered low-level and does
no checking to make sure this is true.
The cycle indices of records from "continuation" runs will be
modified so as the records will be indexed as if they are a
single run.
Uses the record fields settings to decide which fields to use.
Parameters
----------
run_idxs : list of int
The indices of the runs in the order they are in the contig
run_record_key : str
Name of the record group
Returns
-------
records : list of namedtuple objects
"""
# we loop over the run_idxs in the contig and get the fields
# and cycle idxs for the whole contig
fields = None
cycle_idxs = np.array([], dtype=int)
# keep a cumulative total of the runs cycle idxs
prev_run_cycle_total = 0
for run_idx in run_idxs:
# get all the value columns from the datasets, and convert
# them to something amenable to a table
run_fields = self._convert_record_fields_to_table_columns(run_idx, run_record_key)
# we need to concatenate each field to the end of the
# field in the master dictionary, first we need to
# initialize it if it isn't already made
if fields is None:
# if it isn't initialized we just set it as this first
# run fields dictionary
fields = run_fields
else:
# if it is already initialized we need to go through
# each field and concatenate
for field_name, field_data in run_fields.items():
# just add it to the list of fields that will be concatenated later
fields[field_name].extend(field_data)
# get the cycle idxs for this run
rec_grp = self.records_grp(run_idx, run_record_key)
run_cycle_idxs = rec_grp[CYCLE_IDXS][:]
# add the total number of cycles that came before this run
# to each of the cycle idxs to get the cycle_idxs in terms
# of the full contig
run_contig_cycle_idxs = run_cycle_idxs + prev_run_cycle_total
# add these cycle indices to the records for the whole contig
cycle_idxs = np.hstack( (cycle_idxs, run_contig_cycle_idxs) )
# add the total number of cycle_idxs from this run to the
# running total
prev_run_cycle_total += self.num_run_cycles(run_idx)
# then make the records from the fields
records = self._make_records(run_record_key, cycle_idxs, fields)
return records
def _run_records_continual(self, run_idxs, run_record_key):
"""Generate records for a continual record group for a multi-run
contig.
If multiple run indices are given assumes that these are a
contig (e.g. the second run index is a continuation of the
first and so on). This method is considered low-level and does
no checking to make sure this is true.
The cycle indices of records from "continuation" runs will be
modified so as the records will be indexed as if they are a
single run.
Uses the record fields settings to decide which fields to use.
Parameters
----------
run_idxs : list of int
The indices of the runs in the order they are in the contig
run_record_key : str
Name of the record group
Returns
-------
records : list of namedtuple objects
"""
cycle_idxs = np.array([], dtype=int)
fields = None
prev_run_cycle_total = 0
for run_idx in run_idxs:
# get all the value columns from the datasets, and convert
# them to something amenable to a table
run_fields = self._convert_record_fields_to_table_columns(run_idx, run_record_key)
# we need to concatenate each field to the end of the
# field in the master dictionary, first we need to
# initialize it if it isn't already made
if fields is None:
# if it isn't initialized we just set it as this first
# run fields dictionary
fields = run_fields
else:
# if it is already initialized we need to go through
# each field and concatenate
for field_name, field_data in run_fields.items():
# just add it to the list of fields that will be concatenated later
fields[field_name].extend(field_data)
# get one of the fields (if any to iterate over)
record_fields = self.record_fields[run_record_key]
main_record_field = record_fields[0]
# make the cycle idxs from that
run_rec_grp = self.records_grp(run_idx, run_record_key)
run_cycle_idxs = np.array(range(run_rec_grp[main_record_field].shape[0]))
# add the total number of cycles that came before this run
# to each of the cycle idxs to get the cycle_idxs in terms
# of the full contig
run_contig_cycle_idxs = run_cycle_idxs + prev_run_cycle_total
# add these cycle indices to the records for the whole contig
cycle_idxs = np.hstack( (cycle_idxs, run_contig_cycle_idxs) )
# add the total number of cycle_idxs from this run to the
# running total
prev_run_cycle_total += self.num_run_cycles(run_idx)
# then make the records from the fields
records = self._make_records(run_record_key, cycle_idxs, fields)
return records
def _get_contiguous_traj_field(self, run_idx, traj_idx, field_path, frames=None):
"""Access actual data for a trajectory field.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Trajectory field name to access
frames : list of int, optional
The indices of the frames to return if you don't want all of them.
Returns
-------
field_data : arraylike
The data requested for the field.
"""
full_path = '{}/{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx, field_path)
if frames is None:
field = self._h5[full_path][:]
else:
field = self._h5[full_path][list(frames)]
return field
def _get_sparse_traj_field(self, run_idx, traj_idx, field_path, frames=None, masked=True):
"""Access actual data for a trajectory field.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Trajectory field name to access
frames : list of int, optional
The indices of the frames to return if you don't want all of them.
masked : bool
If True returns the array data as numpy masked array, and
only the available values if False.
Returns
-------
field_data : arraylike
The data requested for the field.
"""
traj_path = '{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)
traj_grp = self.h5[traj_path]
field = traj_grp[field_path]
n_frames = traj_grp[POSITIONS].shape[0]
if frames is None:
data = field[DATA][:]
# if it is to be masked make the masked array
if masked:
sparse_idxs = field[SPARSE_IDXS][:]
filled_data = np.full( (n_frames, *data.shape[1:]), np.nan)
filled_data[sparse_idxs] = data
mask = np.full( (n_frames, *data.shape[1:]), True)
mask[sparse_idxs] = False
data = np.ma.masked_array(filled_data, mask=mask)
else:
# get the sparse idxs and the frames to slice from the
# data
sparse_idxs = field[SPARSE_IDXS][:]
# we get a boolean array of the rows of the data table
# that we are to slice from
sparse_frame_idxs = np.argwhere(np.isin(sparse_idxs, frames))
data = field[DATA][list(sparse_frame_idxs)]
# if it is to be masked make the masked array
if masked:
# the empty arrays the size of the number of requested frames
filled_data = np.full( (len(frames), *field[DATA].shape[1:]), np.nan)
mask = np.full( (len(frames), *field[DATA].shape[1:]), True )
# take the data which exists and is part of the frames
# selection, and put it into the filled data where it is
# supposed to be
filled_data[np.isin(frames, sparse_idxs)] = data
# unmask the present values
mask[np.isin(frames, sparse_idxs)] = False
data = np.ma.masked_array(filled_data, mask=mask)
return data
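# Usage sketch (not part of the original source): a hypothetical illustration of
# the masked vs. unmasked return values of the sparse field reader above; the
# field name 'observables/rmsd' is made up for the example:
#
#     # masked=True: an array over all n_frames with missing cycles masked out
#     masked_field = self._get_sparse_traj_field(0, 0, 'observables/rmsd', masked=True)
#     # masked=False: only the values that were actually stored
#     raw_field = self._get_sparse_traj_field(0, 0, 'observables/rmsd', masked=False)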
def _add_run_field(self,
run_idx,
field_path,
data,
sparse_idxs=None,
force=False):
"""Add a trajectory field to all trajectories in a run.
By enforcing adding it to all trajectories at one time we
promote in-run consistency.
Parameters
----------
run_idx : int
field_path : str
Name to set the trajectory field as. Can be compound.
data : arraylike of shape (n_trajectories, n_cycles, feature_vector_shape[0],...)
The data for all trajectories to be added.
sparse_idxs : list of int
If the data you are adding is sparse specify which cycles to apply them to.
force : bool
If True, consistency checks on the data will be skipped.
"""
# TODO, SNIPPET: check that we have the right permissions
# if field_exists:
# # if we are in a permissive write mode we delete the
# # old dataset and add the new one, overwriting old data
# if self.mode in ['w', 'w-', 'x', 'r+']:
# logging.info("Dataset already present. Overwriting.")
# del obs_grp[field_name]
# obs_grp.create_dataset(field_name, data=results)
# # this will happen in 'c' and 'c-' modes
# else:
# raise RuntimeError(
# "Dataset already exists and file is in concatenate mode ('c' or 'c-')")
# check that the data has the correct number of trajectories
if not force:
assert len(data) == self.num_run_trajs(run_idx),\
    "The number of trajectories in data, {}, is different from the number "\
    "of trajectories in the run, {}.".format(len(data), self.num_run_trajs(run_idx))
# for each trajectory check that the data is compliant
for traj_idx, traj_data in enumerate(data):
if not force:
# check that the number of frames is not larger than that for the run
if traj_data.shape[0] > self.num_run_cycles(run_idx):
    raise ValueError("The number of frames in data for traj {}, {}, "
                     "is larger than the number of frames "
                     "for this run, {}.".format(
                         traj_idx, traj_data.shape[0], self.num_run_cycles(run_idx)))
# if the number of frames given is the same or less than
# the number of frames in the run
elif (traj_data.shape[0] <= self.num_run_cycles(run_idx)):
    # if sparse idxs were given we check that there are the
    # right number of them and that they match the number of
    # frames given
    if sparse_idxs is not None:
        if traj_data.shape[0] != len(sparse_idxs[traj_idx]):
            raise ValueError("The number of frames provided for traj {}, {}, "
                             "was less than the total number of frames, {}, "
                             "but an incorrect number of sparse idxs were supplied, {}."\
                             .format(traj_idx, traj_data.shape[0],
                                     self.num_run_cycles(run_idx), len(sparse_idxs[traj_idx])))
    # if there were strictly fewer frames given and the
    # sparse idxs were not given we need to raise an error
    elif (traj_data.shape[0] < self.num_run_cycles(run_idx)):
        raise ValueError("The number of frames provided for traj {}, {}, "
                         "was less than the total number of frames, {}, "
                         "but sparse_idxs were not supplied.".format(
                             traj_idx, traj_data.shape[0],
                             self.num_run_cycles(run_idx)))
# add it to each traj
for i, idx_tup in enumerate(self.run_traj_idx_tuples([run_idx])):
if sparse_idxs is None:
self._add_traj_field_data(*idx_tup, field_path, data[i])
else:
self._add_traj_field_data(*idx_tup, field_path, data[i],
sparse_idxs=sparse_idxs[i])
def _add_field(self, field_path, data, sparse_idxs=None,
force=False):
"""Add a trajectory field to all runs in a file.
Parameters
----------
field_path : str
Name of trajectory field
data : list of arraylike
Each element of this list corresponds to a single run. The
elements of which are arraylikes of shape (n_trajectories,
n_cycles, feature_vector_shape[0],...) for each run.
sparse_idxs : list of list of int
The list of cycle indices to set for the sparse fields. If
None, no trajectories are set as sparse.
"""
for i, run_idx in enumerate(self.run_idxs):
if sparse_idxs is not None:
self._add_run_field(run_idx, field_path, data[i], sparse_idxs=sparse_idxs[i],
force=force)
else:
self._add_run_field(run_idx, field_path, data[i],
force=force)
#### Public Methods
### File Utilities
@property
def filename(self):
"""The path to the underlying HDF5 file."""
return self._filename
def open(self, mode=None):
"""Open the underlying HDF5 file for access.
Parameters
----------
mode : str
Valid mode spec. Opens the HDF5 file in this mode if given
otherwise uses the existing mode.
"""
if mode is None:
mode = self.mode
if self.closed:
self.set_mode(mode)
self._h5 = h5py.File(self._filename, mode,
libver=H5PY_LIBVER, swmr=self.swmr_mode)
self.closed = False
else:
raise IOError("This file is already open")
def close(self):
"""Close the underlying HDF5 file. """
if not self.closed:
self._h5.flush()
self._h5.close()
self.closed = True
@property
def mode(self):
"""The WepyHDF5 mode this object was created with."""
return self._wepy_mode
@mode.setter
def mode(self, mode):
"""Set the mode for opening the file with."""
self.set_mode(mode)
def set_mode(self, mode):
"""Set the mode for opening the file with."""
if not self.closed:
raise AttributeError("Cannot set the mode while the file is open.")
self._set_h5_mode(mode)
self._wepy_mode = mode
@property
def h5_mode(self):
"""The h5py.File mode the HDF5 file currently has."""
return self._h5.mode
def _set_h5_mode(self, h5_mode):
"""Set the mode to open the HDF5 file with.
This really shouldn't be set without using the main wepy mode
as they need to be aligned.
"""
if not self.closed:
raise AttributeError("Cannot set the mode while the file is open.")
self._h5py_mode = h5_mode
@property
def h5(self):
"""The underlying h5py.File object."""
return self._h5
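# Usage sketch (not part of the original source; the file path is hypothetical):
#
#     wepy_h5 = WepyHDF5('results.wepy.h5', mode='r')
#     with wepy_h5:                 # opens on enter, closes on exit
#         print(wepy_h5.num_runs)
#     # or manage the file handle manually
#     wepy_h5.open(mode='r+')
#     wepy_h5.close()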
### h5py object access
def run(self, run_idx):
"""Get the h5py.Group for a run.
Parameters
----------
run_idx : int
Returns
-------
run_group : h5py.Group
"""
return self._h5['{}/{}'.format(RUNS, int(run_idx))]
def traj(self, run_idx, traj_idx):
"""Get an h5py.Group trajectory group.
Parameters
----------
run_idx : int
traj_idx : int
Returns
-------
traj_group : h5py.Group
"""
return self._h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]
def run_trajs(self, run_idx):
"""Get the trajectories group for a run.
Parameters
----------
run_idx : int
Returns
-------
trajectories_grp : h5py.Group
"""
return self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)]
@property
def runs(self):
"""The runs group."""
return self.h5[RUNS]
def run_grp(self, run_idx):
"""A group for a single run."""
return self.runs["{}".format(run_idx)]
def run_start_snapshot_hash(self, run_idx):
"""Hash identifier for the starting snapshot of a run from
orchestration.
"""
return self.run_grp(run_idx).attrs[RUN_START_SNAPSHOT_HASH]
def run_end_snapshot_hash(self, run_idx):
"""Hash identifier for the ending snapshot of a run from
orchestration.
"""
return self.run_grp(run_idx).attrs[RUN_END_SNAPSHOT_HASH]
def set_run_start_snapshot_hash(self, run_idx, snaphash):
"""Set the starting snapshot hash identifier for a run from
orchestration.
"""
if RUN_START_SNAPSHOT_HASH not in self.run_grp(run_idx).attrs:
self.run_grp(run_idx).attrs[RUN_START_SNAPSHOT_HASH] = snaphash
else:
raise AttributeError("The snapshot has already been set.")
def set_run_end_snapshot_hash(self, run_idx, snaphash):
"""Set the ending snapshot hash identifier for a run from
orchestration.
"""
if RUN_END_SNAPSHOT_HASH not in self.run_grp(run_idx).attrs:
self.run_grp(run_idx).attrs[RUN_END_SNAPSHOT_HASH] = snaphash
else:
raise AttributeError("The snapshot has already been set.")
@property
def settings_grp(self):
"""The header settings group."""
settings_grp = self.h5[SETTINGS]
return settings_grp
def decision_grp(self, run_idx):
"""Get the decision enumeration group for a run.
Parameters
----------
run_idx : int
Returns
-------
decision_grp : h5py.Group
"""
return self.run(run_idx)[DECISION]
def init_walkers_grp(self, run_idx):
"""Get the group for the initial walkers for a run.
Parameters
----------
run_idx : int
Returns
-------
init_walkers_grp : h5py.Group
"""
return self.run(run_idx)[INIT_WALKERS]
def records_grp(self, run_idx, run_record_key):
"""Get a record group h5py.Group for a run.
Parameters
----------
run_idx : int
run_record_key : str
Name of the record group
Returns
-------
run_record_group : h5py.Group
"""
path = '{}/{}/{}'.format(RUNS, run_idx, run_record_key)
return self.h5[path]
def resampling_grp(self, run_idx):
"""Get this record group for a run.
Parameters
----------
run_idx : int
Returns
-------
run_record_group : h5py.Group
"""
return self.records_grp(run_idx, RESAMPLING)
def resampler_grp(self, run_idx):
"""Get this record group for a run.
Parameters
----------
run_idx : int
Returns
-------
run_record_group : h5py.Group
"""
return self.records_grp(run_idx, RESAMPLER)
def warping_grp(self, run_idx):
"""Get this record group for a run.
Parameters
----------
run_idx : int
Returns
-------
run_record_group : h5py.Group
"""
return self.records_grp(run_idx, WARPING)
def bc_grp(self, run_idx):
"""Get this record group for a run.
Parameters
----------
run_idx : int
Returns
-------
run_record_group : h5py.Group
"""
return self.records_grp(run_idx, BC)
def progress_grp(self, run_idx):
"""Get this record group for a run.
Parameters
----------
run_idx : int
Returns
-------
run_record_group : h5py.Group
"""
return self.records_grp(run_idx, PROGRESS)
def iter_runs(self, idxs=False, run_sel=None):
"""Generator for iterating through the runs of a file.
Parameters
----------
idxs : bool
If True yields the run index in addition to the group.
run_sel : list of int, optional
If not None should be a list of the runs you want to iterate over.
Yields
------
run_idx : int, if idxs is True
run_group : h5py.Group
"""
if run_sel is None:
run_sel = self.run_idxs
for run_idx in self.run_idxs:
if run_idx in run_sel:
run = self.run(run_idx)
if idxs:
yield run_idx, run
else:
yield run
def iter_trajs(self, idxs=False, traj_sel=None):
"""Generator for iterating over trajectories in a file.
Parameters
----------
idxs : bool
If True returns a tuple of the run index and trajectory
index in addition to the trajectory group.
traj_sel : list of int, optional
If not None is a list of tuples of (run_idx, traj_idx)
selecting which trajectories to iterate over.
Yields
------
traj_id : tuple of int, if idxs is True
A tuple of (run_idx, traj_idx) for the group
trajectory : h5py.Group
"""
# set the selection of trajectories to iterate over
if traj_sel is None:
idx_tups = self.run_traj_idx_tuples()
else:
idx_tups = traj_sel
# get each traj for each idx_tup and yield them for the generator
for run_idx, traj_idx in idx_tups:
traj = self.traj(run_idx, traj_idx)
if idxs:
yield (run_idx, traj_idx), traj
else:
yield traj
def iter_run_trajs(self, run_idx, idxs=False):
"""Iterate over the trajectories of a run.
Parameters
----------
run_idx : int
idxs : bool
If True returns a tuple of the run index and trajectory
index in addition to the trajectory group.
Returns
-------
iter_trajs_generator : generator for the iter_trajs method
"""
run_sel = self.run_traj_idx_tuples([run_idx])
return self.iter_trajs(idxs=idxs, traj_sel=run_sel)
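# Usage sketch (not part of the original source; assumes `wepy_h5` is an open
# WepyHDF5 instance):
#
#     for run_idx, run_grp in wepy_h5.iter_runs(idxs=True):
#         print(run_idx, len(run_grp[TRAJECTORIES]))
#     for (run_idx, traj_idx), traj_grp in wepy_h5.iter_trajs(idxs=True):
#         print(run_idx, traj_idx, traj_grp[POSITIONS].shape)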
### Settings
@property
def defined_traj_field_names(self):
"""A list of the settings defined field names all trajectories have in the file."""
return list(self.field_feature_shapes.keys())
@property
def observable_field_names(self):
"""Returns a list of the names of the observables that all trajectories have.
If observable fields are encountered that do not occur in all
trajectories (an inconsistency), a TypeError is raised.
"""
n_trajs = self.num_trajs
field_names = Counter()
for traj in self.iter_trajs():
for name in list(traj['observables']):
field_names[name] += 1
# if any of the field names has not occurred for every
# trajectory we raise an error
for field_name, count in field_names.items():
if count != n_trajs:
raise TypeError("observable field names are inconsistent")
# otherwise return the field names for the observables
return list(field_names.keys())
def _check_traj_field_consistency(self, field_names):
"""Checks that every trajectory has the given fields across
the entire dataset.
Parameters
----------
field_names : list of str
The field names to check for.
Returns
-------
consistent : bool
True if all trajs have the fields, False otherwise
"""
n_trajs = self.num_trajs
field_counts = Counter()
for traj in self.iter_trajs():
    for name in field_names:
        if name in traj:
            field_counts[name] += 1
# if any of the field names has not occurred for every
# trajectory the fields are inconsistent
for field_name, count in field_counts.items():
    if count != n_trajs:
        return False
return True
@property
def record_fields(self):
"""The record fields for each record group which are selected for inclusion in the truncated records.
These are the fields which are considered to be table-ified.
Returns
-------
record_fields : dict of str : list of str
Mapping of record group name to a list of the record group fields.
"""
record_fields_grp = self.settings_grp[RECORD_FIELDS]
record_fields_dict = {}
for group_name, dset in record_fields_grp.items():
record_fields_dict[group_name] = list(dset)
return record_fields_dict
@property
def sparse_fields(self):
"""The trajectory fields that are sparse."""
return self.h5['{}/{}'.format(SETTINGS, SPARSE_FIELDS)][:]
@property
def main_rep_idxs(self):
"""The indices of the atoms included from the full topology in the default 'positions' trajectory """
if '{}/{}'.format(SETTINGS, MAIN_REP_IDXS) in self.h5:
return self.h5['{}/{}'.format(SETTINGS, MAIN_REP_IDXS)][:]
else:
return None
@property
def alt_reps_idxs(self):
"""Mapping of the names of the alt reps to the indices of the atoms
from the topology that they include in their datasets."""
idxs_grp = self.h5['{}/{}'.format(SETTINGS, ALT_REPS_IDXS)]
return {name : ds[:] for name, ds in idxs_grp.items()}
@property
def alt_reps(self):
"""Names of the alt reps."""
idxs_grp = self.h5['{}/{}'.format(SETTINGS, ALT_REPS_IDXS)]
return set(idxs_grp.keys())
@property
def field_feature_shapes(self):
"""Mapping of the names of the trajectory fields to their feature
vector shapes."""
shapes_grp = self.h5['{}/{}'.format(SETTINGS, FIELD_FEATURE_SHAPES_STR)]
field_paths = _iter_field_paths(shapes_grp)
shapes = {}
for field_path in field_paths:
shape = shapes_grp[field_path][()]
if np.isnan(shape).all():
shapes[field_path] = None
else:
shapes[field_path] = shape
return shapes
@property
def field_feature_dtypes(self):
"""Mapping of the names of the trajectory fields to their feature
vector numpy dtypes."""
dtypes_grp = self.h5['{}/{}'.format(SETTINGS, FIELD_FEATURE_DTYPES_STR)]
field_paths = _iter_field_paths(dtypes_grp)
dtypes = {}
for field_path in field_paths:
dtype_str = dtypes_grp[field_path][()]
# if there is 'None' flag for the dtype then return None
if dtype_str == NONE_STR:
dtypes[field_path] = None
else:
dtype_obj = json.loads(dtype_str)
dtype_obj = [tuple(d) for d in dtype_obj]
dtype = np.dtype(dtype_obj)
dtypes[field_path] = dtype
return dtypes
@property
def continuations(self):
"""The continuation relationships in this file."""
return self.settings_grp[CONTINUATIONS][:]
@property
def metadata(self):
"""File metadata (h5py.attrs)."""
return dict(self._h5.attrs)
def decision_enum(self, run_idx):
"""Mapping of decision enumerated names to their integer representations.
Parameters
----------
run_idx : int
Returns
-------
decision_enum : dict of str : int
Mapping of the decision ID string to the integer representation.
See Also
--------
WepyHDF5.decision_value_names : for the reverse mapping
"""
enum_grp = self.decision_grp(run_idx)
enum = {}
for decision_name, dset in enum_grp.items():
enum[decision_name] = dset[()]
return enum
def decision_value_names(self, run_idx):
"""Mapping of the integer values for decisions to the decision ID strings.
Parameters
----------
run_idx : int
Returns
-------
decision_enum : dict of int : str
Mapping of the decision integer to the decision ID string representation.
See Also
--------
WepyHDF5.decision_enum : for the reverse mapping
"""
enum_grp = self.decision_grp(run_idx)
rev_enum = {}
for decision_name, dset in enum_grp.items():
value = dset[()]
rev_enum[value] = decision_name
return rev_enum
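# Usage sketch (not part of the original source; assumes `wepy_h5` is an open
# WepyHDF5 instance whose run 0 has a decision group; names and values shown
# are illustrative only):
#
#     enum = wepy_h5.decision_enum(0)          # e.g. {'SQUASH': 3, ...}
#     names = wepy_h5.decision_value_names(0)  # the reverse mapping, int -> name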
### Topology
def get_topology(self, alt_rep=POSITIONS):
"""Get the JSON topology for a particular represenation of the positions.
By default gives the topology for the main 'positions' field
(when alt_rep 'positions'). To get the full topology the file
was initialized with set `alt_rep` to `None`. Topologies for
alternative representations (subfields of 'alt_reps') can be
obtained by passing in the key for that alt_rep. For example,
'all_atoms' for the field in alt_reps called 'all_atoms'.
Parameters
----------
alt_rep : str
The base name of the alternate representation, or 'positions', or None.
Returns
-------
topology : str
The JSON topology string for the representation.
"""
top = self.topology
# if no alternative representation is given we just return the
# full topology
if alt_rep is None:
pass
# otherwise we either give the main representation topology
# subset
elif alt_rep == POSITIONS:
top = json_top_subset(top, self.main_rep_idxs)
# or choose one of the alternative representations
elif alt_rep in self.alt_reps_idxs:
top = json_top_subset(top, self.alt_reps_idxs[alt_rep])
# and raise an error if the given alternative representation
# is not given
else:
raise ValueError("alt_rep {} not found".format(alt_rep))
return top
@property
def topology(self):
"""The topology for the full simulated system.
May not be the main representation in the POSITIONS field; for
that use the `get_topology` method.
Returns
-------
topology : str
The JSON topology string for the full representation.
"""
return self._h5[TOPOLOGY][()]
def get_mdtraj_topology(self, alt_rep=POSITIONS):
"""Get an mdtraj.Topology object for a system representation.
By default gives the topology for the main 'positions' field
(when alt_rep 'positions'). To get the full topology the file
was initialized with set `alt_rep` to `None`. Topologies for
alternative representations (subfields of 'alt_reps') can be
obtained by passing in the key for that alt_rep. For example,
'all_atoms' for the field in alt_reps called 'all_atoms'.
Parameters
----------
alt_rep : str
The base name of the alternate representation, or 'positions', or None.
Returns
-------
topology : mdtraj.Topology
The mdtraj topology object for the requested representation.
"""
json_top = self.get_topology(alt_rep=alt_rep)
return json_to_mdtraj_topology(json_top)
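# Usage sketch (not part of the original source; assumes `wepy_h5` is an open
# WepyHDF5 instance and mdtraj is installed):
#
#     main_top_json = wepy_h5.get_topology()              # topology of the 'positions' subset
#     full_top_json = wepy_h5.get_topology(alt_rep=None)  # topology of the full system
#     mdj_top = wepy_h5.get_mdtraj_topology()             # as an mdtraj.Topology object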
## Initial walkers
def initial_walker_fields(self, run_idx, fields, walker_idxs=None):
"""Get fields from the initial walkers of the simulation.
Parameters
----------
run_idx : int
Run to get initial walkers for.
fields : list of str
Names of the fields you want to retrieve.
walker_idxs : None or list of int
If None returns all of the walkers fields, otherwise a
list of ints that are a selection from those walkers.
Returns
-------
walker_fields : dict of str : array of shape
Dictionary mapping fields to the values for all
walkers. Frames will be either in counting order if no
indices were requested or the order of the walker indices
as given.
"""
# set the walker indices if not specified
if walker_idxs is None:
walker_idxs = range(self.num_init_walkers(run_idx))
init_walker_fields = {field : [] for field in fields}
# for each walker go through and add the selected fields
for walker_idx in walker_idxs:
init_walker_grp = self.init_walkers_grp(run_idx)[str(walker_idx)]
for field in fields:
# we remove the first dimension because we just want
# them as a single frame
init_walker_fields[field].append(init_walker_grp[field][:][0])
# convert the field values to arrays
init_walker_fields = {field : np.array(val) for field, val in init_walker_fields.items()}
return init_walker_fields
def initial_walkers_to_mdtraj(self, run_idx, walker_idxs=None, alt_rep=POSITIONS):
"""Generate an mdtraj Trajectory from a trace of frames from the runs.
Uses the default fields for positions (unless an alternate
representation is specified) and box vectors which are assumed
to be present in the trajectory fields.
The time value for the mdtraj trajectory is set to the cycle
indices for each trace frame.
This is useful for converting WepyHDF5 data to common
molecular dynamics data formats accessible through the mdtraj
library.
Parameters
----------
run_idx : int
Run to get initial walkers for.
walker_idxs : None or list of int
If None returns all of the walkers fields, otherwise a
list of ints that are a selection from those walkers.
alt_rep : None or str
If None uses default 'positions' representation otherwise
chooses the representation from the 'alt_reps' compound field.
Returns
-------
traj : mdtraj.Trajectory
"""
rep_path = self._choose_rep_path(alt_rep)
init_walker_fields = self.initial_walker_fields(run_idx, [rep_path, BOX_VECTORS],
walker_idxs=walker_idxs)
return self.traj_fields_to_mdtraj(init_walker_fields, alt_rep=alt_rep)
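# Usage sketch (not part of the original source; assumes `wepy_h5` is an open
# WepyHDF5 instance):
#
#     init_fields = wepy_h5.initial_walker_fields(0, [POSITIONS, BOX_VECTORS])
#     init_traj = wepy_h5.initial_walkers_to_mdtraj(0)   # mdtraj.Trajectory of the starting walkers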
### Counts and Indexing
@property
def num_atoms(self):
"""The number of atoms in the full topology representation."""
return self.h5['{}/{}'.format(SETTINGS, N_ATOMS)][()]
@property
def num_dims(self):
"""The number of spatial dimensions in the positions and alt_reps trajectory fields."""
return self.h5['{}/{}'.format(SETTINGS, N_DIMS_STR)][()]
@property
def num_runs(self):
"""The number of runs in the file."""
return len(self._h5[RUNS])
@property
def num_trajs(self):
"""The total number of trajectories in the entire file."""
return len(list(self.run_traj_idx_tuples()))
def num_init_walkers(self, run_idx):
"""The number of initial walkers for a run.
Parameters
----------
run_idx : int
Returns
-------
n_walkers : int
"""
return len(self.init_walkers_grp(run_idx))
def num_walkers(self, run_idx, cycle_idx):
"""Get the number of walkers at a given cycle in a run.
Parameters
----------
run_idx : int
cycle_idx : int
Returns
-------
n_walkers : int
"""
if cycle_idx >= self.num_run_cycles(run_idx):
raise ValueError(
f"Run {run_idx} has {self.num_run_cycles(run_idx)} cycles, {cycle_idx} requested")
# TODO: currently we do not have a well-defined mechanism for
# actually storing variable number of walkers in the
# trajectory data so just return the number of trajectories
return self.num_run_trajs(run_idx)
def num_run_trajs(self, run_idx):
"""The number of trajectories in a run.
Parameters
----------
run_idx : int
Returns
-------
n_trajs : int
"""
return len(self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)])
def num_run_cycles(self, run_idx):
"""The number of cycles in a run.
Parameters
----------
run_idx : int
Returns
-------
n_cycles : int
"""
return self.num_traj_frames(run_idx, 0)
def num_traj_frames(self, run_idx, traj_idx):
"""The number of frames in a given trajectory.
Parameters
----------
run_idx : int
traj_idx : int
Returns
-------
n_frames : int
"""
return self.traj(run_idx, traj_idx)[POSITIONS].shape[0]
@property
def run_idxs(self):
"""The indices of the runs in the file."""
return list(range(len(self._h5[RUNS])))
def run_traj_idxs(self, run_idx):
"""The indices of trajectories in a run.
Parameters
----------
run_idx : int
Returns
-------
traj_idxs : list of int
"""
return list(range(len(self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)])))
def run_traj_idx_tuples(self, runs=None):
"""Get identifier tuples (run_idx, traj_idx) for all trajectories in
all runs.
Parameters
----------
runs : list of int, optional
If not None, a list of run indices to restrict to.
Returns
-------
run_traj_tuples : list of tuple of int
A listing of all trajectories by their identifying tuple
of (run_idx, traj_idx).
"""
tups = []
if runs is None:
run_idxs = self.run_idxs
else:
run_idxs = runs
for run_idx in run_idxs:
for traj_idx in self.run_traj_idxs(run_idx):
tups.append((run_idx, traj_idx))
return tups
def get_traj_field_cycle_idxs(self, run_idx, traj_idx, field_path):
"""Returns the cycle indices for a sparse trajectory field.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Name of the trajectory field
Returns
-------
cycle_idxs : arraylike of int
"""
traj_path = '{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)
if not field_path in self._h5[traj_path]:
raise KeyError("key for field {} not found".format(field_path))
# if the field is not sparse just return the cycle indices for
# that run
if field_path not in self.sparse_fields:
cycle_idxs = np.array(range(self.num_run_cycles(run_idx)))
else:
cycle_idxs = self._h5[traj_path][field_path][SPARSE_IDXS][:]
return cycle_idxs
def next_run_idx(self):
"""The index of the next run if it were to be added.
Because runs are named as the integer value of the order they
were added this gives the index of the next run that would be
added.
Returns
-------
next_run_idx : int
"""
return self.num_runs
def next_run_traj_idx(self, run_idx):
"""The index of the next trajectory for this run.
Parameters
----------
run_idx : int
Returns
-------
next_traj_idx : int
"""
return self.num_run_trajs(run_idx)
### Aggregation
def is_run_contig(self, run_idxs):
"""This method checks that if a given list of run indices is a valid
contig or not.
Parameters
----------
run_idxs : list of int
The run indices that would make up the contig in order.
Returns
-------
is_contig : bool
"""
run_idx_continuations = [np.array([run_idxs[idx+1], run_idxs[idx]])
for idx in range(len(run_idxs)-1)]
# get the continuations array
continuations = self.settings_grp[CONTINUATIONS][:]
# check whether each consecutive pair of runs appears in the continuations list
for run_continuous in run_idx_continuations:
contig = False
for continuous in continuations:
if np.array_equal(run_continuous, continuous):
contig = True
if not contig:
return False
return True
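# Usage sketch (not part of the original source; assumes `wepy_h5` is an open
# WepyHDF5 instance where run 1 was recorded as a continuation of run 0):
#
#     wepy_h5.is_run_contig([0, 1])   # True
#     wepy_h5.is_run_contig([1, 0])   # False; the order of the runs matters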
def clone(self, path, mode='x'):
"""Clone the header information of this file into another file.
Clones this WepyHDF5 file without any of the actual runs and run
data. This includes the topology, units, sparse_fields,
feature shapes and dtypes, alt_reps, and main representation
information.
This method will flush the buffers for this file.
Does not preserve metadata pertaining to inter-run
relationships like continuations.
Parameters
----------
path : str
File path to save the new file.
mode : str
The mode to open the new file with.
Returns
-------
new_file : WepyHDF5
The WepyHDF5 object for the new file. It will be closed.
"""
assert mode in ['w', 'w-', 'x'], "must be opened in a file creation mode"
# we manually construct an HDF5 and copy the groups over
new_h5 = h5py.File(path, mode=mode, libver=H5PY_LIBVER)
new_h5.require_group(RUNS)
# flush the datasets buffers
self.h5.flush()
new_h5.flush()
# copy the existing datasets to the new one
h5py.h5o.copy(self._h5.id, TOPOLOGY.encode(), new_h5.id, TOPOLOGY.encode())
h5py.h5o.copy(self._h5.id, UNITS.encode(), new_h5.id, UNITS.encode())
h5py.h5o.copy(self._h5.id, SETTINGS.encode(), new_h5.id, SETTINGS.encode())
# now make a WepyHDF5 object in "expert_mode" which means it
# is just empty and we construct it manually, "surgically" as I
# like to call it
new_wepy_h5 = WepyHDF5(path, expert_mode=True)
# perform the surgery:
# attach the h5py.File
new_wepy_h5._h5 = new_h5
# set the wepy mode to read-write since the creation flags
# were already used in construction of the h5py.File object
new_wepy_h5._wepy_mode = 'r+'
new_wepy_h5._h5py_mode = 'r+'
# for the settings we need to get rid of the data for interun
# relationships like the continuations, so we reinitialize the
# continuations for the new file
new_wepy_h5._init_continuations()
# close the h5py.File and set the attribute to closed
new_wepy_h5._h5.close()
new_wepy_h5.closed = True
# return the runless WepyHDF5 object
return new_wepy_h5
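# Usage sketch (not part of the original source; the output path is hypothetical):
#
#     empty_h5 = wepy_h5.clone('empty.wepy.h5', mode='w-')
#     # `empty_h5` is a closed, runless WepyHDF5 sharing this file's topology,
#     # units, and settings; open it before adding, linking, or extracting runs.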
def link_run(self, filepath, run_idx, continue_run=None, **kwargs):
"""Add a run from another file to this one as an HDF5 external
link.
Parameters
----------
filepath : str
File path to the HDF5 file that the run is on.
run_idx : int
The run index from the target file you want to link.
continue_run : int, optional
The run from the linking WepyHDF5 file you want the target
linked run to continue.
kwargs : dict
Adds metadata (h5py.attrs) to the linked run.
Returns
-------
linked_run_idx : int
The index of the linked run in the linking file.
"""
# link to the external run
ext_run_link = h5py.ExternalLink(filepath, '{}/{}'.format(RUNS, run_idx))
# the run index in this file, as determined by the counter
here_run_idx = self.next_run_idx()
# set the local run as the external link to the other run
self._h5['{}/{}'.format(RUNS, here_run_idx)] = ext_run_link
# run the initialization routines for adding a run
self._add_run_init(here_run_idx, continue_run=continue_run)
run_grp = self._h5['{}/{}'.format(RUNS, here_run_idx)]
# add metadata if given
for key, val in kwargs.items():
if key != RUN_IDX:
run_grp.attrs[key] = val
else:
warn('run_idx metadata is set by wepy and cannot be used', RuntimeWarning)
return here_run_idx
def link_file_runs(self, wepy_h5_path):
"""Link all runs from another WepyHDF5 file.
This preserves continuations within that file. This will open
the file if not already opened.
Parameters
----------
wepy_h5_path : str
Filepath to the file you want to link runs from.
Returns
-------
new_run_idxs : list of int
The new run idxs from the linking file.
"""
wepy_h5 = WepyHDF5(wepy_h5_path, mode='r')
with wepy_h5:
ext_run_idxs = wepy_h5.run_idxs
continuations = wepy_h5.continuations
# add the runs
new_run_idxs = []
for ext_run_idx in ext_run_idxs:
# link the next run, and get its new run index
new_run_idx = self.link_run(wepy_h5_path, ext_run_idx)
# save that run idx
new_run_idxs.append(new_run_idx)
# copy the continuations over translating the run idxs,
# for each continuation in the other files continuations
for continuation in continuations:
# translate each run index from the external file
# continuations to the run idxs they were just assigned in
# this file
self.add_continuation(new_run_idxs[continuation[0]],
new_run_idxs[continuation[1]])
return new_run_idxs
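# Usage sketch (not part of the original source; file paths are hypothetical):
#
#     new_idx = wepy_h5.link_run('other.wepy.h5', 0)      # link one external run
#     new_idxs = wepy_h5.link_file_runs('other.wepy.h5')  # link all runs, preserving continuations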
def extract_run(self, filepath, run_idx,
continue_run=None,
run_slice=None,
**kwargs):
"""Add a run from another file to this one by copying it and
truncating it if necessary.
Parameters
----------
filepath : str
File path to the HDF5 file that the run is on.
run_idx : int
The run index from the target file you want to link.
continue_run : int, optional
The run from the linking WepyHDF5 file you want the target
linked run to continue.
run_slice : optional
Specification of the subset of the run to copy; passed through to `copy_run_slice`.
kwargs : dict
Adds metadata (h5py.attrs) to the linked run.
Returns
-------
linked_run_idx : int
The index of the linked run in the linking file.
"""
# close ourselves if not already done, so we can write using
# the lower level API
was_open = False
if not self.closed:
self.close()
was_open = True
# do the copying
# open the other file and get the runs in it and the
# continuations it has
wepy_h5 = WepyHDF5(filepath, mode='r')
with self:
# normalize our HDF5s path
self_path = osp.realpath(self.filename)
# the run index in this file, as determined by the counter
here_run_idx = self.next_run_idx()
# get the group name for the new run in this HDF5
target_grp_path = "/runs/{}".format(here_run_idx)
with wepy_h5:
# link the next run, and get its new run index
new_h5 = wepy_h5.copy_run_slice(run_idx, self_path,
target_grp_path,
run_slice=run_slice,
mode='r+')
# close it since we are done
new_h5.close()
with self:
# run the initialization routines for adding a run, just
# sets some metadata
self._add_run_init(here_run_idx, continue_run=continue_run)
run_grp = self._h5['{}/{}'.format(RUNS, here_run_idx)]
# add metadata if given
for key, val in kwargs.items():
if key != RUN_IDX:
run_grp.attrs[key] = val
else:
warn('run_idx metadata is set by wepy and cannot be used', RuntimeWarning)
if was_open:
self.open()
return here_run_idx
def extract_file_runs(self, wepy_h5_path,
run_slices=None):
"""Extract (copying and truncating appropriately) all runs from
another WepyHDF5 file.
This preserves continuations within that file. This will open
the file if not already opened.
Parameters
----------
wepy_h5_path : str
Filepath to the file you want to extract runs from.
run_slices : dict of int : run_slice spec, optional
Mapping of run indices in the source file to the slice of that run to extract; passed through to `extract_run`.
Returns
-------
new_run_idxs : list of int
The new run idxs from the linking file.
"""
if run_slices is None:
run_slices = {}
# open the other file and get the runs in it and the
# continuations it has
wepy_h5 = WepyHDF5(wepy_h5_path, mode='r')
with wepy_h5:
# the run idx in the external file
ext_run_idxs = wepy_h5.run_idxs
continuations = wepy_h5.continuations
# then for each run in it copy them to this file
new_run_idxs = []
for ext_run_idx in ext_run_idxs:
# get the run_slice spec for the run in the other file
run_slice = run_slices.get(ext_run_idx, None)
# get the index this run should be when it is added
new_run_idx = self.extract_run(wepy_h5_path, ext_run_idx,
run_slice=run_slice)
# save that run idx
new_run_idxs.append(new_run_idx)
was_closed = False
if self.closed:
self.open()
was_closed = True
# copy the continuations over translating the run idxs,
# for each continuation in the other files continuations
for continuation in continuations:
# translate each run index from the external file
# continuations to the run idxs they were just assigned in
# this file
self.add_continuation(new_run_idxs[continuation[0]],
new_run_idxs[continuation[1]])
if was_closed:
self.close()
return new_run_idxs
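# Usage sketch (not part of the original source; the file path is hypothetical
# and the exact form of a run_slice spec is an assumption here, it is simply
# passed through to `copy_run_slice`):
#
#     new_idx = wepy_h5.extract_run('other.wepy.h5', 0, run_slice=(0, 100))
#     new_idxs = wepy_h5.extract_file_runs('other.wepy.h5', run_slices={0: (0, 100)})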
def join(self, other_h5):
"""Given another WepyHDF5 file object does a left join on this
file, renumbering the runs starting from this file.
This function uses the H5O function for copying. Data will be
copied not linked.
Parameters
----------
other_h5 : h5py.File
File handle to the file you want to join to this one.
"""
with other_h5 as h5:
for run_idx in h5.run_idxs:
# the other run group handle
other_run = h5.run(run_idx)
# copy this run to this file in the next run_idx group
self.h5.copy(other_run, '{}/{}'.format(RUNS, self.next_run_idx()))
### initialization and data generation
def add_metadata(self, key, value):
"""Add metadata for the whole file.
Parameters
----------
key : str
value : h5py value
h5py valid metadata value.
"""
self._h5.attrs[key] = value
def init_record_fields(self, run_record_key, record_fields):
"""Initialize the settings record fields for a record group in the
settings group.
Saves which fields of a run record group's datasets are to be
included in the table-like representation. This exists so that
large and small record datasets can be stored together while
still allowing a more compact, single table-like representation
to be produced for serialization.
Parameters
----------
run_record_key : str
Name of the record group you want to set this for.
record_fields : list of str
Names of the fields you want to set as record fields.
"""
record_fields_grp = self.settings_grp[RECORD_FIELDS]
# make a dataset for the names of the record fields. this requires
# a 'special' datatype for variable length strings. This is
# supported by HDF5 but not numpy.
vlen_str_dt = h5py.special_dtype(vlen=str)
# create the dataset with the strings of the fields which are records
record_group_fields_ds = record_fields_grp.create_dataset(run_record_key,
(len(record_fields),),
dtype=vlen_str_dt,
maxshape=(None,))
# set the flags
for i, record_field in enumerate(record_fields):
record_group_fields_ds[i] = record_field
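# Usage sketch (not part of the original source; `resampler` and `bc` stand for
# objects implementing the Resampler and BoundaryConditions interfaces):
#
#     wepy_h5.init_record_fields(RESAMPLING, resampler.resampling_record_field_names())
#     # or use the typed convenience wrappers below
#     wepy_h5.init_resampling_record_fields(resampler)
#     wepy_h5.init_warping_record_fields(bc)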
def init_resampling_record_fields(self, resampler):
"""Initialize the record fields for this record group.
Parameters
----------
resampler : object implementing the Resampler interface
The resampler which contains the data for which record fields to set.
"""
self.init_record_fields(RESAMPLING, resampler.resampling_record_field_names())
def init_resampler_record_fields(self, resampler):
"""Initialize the record fields for this record group.
Parameters
----------
resampler : object implementing the Resampler interface
The resampler which contains the data for which record fields to set.
"""
self.init_record_fields(RESAMPLER, resampler.resampler_record_field_names())
def init_bc_record_fields(self, bc):
"""Initialize the record fields for this record group.
Parameters
----------
bc : object implementing the BoundaryConditions interface
The boundary conditions object which contains the data for which record fields to set.
"""
self.init_record_fields(BC, bc.bc_record_field_names())
def init_warping_record_fields(self, bc):
"""Initialize the record fields for this record group.
Parameters
----------
bc : object implementing the BoundaryConditions interface
The boundary conditions object which contains the data for which record fields to set.
"""
self.init_record_fields(WARPING, bc.warping_record_field_names())
def init_progress_record_fields(self, bc):
"""Initialize the record fields for this record group.
Parameters
----------
bc : object implementing the BoundaryConditions interface
The boundary conditions object which contains the data for which record fields to set.
"""
self.init_record_fields(PROGRESS, bc.progress_record_field_names())
def add_continuation(self, continuation_run, base_run):
"""Add a continuation between runs.
Parameters
----------
continuation_run : int
The run index of the run that will be continuing another
base_run : int
The run that is being continued.
"""
continuations_dset = self.settings_grp[CONTINUATIONS]
continuations_dset.resize((continuations_dset.shape[0] + 1, continuations_dset.shape[1],))
continuations_dset[continuations_dset.shape[0] - 1] = np.array([continuation_run, base_run])
def new_run(self, init_walkers, continue_run=None, **kwargs):
"""Initialize a new run.
Parameters
----------
init_walkers : list of objects implementing the Walker interface
The walkers that will be the start of this run.
continue_run : int, optional
If this run is a continuation of another set which one it is continuing.
kwargs : dict
Metadata to set for the run.
Returns
-------
run_grp : h5py.Group
The group of the newly created run.
"""
# check to see if the continue_run is actually in this file
if continue_run is not None:
if continue_run not in self.run_idxs:
raise ValueError("The continue_run idx given, {}, is not present in this file".format(
continue_run))
# get the index for this run
new_run_idx = self.next_run_idx()
# create a new group named the next integer in the counter
run_grp = self._h5.create_group('{}/{}'.format(RUNS, new_run_idx))
# set the initial walkers group
init_walkers_grp = run_grp.create_group(INIT_WALKERS)
self._add_init_walkers(init_walkers_grp, init_walkers)
# initialize the trajectories group
traj_grp = run_grp.create_group(TRAJECTORIES)
# run the initialization routines for adding a run
self._add_run_init(new_run_idx, continue_run=continue_run)
# add metadata if given
for key, val in kwargs.items():
if key != RUN_IDX:
run_grp.attrs[key] = val
else:
warn('run_idx metadata is set by wepy and cannot be used', RuntimeWarning)
return run_grp
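# Usage sketch (not part of the original source; assumes `wepy_h5` is open in a
# writable mode and `init_walkers` is a list of Walker-like objects):
#
#     run_grp = wepy_h5.new_run(init_walkers)
#     # a run that continues run 0 would be created with:
#     cont_grp = wepy_h5.new_run(init_walkers, continue_run=0)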
# application level methods for setting the fields for run record
# groups given the objects themselves
def init_run_resampling(self, run_idx, resampler):
"""Initialize data for resampling records.
Initialized the run record group as well as settings for the
fields.
This method also creates the decision group for the run.
Parameters
----------
run_idx : int
resampler : object implementing the Resampler interface
The resampler which contains the data for which record fields to set.
Returns
-------
record_grp : h5py.Group
"""
# set the enumeration of the decisions
self.init_run_resampling_decision(run_idx, resampler)
# set the data fields that can be used for table like records
resampler.resampler_record_field_names()
resampler.resampling_record_field_names()
# then make the records group
fields = resampler.resampling_fields()
grp = self.init_run_record_grp(run_idx, RESAMPLING, fields)
return grp
def init_run_resampling_decision(self, run_idx, resampler):
"""Initialize the decision group for the run resampling records.
Parameters
----------
run_idx : int
resampler : object implementing the Resampler interface
The resampler which contains the data for which record fields to set.
"""
self.init_run_fields_resampling_decision(run_idx, resampler.DECISION.enum_dict_by_name())
def init_run_resampler(self, run_idx, resampler):
"""Initialize data for this record group in a run.
Initialized the run record group as well as settings for the
fields.
Parameters
----------
run_idx : int
resampler : object implementing the Resampler interface
The resampler which contains the data for which record fields to set.
Returns
-------
record_grp : h5py.Group
"""
fields = resampler.resampler_fields()
grp = self.init_run_record_grp(run_idx, RESAMPLER, fields)
return grp
def init_run_warping(self, run_idx, bc):
"""Initialize data for this record group in a run.
Initialized the run record group as well as settings for the
fields.
Parameters
----------
run_idx : int
bc : object implementing the BoundaryConditions interface
The boundary conditions object which contains the data for which record fields to set.
Returns
-------
record_grp : h5py.Group
"""
fields = bc.warping_fields()
grp = self.init_run_record_grp(run_idx, WARPING, fields)
return grp
def init_run_progress(self, run_idx, bc):
"""Initialize data for this record group in a run.
Initialized the run record group as well as settings for the
fields.
Parameters
----------
run_idx : int
bc : object implementing the BoundaryConditions interface
The boundary conditions object which contains the data for which record fields to set.
Returns
-------
record_grp : h5py.Group
"""
fields = bc.progress_fields()
grp = self.init_run_record_grp(run_idx, PROGRESS, fields)
return grp
def init_run_bc(self, run_idx, bc):
"""Initialize data for this record group in a run.
Initialized the run record group as well as settings for the
fields.
Parameters
----------
run_idx : int
bc : object implementing the BoundaryConditions interface
The boundary conditions object which contains the data for which record fields to set.
Returns
-------
record_grp : h5py.Group
"""
fields = bc.bc_fields()
grp = self.init_run_record_grp(run_idx, BC, fields)
return grp
# application level methods for initializing the run records
# groups with just the fields and without the objects
def init_run_fields_resampling(self, run_idx, fields):
"""Initialize this record group fields datasets.
Parameters
----------
run_idx : int
fields : list of str
Names of the fields to initialize
Returns
-------
record_grp : h5py.Group
"""
grp = self.init_run_record_grp(run_idx, RESAMPLING, fields)
return grp
def init_run_fields_resampling_decision(self, run_idx, decision_enum_dict):
"""Initialize the decision group for this run.
Parameters
----------
run_idx : int
decision_enum_dict : dict of str : int
Mapping of decision ID strings to integer representation.
"""
decision_grp = self.run(run_idx).create_group(DECISION)
for name, value in decision_enum_dict.items():
decision_grp.create_dataset(name, data=value)
def init_run_fields_resampler(self, run_idx, fields):
"""Initialize this record group fields datasets.
Parameters
----------
run_idx : int
fields : list of str
Names of the fields to initialize
Returns
-------
record_grp : h5py.Group
"""
grp = self.init_run_record_grp(run_idx, RESAMPLER, fields)
return grp
def init_run_fields_warping(self, run_idx, fields):
"""Initialize this record group fields datasets.
Parameters
----------
run_idx : int
fields : list of str
Names of the fields to initialize
Returns
-------
record_grp : h5py.Group
"""
grp = self.init_run_record_grp(run_idx, WARPING, fields)
return grp
def init_run_fields_progress(self, run_idx, fields):
"""Initialize this record group fields datasets.
Parameters
----------
run_idx : int
fields : list of str
Names of the fields to initialize
Returns
-------
record_grp : h5py.Group
"""
grp = self.init_run_record_grp(run_idx, PROGRESS, fields)
return grp
def init_run_fields_bc(self, run_idx, fields):
"""Initialize this record group fields datasets.
Parameters
----------
run_idx : int
fields : list of str
Names of the fields to initialize
Returns
-------
record_grp : h5py.Group
"""
grp = self.init_run_record_grp(run_idx, BC, fields)
return grp
def init_run_record_grp(self, run_idx, run_record_key, fields):
"""Initialize a record group for a run.
Parameters
----------
run_idx : int
run_record_key : str
The name of the record group.
fields : list of str
The names of the fields to set for the record group.
Returns
-------
record_grp : h5py.Group
"""
# initialize the record group based on whether it is sporadic
# or continual
if self._is_sporadic_records(run_record_key):
grp = self._init_run_sporadic_record_grp(run_idx, run_record_key,
fields)
else:
    grp = self._init_run_continual_record_grp(run_idx, run_record_key,
                                              fields)
return grp
# TODO: should've been removed already just double checking things are good without it
# def traj_n_frames(self, run_idx, traj_idx):
# """
# Parameters
# ----------
# run_idx :
# traj_idx :
# Returns
# -------
# """
# return self.traj(run_idx, traj_idx)[POSITIONS].shape[0]
def add_traj(self, run_idx, data, weights=None, sparse_idxs=None, metadata=None):
"""Add a full trajectory to a run.
Parameters
----------
run_idx : int
data : dict of str : arraylike
Mapping of trajectory fields to the data for them to add.
        weights : 1-D arraylike of float
            The weights of each frame. If None, all frames default to 1.0.
        sparse_idxs : dict of str : arraylike of int
            Mapping of sparse field names to the cycle indices their data
            corresponds to.
metadata : dict of str : value
Metadata for the trajectory.
Returns
-------
traj_grp : h5py.Group
"""
# convenient alias
traj_data = data
# initialize None kwargs
if sparse_idxs is None:
sparse_idxs = {}
if metadata is None:
metadata = {}
# positions are mandatory
assert POSITIONS in traj_data, "positions must be given to create a trajectory"
assert isinstance(traj_data[POSITIONS], np.ndarray)
n_frames = traj_data[POSITIONS].shape[0]
# if weights are None then we assume they are 1.0
if weights is None:
weights = np.ones((n_frames, 1), dtype=float)
else:
assert isinstance(weights, np.ndarray), "weights must be a numpy.ndarray"
assert weights.shape[0] == n_frames,\
"weights and the number of frames must be the same length"
# current traj_idx
traj_idx = self.next_run_traj_idx(run_idx)
# make a group for this trajectory, with the current traj_idx
# for this run
traj_grp = self._h5.create_group(
'{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx))
# add the run_idx as metadata
traj_grp.attrs[RUN_IDX] = run_idx
# add the traj_idx as metadata
traj_grp.attrs[TRAJ_IDX] = traj_idx
# add the rest of the metadata if given
for key, val in metadata.items():
if not key in [RUN_IDX, TRAJ_IDX]:
traj_grp.attrs[key] = val
else:
warn("run_idx and traj_idx are used by wepy and cannot be set", RuntimeWarning)
# check to make sure the positions are the right shape
assert traj_data[POSITIONS].shape[1] == self.num_atoms, \
"positions given have different number of atoms: {}, should be {}".format(
traj_data[POSITIONS].shape[1], self.num_atoms)
assert traj_data[POSITIONS].shape[2] == self.num_dims, \
"positions given have different number of dims: {}, should be {}".format(
traj_data[POSITIONS].shape[2], self.num_dims)
# add datasets to the traj group
# weights
traj_grp.create_dataset(WEIGHTS, data=weights, dtype=WEIGHT_DTYPE,
maxshape=(None, *WEIGHT_SHAPE))
# positions
positions_shape = traj_data[POSITIONS].shape
# add the rest of the traj_data
for field_path, field_data in traj_data.items():
# if there were sparse idxs for this field pass them in
if field_path in sparse_idxs:
field_sparse_idxs = sparse_idxs[field_path]
# if this is a sparse field and no sparse_idxs were given
# we still need to initialize it as a sparse field so it
            # can be extended properly, so we make sparse_idxs that match
            # the full length of this initial trajectory data
elif field_path in self.sparse_fields:
field_sparse_idxs = np.arange(positions_shape[0])
# otherwise it is not a sparse field so we just pass in None
else:
field_sparse_idxs = None
self._add_traj_field_data(run_idx, traj_idx, field_path, field_data,
sparse_idxs=field_sparse_idxs)
## initialize empty sparse fields
# get the sparse field datasets that haven't been initialized
traj_init_fields = list(sparse_idxs.keys()) + list(traj_data.keys())
uninit_sparse_fields = set(self.sparse_fields).difference(traj_init_fields)
# the shapes
uninit_sparse_shapes = [self.field_feature_shapes[field] for field in uninit_sparse_fields]
# the dtypes
uninit_sparse_dtypes = [self.field_feature_dtypes[field] for field in uninit_sparse_fields]
# initialize the sparse fields in the hdf5
self._init_traj_fields(run_idx, traj_idx,
uninit_sparse_fields, uninit_sparse_shapes, uninit_sparse_dtypes)
return traj_grp
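    # Example (hypothetical usage; `wepy_h5` is an assumed open file handle
    # whose topology matches the generated coordinates, and the 'positions'
    # key assumes the module's POSITIONS constant is the string 'positions'):
    #
    #     import numpy as np
    #     n_frames = 10
    #     positions = np.zeros((n_frames, wepy_h5.num_atoms, wepy_h5.num_dims))
    #     traj_grp = wepy_h5.add_traj(0, {'positions': positions},
    #                                 weights=np.ones((n_frames, 1)))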
def extend_traj(self, run_idx, traj_idx, data, weights=None):
"""Extend a trajectory with data for all fields.
Parameters
----------
run_idx : int
traj_idx : int
data : dict of str : arraylike
The data to add for each field of the trajectory. Must all
have the same first dimension.
weights : arraylike
            Weights for the frames of the trajectory. If None, all frames default to 1.0.
"""
if self._wepy_mode == 'c-':
assert self._append_flags[dataset_key], "dataset is not available for appending to"
# convenient alias
traj_data = data
# number of frames to add
n_new_frames = traj_data[POSITIONS].shape[0]
n_frames = self.num_traj_frames(run_idx, traj_idx)
# calculate the new sparse idxs for sparse fields that may be
# being added
sparse_idxs = np.array(range(n_frames, n_frames + n_new_frames))
# get the trajectory group
traj_grp = self._h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]
## weights
# if weights are None then we assume they are 1.0
if weights is None:
weights = np.ones((n_new_frames, 1), dtype=float)
else:
assert isinstance(weights, np.ndarray), "weights must be a numpy.ndarray"
assert weights.shape[0] == n_new_frames,\
"weights and the number of frames must be the same length"
# add the weights
weights_ds = traj_grp[WEIGHTS]
# append to the dataset on the first dimension, keeping the
# others the same, if they exist
if len(weights_ds.shape) > 1:
weights_ds.resize( (weights_ds.shape[0] + n_new_frames, *weights_ds.shape[1:]) )
else:
weights_ds.resize( (weights_ds.shape[0] + n_new_frames, ) )
# add the new data
weights_ds[-n_new_frames:, ...] = weights
# add the other fields
for field_path, field_data in traj_data.items():
# if the field hasn't been initialized yet initialize it,
# unless we are in SWMR mode
if not field_path in traj_grp:
# if in SWMR mode you cannot create groups so if we
# are in SWMR mode raise a warning that the data won't
# be recorded
if self.swmr_mode:
warn("New datasets cannot be created while in SWMR mode. The field {} will"
"not be saved. If you want to save this it must be"
"previously created".format(field_path))
else:
feature_shape = field_data.shape[1:]
feature_dtype = field_data.dtype
# not specified as sparse_field, no settings
if (not field_path in self.field_feature_shapes) and \
(not field_path in self.field_feature_dtypes) and \
not field_path in self.sparse_fields:
# only save if it is an observable
is_observable = False
if '/' in field_path:
group_name = field_path.split('/')[0]
if group_name == OBSERVABLES:
is_observable = True
if is_observable:
warn("the field '{}' was received but not previously specified"
" but is being added because it is in observables.".format(field_path))
# save sparse_field flag, shape, and dtype
self._add_sparse_field_flag(field_path)
self._set_field_feature_shape(field_path, feature_shape)
self._set_field_feature_dtype(field_path, feature_dtype)
else:
raise ValueError("the field '{}' was received but not previously specified"
"it is being ignored because it is not an observable.".format(field_path))
# specified as sparse_field but no settings given
elif (self.field_feature_shapes[field_path] is None and
self.field_feature_dtypes[field_path] is None) and \
field_path in self.sparse_fields:
# set the feature shape and dtype since these
# should be 0 in the settings
self._set_field_feature_shape(field_path, feature_shape)
self._set_field_feature_dtype(field_path, feature_dtype)
# initialize
self._init_traj_field(run_idx, traj_idx, field_path, feature_shape, feature_dtype)
# extend it either as a sparse field or a contiguous field
if field_path in self.sparse_fields:
self._extend_sparse_traj_field(run_idx, traj_idx, field_path, field_data, sparse_idxs)
else:
self._extend_contiguous_traj_field(run_idx, traj_idx, field_path, field_data)
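    # Example (hypothetical usage, continuing the add_traj sketch above;
    # appends 5 more frames to trajectory 0 of run 0):
    #
    #     new_positions = np.zeros((5, wepy_h5.num_atoms, wepy_h5.num_dims))
    #     wepy_h5.extend_traj(0, 0, {'positions': new_positions},
    #                         weights=np.ones((5, 1)))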
## application level append methods for run records groups
def extend_cycle_warping_records(self, run_idx, cycle_idx, warping_data):
"""Add records for each field for this record group.
Parameters
----------
run_idx : int
cycle_idx : int
The cycle index these records correspond to.
        warping_data : list of dict of str : arraylike
            The warping records to add, each a mapping of the record
            group fields to their values.
"""
self.extend_cycle_run_group_records(run_idx, WARPING, cycle_idx, warping_data)
def extend_cycle_bc_records(self, run_idx, cycle_idx, bc_data):
"""Add records for each field for this record group.
Parameters
----------
run_idx : int
cycle_idx : int
The cycle index these records correspond to.
        bc_data : list of dict of str : arraylike
            The boundary condition records to add, each a mapping of the
            record group fields to their values.
"""
self.extend_cycle_run_group_records(run_idx, BC, cycle_idx, bc_data)
def extend_cycle_progress_records(self, run_idx, cycle_idx, progress_data):
"""Add records for each field for this record group.
Parameters
----------
run_idx : int
cycle_idx : int
The cycle index these records correspond to.
        progress_data : list of dict of str : arraylike
            The progress records to add, each a mapping of the record
            group fields to their values.
"""
self.extend_cycle_run_group_records(run_idx, PROGRESS, cycle_idx, progress_data)
def extend_cycle_resampling_records(self, run_idx, cycle_idx, resampling_data):
"""Add records for each field for this record group.
Parameters
----------
run_idx : int
cycle_idx : int
The cycle index these records correspond to.
        resampling_data : list of dict of str : arraylike
            The resampling records to add, each a mapping of the record
            group fields to their values.
"""
self.extend_cycle_run_group_records(run_idx, RESAMPLING, cycle_idx, resampling_data)
def extend_cycle_resampler_records(self, run_idx, cycle_idx, resampler_data):
"""Add records for each field for this record group.
Parameters
----------
run_idx : int
cycle_idx : int
The cycle index these records correspond to.
        resampler_data : list of dict of str : arraylike
            The resampler records to add, each a mapping of the record
            group fields to their values.
"""
self.extend_cycle_run_group_records(run_idx, RESAMPLER, cycle_idx, resampler_data)
def extend_cycle_run_group_records(self, run_idx, run_record_key, cycle_idx, fields_data):
"""Extend data for a whole records group.
        The cycle index for the appended data must be given, since this
        method handles both sporadic and continual record groups.
Parameters
----------
run_idx : int
run_record_key : str
Name of the record group.
cycle_idx : int
The cycle index these records correspond to.
        fields_data : list of dict of str : arraylike
            The records to add, each a mapping of field name to the value
            for that field.
"""
record_grp = self.records_grp(run_idx, run_record_key)
# if it is sporadic add the cycle idx
if self._is_sporadic_records(run_record_key):
# get the cycle idxs dataset
record_cycle_idxs_ds = record_grp[CYCLE_IDXS]
# number of old and new records
n_new_records = len(fields_data)
n_existing_records = record_cycle_idxs_ds.shape[0]
# make a new chunk for the new records
record_cycle_idxs_ds.resize( (n_existing_records + n_new_records,) )
# add an array of the cycle idx for each record
record_cycle_idxs_ds[n_existing_records:] = np.full((n_new_records,), cycle_idx)
# then add all the data for the field
for record_dict in fields_data:
for field_name, field_data in record_dict.items():
self._extend_run_record_data_field(run_idx, run_record_key,
field_name, np.array([field_data]))
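    # Example (hypothetical usage; shows that the records argument is a list
    # of per-record dicts, with field names that are purely illustrative):
    #
    #     warping_records = [{'walker_idx': 3, 'weight': 0.01},
    #                        {'walker_idx': 7, 'weight': 0.02}]
    #     wepy_h5.extend_cycle_warping_records(0, cycle_idx=42,
    #                                          warping_data=warping_records)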
### Analysis Routines
## Record Getters
def run_records(self, run_idx, run_record_key):
"""Get the records for a record group for a single run.
Parameters
----------
run_idx : int
run_record_key : str
The name of the record group.
Returns
-------
records : list of namedtuple objects
The list of records for the run's record group.
"""
# wrap this in a list since the underlying functions accept a
# list of records
run_idxs = [run_idx]
return self.run_contig_records(run_idxs, run_record_key)
def run_contig_records(self, run_idxs, run_record_key):
"""Get the records for a record group for the contig that is formed by
the run indices.
This alters the cycle indices for the records so that they
        appear to have come from a single run; that is, they are the
        cycle indices of the contig.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
run_record_key : str
Name of the record group.
Returns
-------
records : list of namedtuple objects
The list of records for the contig's record group.
"""
# if there are no fields return an empty list
record_fields = self.record_fields[run_record_key]
if len(record_fields) == 0:
return []
# get the iterator for the record idxs, if the group is
# sporadic then we just use the cycle idxs
if self._is_sporadic_records(run_record_key):
records = self._run_records_sporadic(run_idxs, run_record_key)
else:
records = self._run_records_continual(run_idxs, run_record_key)
return records
def run_records_dataframe(self, run_idx, run_record_key):
"""Get the records for a record group for a single run in the form of
a pandas DataFrame.
Parameters
----------
run_idx : int
run_record_key : str
Name of record group.
Returns
-------
record_df : pandas.DataFrame
"""
records = self.run_records(run_idx, run_record_key)
return pd.DataFrame(records)
def run_contig_records_dataframe(self, run_idxs, run_record_key):
"""Get the records for a record group for a contig of runs in the form
of a pandas DataFrame.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
run_record_key : str
The name of the record group.
Returns
-------
records_df : pandas.DataFrame
"""
records = self.run_contig_records(run_idxs, run_record_key)
        return pd.DataFrame(records)
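    # Example (hypothetical usage; the string keys assume the module constants
    # RESAMPLING and WARPING are the strings 'resampling' and 'warping'):
    #
    #     resampling_df = wepy_h5.run_records_dataframe(0, 'resampling')
    #     contig_warping_df = wepy_h5.run_contig_records_dataframe([0, 1], 'warping')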
"""
Binary Transport
================
Example of binary transport in pydeck. This notebook renders 10k points via the web sockets within
a Jupyter notebook if you run with ``generate_vis(notebook_display=True)``
Since binary transfer relies on Jupyter's kernel communication,
note that the .html in the pydeck documentation does not use binary transfer
and is just for illustration.
"""
import pydeck
import pandas as pd
NODES_URL = "https://raw.githubusercontent.com/ajduberstein/geo_datasets/master/social_nodes.csv"
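# A minimal sketch (not part of the original example): the nodes file can be
# inspected with pandas before handing it to pydeck, e.g.
#
#     nodes = pd.read_csv(NODES_URL)
#     print(nodes.head())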
def generate_graph_data(num_nodes, random_seed):
"""Generates a graph of 10k nodes with a 3D force layout
This function is unused but serves as an example of how the data in
this visualization was generated
"""
import networkx as nx # noqa
g = nx.random_internet_as_graph(num_nodes, random_seed)
node_positions = nx.fruchterman_reingold_layout(g, dim=3)
    force_layout_df = pd.DataFrame.from_records(node_positions)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, LSTM, GRU, Dropout
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import backend as K
from tensorflow.keras import optimizers
from bayes_opt import BayesianOptimization
import random
import time
from manipulate_data import *
def divide_data(reshaped_x, reshaped_y, look_back, study_periods):
train_size, valid_size, test_size = data_split(study_periods)
train_size -= look_back
train_x = reshaped_x[:train_size, :, :]
train_y = reshaped_y[:train_size]
valid_x = reshaped_x[train_size:train_size + valid_size, :, :]
valid_y = reshaped_y[train_size:train_size + valid_size]
test_x = reshaped_x[train_size + valid_size:, :, :]
test_y = reshaped_y[train_size + valid_size:]
return train_x, train_y, valid_x, valid_y, test_x, test_y
def reshape(Returns, look_back):
# Ensure all data is float
values = Returns.astype('float32')
# Reshape the data
values = values.reshape(np.size(values, 0), 1, 1)
reshaped = np.empty([np.size(values, 0)-look_back+1, 0, np.size(values, 2)])
# Timesteps in order of time
for i in range(1, look_back+1):
reshaped = np.concatenate((reshaped, np.roll(values, look_back-i, axis=0)[look_back-1:, :, :]), axis=1)
return reshaped
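# Worked example of the shapes produced by reshape() (illustrative numbers):
# with 100 return observations and look_back=5, `values` is reshaped to
# (100, 1, 1) and the sliding-window output has shape (96, 5, 1), i.e.
# 100 - look_back + 1 windows of 5 consecutive timesteps each.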
def GRU_network(input_dim,layers,dropout):
model_input = Input(shape=input_dim)
x = GRU(layers[0], input_shape=input_dim,\
return_sequences=False if np.size(layers,0)==1 else True)(model_input)
x = Dropout(dropout)(x)
if np.size(layers,0) > 2:
for i in range(1,np.size(layers,0)-1):
x = GRU(layers[i], return_sequences=True)(x)
x = Dropout(dropout)(x)
if np.size(layers,0) > 1:
x = GRU(layers[-1])(x)
x = Dropout(dropout)(x)
model_output = Dense(1)(x)
return Model(inputs=model_input, outputs=model_output)
def LSTM_network(input_dim,layers,dropout):
model_input = Input(shape=input_dim)
x = LSTM(layers[0], input_shape=input_dim,\
return_sequences=False if np.size(layers,0)==1 else True)(model_input)
x = Dropout(dropout)(x)
if np.size(layers,0) > 2:
for i in range(1,np.size(layers,0)-1):
x = LSTM(layers[i], return_sequences=True)(x)
x = Dropout(dropout)(x)
if np.size(layers,0) > 1:
x = LSTM(layers[-1])(x)
x = Dropout(dropout)(x)
model_output = Dense(1)(x)
return Model(inputs=model_input, outputs=model_output)
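# Example (hypothetical usage; builds a small two-layer LSTM for sequences of
# 20 timesteps with a single feature):
#
#     model = LSTM_network(input_dim=(20, 1), layers=[16, 8], dropout=0.2)
#     model.compile(loss='mse', optimizer='adam')
#     model.summary()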
def train_recurrent_model(cell_type, number_of_study_periods, study_periods, frequency_index, frequencies, frequencies_number_of_samples):
class recurrent_model():
def __init__(self, cell_type, number_of_study_periods, study_periods, frequency_index, frequencies, frequencies_number_of_samples):
self.cell_type = cell_type
self.number_of_study_periods = number_of_study_periods
self.study_periods = study_periods
self.frequency_index = frequency_index
self.frequencies = frequencies
self.frequencies_number_of_samples = frequencies_number_of_samples
self.recurrent_start_time = time.time()
self.init_points = 1
self.n_iter = 2
self.model_results = np.ones((number_of_study_periods,4))*np.Inf
self.model_names = [None]*number_of_study_periods
self.model_predictions = np.zeros((number_of_study_periods,study_periods.shape[2]))
self.model_predictions[:] = np.nan
def black_box_function(self, look_back, batch_size, optimizer, dropout, n_layers, first_layer, layer_decay, learning_rate):
# start_time = time.time()
# Convert hyperparameters
look_back = int(look_back)
batch_size = 2**int(batch_size)
n_epochs = batch_size
optimizer = ['sgd','rmsprop','adam'][int(optimizer)]
n_layers = int(n_layers)
first_layer = int(first_layer)
learning_rate = np.exp(-learning_rate)
layers = []
for k in range(0,n_layers):
layers = np.append(layers, [first_layer*layer_decay**k+0.5]).astype(int)
layers = np.clip(layers,1,None).tolist()
# Reshape the data
Reshaped = reshape(self.study_periods[0,self.period], look_back)
# Get X and Y
reshaped_x = Reshaped[:-1, :, :]
reshaped_y = Reshaped[1:, -1, 0]
# Divide in train, valid and test set
train_x, train_y, valid_x, valid_y, test_x, test_y =\
divide_data(reshaped_x, reshaped_y, look_back, self.study_periods)
mean = np.mean(np.append(train_x[0], train_y))
std = np.std(np.append(train_x[0], train_y))
train_norm_x, valid_norm_x, test_norm_x = (train_x-mean)/std, (valid_x-mean)/std, (test_x-mean)/std
train_norm_y, valid_norm_y, test_norm_y = (train_y-mean)/std, (valid_y-mean)/std, (test_y-mean)/std
train_valid_x = np.concatenate((train_x, valid_x))
train_valid_y = np.concatenate((train_y, valid_y))
mean_tv = np.mean(np.append(train_valid_x[0], train_valid_y))
std_tv = np.std(np.append(train_valid_x[0], train_valid_y))
train_valid_norm_x, test_norm_tv_x = (train_valid_x-mean_tv)/std_tv, (test_x-mean_tv)/std_tv
train_valid_norm_y, test_norm_tv_y = (train_valid_y-mean_tv)/std_tv, (test_y-mean_tv)/std_tv
# Name the model
NAME = 'look_back-'+str(look_back)+\
', n_epochs-'+str(n_epochs)+\
', batch_size-'+str(batch_size)+\
', optimizer-'+optimizer+\
', layers-'+str(layers)+\
', dropout-'+str(dropout)
# print('Model name:', NAME)
#Design model
if optimizer == 'sgd':
optimizer_k = optimizers.SGD(lr=learning_rate)
elif optimizer == 'rmsprop':
optimizer_k = optimizers.RMSprop(lr=learning_rate)
elif optimizer == 'adam':
optimizer_k = optimizers.Adam(lr=learning_rate)
input_dim = (look_back, np.size(Reshaped,2))
if cell_type == 'LSTM':
model = LSTM_network(input_dim,layers,dropout)
else:
model = GRU_network(input_dim,layers,dropout)
model.compile(loss='mse', optimizer=optimizer)
# Print model summary
#model.summary()
# Train model
# Fit network
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=8,\
verbose=0, mode='auto', baseline=None, restore_best_weights=True)
# print(f'Time: {np.round((time.time()-start_time)/60,2)}')
history = model.fit(train_norm_x, train_norm_y, epochs=n_epochs, batch_size=batch_size,\
validation_data=(valid_norm_x, valid_norm_y), verbose=0, shuffle=False, callbacks=[earlystopping])
# print(f'Time: {np.round((time.time()-start_time)/60,2)}')
# plt.plot(history.history['loss'],label='loss')
# plt.plot(history.history['val_loss'],label='val loss')
# plt.legend()
# plt.show()
mse = np.mean(np.square((model.predict(valid_norm_x)*std+mean).flatten()-valid_y))
if mse < self.model_results[self.period,1]:
self.model_names[self.period] = NAME
self.model_results[self.period, 0] = np.mean(np.square((model.predict(train_norm_x)*std+mean).flatten()-train_y))
self.model_results[self.period, 1] = mse
#Design model
del model
K.clear_session()
input_dim = (look_back, np.size(Reshaped,2))
if cell_type == 'LSTM':
model = LSTM_network(input_dim,layers,dropout)
else:
model = GRU_network(input_dim,layers,dropout)
model.compile(loss='mse', optimizer=optimizer)
model.fit(train_valid_norm_x, train_valid_norm_y, epochs=n_epochs, batch_size=batch_size,\
validation_data=(test_norm_tv_x, test_norm_tv_y), verbose=0, shuffle=False, callbacks=[earlystopping])
self.model_results[self.period, 2] = np.mean(np.square((model.predict(train_valid_norm_x)*std_tv+mean_tv)\
.flatten()-train_valid_y))
self.model_results[self.period, 3] = np.mean(np.square((model.predict(test_norm_tv_x)*std_tv+mean_tv).flatten()-test_y))
self.model_predictions[self.period, -len(test_x):] = (model.predict(test_norm_tv_x)*std_tv+mean_tv)[:,0]
# Clear model
del model
K.clear_session()
# print(f'Time: {np.round((time.time()-start_time)/60,2)}')
return -mse
def train(self):
for self.period in range(self.number_of_study_periods):
print(f'Period: {self.period}')
pbounds = {'look_back' : (1, 40),\
'batch_size' : (4, 10),\
'optimizer' : (0, 2),\
'dropout' : (0, 0.5),\
'n_layers' : (1, 4),\
'first_layer' : (1, 40),\
'layer_decay' : (0.3, 1),\
'learning_rate' : (0, 15)}
optimizer = BayesianOptimization(f=self.black_box_function, pbounds=pbounds, random_state=None)
start_time = time.time()
optimizer.maximize(init_points=self.init_points, n_iter=self.n_iter)
print(f'Period time: {np.round((time.time()-start_time)/60,2)} minutes')
pd.DataFrame(self.model_names).to_csv('results/'+str(self.cell_type)+'_names_frequency_'\
+str(self.frequencies[self.frequency_index])+'.csv',index=False, header=False)
            pd.DataFrame(self.model_results)