| prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
---|---|---|
import numpy as np, pandas as pd, dask.bag as db
from scipy.stats import normaltest
import time
def test_normality(x,alpha = 0.05):
'''performs D'Agostino and Pearson's omnibus test for normality.
Returns p, True if significantly different from normal distribution'''
_, p = normaltest(x)
is_significant = p < alpha
return p, is_significant
def print_test_normality(x,alpha = 0.05):
_, p = normaltest(x)
is_significant = p < alpha
print(f"p = {p:.10g}")
if is_significant: # null hypothesis: x comes from a normal distribution
print("\tThe null hypothesis can be rejected. The data is significantly different from the normal distribution.")
else:
print("\tThe null hypothesis cannot be rejected. The data is not significantly different from the normal distribution.")
def bootstrap_mean(x,num_samples=1000):
mean_values=np.zeros(num_samples)
sizex=x.shape[0]
for i in range(num_samples):
randint_values=np.random.randint(low=0, high=sizex, size=sizex, dtype=int)
x_bootstrap=x[randint_values]
mean_values[i]=np.mean(x_bootstrap)
return mean_values
def bootstrap_stdev_of_mean(x,num_samples=1000):
mean_values=bootstrap_mean(x,num_samples=num_samples)
sig=np.std(mean_values)
return sig
def bootstrap_95CI_Delta_mean(x,num_samples=1000):
mean_values=bootstrap_mean(x,num_samples=num_samples)
sig=np.std(mean_values)
_, p = normaltest(mean_values)
Delta_mean=1.96*sig
return Delta_mean,p
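# --- illustrative usage sketch (added, not part of the original module): check the
# bootstrap CI helper on synthetic data. Names prefixed with _demo are made up here,
# and np.random.default_rng assumes numpy >= 1.17. For a standard normal sample of
# size n, the half-width should be close to the analytic value 1.96/sqrt(n).
_x_demo = np.random.default_rng(0).normal(size=1000)
_Delta_demo, _p_demo = bootstrap_95CI_Delta_mean(_x_demo, num_samples=2000)
print(f"mean = {np.mean(_x_demo):.3f} +/- {_Delta_demo:.3f}, normality p of bootstrap means = {_p_demo:.3f}")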
def bin_and_bootstrap_xy_values(x,y,xlabel,ylabel,bins='auto',min_numobs=None,num_bootstrap_samples=1000,npartitions=1,**kwargs):
'''see docstring for bin_and_bootstrap_xy_values_parallel'''
if not isinstance(x, np.ndarray):
raise TypeError("InputError: x and y must have type np.ndarray!")
if not isinstance(y, np.ndarray):
raise TypeError("InputError: x and y must have type np.ndarray!")
R_values=x
dRdt_values=y
num_samples=num_bootstrap_samples
#implement measure of dRdt that explicitly bins by radius
counts,r_edges=np.histogram(R_values,bins=bins)
range_values=r_edges
if min_numobs is None:
min_numobs=np.mean(counts)/8
r_lst=[];drdt_lst=[];Delta_r_lst=[];Delta_drdt_lst=[];
count_lst=[];p_r_lst=[];p_drdt_lst=[]
if npartitions==1:
#for a single core in base python
for j in range(r_edges.shape[0]-1):
numobs=counts[j]
if numobs>min_numobs:
boo=(R_values>=r_edges[j])&(R_values<r_edges[j+1])
r_values=R_values[boo]
drdt_values=dRdt_values[boo]
#compute mean values in bin
r=np.mean(r_values)
drdt=np.mean(drdt_values)
# compute 95% CI for mean
Delta_r,p_r=bootstrap_95CI_Delta_mean(r_values,
num_samples=num_samples)
Delta_drdt,p_drdt=bootstrap_95CI_Delta_mean(drdt_values,
num_samples=num_samples)
#append results to list
r_lst.append(r)
drdt_lst.append(drdt)
Delta_r_lst.append(Delta_r)
Delta_drdt_lst.append(Delta_drdt)
p_r_lst.append(p_r)
p_drdt_lst.append(p_drdt)
count_lst.append(numobs)
r_values=np.array(r_lst)
drdt_values=np.array(drdt_lst)
Delta_r_values=np.array(Delta_r_lst)
Delta_drdt_values=np.array(Delta_drdt_lst)
p_r_values=np.array(p_r_lst)
p_drdt_values=np.array(p_drdt_lst)
count_values=np.array(count_lst)
dict_out={
xlabel:r_values,
ylabel:drdt_values,
f'Delta_{xlabel}':Delta_r_values,
f'Delta_{ylabel}':Delta_drdt_values,
f'p_{xlabel}':p_r_values,
f'p_{ylabel}':p_drdt_values,
'counts':count_values
}
return pd.DataFrame(dict_out)
else:
#perform in parallel on multiple cores
return bin_and_bootstrap_xy_values_parallel(x,
y,
xlabel,
ylabel,
bins=bins,
min_numobs=min_numobs,
num_bootstrap_samples=num_bootstrap_samples,
npartitions=npartitions,**kwargs)
###########################################
# Parallel implementation on multiple cores
###########################################
def get_routine_bootstrap_bin(x_values,y_values,x_bin_edges,counts,num_samples=100,min_numobs=100,**kwargs):
'''x_values,y_values,x_bin_edges are 1 dimensional numpy arrays.
returns the function, routine_bootstrap_bin.'''
if not isinstance(x_values, np.ndarray):
raise TypeError("InputError: x and y must have type np.ndarray!")
if not isinstance(y_values, np.ndarray):
raise TypeError("InputError: x and y must have type np.ndarray!")
R_values=x_values
dRdt_values=y_values
r_edges=x_bin_edges
def routine_bootstrap_bin(bin_id):
j=bin_id
numobs=counts[j]
if numobs>min_numobs:
boo=(R_values>=r_edges[j])&(R_values<r_edges[j+1])
r_values=R_values[boo]
drdt_values=dRdt_values[boo]
#compute mean values in bin
r=np.mean(r_values)
drdt=np.mean(drdt_values)
# compute 95% CI for mean
Delta_r,p_r=bootstrap_95CI_Delta_mean(r_values,
num_samples=num_samples)
Delta_drdt,p_drdt=bootstrap_95CI_Delta_mean(drdt_values,
num_samples=num_samples)
return np.array((r,drdt,Delta_r,Delta_drdt,p_r,p_drdt,numobs))
else:
return None
return routine_bootstrap_bin
def bin_and_bootstrap_xy_values_parallel(x,
y,
xlabel='x',
ylabel='y',
bins='auto',
min_numobs=None,
num_bootstrap_samples=1000,
npartitions=1,
use_test=True,
test_val=0,printing=False,**kwargs):
'''
bin_and_bootstrap_xy_values_parallel returns a pandas.DataFrame instance with the following columns
columns=[xlabel,ylabel,f'Delta_{xlabel}',f'Delta_{ylabel}',f'p_{xlabel}',f'p_{ylabel}','counts']
Output fields for a given x bin include:
- y : the simple mean value of y
- Delta_y : half-width of the 95% confidence interval of the mean (distance from the mean to the interval edge)
- p_y : p-value of a normality test applied to the bootstrapped distribution of the mean; the null hypothesis is that this distribution is normal
- counts : the number of observations in this bin
Arguments x and y are assumed to be 1 dimensional numpy.array instances.
If p_y reliably has values less than 0.05, consider increasing num_bootstrap_samples.
bin_and_bootstrap_xy_values_parallel passes the kwarg, bins, to numpy.histogram
to generate x bins, it then passes each x bin to npartitions dask workers, each of which
selects the y values that correspond to the x values within its respective bin. With these
x and y values in hand, that worker bootstraps num_bootstrap_samples samples of approximations of
the mean value for those x and y values.
A bin is ignored if it contains no more than min_numobs
observations. If min_numobs=None, then min_numobs=np.mean(counts)/8, where counts is the array of
counts in each bin.
'''
x_values=x
y_values=y
num_samples=num_bootstrap_samples
counts,x_bin_edges=np.histogram(x_values,bins=bins)
bin_id_lst=list(range(x_bin_edges.shape[0]-1))
if min_numobs is None:
min_numobs=np.mean(counts)/8
#bake method to bootstrap 95%CI of mean of y conditioned on x being within a given bin
routine_bootstrap_bin=get_routine_bootstrap_bin(x_values,y_values,x_bin_edges,counts,num_samples=num_samples,min_numobs=min_numobs)
def routine(input_val):
try:
return routine_bootstrap_bin(input_val)
except Exception as e:
print(f"Warning: something went wrong in bin {input_val}: {e}")
return None
#optionally test the routine
if use_test:
retval=routine(test_val)
#all CPU version
b = db.from_sequence(bin_id_lst, npartitions=npartitions).map(routine)
start = time.time()
retval = b.compute()
if printing:
print(f"run time for bootstrapping was {time.time()-start:.2f} seconds.")
array_out=np.stack([x for x in retval if x is not None])
columns=[xlabel,ylabel,f'Delta_{xlabel}',f'Delta_{ylabel}',f'p_{xlabel}',f'p_{ylabel}','counts']
# columns=['r','drdt','Delta_r','Delta_drdt','p_r','p_drdt','counts']
df = pd.DataFrame(data=array_out, columns=columns)  # completion (api: pandas.DataFrame)
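# --- illustrative usage sketch (added; not part of the dataset row above) ---
# Bin noisy (x, y) samples and bootstrap 95% CIs for the per-bin means on a single core.
# All names below (rng_demo, x_demo, y_demo, df_demo) are made up for this demo.
rng_demo = np.random.default_rng(1)
x_demo = rng_demo.uniform(0, 10, size=5000)
y_demo = 2.0 * x_demo + rng_demo.normal(scale=1.0, size=5000)
df_demo = bin_and_bootstrap_xy_values(x_demo, y_demo, 'x', 'y', bins=20,
num_bootstrap_samples=500, npartitions=1)
print(df_demo[['x', 'y', 'Delta_y', 'counts']].head())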
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_parsing:
method extracts the first and maximum application rate from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
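# illustrative note (added): for the app_rates below, the first rates per list are
# [0.34, 0.78, 2.34] and the maximum rates are [0.34, 3.54, 2.34], matching expected_results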
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
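# illustrative check (added): expected_results[0] = 0.34 (app rate) * 0.34 (frac_act_ing) * 110. (food multiplier) = 12.716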
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
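# illustrative check (added): these values are consistent with one day of first-order decay,
# conc_0 * 0.5 ** (1. / foliar_diss_hlife), e.g. 0.001 * 0.5 ** (1. / 0.25) = 6.25e-5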
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
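# illustrative check (added): expected_results[0] = 4.556 / 12. = 0.37966...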
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
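# illustrative check (added): expected_results[0] = 100. * (15. / 175.) ** (1.15 - 1.), approximately 69.176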
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
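# illustrative check (added): expected_results[0] = 0.648 * 15. ** 0.651 / (1. - 0.1), approximately 4.197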
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
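# illustrative check (added): expected_results[0] = ((0.34 * 0.15) / 128.) * 8.33 * 10000. / 5., approximately 6.638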
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_2(self):
"""
# unit test for function sa_mamm_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.46206e-3, 3.103179e-2, 1.03076e-3], dtype = 'float')
expected_results_md = pd.Series([1.304116e-3, 1.628829e-2, 4.220702e-4], dtype = 'float')
expected_results_lg =pd.Series([1.0592147e-4, 1.24391489e-3, 3.74263186e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sc_mamm(self):
"""
# unit test for function sc_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.90089, 15.87995, 8.142130], dtype = 'float')
expected_results_md = pd.Series([2.477926, 13.16889, 3.008207], dtype = 'float')
expected_results_lg =pd.Series([1.344461, 5.846592, 2.172211], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.noael_mamm = pd.Series([2.5, 3.5, 0.5], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sc_mamm("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sc_mamm("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sc_mamm("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird(self):
"""
# unit test for function ld50_rg_bird (LD50ft-2 for Row/Band/In-furrow granular birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_rg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird1(self):
"""
# unit test for function ld50_rg_bird1 (LD50ft-2 for Row/Band/In-furrow granular birds)
this is a duplicate of the 'test_ld50_rg_bird' method using a more vectorized approach to the
calculations; if desired, other routines could be modified similarly.
Comparing this method with 'test_ld50_rg_bird', both appear (for this test) to run in the same time,
but that may not hold when hundreds of model simulation runs are executed (and only a small
number of the application_types apply to this method); thus we continue to use the non-vectorized
approach for now; this should be revisited when we have a large run to execute
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_rg_bird1
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_bird(self):
"""
# unit test for function ld50_bl_bird (LD50ft-2 for broadcast liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, 33.77777, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_bird
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_bird(self):
"""
# unit test for function ld50_bg_bird (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, np.nan, 0.4214033], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Liquid',
'Broadcast-Granular'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_bird(self):
"""
# unit test for function ld50_rl_bird (LD50ft-2 for Row/Band/In-furrow liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 2.20701, 0.0363297], dtype='float')
try:
# following parameter values are unique for ld50_rl_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_mamm(self):
"""
unittest for function at_mamm:
adjusted_toxicity = self.ld50_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([705.5036, 529.5517, 830.6143], dtype='float')
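# illustrative check (added): expected_results[0] = 321. * (350. / 15.) ** 0.25, approximately 705.5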
try:
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.ld50_mamm)):
result[i] = trex_empty.at_mamm(i, trex_empty.aw_mamm_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_anoael_mamm(self):
"""
unittest for function anoael_mamm:
adjusted_toxicity = self.noael_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([5.49457, 9.62821, 2.403398], dtype='float')
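# illustrative check (added): expected_results[0] = 2.5 * (350. / 15.) ** 0.25, approximately 5.4946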
try:
trex_empty.noael_mamm = pd.Series([2.5, 5.0, 1.25], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.anoael_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_mamm(self):
"""
unittest for function fi_mamm:
food_intake = (0.621 * (aw_mamm ** 0.564)) / (1 - mf_w_mamm)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([3.17807, 16.8206, 42.28516], dtype='float')
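# illustrative check (added): expected_results[0] = 0.621 * 15. ** 0.564 / (1. - 0.1), approximately 3.178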
try:
trex_empty.mf_w_mamm_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_mamm(trex_empty.aw_mamm_sm, trex_empty.mf_w_mamm_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_mamm(self):
"""
# unit test for function ld50_bl_mamm (LD50ft-2 for broadcast liquid)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='',
verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_mamm(self):
"""
# unit test for function ld50_bg_mamm (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Granular',
'Broadcast-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_mamm(self):
"""
# unit test for function ld50_rl_mamm (LD50ft-2 for Row/Band/In-furrow liquid mammals)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 0.6119317, 0.0024497], dtype='float')
try:
# following parameter values are unique for ld50_rl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular',
'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
result = trex_empty.ld50_rl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_mamm(self):
"""
# unit test for function ld50_rg_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([33.9737, 7.192681, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_rg_mamm
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')  # completion (api: pandas.Series)
"""
Applying Box-Jenkins Forecasting Methodology
to Predict Massachusetts Cannabis Data
Copyright (c) 2021 Cannlytics and the Cannabis Data Science Meetup Group
Authors: <NAME> <<EMAIL>>
Created: 10/6/2021
Updated: 11/3/2021
License: MIT License <https://opensource.org/licenses/MIT>
References:
- Time Series forecasting using Auto ARIMA in Python
https://towardsdatascience.com/time-series-forecasting-using-auto-arima-in-python-bb83e49210cd
Data Sources:
MA Cannabis Control Commission
- Retail Sales by Date and Product Type: https://dev.socrata.com/foundry/opendata.mass-cannabis-control.com/xwf2-j7g9
- Approved Massachusetts Licensees: https://dev.socrata.com/foundry/opendata.mass-cannabis-control.com/hmwt-yiqy
- Average Monthly Price per Ounce for Adult-Use Cannabis: https://dev.socrata.com/foundry/opendata.mass-cannabis-control.com/rqtv-uenj
- Plant Activity and Volume: https://dev.socrata.com/foundry/opendata.mass-cannabis-control.com/j3q7-3usu
- Weekly sales by product type: https://dev.socrata.com/foundry/opendata.mass-cannabis-control.com/87rp-xn9v
Fed Fred
- MA Gross Domestic Product: https://fred.stlouisfed.org/series/MANQGSP
- MA Civilian Labor Force: https://fred.stlouisfed.org/series/MALF
- MA All Employees: https://fred.stlouisfed.org/series/MANA
- MA Avg. Weekly Wage: https://fred.stlouisfed.org/series/LES1252881600Q
- MA Minimum Wage: https://fred.stlouisfed.org/series/STTMINWGMA
- MA Population: https://fred.stlouisfed.org/series/MAPOP
"""
from dotenv import dotenv_values
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import pandas as pd
import pmdarima as pm
import requests
import seaborn as sns
# Internal imports
from utils import (
forecast_arima,
format_millions,
reverse_dataframe,
set_training_period,
)
#--------------------------------------------------------------------------
# Get MA public cannabis data.
#--------------------------------------------------------------------------
# Setup Socrata API, get the App Token, and define the headers.
config = dotenv_values('../.env')
app_token = config.get('APP_TOKEN', None)
headers = {'X-App-Token': app_token}
base = 'https://opendata.mass-cannabis-control.com/resource'
# Get production stats (total employees, total plants, etc.) j3q7-3usu
url = f'{base}/j3q7-3usu.json'
params = {'$limit': 2000, '$order': 'activitysummarydate DESC'}
response = requests.get(url, headers=headers, params=params)
production = pd.DataFrame(response.json(), dtype=float)
production = reverse_dataframe(production)
# Calculate sales difference.
production['sales'] = production['salestotal'].diff()
# FIX: Fix outlier that appears to have an extra 0.
outlier = production.loc[production.sales >= 10000000]
production.at[outlier.index, 'sales'] = 0
# FIX: Remove negative values.
negatives = production.loc[production.sales < 0]
production.at[negatives.index, 'sales'] = 0
# Aggregate daily production data into monthly and quarterly averages.
production['date'] = pd.to_datetime(production['activitysummarydate'])
production.set_index('date', inplace=True)
monthly_avg_production = production.resample('M').mean()
quarterly_avg_production = production.resample('Q').mean()
monthly_total_production = production.resample('M').sum()
quarterly_total_production = production.resample('Q').sum()
# Get licensees data.
url = f'{base}/hmwt-yiqy.json'
params = {'$limit': 10000, '$order': 'app_create_date DESC'}
response = requests.get(url, headers=headers, params=params)
licensees = pd.DataFrame(response.json(), dtype=float)
# Get the monthly average price per ounce.
url = f'{base}/rqtv-uenj.json'
params = {'$limit': 10000, '$order': 'date DESC'}
response = requests.get(url, headers=headers, params=params)
prices = pd.DataFrame(response.json(), dtype=float)
prices = reverse_dataframe(prices)
prices.set_index('date', inplace=True)
# Calculate the average price per specific quantity.
price_per_gram = prices.avg_1oz.astype(float).divide(28).round(2)
price_per_teenth = prices.avg_1oz.astype(float).divide(16).round(2)
price_per_eighth = prices.avg_1oz.astype(float).divide(8).round(2)
price_per_quarter = prices.avg_1oz.astype(float).divide(4).round(2)
# Get the products.
url = f'{base}/xwf2-j7g9.json'
params = {'$limit': 10000, '$order': 'saledate DESC'}
response = requests.get(url, headers=headers, params=params)
products = pd.DataFrame(response.json(), dtype=float)
products = reverse_dataframe(products)
products.set_index('saledate', inplace=True)
# Plot sales by product type.
product_types = list(products.productcategoryname.unique())
for product_type in product_types:
print(product_type)
products.loc[products.productcategoryname == product_type].dollartotal.plot()
#--------------------------------------------------------------------------
# Estimate sales, plants, employees in 2021 and 2022,
#--------------------------------------------------------------------------
# Specify training time periods.
train_start = '2020-06-01'
train_end = '2021-10-25'
# Create weekly series.
weekly_sales = production.sales.resample('W-SUN').sum()
weekly_plants = production.total_planttrackedcount.resample('W-SUN').mean()
weekly_employees = production.total_employees.resample('W-SUN').mean()
# Define forecast horizon.
forecast_horizon = pd.date_range(
pd.to_datetime(train_end),
periods=60,
freq='w'
)
# Create month fixed effects (dummy variables),
# excluding 1 month (January) for comparison.
month_effects = pd.get_dummies(weekly_sales.index.month)
month_effects.index = weekly_sales.index
month_effects = set_training_period(month_effects, train_start, train_end)
forecast_month_effects = pd.get_dummies(forecast_horizon.month)
del month_effects[1]
try:
del forecast_month_effects[1]
except KeyError:
pass
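# Illustrative note (added): month_effects is now a weekly-indexed frame of 11 dummy
# columns (February through December), with January serving as the omitted baseline;
# forecast_month_effects mirrors those dummies over the 60-week forecast horizon.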
# Estimate sales forecasting model.
model = pm.auto_arima(
set_training_period(weekly_sales, train_start, train_end),
X=month_effects,
start_p=0,
d=0,
start_q=0,
max_p=6,
max_d=6,
max_q=6,
seasonal=True,
start_P=0,
D=0,
start_Q=0,
max_P=6,
max_D=6,
max_Q=6,
information_criterion='bic',
alpha=0.2,
)
print(model.summary())
# Make sales forecasts.
sales_forecast, sales_conf = forecast_arima(model, forecast_horizon, X=month_effects)
#--------------------------------------------------------------------------
# Visualize the forecasts.
#--------------------------------------------------------------------------
# Define the plot style.
plt.style.use('fivethirtyeight')
plt.rcParams['font.family'] = 'Times New Roman'
palette = sns.color_palette('tab10')
primary_color = palette[0]
secondary_color = palette[-1]
# Plot sales forecast.
fig, ax = plt.subplots(figsize=(15, 5))
weekly_sales[-25:-1].plot(ax=ax, color=primary_color, label='Historic')
sales_forecast.plot(ax=ax, color=secondary_color, style='--', label='Forecast')
plt.fill_between(
sales_forecast.index,
sales_conf[:, 0],
sales_conf[:, 1],
alpha=0.1,
color=secondary_color,
)
plt.legend(loc='lower left', fontsize=18)
plt.title('Massachusetts Cannabis Sales Forecast', fontsize=24, pad=10)
yaxis_format = FuncFormatter(format_millions)
ax.yaxis.set_major_formatter(yaxis_format)
plt.setp(ax.get_yticklabels()[0], visible=False)
plt.xlabel('')
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.show()
#--------------------------------------------------------------------------
# Estimate sales per retailer, plants per cultivator,
# and employees per licensee.
#--------------------------------------------------------------------------
# Find total retailers and cultivators.
retailers = licensees.loc[licensees.license_type == 'Marijuana Retailer']
cultivators = licensees.loc[licensees.license_type == 'Marijuana Cultivator']
total_retailers = len(retailers)
total_cultivators = len(cultivators)
total_licensees = len(licensees)
# Create total licensees series.
production['total_retailers'] = 0
production['total_cultivators'] = 0
production['total_licensees'] = 0
for index, _ in production.iterrows():
timestamp = index.isoformat()
production.at[index, 'total_retailers'] = len(licensees.loc[
(licensees.license_type == 'Marijuana Retailer') &
(licensees.app_create_date <= timestamp)
])
production.at[index, 'total_cultivators'] = len(licensees.loc[
(licensees.license_type == 'Marijuana Cultivator') &
(licensees.app_create_date <= timestamp)
])
production.at[index, 'total_licensees'] = len(licensees.loc[
(licensees.app_create_date <= timestamp)
])
# Create weekly averages.
weekly_total_retailers = production['total_retailers'].resample('W-SUN').mean()
weekly_total_cultivators = production['total_cultivators'].resample('W-SUN').mean()
weekly_total_licensees = production['total_licensees'].resample('W-SUN').mean()
# Plot sales per retailer.
sales_per_retailer = weekly_sales / weekly_total_retailers
sales_per_retailer.plot()
plt.show()
# Plot plants per cultivator.
plants_per_cultivator = weekly_plants / weekly_total_cultivators
plants_per_cultivator.plot()
plt.show()
# Plot employees per licensee.
employees_per_license = weekly_employees / weekly_total_licensees
employees_per_license.plot()
plt.show()
#--------------------------------------------------------------------------
# Forecast sales, plants grown, and employees using Box-Jenkins methodology.
# Optional: Also forecast total retailers, total cultivators, and total licensees.
# Optional: Attempt to forecast with daily series with day-of-the-week fixed effects.
# Attempt to forecast with weekly series with month fixed effects.
#--------------------------------------------------------------------------
# Estimate plants forecasting model and make plant forecasts.
model = pm.auto_arima(
weekly_plants[73:-1],
X=month_effects[73:-1],
start_p=0,
d=0,
start_q=0,
max_p=6,
max_d=6,
max_q=6,
seasonal=True,
start_P=0,
D=0,
start_Q=0,
max_P=6,
max_D=6,
max_Q=6,
information_criterion='bic',
alpha=0.2,
# m=12,
)
print(model.summary())
plants_forecast, plants_conf = model.predict(
n_periods=len(forecast_horizon),
return_conf_int=True,
X=forecast_month_effects,
)
plants_forecast = pd.Series(plants_forecast)
plants_forecast.index = forecast_horizon
weekly_plants[73:-1].plot()
plants_forecast.plot()
plt.show()
# Estimate employees forecasting model and make employees forecasts.
model = pm.auto_arima(
weekly_employees[73:-1],
X=month_effects[73:-1],
start_p=0,
d=1,
start_q=0,
max_p=6,
max_d=6,
max_q=6,
seasonal=True,
start_P=0,
D=0,
start_Q=0,
max_P=6,
max_D=6,
max_Q=6,
information_criterion='bic',
alpha=0.2,
# m=12,
)
print(model.summary())
employees_forecast, employees_conf = model.predict(
n_periods=len(forecast_horizon),
return_conf_int=True,
X=forecast_month_effects,
)
employees_forecast = pd.Series(employees_forecast)  # completion (api: pandas.Series)
#%% Imports
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
from sklearn import tree
from os.path import dirname, abspath
import os
import xgboost as xgb
import pandas as pd
import numpy as np
from sklearn import linear_model
d = dirname(dirname(abspath(__file__)))
os.chdir(d)
import starboost_up
path_data = r'E:\datasetsDART'
from pathlib import Path
import pickle
#%% Create data
def load_data(data_name = 'breast_cancer', drop = True, path_data = r'E:\datasetsDART'):
"""
Options for data_name:
- breast_cancer
- iris
- wine
- CT
- student
"""
if data_name == 'breast_cancer':
X, y = datasets.load_breast_cancer(return_X_y=True)
problem_type = 'classification'
elif data_name == 'iris':
X, y = datasets.load_iris(return_X_y=True)
problem_type = 'multi_classification'
elif data_name == 'wine':
X, y = datasets.load_wine(return_X_y=True)
X = X[y<2,:]
y = y[y<2]
problem_type = 'classification'
elif data_name == 'CT':
data_all = pd.read_csv(path_data+ '\slice_localization_data_regression\slice_localization_data.csv')
y = data_all['reference']
cols = data_all.describe().columns
remove_cols = [col for col in cols if col not in ('reference','patientId')]
X = data_all[remove_cols]
split_id = data_all['patientId']
#X = pd.concat([X, data_all_patient], 1)
problem_type = 'regression'
return X,y, problem_type, split_id
elif data_name == 'student':
data_all = pd.read_csv(path_data+ '\student\student-mat.csv', sep=';')
cols = data_all.describe().columns
remove_cols = [col for col in cols if col not in ['G1','G2','G3']]
y = data_all['G3']
X = pd.get_dummies(data_all[remove_cols], drop_first= drop)
problem_type = 'regression'
elif data_name == 'mushroom':
data_all = pd.read_csv(path_data+ '\MushroomDataset\MushroomDataset\secondary_data.csv', sep=';')
cols = data_all.describe().columns
remove_cols = [col for col in cols if col != 'class']
y = pd.get_dummies(data_all['class'] , drop_first = True)
X = pd.get_dummies(data_all[remove_cols], drop_first= drop)
problem_type = 'classification'
else:
raise NameError('Unknown data')
return X,y, problem_type
data_type = input('data type (or def)')
if data_type == 'def':
X,y, problem_type = load_data( 'breast_cancer', True , path_data)
elif data_type == 'CT':
X,y, problem_type, split_id = load_data(data_type, True , path_data)
else:
X,y, problem_type = load_data(data_type, True , path_data)
#X, y = datasets.load_breast_cancer(return_X_y=True)
#%% functions
def micro_f1_score(y_true, y_pred):
"""
Calculate micro_f1_score
"""
return metrics.f1_score(y_true, y_pred, average='micro')
def rmse(y_true, y_pred):
"""
Calculate RMSE
"""
return metrics.mean_squared_error(y_true, y_pred) ** 0.5
def create_inner_path(list_path):
if isinstance(list_path, list):
real_path = ''
for el in list_path:
real_path = real_path + '/' + str(el)
else:
real_path = list_path
Path(real_path).mkdir(parents=True, exist_ok=True)
return real_path
def split_train_test_val(X,y, test_ratio = 0.2, val_ratio = 0, split_id = None, rand_seed = 0):
"""
    A function to split train and test, with an option to use an id column for reference (for instance, in case of different subjects etc.)
X: data: [samples X features matrix]
y: labels vector: [samples]
test_ratio, val_ratio = ratios of the test set and validation set. If no val -> val_ratio =0
split_id: id to split according to. [samples]
"""
    np.random.seed(rand_seed)
if val_ratio > 0 and test_ratio == 0:
val_ratio, test_ratio = test_ratio, val_ratio
if test_ratio + val_ratio > 1:
raise ValueError('Test and Val ratios should be <1')
    if split_id is not None:
        # select rows by id so that all samples of one subject stay in the same split
        split_id = np.asarray(split_id)
        n_test = int(np.floor(test_ratio * len(np.unique(split_id))))
        choose_test = np.random.choice(np.unique(split_id), n_test, replace=False)
        test_mask = np.isin(split_id, choose_test)
        X_test = X[test_mask]
        y_test = y[test_mask]
        remained_id = [id_val for id_val in np.unique(split_id) if id_val not in choose_test]
        if val_ratio > 0:
            n_val = int(np.floor(val_ratio * len(np.unique(split_id))))
            choose_val = np.random.choice(np.unique(remained_id), n_val, replace=False)
            val_mask = np.isin(split_id, choose_val)
            X_val = X[val_mask]
            y_val = y[val_mask]
            remained_id = [id_val for id_val in np.unique(remained_id) if id_val not in choose_val]
        train_mask = np.isin(split_id, remained_id)
        X_train = X[train_mask]
        y_train = y[train_mask]
        choose_train = remained_id
        if val_ratio > 0:
            return X_train, y_train, X_val, y_val, X_test, y_test, choose_train, choose_val, choose_test
        else:
            return X_train, y_train, X_test, y_test, choose_train, choose_test
else:
if val_ratio == 0:
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=test_ratio, random_state=42)
return X_train, y_train , X_test, y_test
else:
X_train, X_test_val, y_train, y_test_val = model_selection.train_test_split(X, y, test_size=test_ratio + val_ratio, random_state=42)
X_test, X_val, y_test, y_val = model_selection.train_test_split(X_test_val, y_test_val, test_size = val_ratio /(test_ratio + val_ratio), random_state=42)
return X_train, y_train , X_val, y_val, X_test, y_test
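# Usage sketch (illustrative only, not part of the original pipeline):
# with an id column (e.g. the CT data, where `split_id` holds one patient id per
# row) the split keeps whole subjects together:
#   X_tr, y_tr, X_te, y_te, ids_tr, ids_te = split_train_test_val(
#       X, y, test_ratio=0.2, val_ratio=0, split_id=split_id)
# without `split_id` it falls back to a plain sklearn train_test_split:
#   X_tr, y_tr, X_te, y_te = split_train_test_val(X, y, test_ratio=0.2)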
def run_model(X_fit, y_fit, X_val, y_val, type_model = 'classification', max_depth = 1, n_estimators = 50,
learning_rate = 0.1, early_stopping_rounds = False, col_sampling = 0.8,
is_DART = True, DART_params = {'n_drop':1, 'dist_drop': 'random' , 'min_1':True, 'weights_list' : None},
limit_type = False,
):
"""
"""
if type_model == 'classification':
model = starboost_up.boosting.BoostingClassifier(loss=starboost_up.losses.LogLoss(),
base_estimator= xgb.XGBRegressor(max_depth = 1), base_estimator_is_tree=True,
n_estimators=n_estimators, init_estimator=starboost_up.init.LogOddsEstimator(),
learning_rate= learning_rate, row_sampling=0.8, col_sampling=col_sampling, eval_metric=micro_f1_score,
early_stopping_rounds=early_stopping_rounds, random_state=42, type_class ='classification',
is_DART = is_DART, DART_params = DART_params )
elif type_model == 'regression':
model = starboost_up.boosting.BoostingRegressor(
            loss=starboost_up.losses.L2Loss(),
base_estimator=tree.DecisionTreeRegressor(max_depth= max_depth),
base_estimator_is_tree=True,
n_estimators=n_estimators, init_estimator=linear_model.LinearRegression(),
learning_rate= learning_rate, row_sampling=0.8,
col_sampling=col_sampling, eval_metric=rmse,
early_stopping_rounds=early_stopping_rounds, random_state=42 ,
is_DART = is_DART, DART_params = DART_params)
else:
raise NameError('Unknown problem type') #
model = model.fit(X_fit, y_fit, eval_set=(X_val, y_val))
y_pred = model.predict(X_val)#_proba
#
if type_model == 'regression': eva = rmse(y_val, y_pred)
else: eva = metrics.roc_auc_score(y_val, y_pred)
evas = {}; inter_predictions = {}
for ib_num, ib in enumerate(model.iter_predict(X_val)):
inter_predictions[ib_num] = ib
if type_model == 'regression': evas[ib_num] = rmse(y_val,ib)
else: evas[ib_num] = metrics.roc_auc_score(y_val,ib)
return model, y_pred, eva, evas, inter_predictions
def run_model_x_y(X, y , test_ratio = 0.2, split_id = None, type_model = 'classification', max_depth = 1, n_estimators = 50,
learning_rate = 0.1, early_stopping_rounds = False, col_sampling = 0.8,
is_DART = True, DART_params = {'n_drop':1, 'dist_drop': 'random' , 'min_1':True, 'weights_list' : None}, limit_type = False):
    if split_id is not None:
X_fit, y_fit , X_val, y_val, choose_train, choose_test = split_train_test_val(X,y, test_ratio = test_ratio, val_ratio = 0, split_id = split_id, rand_seed = 0)
else:
X_fit, y_fit , X_val, y_val = split_train_test_val(X,y, test_ratio = test_ratio, val_ratio = 0, split_id = None, rand_seed = 0)
model, y_pred, eva, evas, inter_predictions = run_model(X_fit, y_fit, X_val, y_val, type_model = type_model, max_depth = max_depth, n_estimators = n_estimators,
learning_rate = learning_rate, early_stopping_rounds = early_stopping_rounds, col_sampling = col_sampling,
is_DART = is_DART, DART_params = DART_params)
return X_fit, X_val, y_fit, y_val, model, y_pred, eva, evas, inter_predictions
#%%
isdart = input('is dart?').strip().lower() in ('1', 'true', 'y', 'yes')
ndrop = float(input('ndrop'))
params = {'isdart':isdart, 'n_estimators':150, 'ndrop': ndrop,'min_1' : False, 'limit_type' : False }
if ndrop == 1: ndrop = int(ndrop)
X_fit, X_val, y_fit, y_val, model, y_pred, eva, evas, inter_predictions = run_model_x_y(X, y,
                        type_model = problem_type, is_DART = isdart, n_estimators = params['n_estimators'],
                        DART_params = {'n_drop':ndrop,'min_1':params['min_1']})
real_path = create_inner_path(['isdart_%s' % params['isdart'], 'n_estimators_%s' % params['n_estimators'],
                               'ndrop_%s' % params['ndrop'], 'min_1_%s' % params['min_1'],
                               'limit_type_%s' % params['limit_type']])
def make_file(array_to_save, path='',file_name ='', to_rewrite= False, type_file = '.npy') :
"""
    This function creates an .npy, .jpg or .png file
array_to_save - numpy array to save
path - path to save
file_name - name of saved file
to_rewrite - If file already exist -> whether to rewrite it.
type_file = '.npy' or 'jpg' or 'png'
"""
if not type_file.startswith('.'): type_file = '.' + type_file
if not file_name.endswith(type_file): file_name = file_name +type_file
    my_file = Path(path) / file_name
if not my_file.is_file() or to_rewrite:
if len(array_to_save) > 0:
if type_file == '.npy':
np.save(my_file, array_to_save)
elif type_file == '.png' or type_file == '.jpg':
print('Fig. saved')
array_to_save.savefig(my_file, dpi=300)
else:
plt.savefig(my_file)
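# Usage sketch (illustrative only): persist the per-iteration scores of a finished
# run inside the folder created by create_inner_path, skipping existing files:
#   make_file(np.array(list(evas.values())), path=real_path, file_name='evas',
#             to_rewrite=False, type_file='.npy')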
#%%
plt.plot(pd.DataFrame(evas, index=[0]))
import matplotlib.pyplot as plt
import pandas as pd
from epimargin.estimators import box_filter, analytical_MPVS
from epimargin.plots import plot_RR_est, plot_T_anomalies
from epimargin.utils import cwd
from etl import download_data, get_time_series, load_all_data
# model details
CI = 0.99
smoothing = 5
root = cwd()
data = root/"data"
figs = root/"figs/comparison/kaggle"
states = ["Maharashtra"]#, "Bihar", "Delhi", "Andhra Pradesh", "Telangana", "Tamil Nadu", "Madhya Pradesh"]
kaggle = pd.read_csv(data/"covid_19_india.csv", parse_dates=[1], dayfirst=True)
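# A minimal sketch (assumption: the original analysis continues differently) of
# turning the Kaggle file into a daily new-case series for the selected states.
# The column names 'State/UnionTerritory', 'Date' and 'Confirmed' are assumptions
# about the covid_19_india.csv layout; the resulting series could then be smoothed
# and fed to the imported estimators.
for state in states:
    state_new_cases = (kaggle[kaggle["State/UnionTerritory"] == state]
                       .set_index("Date")["Confirmed"]
                       .diff()
                       .clip(lower=0))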
# Mar21, 2022
##
#---------------------------------------------------------------------
# SERVER only input all files (.bam and .fa) output MeH matrix in .csv
# August 3, 2021 clean
# FINAL github
#---------------------------------------------------------------------
import random
import math
import pysam
import csv
import sys
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
open_log.logfile = open(fname, 'w', 1)
def logm(message):
log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
    print(log_message, end='')  # message already ends with a newline
open_log.logfile.write(log_message)
def close_log():
open_log.logfile.close()
# Count # of windows with enough reads for complete/impute
def coverage(methbin,complete,w):
count=0
tot = 0
meth=methbin.iloc[:,methbin.columns!='Qname']
if len(meth.columns)>=w:
for i in range(len(meth.columns)-w+1):
# extract a window
temp = meth.iloc[:,i:i+w].copy()
#print(temp)
tot = tot+1
if (enough_reads(window=temp,complete=complete,w=w)):
count=count+1
#toprint=temp.notnull().sum(axis=1)>=w
#print(toprint.sum())
#print(count)
#print(tot)
return count/tot*100
else:
return 0
# Check whether a window has enough reads for complete/impute
def enough_reads(window,w,complete):
temp=np.isnan(window).sum(axis=1)==0
if complete: # For heterogeneity estimation
return temp.sum()>=2**w
else: # for imputation
tempw1=np.isnan(window).sum(axis=1)==1
return temp.sum()>=2**(w-2) and tempw1.sum()>0
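# Tiny worked example (illustrative only): for w=3 a window qualifies for MeH
# estimation when at least 2**3 = 8 reads are complete, and for imputation when
# at least 2**(3-2) = 2 reads are complete and at least one read misses a single site.
#   toy = np.array([[1, 0, 1], [1, 0, 1], [np.nan, 0, 1]])
#   enough_reads(toy, w=3, complete=False)  # True: 2 complete reads + 1 read with one NaN
#   enough_reads(toy, w=3, complete=True)   # False: fewer than 8 complete reads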
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
def getcomplete(window,w):
temp=np.isnan(window).sum(axis=1)==0
mat=window[np.where(temp)[0],:]
#temp=window.notnull().sum(axis=1)>=w
#mat=window.iloc[np.where(temp)[0],:]
#else:
# temp=mat.notnull().sum(axis=1)>=w-1
return mat
def PattoDis(mat,dist=1):
s=mat.shape[0]
dis=np.zeros((s,s))
for i in range(s):
for j in range(s):
if j<i:
if dist==1:
d=Ham_d(mat.iloc[i,],mat.iloc[j,])
else:
d=WDK_d(mat.iloc[i,],mat.iloc[j,])
dis[i,j]=dis[j,i]=d
return dis
def Ham_d(pat1,pat2):
return (pat1!=pat2).sum()
def WDK_d(pat1,pat2):
d=0
w=pat1.shape[0]
for i in range(w): # k-1
for j in range(w-i): # starting pos
s=(w-i-1)*(1-np.all(pat1[j:j+i+1]==pat2[j:j+i+1]))
d+=s
return d
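# Small worked example (illustrative only): for w=2 the four patterns are
# 00, 10, 01, 11; with dist=1 (Hamming) PattoDis returns a symmetric 4x4 matrix
# where e.g. d(00,11)=2 and d(00,10)=1. This is the D matrix later passed to MeHperwindow.
#   pats = pd.DataFrame([[0,0],[1,0],[0,1],[1,1]])
#   PattoDis(pats, dist=1)
#   array([[0., 1., 1., 2.],
#          [1., 0., 2., 1.],
#          [1., 2., 0., 1.],
#          [2., 1., 1., 0.]])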
# input a window of w CGs and output a list of proportions with starting genomic location and genomic distance across
def window_summ(pat,start,dis,chrom):
m=np.shape(pat)[0]
d=np.shape(pat)[1]
all_pos=np.zeros((2**d,d))
for i in range(d):
all_pos[:,i]=np.linspace(0,2**d-1,2**d)%(2**(i+1))//(2**i)
#print(all_pos)
prob=np.zeros((2**d,1))
#print(prob)
for i in range(2**d):
count = 0
for j in range(m):
if (all_pos[i,:]==pat.iloc[j,:]).sum()==d:
count += 1
#print(count)
prob[i]=count
if d==3:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'dis':dis})
if d==4:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'dis':dis})
if d==5:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'dis':dis})
if d==6:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'p33':prob[32],'p34':prob[33],'p35':prob[34],\
'p36':prob[35],'p37':prob[36],'p38':prob[37],'p39':prob[38],'p40':prob[39],\
'p41':prob[40],'p42':prob[41],'p43':prob[42],'p44':prob[43],'p45':prob[44],\
'p46':prob[45],'p47':prob[46],'p48':prob[47],'p49':prob[48],'p50':prob[49],\
'p51':prob[50],'p52':prob[51],'p53':prob[52],'p54':prob[53],'p55':prob[54],\
'p56':prob[55],'p57':prob[56],'p58':prob[57],'p59':prob[58],'p60':prob[59],\
'p61':prob[60],'p62':prob[61],'p63':prob[62],'p64':prob[63],'dis':dis})
return out
def MeHperwindow(pat,start,dis,chrom,D,w,optional,MeH=2,dist=1,strand='f'):
count=np.zeros((2**w,1))
m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
if MeH==1: # Abundance based
score=(((count/m)**2).sum(axis=0))**(-1)
elif MeH==2: # PWS based
interaction=np.multiply.outer(count/m,count/m).reshape((2**w,2**w))
Q=sum(sum(D*interaction))
#print("Q =",Q)
if Q==0:
score=0
else:
score=(sum(sum(D*(interaction**2)))/(Q**2))**(-0.5)
elif MeH==3: #Phylogeny based
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if dist==1 and w==4:
phylotree=np.append(np.append(np.append(np.append([0],np.repeat(0.5,16)),np.repeat(0.25,6)),[0.5]),np.repeat(0.25,6))
#phylotree=np.repeat(0,1).append(np.repeat(0.5,16)).append(np.repeat(0.25,6)).append(0.5).append(np.repeat(0.25,6))
countn=np.zeros(30)
#count<-rep(0,29)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[4]+countn[7]
countn[18]=countn[9]+countn[12]
countn[19]=countn[1]+countn[2]
countn[20]=countn[3]+countn[6]
countn[21]=countn[17]+countn[18]
countn[22]=countn[19]+countn[20]
countn[23]=countn[21]+countn[22]
countn[24]=countn[5]+countn[8]
countn[25]=countn[10]+countn[13]
countn[26]=countn[24]+countn[25]
countn[27]=countn[23]+countn[26]
countn[28]=countn[11]+countn[14]
countn[29]=countn[27]+countn[28]
#Q=sum(sum(phylotree*count))
if dist==2 and w==4:
            phylotree=np.append(np.append(np.append(np.append(np.append([0],np.repeat(3,16)),np.repeat(1.5,6)),[3.2,0.8]),np.repeat(2,3)),np.repeat(1.5,2))
#phylotree=c(rep(3,16),rep(1.5,6),3.2,0.8,rep(2,3),1.5,1.5)
countn=np.zeros(30)
#print(count)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[1]+countn[2]
countn[18]=countn[5]+countn[8]
countn[19]=countn[3]+countn[6]
countn[20]=countn[10]+countn[13]
countn[21]=countn[4]+countn[7]
countn[22]=countn[11]+countn[14]
countn[23]=countn[17]+countn[18]
countn[24]=countn[21]+countn[22]
countn[25]=countn[19]+countn[20]
countn[26]=countn[23]+countn[24]
countn[27]=countn[25]+countn[26]
countn[28]=countn[9]+countn[12]
countn[29]=countn[27]+countn[28]
#Q=sum(phylotree*count)
if dist==2 and w==3:
            phylotree=np.append(np.append(np.append([0],np.repeat(1.5,8)),np.repeat(0.75,3)),[1.5,0.75])
#phylotree=np.array(0).append(np.repeat(1.5,8)).append(np.repeat(0.75,3)).append(1.5,0.75)
#phylotree=c(rep(1.5,8),rep(0.75,3),1.5,0.75)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#Q=sum(phylotree*count)
if dist==1 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(0.5,8)),np.repeat(0.25,3)),[0.5,0.25])
#phylotree=np.array(0).append(np.repeat(0.5,8)).append(np.repeat(0.25,3)).append(0.5,0.25)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#print("count = ",count)
#print("phylotree = ",phylotree)
Q=sum(phylotree*countn)
score=sum(phylotree*((countn/Q)**2))**(-1)
elif MeH==4: #Entropy
score=0
for i in count:
if i>0:
score-=(i/m)*np.log2(i/m)/w
elif MeH==5: #Epipoly
score=1-((count/m)**2).sum(axis=0)
if optional:
if MeH!=3:
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if w==3:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==4:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==5:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==6:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
        # summary row; built here as well so that `out` is defined when optional output is requested
        out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
        return out, opt
    else:
        out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
        return out
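# Usage sketch (illustrative only): score one toy window of fully observed reads
# with the PWS-based measure (MeH=2) and a Hamming-distance matrix, as done in the
# genome-screening functions below.
#   w_toy = 3
#   all_pos_toy = np.zeros((2**w_toy, w_toy))
#   for i in range(w_toy):
#       all_pos_toy[:, i] = np.linspace(0, 2**w_toy-1, 2**w_toy) % (2**(i+1)) // (2**i)
#   D_toy = PattoDis(pd.DataFrame(all_pos_toy), dist=1)
#   toy_reads = pd.DataFrame(np.random.randint(0, 2, size=(16, w_toy)))
#   MeHperwindow(toy_reads, start=1, dis=10, chrom='chr1', D=D_toy, w=w_toy,
#                optional=False, MeH=2, dist=1, strand='f')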
def CGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# initialise data frame for output
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','strand','depth'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
# all methylation patterns for Methylation heterogeneity evaluation
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
# distance matrix, also for Methylation heterogeneity evaluation
D=PattoDis(pd.DataFrame(all_pos),dist=dist) # 1:Hamming distance, 2: WDK
start=datetime.datetime.now()
# vector for saving methylation statuses before imputation
MU=np.zeros((2,w))
# screen bamfile by column
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
temp=temp.append(df2, ignore_index=True)
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['G'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
# merge with other columns
if (not temp.empty):
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# Reverse strand, check if 'CG' in reference genome
if pileupcolumn.pos>1:
if (fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos+1)=='CG'):
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
dfr2 = pd.DataFrame(data=dr)
tempr=tempr.append(dfr2, ignore_index=True)
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['C'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
# Impute and estimate, if there are 2w-1 columns
if never and aggreC.shape[1] == (2*w):
# C/G to 1, rest to 0, N to NA
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC
meth = methbin.copy()
# remove read ID
meth = meth.drop('Qname',axis=1)
# back up for imputation
if imp:
methtemp = meth.copy()
# imputation by sliding window of 1 C
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# save methylation statuses before imputation
# check if eligible for imputation, impute
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# overwrite imputed window
meth = methtemp.copy()
# Evaluate methylation level and methylation heterogeneity and append to result
for i in range(0,w,1): # w windows
window = meth.iloc[:,range(i,i+w)].values
# check if enough complete patterns for evaluating MeH
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
# if need to output methylation patterns
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
# evaluate and output MeH
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
# remove 1 column
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
# drop rows with no values
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# Reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
#for i in range(0,meth.shape[1]-w+1,1):
#if i>w-2 and i<2*w:
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
    print("Done CG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
    return sample, coverage, cov_context, 'CG'
#samfile.close()
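# Driver sketch (assumption only: the original wires these functions up elsewhere,
# e.g. behind argument parsing and multiprocessing). The file names below are
# hypothetical; the .bam and .fa are expected under MeHdata/.
#   sample, n_cols, n_context, context = CGgenome_scr(
#       'sample_1.bam', w=4, fa='genome', optional=False, melv=False,
#       silence=False, dist=1, MeH=2, imp=True)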
def CHHgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
start=datetime.datetime.now()
MU=np.zeros((2,w))
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHH %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# forward
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)!='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)!='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
tempr=tempr.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
#if enough_reads(window,w,complete=True):
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','G','A'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
    print("Done CHH for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
    return sample, coverage, cov_context, 'CHH'
def CHGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
coverage = cov_context = 0
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
MU=np.zeros((2,w))
start=datetime.datetime.now()
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)=='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)=='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # G
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2r = pd.DataFrame(data=dr)
#df2.head()
tempr=tempr.append(df2r, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
#temp.head()
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','G','N'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
                        toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
                                        dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
                                        chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
                        Resultopt=Resultopt.append(opt)
import psycopg2
import pandas as pd
import db.db_access as access
from datetime import timedelta, datetime
from sqlalchemy import create_engine
""" part for the normal keyword twitter pipeline"""
CONST_SQL_GET_MAIN_COMPANY = 'SELECT * FROM maincompany'
CONST_SQL_UPDATE_EMPTY_COMPANY_TABLE = "UPDATE company SET twitter_keyword = NULL WHERE twitter_keyword ='';"
CONST_SQL_LAST_DATE_TWEET = 'SELECT MAX(date) FROM tweet WHERE company_id = {COMPANY_ID};'
CONST_SQL_GET_TWEET_LIST = 'Select id from tweet'
CONST_SQL_GET_TWEET_LIST_FOR_SENTIMENT = "SELECT id, tweet, date, timezone FROM tweet WHERE date_utc is null and timezone <> 'UTC' limit 1000"
CONST_SQL_GET_TWEET_SPECIFIC = 'Select * from tweet WHERE id = {TWEET_ID}'
CONST_SQL_UPDATE_SENTIMENT_DATE = 'UPDATE tweet SET date_utc=%s, sentiment_score_textblob=%s, sentiment_score_vader=%s WHERE id = %s'
CONST_SQL_MIN_DATE_TWEET = 'SELECT MIN(date) FROM tweet WHERE company_id = {COMPANY_ID};'
CONST_SQL_UPDATE_UTC_TIMEZONE = """UPDATE tweet SET timezone = '+0000' WHERE timezone = 'UTC';"""
CONST_SQL_GET_TICKER_WITH_COMPANY_ID = 'SELECT c.id, c.main_company_id, m.ticker FROM company c LEFT JOIN maincompany m ON c.main_company_id = m.id'
CONST_SQL_GET_TWEET_SPECIFIC_DATE = """Select * from tweet WHERE company_id = {COMPANY_ID} AND date >= '{DATE}' AND date < '{DATE2}'"""
CONST_SQL_GET_ID_LAST_TWEET = """SELECT id FROM tweet WHERE company_id = {COMPANY_ID} AND date = '{DATE}' """
CONST_SQL_GET_LIST_DATE = """Select to_char(t.date, 'yyyy-mm-dd'::text) AS date from tweet t WHERE company_id = {COMPANY_ID} group by to_char(t.date, 'yyyy-mm-dd'::text) order by date"""
"""part for the cashtag twitter pipeline"""
CONST_SQL_GET_MAIN_COMPANY = 'SELECT * FROM maincompany'
CONST_SQL_UPDATE_EMPTY_COMPANY_TABLE_CASHTAG = "UPDATE company SET twitter_cashtag = NULL WHERE twitter_cashtag ='';"
CONST_SQL_LAST_DATE_TWEET_CASHTAG = 'SELECT MAX(date) FROM tweet_cashtag WHERE company_id = {COMPANY_ID};'
CONST_SQL_MIN_DATE_TWEET_CASHTAG = 'SELECT MIN(date) FROM tweet_cashtag WHERE company_id = {COMPANY_ID};'
CONST_SQL_GET_TWEET_LIST_CASHTAG = 'Select id from tweet_cashtag'
CONST_SQL_GET_TWEET_LIST_FOR_SENTIMENT_CASHTAG = "SELECT id, tweet, date, timezone FROM tweet_cashtag WHERE date_utc is null and timezone <> 'UTC' limit 1000"
CONST_SQL_GET_TWEET_SPECIFIC_CASHTAG = 'Select * from tweet_cashtag WHERE id = {TWEET_ID}'
CONST_SQL_UPDATE_SENTIMENT_DATE_CASHTAG = 'UPDATE tweet_cashtag SET date_utc=%s, sentiment_score_textblob=%s, sentiment_score_vader=%s WHERE id = %s'
CONST_SQL_UPDATE_UTC_TIMEZONE_CASHTAG = """UPDATE tweet_cashtag SET timezone = '+0000' WHERE timezone = 'UTC';"""
CONST_SQL_GET_TWEET_SPECIFIC_DATE_CASHTAG = """Select * from tweet_cashtag WHERE company_id = {COMPANY_ID} AND date >= '{DATE}' AND date < '{DATE2}'"""
CONST_SQL_GET_ID_LAST_TWEET_CASHTAG = """SELECT id FROM tweet_cashtag WHERE company_id = {COMPANY_ID} AND date = '{DATE}'"""
CONST_SQL_GET_LIST_DATE_CASGTAG = """Select to_char(t.date, 'yyyy-mm-dd'::text) AS date from tweet_cashtag t WHERE company_id = {COMPANY_ID} group by to_char(t.date, 'yyyy-mm-dd'::text) order by date"""
#for daily twitter webservice
CONST_SQL_GET_QUARTER_ENDS = """SELECT * FROM "public"."parsing_twitter_cashtag" t WHERE t.search = '{CASHTAG}' AND TO_CHAR(t.date, 'yyyy-mm-dd') = '{DATE}'"""
"""part for the old parser with more columns"""
CONST_SQL_INSERT_TWEET = """
INSERT INTO tweet(tweet_id, conversation_id, date, timezone, tweet, hashtags, cashtags, user_id_str, username, nlikes,
nreplies, nretweets, quote_url, reply_to, date_update, company_id, link) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
CONST_SQL_INSERT_TWEET_CASHTAG ="""
INSERT INTO tweet_cashtag(tweet_id, conversation_id, date, timezone, tweet, hashtags, cashtags, user_id_str, username, nlikes,
nreplies, nretweets, quote_url, reply_to, date_update, company_id, link) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
""" this part is for the tweet long parsing"""
CONST_SQL_LAST_DATE_TWEET_LONG = 'SELECT MAX(date) FROM parsing_twitter WHERE company_id = {COMPANY_ID};'
CONST_SQL_ALL_CASHTAG_TWEET = 'SELECT * FROM parsing_twitter_cashtag LIMIT 1000'
DELETE_SQL_ALLCASHTAG_TWEET = 'DELETE FROM parsing_twitter_cashtag WHERE tweet_id = {TWEET_ID};'
CONST_SQL_LAST_DATE_TWEET_CASHTAG_LONG = 'SELECT MAX(date) FROM parsing_twitter_cashtag WHERE company_id = {COMPANY_ID};'
CONST_SQL_INSERT_PARSING_TWITTER_LONG = """
INSERT INTO parsing_twitter(tweet_id, conversation_id, created_at, date, timezone, place, tweet, hashtags, cashtags, user_id, user_id_str, username, name, day, hour, link, retweet, nlikes
, nreplies, nretweets, quote_url, search, near, geo, source, user_rt_id, user_rt, retweet_id, reply_to, retweet_date, translate, trans_src, trans_dest, date_update, company_id) VALUES (%s
, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
CONST_SQL_INSERT_PARSING_TWITTER_CASHTAG_LONG = """
INSERT INTO parsing_twitter_cashtag(tweet_id, conversation_id, created_at, date, timezone, place, tweet, hashtags, cashtags, user_id, user_id_str, username, name, day, hour, link, retweet, nlikes
, nreplies, nretweets, quote_url, search, near, geo, source, user_rt_id, user_rt, retweet_id, reply_to, retweet_date, translate, trans_src, trans_dest, date_update, company_id) VALUES (%s
, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
CONST_SQL_REFRESH_KEYWORD_MAT_VIEW = """REFRESH MATERIALIZED VIEW day_keyword_materialised;"""
CONST_SQL_REFRESH_KEYWORD_MAT_VIEW_WITH_FILTER = """REFRESH MATERIALIZED VIEW day_keyword_like_filter_materialised;"""
CONST_SQL_REFRESH_CASHTAG_MAT_VIEW = """REFRESH MATERIALIZED VIEW day_cashtag_materialised;"""
CONST_SQL_REFRESH_CASHTAG_MAT_VIEW_WITH_FILTER = """REFRESH MATERIALIZED VIEW day_CASHTAG_like_filter_materialised;"""
CONST_SQL_REMOVE_DUPLICATES = """DELETE FROM tweet a USING tweet b WHERE a.id < b.id AND a.tweet_id = b.tweet_id AND a.company_id = b.company_id;"""
CONST_SQL_REMOVE_DUPLICATES_CASHTAG = """DELETE FROM tweet_cashtag a USING tweet_cashtag b WHERE a.id < b.id AND a.tweet_id = b.tweet_id AND a.company_id = b.company_id;"""
CONST_SQL_REMOVE_OLD_TWEET = """DELETE FROM tweet WHERE date <='{DATE}' """
CONST_SQL_REMOVE_OLD_TWEET_CASHTAG = """DELETE FROM tweet_cashtag WHERE date <='{DATE}' """
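# Hedged usage sketch (my addition, not part of the original module): the {PLACEHOLDER}
# constants above are presumably filled in with str.format before execution, while the
# %s-style statements are parameterised by psycopg2 itself. `cursor` and the company id
# below are hypothetical example values.
def _example_last_tweet_date(cursor, company_id=42):
    cursor.execute(CONST_SQL_LAST_DATE_TWEET.format(COMPANY_ID=company_id))
    return cursor.fetchone()[0]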
def first_column(array_2d):
return list(zip(*array_2d))[0]
def get_ticker_from_company_id():
host, port, database, user, password = access.postgre_access_google_cloud()
cnx = psycopg2.connect(host=host, port=port, database=database, user=user, password=password)
cur = cnx.cursor()
cur.execute(CONST_SQL_GET_TICKER_WITH_COMPANY_ID)
result = cur.fetchall()
result =
|
pd.DataFrame.from_records(result, columns=[x[0] for x in cur.description])
|
pandas.DataFrame.from_records
|
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, BaseEstimator
class FunctionTransformer(TransformerMixin, BaseEstimator):
def __init__(self, func, **kwargs):
self.func = func
self.func_kwargs = kwargs
def fit(self, X, y=None):
return self
def transform(self, X):
return self.func(X, **self.func_kwargs)
def get_params(self, deep=True):
return {'func': self.func, **self.func_kwargs}
class FeatureSelector(TransformerMixin, BaseEstimator):
def __init__(self, feats_to_drop=None):
if feats_to_drop is None:
feats_to_drop = []
self.feats_to_drop = feats_to_drop
def fit(self, X, y=None):
self.feats_to_drop = [feat for feat in self.feats_to_drop if feat in X.columns.tolist()]
return self
def transform(self, y):
return y.drop(self.feats_to_drop, axis=1)
def get_params(self, deep=True):
return {'feats_to_drop': self.feats_to_drop}
class TypeSelector(TransformerMixin, BaseEstimator):
def __init__(self, dtype):
self.dtype = dtype
def fit(self, X, y=None):
return self
def transform(self, y):
return y.select_dtypes(include=self.dtype).copy()
def get_params(self, deep=True):
return {'dtype': self.dtype}
class Imputer(TransformerMixin, BaseEstimator):
CONST_METHOD = 'const'
MEAN_METHOD = 'mean'
MEDIAN_METHOD = 'median'
MODE_METHOD = 'mode'
def __init__(self, features, method='const', value=None):
self.features = features
self.method = method
self.impute_values = [value] * len(features)
self.simple_value = value
def fit(self, X, y=None):
self.features = [feat for feat in self.features if feat in X.columns.tolist()]
self.impute_values = (self._compute_fill_values(X)).iloc[0]
return self
def transform(self, y):
return y.fillna(self.impute_values)
def get_params(self, deep=True):
return {'data_preparation': self.features, 'method': self.method, 'value': self.simple_value}
def _compute_fill_values(self, X):
if self.method == Imputer.CONST_METHOD:
impute_values = pd.DataFrame([self.simple_value] * len(self.features), index=self.features).transpose()
elif self.method == Imputer.MEAN_METHOD:
impute_values = X[self.features].mean(axis=0)
elif self.method == Imputer.MEDIAN_METHOD:
impute_values = X[self.features].median(axis=0)
elif self.method == Imputer.MODE_METHOD:
impute_values = X[self.features].mode(axis=0)
else:
raise ValueError(self.method + ' not in available imputing methods')
return impute_values
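# Minimal usage sketch (my own toy example; 'price', 'city' and 'id' are invented column
# names): these transformers are written to be chained with sklearn's Pipeline.
from sklearn.pipeline import Pipeline

def _toy_pipeline_example():
    df = pd.DataFrame({'price': [1.0, np.nan, 3.0], 'city': ['a', 'b', 'a'], 'id': [1, 2, 3]})
    pipe = Pipeline([
        ('drop_id', FeatureSelector(feats_to_drop=['id'])),                      # drop unused columns
        ('fill_price', Imputer(features=['price'], method='const', value=0.0)),  # fill NaNs with a constant
        ('numeric_only', TypeSelector('number')),                                # keep numeric columns
    ])
    return pipe.fit_transform(df)  # numeric frame where the missing 'price' becomes 0.0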
class DummyEncoder(TransformerMixin, BaseEstimator):
def __init__(self, drop_first=False):
self.drop_first = drop_first
def fit_transform(self, X, y=None, **fit_params):
X = pd.get_dummies(X, drop_first=self.drop_first)
self.dummied_features = X.columns
return X
def fit(self, X, y=None):
self.dummied_features = pd.get_dummies(X, drop_first=self.drop_first).columns
return self
def transform(self, y, *_):
y =
|
pd.get_dummies(y, drop_first=self.drop_first)
|
pandas.get_dummies
|
"""figures of merit is a collection of financial calculations for energy.
This module contains financial calculations based on solar power and batteries
in a given network. The networks used are defined as network objects (see evolve parsers).
TODO: Add inverters: Inverters are not considered at the moment and Improve Nan Handeling
"""
import numpy
import pandas as pd
from c3x.data_cleaning import unit_conversion
#Todo:
#Add inverters: Inverters are not considered at the moment
#Improve NaN handling
def meter_power(meas_dict: dict, meter: int, axis: int = 0, column: int = 0) -> pd.Series:
"""
calculates the total power for a meter by summing the load, solar and battery
power of its individual measurement points
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
meter(int): Id for a meter
axis (int): how data is concatenated for results
column (int): column index to be used
return:
meter_p (pd.Series): combined power (solar, battery, load)
"""
meter_p = pd.DataFrame()
if meas_dict[meter]:
meter_p = pd.DataFrame()
for meas in meas_dict[meter]:
if 'load' in meas:
meter_p = pd.concat([meter_p, meas_dict[meter][meas].iloc[:,column]], axis=axis)
elif 'solar' in meas:
meter_p = pd.concat([meter_p, meas_dict[meter][meas].iloc[:,column]], axis=axis)
elif 'batteries' in meas:
meter_p = pd.concat([meter_p, meas_dict[meter][meas].iloc[:,column]], axis=axis)
meter_p = meter_p.sum(axis=1)
return meter_p
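# Illustrative sketch (my assumption about the expected input, with made-up numbers):
# meas_dict maps a meter id to a dict of measurement frames whose keys contain 'load',
# 'solar' or 'batteries'; meter_power then sums the selected column of each row-wise.
def _example_meas_dict():
    idx = pd.date_range('2020-01-01', periods=3, freq='30T')
    return {5: {'load_meter5': pd.DataFrame({'p_kw': [1.2, 0.8, 1.0]}, index=idx),
                'solar_meter5': pd.DataFrame({'p_kw': [-0.5, -0.9, -0.2]}, index=idx)}}
# meter_power(_example_meas_dict(), 5, axis=1) -> per-timestamp sum of load and solar power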
def financial(meter_p: pd.Series, import_tariff: pd.Series, export_tariff: pd.Series) -> pd.Series:
"""
Evaluate the financial outcome for a customer.
A conversion from kW to kWh is handled internally
Note: assumes constant step size in timestamps (ensure a complete, regular index beforehand)
Args:
meter_p (pd.Series ): Power of a node
import_tariff (pd.Series): Expects this to be in $/kWh.
export_tariff (pd.Series): Expects this to be in $/kWh.
Returns:
cost (pd.Series): cost per measurement point, using import and export tariffs
"""
# Note: need to ensure meter data is converted to kWh
timestep = numpy.timedelta64(meter_p.index[1] - meter_p.index[0])
meter = unit_conversion.convert_watt_to_watt_hour(meter_p, timedelta=timestep)
import_power_cost = meter.where(meter >= 0).fillna(value=0.0)
export_power_revenue = meter.where(meter < 0).fillna(value=0.0)
cost = import_power_cost * import_tariff + export_power_revenue*export_tariff
return cost
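# Worked sketch (tariff values and time step are mine): with a 30-minute step a constant
# 1 kW import is 0.5 kWh per step, so at 0.30 $/kWh it costs 0.15 $ per step, while the
# exported (negative) half-hours are credited at the 0.10 $/kWh export tariff.
def _financial_example():
    idx = pd.date_range('2020-01-01', periods=4, freq='30T')
    meter_p = pd.Series([1.0, 1.0, -1.0, -1.0], index=idx)   # kW, positive = import
    import_tariff = pd.Series(0.30, index=idx)               # $/kWh
    export_tariff = pd.Series(0.10, index=idx)               # $/kWh
    return financial(meter_p, import_tariff, export_tariff)  # roughly 0.15, 0.15, -0.05, -0.05 per step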
def customer_financial(meas_dict: dict, node_keys: list = None, tariff: dict = None) -> dict:
"""
Evaluate the financial outcome for a selected customer or for all customers.
Note: not currently setup to handle missing data (eg NANs)
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
node_keys (list): list nodes for which financials are calculated.
tariff (dict): nodes tariff data. Expects this to be in $/kWh.
Returns:
results_dict: cost per node and the average cost over all nodes
"""
results_dict = {}
average = []
nodes = node_keys if node_keys else meas_dict.keys()
for key in nodes:
if type(key) == int:
key = str(key)
if meas_dict[key]:
if key in tariff:
meter_p = meter_power(meas_dict, key, axis=1)
meter_p_cost = financial(meter_p,
tariff[key]['import_tariff'],
tariff[key]['export_tariff'])
results_dict[key] = meter_p_cost
initiate = 0
for node in results_dict.values():
average = node if initiate == 0 else average.append(node)
initiate = 1
average = numpy.nanmean(average)
results_dict["average"] = average
return results_dict
def customer_cost_financial(tariff: dict, energy_grid_load: pd.Series, energy_solar_grid: pd.Series,
energy_battery_load: pd.Series, energy_solar_battery: pd.Series,
energy_solar_load: pd.Series) -> pd.Series:
"""
evaluates the customer's cost
Args:
tariff: specifies tariffs to be applied to aggregation of customers.
energy_grid_load: specifies the energy flow between grid and load
energy_solar_grid: specifies the energy flow between solar and grid
energy_battery_load: specifies the energy flow between battery and load
energy_solar_battery: specifies the energy flow between solar and battery
energy_solar_load: specifies the energy flow between solar and load
Returns:
customer_cost (pd.Series):
"""
customer_cost = financial(energy_grid_load, tariff['re_import_tariff'], 0)
customer_cost += financial(energy_grid_load, tariff['rt_import_tariff'], 0)
customer_cost += financial(energy_battery_load, tariff['le_import_tariff'], 0)
customer_cost += financial(energy_battery_load, tariff['lt_import_tariff'], 0)
customer_cost -= financial(energy_solar_grid, tariff['re_export_tariff'], 0)
customer_cost += financial(energy_solar_grid, tariff['rt_export_tariff'], 0)
customer_cost -= financial(energy_solar_battery, tariff['le_export_tariff'], 0)
customer_cost += financial(energy_solar_battery, tariff['lt_export_tariff'], 0)
customer_cost -= financial(energy_solar_battery, tariff['le_export_tariff'], 0)
customer_cost += financial(energy_solar_load, tariff['lt_import_tariff'], 0)
customer_cost += financial(energy_solar_load, tariff['lt_export_tariff'], 0)
return customer_cost
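# Sketch of the tariff dictionary these cost functions index into (the key names are taken
# from the functions above; the $/kWh numbers and the reading of the re/rt/le/lt prefixes
# as retail/local energy and transport components are my assumptions).
_example_tariffs = {
    're_import_tariff': 0.25, 're_export_tariff': 0.07,
    'rt_import_tariff': 0.10, 'rt_export_tariff': 0.02,
    'le_import_tariff': 0.15, 'le_export_tariff': 0.10,
    'lt_import_tariff': 0.05, 'lt_export_tariff': 0.02,
}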
def battery_cost_financial(tariff: dict, energy_grid_battery: pd.Series,
energy_battery_grid: pd.Series, energy_battery_load: pd.Series,
energy_solar_battery: pd.Series) -> pd.Series:
"""
evaluates the battery cost
Args:
tariff (dict): specifies tariffs to be applied to aggregation of customers.
energy_grid_battery (pd.Series): specifies the energy flow between grid and battery
energy_battery_grid (pd.Series): specifies the energy flow between battery and grid
energy_battery_load (pd.Series): specifies the energy flow between battery and load
energy_solar_battery (pd.Series): specifies the energy flow between solar and battery
Returns:
battery_cost (pd.Series):
"""
battery_cost = financial(energy_solar_battery, tariff['le_import_tariff'], 0)
battery_cost += financial(energy_solar_battery, tariff['lt_import_tariff'], 0)
battery_cost -= financial(energy_battery_load, tariff['le_export_tariff'], 0)
battery_cost += financial(energy_battery_load, tariff['lt_export_tariff'], 0)
battery_cost += financial(energy_grid_battery, tariff['re_import_tariff'], 0)
battery_cost += financial(energy_grid_battery, tariff['rt_import_tariff'], 0)
battery_cost -= financial(energy_battery_grid, tariff['re_export_tariff'], 0)
battery_cost += financial(energy_battery_grid, tariff['rt_export_tariff'], 0)
return battery_cost
def network_cost_financial(tariff: dict, energy_grid_load: pd.Series,
energy_grid_battery: pd.Series, energy_battery_grid: pd.Series,
energy_battery_load: pd.Series, energy_solar_battery: pd.Series,
energy_solar_load: pd.Series) -> pd.Series:
"""
evaluates the network cost
Args:
tariff (dict): specifies tariffs to be applied to aggregation of customers.
energy_grid_load (pd.Series): specifies the energy flow between grid and load
energy_grid_battery (pd.Series): specifies the energy flow between grid and battery
energy_battery_grid (pd.Series): specifies the energy flow between battery and grid
energy_battery_load (pd.Series): specifies the energy flow between battery and load
energy_solar_battery (pd.Series) : specifies the energy flow between solar and battery
energy_solar_load (pd.Series): specifies the energy flow between solar and load
Returns:
network_cost(pd.Series)
"""
network_cost = -financial(energy_grid_load, tariff['rt_import_tariff'], 0)
network_cost -= financial(energy_battery_load, tariff['lt_import_tariff'], 0)
network_cost -= financial(energy_battery_load, tariff['lt_export_tariff'], 0)
network_cost -= financial(energy_solar_battery, tariff['lt_import_tariff'], 0)
network_cost -= financial(energy_solar_battery, tariff['lt_export_tariff'], 0)
network_cost -= financial(energy_grid_battery, tariff['rt_import_tariff'], 0)
network_cost -= financial(energy_battery_grid, tariff['rt_export_tariff'], 0)
network_cost -= financial(energy_solar_load, tariff['lt_import_tariff'], 0)
network_cost -= financial(energy_solar_load, tariff['lt_export_tariff'], 0)
return network_cost
def lem_financial(customer_tariffs, energy_grid_load, energy_grid_battery, energy_solar_grid,
energy_battery_grid, energy_battery_load, energy_solar_battery,
energy_solar_load, battery_tariffs=None):
"""
evaluate the cost for the local energy model
Args:
customer_tariffs: specifies tariffs to be applied to aggregation of customers.
energy_grid_load (pd.series): specifies the energy flow between grid and load
energy_grid_battery: specifies the energy flow between grid and battery
energy_solar_grid: specifies the energy flow between solar and grid
energy_battery_grid: specifies the energy flow between battery and grid
energy_battery_load: specifies the energy flow between battery and load
energy_solar_battery: specifies the energy flow between solar and battery
energy_solar_load: specifies the energy flow between solar and load
battery_tariffs: specifies tariffs to be applied to aggregation of battery.
(if none are given, customer_tariffs are used)
Returns:
customer_cost, battery_cost, network_cost
"""
customer_cost = customer_cost_financial(customer_tariffs, energy_grid_load, energy_solar_grid,
energy_battery_load, energy_solar_battery,
energy_solar_load)
bt_choice = battery_tariffs if battery_tariffs else customer_tariffs
battery_cost = battery_cost_financial(bt_choice, energy_grid_battery, energy_battery_grid,
energy_battery_load, energy_solar_battery)
network_cost = network_cost_financial(customer_tariffs, energy_grid_load, energy_grid_battery,
energy_battery_grid, energy_battery_load,
energy_solar_battery, energy_solar_load)
return customer_cost, battery_cost, network_cost
def peak_powers(meas_dict: dict, node_keys: list = None) -> dict:
"""
Calculate the peak power flows into and out of the network.
#TODO: consider selecting peak powers per phase
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
meas_dict (dict): dict with measurement for one or multiple nodes.
node_keys (list): list of Node.names in Network.nodes.
Returns:
results_dict (dict): dictionary of peak power into and out of network in kW,
and in kW/connection point.
"""
nodes = node_keys if node_keys else meas_dict.keys()
sum_meter_power = pd.DataFrame([])
for key in nodes:
if type(key) == int:
key = str(key)
if meas_dict[key]:
meter_p = meter_power(meas_dict, key, axis=1)
if sum_meter_power.empty:
sum_meter_power = meter_p.copy()
else:
sum_meter_power = pd.concat([sum_meter_power, meter_p], axis=1, sort=True)
sum_power = sum_meter_power.sum(axis=1)
aver_power = numpy.nanmean(sum_meter_power, axis=1)
return {"peak_power_import": numpy.max(sum_power),
"peak_power_export": numpy.min(sum_power),
"peak_power_import_av": numpy.max(aver_power),
"peak_power_export_av": numpy.min(aver_power),
"peak_power_import_index": sum_power.idxmax(),
"peak_power_export_index": sum_power.idxmax()}
def self_sufficiency(load_p: pd.DataFrame, solar_p: pd.DataFrame, battery_p: pd.DataFrame):
"""
Self-sufficiency = 1 - imports / consumption
Note: the function expects a full index
#TODO: consider inverters and how to avoid double counting with solar, batteries
Args:
load_p (pd.DataFrame): measurement data for load of a single node.
solar_p (pd.DataFrame): measurement data for solar of a single node.
battery_p (pd.DataFrame): measurement data for battery of a single node.
Returns:
results_dict: self_consumption_solar, self_consumption_batteries
"""
self_sufficiency_solar = numpy.nan
self_sufficiency_battery = numpy.nan
if not load_p.empty:
net_load_solar = pd.concat((load_p, solar_p), axis=1).sum(axis=1)
net_load_solar_battery =
|
pd.concat((load_p, solar_p, battery_p), axis=1)
|
pandas.concat
|
#!/usr/bin/env python
# coding: utf-8
"""
This script joins the following datasets`claim_vehicle_employee_line.csv`,
`Preventable and Non Preventable_tabDelimited.txt` and `employee_experience_V2.csv`
to create a CSV file that contains the required information for the interactive plot.
It also cleans the resulting CSV file to get a successful result from the Google
Maps API. Assumes `get-data.py` is run before.
Usage: prepare_data.py --claims_file_path=<claims_file_path> --collisions_file_path=<collisions_file_path> --employee_file_path=<employee_file_path> --output_file_path=<output_file_path>
Options:
--claims_file_path=<claims_file_path> A file path for claims dataset that contains information about the claims.
--collisions_file_path=<collisions_file_path> A file path for collisions dataset that contains information about the collisions.
--employee_file_path=<employee_file_path> A file path for employees dataset that contains information about the employees.
--output_file_path=<output_file_path> A file path for resulting joined dataset.
Example:
python src/interactive_map/prepare_data.py \
--claims_file_path "data/TransLink Raw Data/claim_vehicle_employee_line.csv" \
--collisions_file_path "data/TransLink Raw Data/Preventable and Non Preventable_tabDelimited.txt"\
--employee_file_path "data/TransLink Raw Data/employee_experience_V2.csv"\
--output_file_path "results/processed_data/collision_with_claim_and_employee_info.csv"
"""
import pandas as pd
import numpy as np
from docopt import docopt
import glob
import os
import googlemaps
from pathlib import Path
import math
opt = docopt(__doc__)
def create_dirs_if_not_exists(file_path_list):
"""
It creates directories if they don't already exist.
Parameters:
file_path_list (list): A list of paths to be created if they don't exist.
"""
for path in file_path_list:
Path(os.path.dirname(path)).mkdir(parents=True, exist_ok=True)
def compare_loss_date(row):
"""
A helper function to be used in the main function to take
non null values for the loss_date.
"""
if (row['loss_date_x'] is not pd.NaT) & (row['loss_date_y'] is pd.NaT):
val = row.loss_date_x
else:
val = row.loss_date_y
return val
def get_experience_in_months(row):
"""
A helper function to be used in the main function. It computes the
operators' experience in months, using the incident date and
the operators' hiring date.
"""
difference = row['loss_date']- row['hire_date']
return round(difference.days / 30,0)
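# Quick sketch (dates are invented): an operator hired on 2018-01-01 with a loss on
# 2018-07-01 has 181 days of service, which the /30 rule above rounds to 6.0 months.
def _experience_example():
    row = pd.Series({'loss_date': pd.Timestamp('2018-07-01'),
                     'hire_date': pd.Timestamp('2018-01-01')})
    return get_experience_in_months(row)  # -> 6.0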
def main(claims_file_path, collisions_file_path, employee_file_path, output_file_path):
#read the collisions dataset
collision =
|
pd.read_csv(collisions_file_path, delimiter="\t")
|
pandas.read_csv
|
import os
from model import NN
import torch
import pandas as pd
class DataStore():
def __init__(self, part, Learning_r, Layer_s ):
"""
Class that trains, saves the predicted dimensions and saves/loads the network states.
Inputs:
part - number of the part in k-fold splitting.
Learning_r - learning rate of the Neural Network
Layer_s - number of nodes in each of all of the layers
"""
super().__init__()
#Values that are later passed to MATLAB for faster computations
self.part = part
self.Learning_r = Learning_r
self.Layer_s = Layer_s
# Folders
self.results_dir = 'Results'
self.part_number = 'Part_Number_' + str(self.part)
self.folder_name = self.part_number + '_lr_' + str(self.Learning_r) + '_layer_size_'+str(self.Layer_s)
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
self.part_dir = os.path.join(
self.results_dir, self.part_number)
self.checkpoint_dir = os.path.join(
self.part_dir, self.folder_name)
os.makedirs(self.part_dir, exist_ok = True)
os.makedirs(self.checkpoint_dir, exist_ok = True)
def net_saver(self, model_to_save):
model_name = self.part_number + '_model_lr_' + str(self.Learning_r) + '_layer_size_' + str(self.Layer_s) + '.pt'
model_path = os.path.join(self.checkpoint_dir, model_name)
torch.save(model_to_save.state_dict(), model_path)
print(model_name, 'Was saved successfully \t\t\t[saved]')
def net_loader(self, path = None):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
testm = NN(self.Layer_s).to(device)
if path is None:
model_name = self.part_number + '_model_lr_' + str(self.Learning_r) + '_layer_size_' + str(self.Layer_s) + '.pt'
path = os.path.join(self.checkpoint_dir, model_name)
testm.load_state_dict(torch.load(path))
print(model_name,' was loaded successfully.\t\t\t [loaded]')
else:
testm.load_state_dict(torch.load(path))
print(path,' was loaded successfully from the given path.\t\t\t [loaded from Path]')
return testm
def records_saver(self, records):
self.records = records
self.name_records = 'Part_Number_' + str(self.part) + '_records.csv'
self.records.to_csv(os.path.join(self.checkpoint_dir, self.name_records), index=True, header=True)
def dimensions_saver(self, d_values, threshold):
"""
Method that saves the predicted dimensions and thresholds of constant scattering values
Inputs:
d_values - predicted dimension values to be saved
threshold - thresholds of constant scattering values
"""
#used in MATLAB
self.vals_matlab =
|
pd.DataFrame(d_values)
|
pandas.DataFrame
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/01/02 17:37
Desc: fetch Jiaoyifamen (交易法门) tools data: https://www.jiaoyifamen.com/tools/
Jiaoyifamen homepage: https://www.jiaoyifamen.com/
# Jiaoyifamen-Tools-Arbitrage Analysis
Jiaoyifamen-Tools-Arbitrage Analysis-Inter-delivery Spread (Free Spread)
Jiaoyifamen-Tools-Arbitrage Analysis-Free Price Ratio
Jiaoyifamen-Tools-Arbitrage Analysis-Multi-leg Combination
Jiaoyifamen-Tools-Arbitrage Analysis-FullCarry
Jiaoyifamen-Tools-Arbitrage Analysis-Arbitrage Spread Matrix
# Jiaoyifamen-Tools-News Summary
Jiaoyifamen-Tools-News Summary-Research Report Query
Jiaoyifamen-Tools-News Summary-Trading Calendar
# Jiaoyifamen-Tools-Position Analysis
Jiaoyifamen-Tools-Position Analysis-Futures Positions
Jiaoyifamen-Tools-Position Analysis-Broker Seat Positions
Jiaoyifamen-Tools-Position Analysis-Position Seasonality
# Jiaoyifamen-Tools-Fund Analysis
Jiaoyifamen-Tools-Fund Analysis-Fund Flow
Jiaoyifamen-Tools-Fund Analysis-Deposited Funds
Jiaoyifamen-Tools-Fund Analysis-Fund Seasonality
Jiaoyifamen-Tools-Fund Analysis-Turnover Ranking
# Jiaoyifamen-Tools-Seat Analysis
Jiaoyifamen-Tools-Seat Analysis-Position Structure
Jiaoyifamen-Tools-Seat Analysis-Position Cost
Jiaoyifamen-Tools-Seat Analysis-Position Building
# Jiaoyifamen-Tools-Warehouse Receipt Analysis
Jiaoyifamen-Tools-Warehouse Receipt Analysis-Warehouse Receipt Daily
Jiaoyifamen-Tools-Warehouse Receipt Analysis-Warehouse Receipt Query
Jiaoyifamen-Tools-Warehouse Receipt Analysis-Virtual/Physical Ratio Daily
Jiaoyifamen-Tools-Warehouse Receipt Analysis-Virtual/Physical Ratio Query
# Jiaoyifamen-Tools-Term Structure Analysis
Jiaoyifamen-Tools-Term Structure Analysis-Basis Daily
Jiaoyifamen-Tools-Term Structure Analysis-Basis Analysis
Jiaoyifamen-Tools-Term Structure Analysis-Term Structure
Jiaoyifamen-Tools-Term Structure Analysis-Price Seasonality
# Jiaoyifamen-Tools-Market Analysis
Jiaoyifamen-Tools-Market Analysis-Market Data
# Jiaoyifamen-Tools-Trading Rules
Jiaoyifamen-Tools-Trading Rules-Position Limits
Jiaoyifamen-Tools-Trading Rules-Warehouse Receipt Validity
Jiaoyifamen-Tools-Trading Rules-Product Handbook
"""
import time
import matplotlib.pyplot as plt
import pandas as pd
import requests
from mssdk.futures_derivative.cons import (
csa_payload,
csa_url_spread,
csa_url_ratio,
csa_url_customize,
)
from mssdk.futures_derivative.jyfm_login_func import jyfm_login
# pd.set_option('display.max_columns', None)
# Jiaoyifamen-Tools-Arbitrage Analysis
def jyfm_tools_futures_spread(
type_1="RB", type_2="RB", code_1="01", code_2="05", headers="", plot=True
):
"""
Jiaoyifamen-Tools-Arbitrage Analysis-Inter-delivery Spread (Free Spread)
:param type_1: str
:param type_2: str
:param code_1: str
:param code_2: str
:param plot: Bool
:return: pandas.Series or pic
"""
csa_payload_his = csa_payload.copy()
csa_payload_his.update({"type1": type_1})
csa_payload_his.update({"type2": type_2})
csa_payload_his.update({"code1": code_1})
csa_payload_his.update({"code2": code_2})
res = requests.get(csa_url_spread, params=csa_payload_his, headers=headers)
data_json = res.json()
data_df = pd.DataFrame([data_json["category"], data_json["value"]]).T
data_df.index = pd.to_datetime(data_df.iloc[:, 0])
data_df = data_df.iloc[:, 1]
data_df.name = "value"
if plot:
data_df.plot()
plt.legend(loc="best")
plt.xlabel("date")
plt.ylabel("value")
plt.show()
return data_df
else:
return data_df
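# Usage sketch (my example): fetch the RB 01/05 inter-delivery spread without plotting.
# `headers` must come from an authenticated session (e.g. via jyfm_login); no credentials
# or exact login signature are assumed here.
def _example_rb_spread(headers):
    return jyfm_tools_futures_spread(type_1="RB", type_2="RB", code_1="01",
                                     code_2="05", headers=headers, plot=False)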
def jyfm_tools_futures_ratio(
type_1="RB", type_2="RB", code_1="01", code_2="05", headers="", plot=True
):
"""
Jiaoyifamen-Tools-Arbitrage Analysis-Free Price Ratio
:param type_1: str
:param type_2: str
:param code_1: str
:param code_2: str
:param plot: Bool
:return: pandas.Series or pic
2013-01-04 -121
2013-01-07 -124
2013-01-08 -150
2013-01-09 -143
2013-01-10 -195
...
2019-10-21 116
2019-10-22 126
2019-10-23 123
2019-10-24 126
2019-10-25 134
"""
csa_payload_his = csa_payload.copy()
csa_payload_his.update({"type1": type_1})
csa_payload_his.update({"type2": type_2})
csa_payload_his.update({"code1": code_1})
csa_payload_his.update({"code2": code_2})
res = requests.get(csa_url_ratio, params=csa_payload_his, headers=headers)
data_json = res.json()
data_df = pd.DataFrame([data_json["category"], data_json["value"]]).T
data_df.index = pd.to_datetime(data_df.iloc[:, 0])
data_df = data_df.iloc[:, 1]
data_df.name = "value"
if plot:
data_df.plot()
plt.legend(loc="best")
plt.xlabel("date")
plt.ylabel("value")
plt.show()
return data_df
else:
return data_df
def jyfm_tools_futures_customize(
formula="RB01-1.6*I01-0.5*J01-1200", headers="", plot=True
):
"""
Jiaoyifamen-Tools-Arbitrage Analysis-Multi-leg Combination
:param formula: str
:param plot: Bool
:return: pandas.Series or pic
"""
params = {"formula": formula}
res = requests.get(csa_url_customize, params=params, headers=headers)
data_json = res.json()
data_df = pd.DataFrame([data_json["category"], data_json["value"]]).T
data_df.index = pd.to_datetime(data_df.iloc[:, 0])
data_df = data_df.iloc[:, 1]
data_df.name = "value"
if plot:
data_df.plot()
plt.legend(loc="best")
plt.xlabel("date")
plt.ylabel("value")
plt.show()
return data_df
else:
return data_df
def jyfm_tools_futures_full_carry(
begin_code="05", end_code="09", ratio="4", headers=""
):
"""
Jiaoyifamen-Tools-Arbitrage Analysis-FullCarry
https://www.jiaoyifamen.com/tools/future/full/carry?beginCode=05&endCode=09&ratio=4
Note: the forward carry cost mainly consists of storage fees and funding costs; transaction fees are a very small share and are ignored. VAT is uncertain and also excluded. When using this table, watch for warehouse-receipt validity, premiums/discounts, and the extra costs of perishable products. The actual Full Carry level is slightly higher than estimated here.
:param begin_code: start month
:type begin_code: str
:param end_code: end month
:type end_code: str
:param ratio: percentage, pass the absolute value here
:type ratio: str
:param headers: request headers
:type headers: dict
:return: estimated forward-market carry cost
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/future/full/carry"
params = {
"beginCode": begin_code,
"endCode": end_code,
"ratio": ratio,
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["table_data"])
def jyfm_tools_futures_arbitrage_matrix(
category="1", type1="RB", type2="RB", headers=""
):
"""
Jiaoyifamen-Tools-Arbitrage Analysis-Inter-delivery Spread Matrix
https://www.jiaoyifamen.com/tools/future/arbitrage/matrix
:param category: 1: inter-delivery spread; 2: free spread; 3: free price ratio
:type category: str
:param type1: first product
:type type1: str
:param type2: second product
:type type2: str
:param headers: request headers
:type headers: dict
:return: the corresponding matrix
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/future/arbitrage/matrix"
params = {
"category": category,
"type1": type1,
"type2": type2,
"_": "1583846468579",
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"])
def jyfm_exchange_symbol_dict():
jyfm_exchange_symbol_dict_inner = {
"中国金融期货交易所": {
"TF": "五债",
"T": "十债",
"IC": "中证500",
"IF": "沪深300",
"IH": "上证50",
"TS": "二债",
},
"郑州商品交易所": {
"FG": "玻璃",
"RS": "菜籽",
"CF": "棉花",
"LR": "晚稻",
"CJ": "红枣",
"JR": "粳稻",
"ZC": "动力煤",
"TA": "PTA",
"SA": "纯碱",
"AP": "苹果",
"WH": "强麦",
"SF": "硅铁",
"MA": "甲醇",
"CY": "棉纱",
"RI": "早稻",
"OI": "菜油",
"SM": "硅锰",
"RM": "菜粕",
"UR": "尿素",
"PM": "普麦",
"SR": "白糖",
},
"大连商品交易所": {
"PP": "PP",
"RR": "粳米",
"BB": "纤板",
"A": "豆一",
"EG": "乙二醇",
"B": "豆二",
"C": "玉米",
"JM": "焦煤",
"I": "铁矿",
"J": "焦炭",
"L": "塑料",
"M": "豆粕",
"P": "棕榈",
"CS": "淀粉",
"V": "PVC",
"Y": "豆油",
"JD": "鸡蛋",
"FB": "胶板",
"EB": "苯乙烯",
},
"上海期货交易所": {
"SS": "不锈钢",
"RU": "橡胶",
"AG": "沪银",
"AL": "沪铝",
"FU": "燃油",
"RB": "螺纹",
"CU": "沪铜",
"PB": "沪铅",
"BU": "沥青",
"AU": "沪金",
"ZN": "沪锌",
"SN": "沪锡",
"HC": "热卷",
"NI": "沪镍",
"WR": "线材",
"SP": "纸浆",
},
"上海国际能源交易中心": {"SC": "原油", "NR": "20号胶"},
}
return jyfm_exchange_symbol_dict_inner
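# Example lookup (mine): resolve an exchange symbol to its Chinese product name.
def _symbol_name(exchange="上海期货交易所", symbol="RB"):
    return jyfm_exchange_symbol_dict()[exchange].get(symbol)  # -> "螺纹"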
# Jiaoyifamen-Tools-News Summary
def jyfm_tools_research_query(limit="100", headers=""):
"""
Jiaoyifamen-Tools-News Summary-Research Report Query
https://www.jiaoyifamen.com/tools/research/qryPageList
:param limit: number of records to return
:type limit: str
:return: research report information
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/research/qryPageList"
params = {
"page": "1",
"limit": limit,
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"])
def jyfm_tools_trade_calendar(trade_date="2020-01-03", headers=""):
"""
Jiaoyifamen-Tools-News Summary-Trading Calendar
This function can also return trading-calendar data for future dates
https://www.jiaoyifamen.com/tools/trade-calendar/events
:param trade_date: specified trading day
:type trade_date: str
:return: trading-calendar data for the specified trading day
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/trade-calendar/events"
params = {
"page": "1",
"limit": "1000",
"day": trade_date,
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"])
# Jiaoyifamen-Tools-Position Analysis
def jyfm_tools_position_detail(
symbol="JM", code="jm2005", trade_date="2020-01-03", headers=""
):
"""
Jiaoyifamen-Tools-Position Analysis-Futures Positions
:param symbol: specified product
:type symbol: str
:param code: specified contract
:type code: str
:param trade_date: specified trading day
:type trade_date: str
:param headers: headers with cookies
:type headers: dict
:return: futures position data for the specified product, contract and trading day
:rtype: pandas.DataFrame
"""
url = f"https://www.jiaoyifamen.com/tools/position/details/{symbol}?code={code}&day={trade_date}&_=1578040551329"
res = requests.get(url, headers=headers)
return pd.DataFrame(res.json()["short_rank_table"])
def jyfm_tools_position_seat(seat="永安期货", trade_date="2020-01-03", headers=""):
"""
Jiaoyifamen-Tools-Position Analysis-Broker Seat Positions
:param seat: specified futures broker
:type seat: str
:param trade_date: specific trading day
:type trade_date: str
:param headers: headers with cookies
:type headers: dict
:return: seat position data for the specified broker on the specified trading day
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/position/seat"
params = {
"seat": seat,
"day": trade_date,
"type": "",
"_": "1578040989932",
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"])
def jyfm_tools_position_season(symbol="RB", code="05", headers=""):
"""
Jiaoyifamen-Tools-Position Analysis-Position Seasonality
https://www.jiaoyifamen.com/tools/position/season
:param symbol: specific product
:type symbol: str
:param code: specific contract month
:type code: str
:param headers: headers with cookies
:type headers: dict
:return: seasonal pattern of positions for the contract
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/position/season"
params = {
"type": symbol,
"code": code,
}
res = requests.get(url, params=params, headers=headers)
data_json = res.json()
temp_df = pd.DataFrame(
[
data_json["year2013"],
data_json["year2014"],
data_json["year2015"],
data_json["year2016"],
data_json["year2017"],
data_json["year2018"],
data_json["year2019"],
data_json["year2020"],
],
columns=data_json["dataCategory"],
).T
temp_df.columns = ["2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020"]
return temp_df
# Jiaoyifamen-Tools-Fund Analysis
def jyfm_tools_position_fund_direction(
trade_date="2020-02-24", indicator="期货品种资金流向排名", headers=""
):
"""
Jiaoyifamen-Tools-Fund Analysis-Fund Flow
https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
:param trade_date: specified trading day
:type trade_date: str
:param indicator: "期货品种资金流向排名" (ranking by product) or "期货主力合约资金流向排名" (ranking by dominant contract)
:type indicator: str
:param headers: headers with cookies
:type headers: dict
:return: fund flow data for the specified trading day
:rtype: pandas.DataFrame
"""
params = {
"day": trade_date,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "期货品种资金流向排名":
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["flowCategory"]),
data_json["flowCategory"],
data_json["flowValue"],
],
index=["date", "symbol", "fund"],
).T
else:
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["dominantFlowCategory"]),
data_json["dominantFlowCategory"],
data_json["dominantFlowValue"],
],
index=["date", "symbol", "fund"],
).T
def jyfm_tools_position_fund_down(
trade_date="2020-02-24", indicator="期货品种沉淀资金排名", headers=""
):
"""
Jiaoyifamen-Tools-Fund Analysis-Deposited Funds
https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
:param trade_date: specified trading day
:type trade_date: str
:param indicator: "期货品种沉淀资金排名" (ranking by product) or "期货主力合约沉淀资金排名" (ranking by dominant contract)
:type indicator: str
:param headers: headers with cookies
:type headers: dict
:return: deposited funds for the specified trading day
:rtype: pandas.DataFrame
"""
params = {
"day": trade_date,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "期货品种沉淀资金排名":
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["precipitationCategory"]),
data_json["precipitationCategory"],
data_json["precipitationValue"],
],
index=["date", "symbol", "fund"],
).T
else:
return pd.DataFrame(
[
[data_json["tradingDay"]]
* len(data_json["dominantPrecipitationCategory"]),
data_json["dominantPrecipitationCategory"],
data_json["dominantPrecipitationValue"],
],
index=["date", "symbol", "fund"],
).T
def jyfm_tools_position_fund_season(symbol="RB", code="05", headers=""):
"""
Jiaoyifamen-Tools-Fund Analysis-Fund Seasonality
https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
:param symbol: specified product
:type symbol: str
:param code: contract expiry month
:type code: str
:param headers: headers with cookies
:type headers: dict
:return: fund seasonality for the specified product and contract
:rtype: pandas.DataFrame
"""
params = {
"type": symbol,
"code": code,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/season"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
data_df = pd.DataFrame(
[
data_json["dataCategory"],
data_json["year2013"],
data_json["year2014"],
data_json["year2015"],
data_json["year2016"],
data_json["year2017"],
data_json["year2018"],
data_json["year2019"],
data_json["year2020"],
],
index=["date", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020"],
).T
return data_df
def jyfm_tools_position_fund_deal(
trade_date="2020-02-24", indicator="期货品种成交量排名", headers=""
):
"""
Jiaoyifamen-Tools-Fund Analysis-Turnover Ranking
https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
:param trade_date: specified trading day
:type trade_date: str
:param indicator: "期货品种成交量排名" (ranking by product) or "期货主力合约成交量排名" (ranking by dominant contract)
:type indicator: str
:param headers: headers with cookies
:type headers: dict
:return: turnover ranking for the specified trading day
:rtype: pandas.DataFrame
"""
params = {
"day": trade_date,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "期货品种成交量排名":
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["turnOverCategory"]),
data_json["turnOverCategory"],
data_json["turnOverValue"],
],
index=["date", "symbol", "fund"],
).T
else:
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["dominantTurnOverCategory"]),
data_json["dominantTurnOverCategory"],
data_json["dominantTurnOverValue"],
],
index=["date", "symbol", "fund"],
).T
# Jiaoyifamen-Tools-Seat Analysis-Position Structure
def jyfm_tools_position_structure(
trade_date="2020-03-02", seat="永安期货", indicator="持仓变化", headers=""
):
"""
Jiaoyifamen-Tools-Seat Analysis-Position Structure
https://www.jiaoyifamen.com/tools/position/seat
:param trade_date: specified trading day
:type trade_date: str
:param seat: broker name, e.g., seat="永安期货"
:type seat: str
:param indicator: one of "持仓变化" (position change), "净持仓分布" (net position distribution), "总持仓分布" (total position distribution); the aggregate variants 持仓变化总, 净持仓分布总, 总持仓分布总 are also listed in the source
:type indicator: str
:param headers: headers with cookies
:type headers: dict
:return: position structure of the specified broker on the specified trading day
:rtype: pandas.DataFrame
"""
indicator_dict = {"持仓变化": 1, "净持仓分布": 2, "总持仓分布": 3}
params = {
"seat": seat,
"day": trade_date,
"type": indicator_dict[indicator],
"_": int(time.time() * 1000),
}
url = "https://www.jiaoyifamen.com/tools/position/struct"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "持仓变化":
return pd.DataFrame(data_json["varieties"])
if indicator == "净持仓分布":
return pd.DataFrame(data_json["varieties"])
if indicator == "总持仓分布":
return
|
pd.DataFrame(data_json["varieties"])
|
pandas.DataFrame
|
###############
#
# Transform R to Python Copyright (c) 2019 <NAME> Released under the MIT license
#
###############
import os
import numpy as np
import pystan
import pandas
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
fish_num_climate_4 = pandas.read_csv('4-3-1-fish-num-4.csv')
print(fish_num_climate_4.head())
print(fish_num_climate_4.describe())
sns.scatterplot(
x='temperature',
y='fish_num',
hue='human',
data=fish_num_climate_4
)
plt.show()
fish_num_climate_4_d = pandas.get_dummies(fish_num_climate_4, columns=["human"])
print(fish_num_climate_4_d.head())
fish_num = fish_num_climate_4_d['fish_num']
sample_num = len(fish_num)
temperature = fish_num_climate_4_d['temperature']
# creating teamID
le = LabelEncoder()
le = le.fit(fish_num_climate_4['human'])
fish_num_climate_4['human'] = le.transform(fish_num_climate_4['human'])
sns.scatterplot(
x='temperature',
y='fish_num',
hue='human',
legend="full",
data=fish_num_climate_4
)
plt.show()
human_id = fish_num_climate_4['human'].values
human_num = len(np.unique(human_id))
human_id = (human_id + 1)
print(human_id)
stan_data = {
'N': sample_num,
'fish_num': fish_num,
'temp': temperature,
'human_id': human_id
}
if os.path.exists('4-3-1-poisson-glmm.pkl'):
sm = pickle.load(open('4-3-1-poisson-glmm.pkl', 'rb'))
# sm = pystan.StanModel(file='4-3-1-poisson-glmm.stan')
else:
# a model using prior for mu and sigma.
sm = pystan.StanModel(file='4-3-1-poisson-glmm.stan')
mcmc_result = sm.sampling(
data=stan_data,
seed=1,
chains=4,
iter=2000,
warmup=1000,
thin=1
)
print(mcmc_result)
mcmc_result.plot()
plt.show()
# saving compiled model
if not os.path.exists('4-3-1-poisson-glmm.pkl'):
with open('4-3-1-poisson-glmm.pkl', 'wb') as f:
pickle.dump(sm, f)
mcmc_sample = mcmc_result.extract()
print(mcmc_sample)
# visualization
label_temp = np.arange(10,21)
df =
|
pandas.DataFrame(mcmc_sample, columns=['Intercept', 'b_temp', 'b_human', 'b_temp_human'])
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Import OptionMetrics data.
"""
from __future__ import print_function, division
import os
import zipfile
import numpy as np
import pandas as pd
import datetime as dt
from scipy.interpolate import interp1d
from impvol import lfmoneyness, delta, vega
from datastorage.quandlweb import load_spx
path = os.getenv("HOME") + '/Dropbox/Research/data/OptionMetrics/data/'
# __location__ = os.path.realpath(os.path.join(os.getcwd(),
# os.path.dirname(__file__)))
# path = os.path.join(__location__, path + 'OptionMetrics/data/')
def convert_dates(string):
return dt.datetime.strptime(string, '%d-%m-%Y')
def import_dividends():
"""Import dividends.
"""
zf = zipfile.ZipFile(path + 'SPX_dividend.zip', 'r')
name = zf.namelist()[0]
dividends = pd.read_csv(zf.open(name), converters={'date': convert_dates})
dividends.set_index('date', inplace=True)
dividends.sort_index(inplace=True)
print(dividends.head())
dividends.to_hdf(path + 'dividends.h5', 'dividends')
def import_yield_curve():
"""Import zero yield curve.
"""
zf = zipfile.ZipFile(path + 'yield_curve.zip', 'r')
name = zf.namelist()[0]
yields = pd.read_csv(zf.open(name), converters={'date': convert_dates})
# Remove weird observations
yields = yields[yields['rate'] < 10]
# Fill in the blanks in the yield curve
# yields = interpolate_curve(yields)
yields.rename(columns={'rate': 'riskfree'}, inplace=True)
yields.set_index(['date', 'days'], inplace=True)
yields.sort_index(inplace=True)
print(yields.head())
yields.to_hdf(path + 'yields.h5', 'yields')
def interpolate_curve_group(group):
"""Interpolate yields for one day.
"""
y = np.array(group.riskfree)
x = np.array(group.days)
a = group.days.min()
b = group.days.max()
new_x = np.linspace(a, b, b-a+1).astype(int)
try:
new_y = interp1d(x, y, kind='cubic')(new_x)
except:
new_y = interp1d(x, y, kind='linear')(new_x)
group = pd.DataFrame(new_y, index=pd.Index(new_x, name='days'))
return group
def interpolate_curve(yields):
"""Fill in the blanks in the yield curve.
"""
yields.reset_index(inplace=True)
yields = yields.groupby('date').apply(interpolate_curve_group)
yields = yields.unstack('days')
yields.fillna(method='ffill', axis=1, inplace=True)
yields.fillna(method='bfill', axis=1, inplace=True)
yields = yields.stack('days')
yields.rename(columns={0: 'riskfree'}, inplace=True)
return yields
def import_riskfree():
"""Take the last value of the yield curve as a risk-free rate.
Saves annualized rate in percentage points.
"""
yields = load_yields()
riskfree = yields.groupby(level='date').last()
print(riskfree.head())
riskfree.to_hdf(path + 'riskfree.h5', 'riskfree')
def import_standard_options():
"""Import standardized options.
"""
zf = zipfile.ZipFile(path + 'SPX_standard_options.zip', 'r')
name = zf.namelist()[0]
data = pd.read_csv(zf.open(name), converters={'date': convert_dates})
cols = {'forward_price': 'forward', 'impl_volatility': 'imp_vol'}
data.rename(columns=cols, inplace=True)
data = data.set_index(['cp_flag', 'date', 'days']).sort_index()
print(data.head())
data.to_hdf(path + 'std_options.h5', 'std_options')
def import_vol_surface():
"""Import volatility surface.
Infer risk-free rate directly from data.
"""
zf = zipfile.ZipFile(path + 'SPX_surface.zip', 'r')
name = zf.namelist()[0]
df = pd.read_csv(zf.open(name), converters={'date': convert_dates})
df.loc[:, 'weekday'] = df['date'].apply(lambda x: x.weekday())
# Apply some filters
df = df[df['weekday'] == 2]
df = df[df['days'] <= 365]
df = df.drop('weekday', axis=1)
surface = df#.set_index(['cp_flag', 'date', 'days']).sort_index()
cols = {'impl_volatility': 'imp_vol', 'impl_strike': 'strike',
'impl_premium': 'premium'}
surface.rename(columns=cols, inplace=True)
# TODO : the whole term structure should be imported and merged!
riskfree = load_riskfree().reset_index()
dividends = load_dividends().reset_index()
spx = load_spx().reset_index()
surface = pd.merge(surface, riskfree)
surface = pd.merge(surface, spx)
surface = pd.merge(surface, dividends)
# Adjust riskfree by dividend yield
surface['riskfree'] -= surface['rate']
# Remove percentage point
surface['riskfree'] /= 100
# Replace 'cp_flag' with True/False 'call' variable
surface.loc[:, 'call'] = True
surface.loc[surface['cp_flag'] == 'P', 'call'] = False
# Normalize maturity to being a share of the year
surface['maturity'] = surface['days'] / 365
# Rename columns
surface.rename(columns={'spx': 'price'}, inplace=True)
# Compute lf-moneyness
surface['moneyness'] = lfmoneyness(surface['price'], surface['strike'],
surface['riskfree'],
surface['maturity'])
# Compute option Delta normalized by current price
surface['delta'] = delta(surface['moneyness'], surface['maturity'],
surface['imp_vol'], surface['call'])
# Compute option Vega normalized by current price
surface['vega'] = vega(surface['moneyness'], surface['maturity'],
surface['imp_vol'])
# Sort index
surface.sort_values(by=['date', 'maturity', 'moneyness'], inplace=True)
print(surface.head())
surface.to_hdf(path + 'surface.h5', 'surface')
def import_vol_surface_simple():
"""Import volatility surface. Simple version.
"""
zf = zipfile.ZipFile(path + 'SPX_surface.zip', 'r')
name = zf.namelist()[0]
df = pd.read_csv(zf.open(name), converters={'date': convert_dates})
df.loc[:, 'weekday'] = df['date'].apply(lambda x: x.weekday())
# Apply some filters
df = df[df['weekday'] == 2]
df = df[df['days'] <= 365]
surface = df.drop('weekday', axis=1)
cols = {'impl_volatility': 'imp_vol', 'impl_strike': 'strike',
'impl_premium': 'premium'}
surface.rename(columns=cols, inplace=True)
spx = load_spx().reset_index()
standard_options = load_standard_options()[['forward']].reset_index()
surface = pd.merge(surface, standard_options)
surface = pd.merge(surface, spx)
# Normalize maturity to being a share of the year
surface['maturity'] = surface['days'] / 365
surface['riskfree'] = np.log(surface['forward'] / surface['spx'])
surface['riskfree'] /= surface['maturity']
# Remove percentage point
# Replace 'cp_flag' with True/False 'call' variable
surface.loc[:, 'call'] = True
surface.loc[surface['cp_flag'] == 'P', 'call'] = False
# Rename columns
surface.rename(columns={'spx': 'price'}, inplace=True)
# Compute lf-moneyness
surface['moneyness'] = lfmoneyness(surface['price'], surface['strike'],
surface['riskfree'],
surface['maturity'])
# Compute option Delta normalized by current price
surface['delta'] = delta(surface['moneyness'], surface['maturity'],
surface['imp_vol'], surface['call'])
# Compute option Vega normalized by current price
surface['vega'] = vega(surface['moneyness'], surface['maturity'],
surface['imp_vol'])
# Take out-of-the-money options
calls = surface['call'] & (surface['moneyness'] >= 0)
puts = np.logical_not(surface['call']) & (surface['moneyness'] < 0)
surface = pd.concat([surface[calls], surface[puts]])
# Sort index
surface.sort_values(by=['date', 'maturity', 'moneyness'], inplace=True)
print(surface.head())
surface.to_hdf(path + 'surface.h5', 'surface')
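# Presumed one-off refresh workflow (my assumption about how the import_* and load_*
# helpers fit together): the import_* functions write HDF5 caches under `path`, which
# the load_* functions below read back.
def _refresh_caches():
    import_dividends()
    import_yield_curve()
    import_riskfree()
    import_standard_options()
    import_vol_surface_simple()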
def load_dividends():
"""Load dividends from the disk (annualized, percentage points).
Typical output:
rate
date
1996-01-04 2.460
1996-01-05 2.492
1996-01-08 2.612
1996-01-09 2.455
1996-01-10 2.511
"""
return pd.read_hdf(path + 'dividends.h5', 'dividends')
def load_yields():
"""Load zero yield curve from the disk (annualized, percentage points).
Typical output:
riskfree
date days
1996-01-02 9 5.763
15 5.746
50 5.673
78 5.609
169 5.474
"""
return pd.read_hdf(path + 'yields.h5', 'yields')
def load_riskfree():
"""Load risk-free rate (annualized, percentage points).
Returns
-------
DataFrame
Annualized rate in percentage points
Typical output:
riskfree
date
1996-01-02 6.138
1996-01-03 6.125
1996-01-04 6.142
1996-01-05 6.219
1996-01-08 6.220
"""
return pd.read_hdf(path + 'riskfree.h5', 'riskfree')
def load_standard_options():
"""Load standardized options from the disk.
Typical output:
forward premium imp_vol
cp_flag date days
C 1996-01-04 30 619.345 7.285 0.103
60 620.935 10.403 0.105
91 622.523 12.879 0.105
122 624.080 16.156 0.114
152 625.545 18.259 0.116
"""
return pd.read_hdf(path + 'std_options.h5', 'std_options')
def load_vol_surface():
"""Load volatility surface from the disk.
Typical output:
date days imp_vol strike premium cp_flag forward price
12 1996-01-10 30 0.175 576.030 3.431 P 600.043 598.480
11 1996-01-10 30 0.165 581.866 4.336 P 600.043 598.480
10 1996-01-10 30 0.157 586.685 5.257 P 600.043 598.480
9 1996-01-10 30 0.150 590.755 6.230 P 600.043 598.480
8 1996-01-10 30 0.146 594.303 7.299 P 600.043 598.480
maturity riskfree call moneyness delta vega
12 0.082 0.032 False -0.041 -0.200 0.080
11 0.082 0.032 False -0.031 -0.251 0.091
10 0.082 0.032 False -0.023 -0.301 0.100
9 0.082 0.032 False -0.016 -0.351 0.106
8 0.082 0.032 False -0.010 -0.401 0.111
"""
return
|
pd.read_hdf(path + 'surface.h5', 'surface')
|
pandas.read_hdf
|
import streamlit as st
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from plotly import express
def accept_user_data():
duration = st.text_input("Enter the duration: ")
start_station = st.text_input("Enter the start area: ")
end_station = st.text_input("Enter the end station: ")
return np.array([duration, start_station, end_station]).reshape(1,-1)
# =================== ML Models Below =================
@st.cache(suppress_st_warning=True)
def decisionTree(X_train, X_test, Y_train, Y_test):
# training the model
tree = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
tree.fit(X_train, Y_train)
Y_pred = tree.predict(X_test)
score = accuracy_score(Y_test, Y_pred) * 100
report = classification_report(Y_test, Y_pred)
return score, report, tree
@st.cache(suppress_st_warning=True)
def neuralNet(X_train, X_test, Y_train, Y_test):
# scaling data
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# start classifier
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5,2), random_state=1)
clf.fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
score = accuracy_score(Y_test, Y_pred) * 100
report = classification_report(Y_test, Y_pred)
return score, report, clf
@st.cache
def Knn_Classifier(X_train, X_test, Y_train, Y_test):
clf = KNeighborsClassifier(n_neighbors=5)
clf.fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
score = accuracy_score(Y_test, Y_pred) * 100
report = classification_report(Y_test, Y_pred)
return score, report, clf
# =================== ML Models End =================
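# Usage sketch (mine): given a feature matrix X and labels y prepared elsewhere in the
# app, the cached model helpers above are called on a train/test split like this.
def example_train(X, y):
    X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    score, report, tree = decisionTree(X_train, X_test, Y_train, Y_test)
    return score, report, tree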
@st.cache
def showMap():
plotData = pd.read_csv('dataset-2010-latlong.csv')
data = pd.DataFrame()
data['lat'] = plotData['lat']
data['lon'] = plotData['lon']
return data
# enable st caching
@st.cache
def loadData():
return
|
pd.read_csv('dataset-2010.csv')
|
pandas.read_csv
|
from copy import Error
import os
from typing import Type
from ase.parallel import paropen, parprint, world
from ase.db import connect
from ase.io import read
from glob import glob
import numpy as np
from gpaw import restart
import BASIC.optimizer as opt
import sys
from ase.constraints import FixAtoms,FixedLine
import pandas as pd
from BASIC.utils import detect_cluster
def pbc_checker(slab):
anlges_arg=[angle != 90.0000 for angle in np.round(slab.cell.angles(),decimals=4)[:2]]
if np.any(anlges_arg):
slab.pbc=[1,1,1]
else:
slab.pbc=[1,1,0]
# def detect_cluster(slab,tol=0.1):
# n=len(slab)
# dist_matrix=np.zeros((n, n))
# slab_c=np.sort(slab.get_positions()[:,2])
# for i, j in itertools.combinations(list(range(n)), 2):
# if i != j:
# cdist = np.abs(slab_c[i] - slab_c[j])
# dist_matrix[i, j] = cdist
# dist_matrix[j, i] = cdist
# condensed_m = squareform(dist_matrix)
# z = linkage(condensed_m)
# clusters = fcluster(z, tol, criterion="distance")
# return slab_c,list(clusters)
def apply_magmom(opt_slab_magmom,ads_slab,adatom=1):
if adatom == 1:
magmom_ls=np.append(opt_slab_magmom,0)
elif adatom == 2:
magmom_ls=np.append(opt_slab_magmom,0)
magmom_ls=np.append(magmom_ls,0)
ads_slab.set_initial_magnetic_moments(magmom_ls)
return ads_slab
def get_clean_slab(element,
miller_index,
report_location,
target_dir,
size,
fix_layer,
solver_fmax,
solver_maxstep,
gpaw_calc):
f = paropen(report_location,'a')
parprint('Start clean slab calculation: ', file=f)
if size != '1x1':
clean_slab_gpw_path=target_dir+'/clean_slab/slab.gpw'
if os.path.isfile(clean_slab_gpw_path):
opt_slab, pre_calc = restart(clean_slab_gpw_path)
pre_kpts=list(pre_calc.__dict__['parameters']['kpts'])
set_kpts=list(gpaw_calc.__dict__['parameters']['kpts'])
if pre_kpts == set_kpts:
parprint('\t'+size+' clean slab is pre-calculated with kpts matched.',file=f)
else:
parprint('\t'+size+' clean slab pre-calculated has different kpts. Clean slab needs to re-calculate.', file=f)
parprint('\t'+'Calculating '+size+' clean slab...',file=f)
clean_slab=read(target_dir+'/clean_slab/input.traj')
opt_slab=clean_slab_calculator(clean_slab,fix_layer,gpaw_calc,target_dir,solver_fmax,solver_maxstep)
else:
parprint('\t'+size+' clean slab is not pre-calculated.',file=f)
parprint('\t'+'Calculating '+size+' clean slab...',file=f)
interm_gpw=target_dir+'/clean_slab/slab_interm.gpw'
if os.path.isfile(interm_gpw):
clean_slab, gpaw_calc=restart(interm_gpw)
else:
clean_slab=read(target_dir+'/clean_slab/input.traj')
opt_slab=clean_slab_calculator(clean_slab,fix_layer,gpaw_calc,target_dir,solver_fmax,solver_maxstep)
else:
parprint('\tslab size is 1x1. Clean slab calculation is skipped.', file=f)
opt_slab=connect('final_database'+'/'+'surf.db').get_atoms(simple_name=element+'_'+miller_index)
parprint(' ',file=f)
f.close()
return opt_slab.get_potential_energy(), opt_slab.get_magnetic_moments()
def clean_slab_calculator(clean_slab,
fix_layer,
gpaw_calc,
target_dir,
solver_fmax,
solver_maxstep,
fix_option='bottom'):
pbc_checker(clean_slab)
calc_dict=gpaw_calc.__dict__['parameters']
if calc_dict['spinpol']:
clean_slab.set_initial_magnetic_moments([0]*len(clean_slab))
slab_c_coord,cluster=detect_cluster(clean_slab)
if fix_option == 'bottom':
unique_cluster_index=sorted(set(cluster), key=cluster.index)[fix_layer-1]
max_height_fix=max(slab_c_coord[cluster==unique_cluster_index])
fix_mask=clean_slab.positions[:,2]<(max_height_fix+0.05) #add 0.05 Ang to make sure all bottom fixed
fixed_atom_constrain=FixAtoms(mask=fix_mask)
clean_slab.set_constraint(fixed_atom_constrain)
clean_slab.set_calculator(gpaw_calc)
opt.relax(clean_slab,target_dir+'/clean_slab',fmax=solver_fmax,maxstep=solver_maxstep)
return clean_slab
def adsorption_energy_calculator(traj_file,
report_location,
opt_slab_energy,
adatom_pot_energy,
opt_slab_magmom,
gpaw_calc,
solver_fmax,
solver_maxstep,
calc_type,
fix_layer,
fix_option = 'bottom'):
interm_gpw='/'.join(traj_file.split('/')[:-1]+['slab_interm.gpw'])
if os.path.isfile(interm_gpw):
ads_slab, gpaw_calc=restart(interm_gpw)
else:
ads_slab=read(traj_file)
pbc_checker(ads_slab)
calc_dict=gpaw_calc.__dict__['parameters']
if calc_dict['spinpol']:
ads_slab=apply_magmom(opt_slab_magmom,ads_slab)
fixed_line_constrain=FixedLine(a=-1,direction=[0,0,1])
slab_c_coord,cluster=detect_cluster(ads_slab)
if fix_option == 'bottom':
unique_cluster_index=sorted(set(cluster), key=cluster.index)[fix_layer-1]
max_height_fix=max(slab_c_coord[cluster==unique_cluster_index])
fix_mask=ads_slab.positions[:,2]<(max_height_fix+0.05) #add 0.05 Ang to make sure all bottom fixed
if calc_type == 'grid':
fixed_atom_constrain=FixAtoms(mask=fix_mask)
ads_slab.set_constraint([fixed_atom_constrain,fixed_line_constrain])
elif calc_type == 'normal' and fix_option == 'bottom':
fixed_atom_constrain=FixAtoms(mask=fix_mask)
ads_slab.set_constraint(fixed_atom_constrain)
ads_slab.set_calculator(gpaw_calc)
location='/'.join(traj_file.split('/')[:-1])
f=paropen(report_location,'a')
parprint('Calculating '+('/'.join(location.split('/')[-2:]))+' adsorption site...',file=f)
f.close()
opt.relax(ads_slab,location,fmax=solver_fmax,maxstep=solver_maxstep)
init_ads_site=traj_file.split('/')[-2]
E_slab_ads=ads_slab.get_potential_energy()
opt_slab_energy=opt_slab_energy
adsorption_energy=E_slab_ads-(opt_slab_energy+adatom_pot_energy)
final_ads_site=list(np.round(ads_slab.get_positions()[-1][:2],decimals=3))
final_ads_site_str='_'.join([str(i) for i in final_ads_site])
return init_ads_site, adsorption_energy, final_ads_site_str
def skip_ads_calculated(report_location,
all_gpw_files,
init_adsorbates_site_lst,
adsorption_energy_lst,
final_adsorbates_site_lst,
opt_slab_energy,
adatom_pot_energy):
f = paropen(report_location,'a')
parprint('Restarting...',file=f)
for gpw_file in all_gpw_files:
location='/'.join(gpw_file.split('/')[:-1])
parprint('Skipping '+('/'.join(location.split('/')[-2:]))+' adsorption site...',file=f)
atoms=restart(gpw_file)[0]
init_adsorbates_site_lst.append(gpw_file.split('/')[-2])
E_slab_ads=atoms.get_potential_energy()
adsorption_energy=E_slab_ads-(opt_slab_energy+adatom_pot_energy)
adsorption_energy_lst.append(adsorption_energy)
final_ads_site=list(np.round(atoms.get_positions()[-1][:2],decimals=3))
final_ads_site_str='_'.join([str(i) for i in final_ads_site])
final_adsorbates_site_lst.append(final_ads_site_str)
parprint(' ',file=f)
f.close()
return init_adsorbates_site_lst,adsorption_energy_lst,final_adsorbates_site_lst
def initialize_report(report_location,gpaw_calc):
calc_dict=gpaw_calc.__dict__['parameters']
if world.rank==0 and os.path.isfile(report_location):
os.remove(report_location)
f = paropen(report_location,'a')
parprint('Initial Parameters:', file=f)
parprint('\t'+'xc: '+calc_dict['xc'],file=f)
parprint('\t'+'h: '+str(calc_dict['h']),file=f)
parprint('\t'+'kpts: '+str(calc_dict['kpts']),file=f)
parprint('\t'+'sw: '+str(calc_dict['occupations']),file=f)
parprint('\t'+'spin polarized: '+str(calc_dict['spinpol']),file=f)
if calc_dict['spinpol']:
parprint('\t'+'magmom: initialize magnetic moment from slab calculation.',file=f)
parprint(' ',file=f)
f.close()
class ads_auto_select:
def __init__(self,
element,
miller_index_tight,
gpaw_calc,
ads,
adatom_pot_energy,
solver_fmax,
solver_max_step,
restart_calc,
size=(1,1), #xy size
fix_layer=2,
fix_option='bottom'):
#initialize variables
size_xy=str(size[0])+'x'+str(size[1])
target_dir='results/'+element+'/'+'ads/'+size_xy+'/'+miller_index_tight
report_location=target_dir+'_autocat_results_report.txt'
all_ads_file_loc=target_dir+'/'+'adsorbates/'+str(ads)+'/'
## TO-DO: need to figure out how to calculate adsorption energy for larger system
# self.gpaw_calc=gpaw_calc
# self.calc_dict=self.gpaw_calc.__dict__['parameters']
# self.ads=ads
# self.all_ads_file_loc=self.target_dir+'/'+'adsorbates/'+str(self.ads)+'/'
# self.adatom_pot_energy=adatom_pot_energy
##generate report
initialize_report(report_location, gpaw_calc)
##compute clean slab energy
opt_slab_energy, opt_slab_magmom=get_clean_slab(element, miller_index_tight,
report_location, target_dir,size_xy,
fix_layer,solver_fmax,solver_max_step,
gpaw_calc)
#opt_slab=self.get_clean_slab()
##start adsorption calculation
adsorption_energy_dict={}
init_adsorbates_site_lst=[]
final_adsorbates_site_lst=[]
adsorption_energy_lst=[]
all_bridge_traj_files=glob(all_ads_file_loc+'bridge/*/input.traj')
all_ontop_traj_files=glob(all_ads_file_loc+'ontop/*/input.traj')
all_hollow_traj_files=glob(all_ads_file_loc+'hollow/*/input.traj')
all_traj_files=all_bridge_traj_files+all_ontop_traj_files+all_hollow_traj_files
all_bridge_gpw_files=glob(all_ads_file_loc+'bridge/*/slab.gpw')
all_ontop_gpw_files=glob(all_ads_file_loc+'ontop/*/slab.gpw')
all_hollow_gpw_files=glob(all_ads_file_loc+'hollow/*/slab.gpw')
all_gpw_files=all_bridge_gpw_files+all_ontop_gpw_files+all_hollow_gpw_files
## restart
if restart_calc and len(all_gpw_files)>=1:
init_adsorbates_site_lst,adsorption_energy_lst,final_adsorbates_site_lst=skip_ads_calculated(report_location,
all_gpw_files,
init_adsorbates_site_lst,
adsorption_energy_lst,
final_adsorbates_site_lst,
opt_slab_energy,
adatom_pot_energy)
all_gpw_files_ads_site=['/'.join(i.split('/')[:-1]) for i in all_gpw_files]
all_traj_files=[i for i in all_traj_files if '/'.join(i.split('/')[:-1]) not in all_gpw_files_ads_site]
for traj_file in all_traj_files:
#init_adsobates_site, adsorption_energy, final_adsorbates_site=self.adsorption_energy_calculator(traj_file,opt_slab)
output_lst=adsorption_energy_calculator(traj_file,report_location,
opt_slab_energy,adatom_pot_energy,
opt_slab_magmom,gpaw_calc,
solver_fmax,solver_max_step,
calc_type='normal',
fix_layer=fix_layer,fix_option = fix_option,
)
init_adsorbates_site_lst.append(output_lst[0])
adsorption_energy_lst.append(output_lst[1])
final_adsorbates_site_lst.append(output_lst[2])
adsorption_energy_dict['init_sites[x_y](Ang)']=init_adsorbates_site_lst
adsorption_energy_dict['final_sites[x_y](Ang)']=final_adsorbates_site_lst
adsorption_energy_dict['adsorption_energy(eV)']=adsorption_energy_lst
ads_df=
|
pd.DataFrame(adsorption_energy_dict)
|
pandas.DataFrame
|
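# --- Hedged aside: the completion above builds the results table with
# pandas.DataFrame from a dict of equal-length lists; a toy equivalent
# (site labels and energies below are hypothetical):
import pandas as pd
ads_df_example = pd.DataFrame({
    'init_sites[x_y](Ang)': ['0.0_0.0', '1.3_0.8'],
    'final_sites[x_y](Ang)': ['0.0_0.0', '1.3_0.9'],
    'adsorption_energy(eV)': [-1.13, -0.97],
})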
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
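# Hedged aside: the 'S'/'E' (or lowercase) argument to Period.asfreq selects the
# first or last sub-period, which is what the assertions above exercise, e.g.:
p = Period(freq='A', year=2007)
assert p.asfreq('D', 'S') == Period('2007-01-01', freq='D')   # start of the year
assert p.asfreq('D', 'E') == Period('2007-12-31', freq='D')   # end of the year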
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
self.assertEqual(ival_Q.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
self.assertEqual(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
self.assertEqual(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
self.assertEqual(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
self.assertEqual(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
self.assertEqual(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
self.assertEqual(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
self.assertEqual(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
self.assertEqual(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
self.assertEqual(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
self.assertEqual(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
self.assertEqual(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
self.assertEqual(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
self.assertEqual(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
self.assertEqual(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
self.assertEqual(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
self.assertEqual(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
self.assertEqual(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
self.assertEqual(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31, hour=23,
minute=59, second=59)
self.assertEqual(ival_M.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
self.assertEqual(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
self.assertEqual(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
self.assertEqual(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
self.assertEqual(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
self.assertEqual(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
self.assertEqual(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
self.assertEqual(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
self.assertEqual(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
self.assertEqual(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
self.assertEqual(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
self.assertEqual(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
self.assertEqual(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start =
|
Period(freq='D', year=2006, month=12, day=30)
|
pandas.Period
|
"""Tests for the sdv.constraints.tabular module."""
import uuid
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and reverse_transform functions + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receive the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
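# Hedged sketch of the idea fit() verifies above: a separator that does not
# occur in the data joins the constrained column names and values (the real
# transform additionally maps each combination to a UUID).
example = pd.DataFrame({'b': ['d', 'e', 'f'], 'c': ['g', 'h', 'i']})
separator = '#'
joint_column = separator.join(['b', 'c'])                 # 'b#c'
joint_values = example['b'] + separator + example['c']    # 'd#g', 'e#h', 'f#i'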
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
assert instance._high_is_scalar is None
assert instance._low_is_scalar is None
assert instance._drop is None
def test___init___all_parameters_passed(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
- drop = 'high'
- high_is_scalar = True
- low_is_scalar = False
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == True
- instance._drop = 'high'
- instance._high_is_scalar = True
- instance._low_is_scalar = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True, drop='high',
high_is_scalar=True, low_is_scalar=False)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop == 'high'
def test_fit__low_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a scalar if ``_low_is_scalar`` is None.
Input:
- Table without ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is True
def test_fit__low_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a column name if ``_low_is_scalar`` is None.
Input:
- Table with ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is False
def test_fit__high_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a scalar if ``_high_is_scalar`` is None.
Input:
- Table without ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is True
def test_fit__high_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a column name if ``_high_is_scalar`` is None.
Input:
- Table with ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is False
def test_fit__high_is_scalar__low_is_scalar_raises_error(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should raise an error if
`_low_is_scalar` and `_high_is_scalar` are true.
Input:
- Table with one column.
Side Effect:
- ``TypeError`` is raised.
"""
# Setup
instance = GreaterThan(low=1, high=2)
# Run / Asserts
table_data = pd.DataFrame({'a': [1, 2, 3]})
with pytest.raises(TypeError):
instance.fit(table_data)
def test_fit__column_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__column_to_reconstruct_default(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `low` if ``instance._high_is_scalar`` is ``True``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__diff_column_one_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_column`` is the single constrained column plus a token (``'a#'``)
"""
# Setup
instance = GreaterThan(low='a', high=3, high_is_scalar=True)
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#'
def test_fit__diff_column_multiple_columns(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two columns.
Side Effect:
- ``_diff_column`` is the two constrained columns joined by a token (``'a#b'``)
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#b'
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_fit_type__high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_high_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_type__low_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False], name='b')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False], name='a')
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
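# Hedged numeric check of the transform described above: for a constant
# distance of 3 between high and low, the diff column is log(3 + 1) = log(4).
low = np.array([1, 2, 3])
high = np.array([4, 5, 6])
diff = np.log((high - low) + 1)          # every entry equals np.log(4) ~ 1.386
assert np.allclose(diff, np.log(4))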
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_float_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type float.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_transform_high_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, high_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_low_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, low_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
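# Hedged numeric check of the reverse mapping described above:
# exp(diff) - 1 added back onto the low column reconstructs high = low + 3.
low = np.array([1, 2, 3])
diff = np.full(3, np.log(4))
high = (np.exp(diff) - 1 + low).round().astype(int)   # array([4, 5, 6])
assert (high == np.array([4, 5, 6])).all()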
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('float')
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
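# Hedged numeric check of the datetime case above: exp(log(1_000_000_001)) - 1
# nanoseconds is one second, the gap that reverse_transform reconstructs.
diff_ns = np.exp(np.log(1_000_000_001)) - 1           # ~1e9 ns (up to float error)
delta = pd.to_timedelta(round(diff_ns), unit='ns')    # Timedelta('0 days 00:00:01')
assert delta == pd.Timedelta(seconds=1)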
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_low_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, low_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_high_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, high_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_low_is_scalar`` variable to ``True`` and the
``_high_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- high = 'a'
- drop = None
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._high_is_scalar = False
- instance._low_is_scalar = True
- instance._drop = None
"""
# Run
instance = Positive(high='a', strict=True, drop=None)
# Asserts
assert instance._low == 0
assert instance._high == 'a'
assert instance._strict is True
assert instance._high_is_scalar is False
assert instance._low_is_scalar is True
assert instance._drop is None
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_high_is_scalar`` variable to ``True`` and the
``_low_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- low = 'a'
- drop = None
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._high_is_scalar = True
- instance._low_is_scalar = False
- instance._drop = None
"""
# Run
instance = Negative(low='a', strict=True, drop=None)
# Asserts
assert instance._low == 'a'
assert instance._high == 0
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop is None
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance
and import the formula to use for the computation.
Input:
- column = 'c'
- formula = new_column
"""
# Setup
column = 'c'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``ColumnFormula.transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
- Table data with desired decimal places (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
- Table data with desired decimal places (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
- Table data without the desired decimal places (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
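# A minimal sketch of the inverse mapping that the ``reverse_transform`` tests below
# rely on conceptually: apply a sigmoid and undo the 0.95/0.025 scaling used in
# ``transform`` above. This is only an illustration; the library's actual
# implementation may differ in details such as clipping or dtype handling.
def inverse_transform(data, low, high):
    """Sketch of the inverse of ``transform`` (illustration only)."""
    data = 1.0 / (1.0 + np.exp(-data))
    return (data - 0.025) / 0.95 * (high - low) + low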
class TestBetween():
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_scalar_column(self):
"""Test the ``Between.transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_column_scalar(self):
"""Test the ``Between.transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_column_column(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``Between.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the valid row and False
for the other two. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=True, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_strict_false(self):
"""Test the ``Between.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the first two rows, and False
for the last one (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=False, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_scalar_column(self):
"""Test the ``Between.is_valid`` method with ``low`` as scalar and ``high`` as a column.
It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the two first rows, False
for the last one. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
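# Illustrative round-trip sketch (not collected by pytest, since the name does not
# start with ``test_``). It assumes ``Between`` and ``pd`` are imported at the top
# of this test module, exactly as the tests above assume.
def _between_round_trip_sketch():
    data = pd.DataFrame({'a': [0.1, 0.5, 0.9], 'b': [4, 5, 6]})
    constraint = Between(column='a', low=0.0, high=1.0,
                         low_is_scalar=True, high_is_scalar=True)
    constraint.fit(data)
    # transform followed by reverse_transform is expected to recover ``data``,
    # possibly with a different column order.
    return constraint.reverse_transform(constraint.transform(data))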
"""
This module contains all of the code needed to update the CBO baseline
projections and documentation and instructions automatically. This code assumes
that format of the websites, spreadsheets, and API we access don't change. If
there is a bug, it's probably because that assumption no longer holds true.
When this happens, modify the code as needed to account for this.
"""
import re
import requests
import pandas as pd
from requests_html import HTMLSession
from pathlib import Path
from datetime import datetime
from jinja2 import Template
CUR_PATH = Path(__file__).resolve().parent
def update_cpim(baseline, text_args):
"""
Update the CPI-M values in the CBO baseline using the BLS API
Parameters
----------
baseline: CBO baseline we're updating
text_args: Dictionary containing the arguments that will be passed to
the documentation template
Returns
-------
baseline: Updated baseline numbers
text_args: Updated dictionary with text arguments to fill in the template
"""
print("Updating CPI-M Values")
url = "https://api.bls.gov/publicAPI/v1/timeseries/data/CUSR0000SAM"
# fetch BLS data from the above url
r = requests.get(url)
# raise an error if the request fails
assert r.status_code == 200
result_json = r.json()
# raise error if request was not successful
assert result_json["status"] == "REQUEST_SUCCEEDED"
# extract the data from the results
data = result_json["Results"]["series"][0]["data"]
df = pd.DataFrame(data)
# convert the values to floats so that the groupby mean only returns
# the mean for the value
df["value"] = df["value"].astype(float)
cpi_mean = df.groupby("year").mean().transpose().round(1)
cpi_mean.index = ["CPIM"]
# open the current baseline to replace the values for the years pulled
# from BLS
baseline.update(cpi_mean)
# find the average difference between CPIM and CPIU for available years
last_year = max(cpi_mean.columns)
first_year = min(baseline.columns)
# retrieve subset of the DataFrame containing actual CPIM values
split_col = int(last_year) - int(first_year) + 1
sub_baseline = baseline[baseline.columns[:split_col]]
# find the difference
mean_diff = (sub_baseline.loc["CPIM"] - sub_baseline.loc["CPIU"]).mean()
# update the future values to reflect the average difference between CPIM and CPIU
new_vals = {}
for col in baseline.columns[split_col:]:
cpiu = baseline[col].loc["CPIU"]
new_val = cpiu + mean_diff
new_vals[col] = [new_val]
future_df = pd.DataFrame(new_vals, index=["CPIM"])
from collections import namedtuple
from pathlib import Path
from typing import List
import arcpy
import numpy as np
import pandas as pd
def create_county_key(name):
"""Creates a properly-formatted county key from name. May rename to match current county names.
Args:
name (str): Name from either the boundaries shapefile's ID field or the district's name field
Returns:
str: name stripped of any prepended 'xyz_', whitespace, and periods.
"""
cleaned_name = name.split('_')[-1].casefold().replace(' ', '').replace('.', '')
if cleaned_name == 'richland':
cleaned_name = 'rich'
return cleaned_name
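# Illustrative examples of create_county_key (documentation only; the names are
# hypothetical and not taken from the real shapefile):
#   >>> create_county_key('uts_Salt Lake')
#   'saltlake'
#   >>> create_county_key('utt_Richland.')   # renamed to match the current county name
#   'rich'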
def nulls_to_nones(row):
"""Replace any pandas null values in a list with None
Args:
row (List): Row of data as a list of arbitrary objects
Returns:
List: List of data appropriately cleaned.
"""
new_row = []
for item in row:
if pd.isnull(item):
new_row.append(None)
else:
new_row.append(item)
return new_row
def pairwise(iterable):
"""Yields an item and the item after it in an iterable
Args:
iterable (iterable): The collection to iterate over
Yields:
tuple: An item and the next item
"""
it = iter(iterable)
a = next(it, None)
for b in it:
yield (a, b)
a = b
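# Example of pairwise (documentation only):
#   >>> list(pairwise([2003, 2010, 2020]))
#   [(2003, 2010), (2010, 2020)]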
def _fix_change_end_dates(change_end_date, shape_key, shape_end, district_end):
#: Change end date scenarios:
#: District exists before shape, shape then changes before district does (Duchesne/dagget)
#: change date should be when shape is created, not when district changes
#: captured properly in existing change end date is next change date - 1
#: Extinct counties: Last record should have a change end date that is max(shp end date, district end date)
#: not captured by change end date is next change date - 1 because there is no next date
#: Shape exists before district
#: captured properly in existing change end date is next change date - 1 because next date is usually
#: district creation date
#: Extant counties: Shape ends at 2003, district does not have an end date
#: captured properly in existing change end date is next change date - 1 because there isn't a next
#: so it gets NaT
# self.change_dates_df['change_end_date'] = self.change_dates_df['change_date'].shift(-1) - pd.Timedelta(days=1)
#: If it's an extinct county (utt in shape name) with a NaT end time, return the latest of the shape or district end
#: utt_richland should pass because its change_date shouldn't be null due to there being a row after for uts_rich
if pd.isnull(change_end_date) and 'utt_' in shape_key:
return max(shape_end, district_end)
#: Otherwise, just return the original date
return change_end_date
class ChangeDate:
"""Holds information about unique dates when either shape or district change
Attributes:
date (datetime): The start date for a new shape or district version
county_name (str): Name of the county, cleaned.
county_version (str): County join key, 'uts_<name>_S<version>'
district_number (str): District number (includes '1S' and '1N')
district_version (str): District join key, '<name>_D<version>'
"""
def __init__(self, date):
self.date = date
self.county_name = 'n/a'
self.county_version = 'n/a'
self.district_number = 'n/a'
self.district_version = 'n/a'
def __repr__(self):
return ', '.join([
str(self.date), self.county_name, self.county_version,
str(self.district_number), self.district_version
])
class State:
"""Contains statewide data and methods to operate over all counties.
Attributes:
counties_df (pd.DataFrame): Dataframe of the historic boundaries
shapefile.
districts_df (pd.DataFrame): Counties and their assigned districts
over time.
counties (List[County]): Objects for each unique county in
districts_df.
combined_change_df (pd.DataFrame): Change dates from all the
different counties.
output_df (pd.DataFrame): Final data with geometries and other info
from counties_df/districts_df merged into change dates.
"""
def __init__(self):
self.all_shapes_df = None
self.all_districts_df = None
self.counties: List[County] = []
self.combined_change_df = None
self.output_df = None
self.districts: List[District] = []
self.district_versions_dict = {}
self.district_versions_df: pd.DataFrame
def load_counties(self, counties_shp):
"""Read historical boundaries shapefile into counties_df
Calculates a key based on ID and VERSION fields.
Args:
counties_shp (str): Path to the historical boundaries shapefile
"""
#: Read counties shapefile in as dict, transform dict to dataframe
print(f'Loading counties from {counties_shp}...')
counties_list = []
county_fields = [f.name for f in arcpy.ListFields(counties_shp)]
county_fields.append('SHAPE@') #: Get the geometry so we can create a new feature class
with arcpy.da.SearchCursor(counties_shp, county_fields) as search_cursor:
for row in search_cursor:
counties_list.append(dict(zip(county_fields, row)))
self.all_shapes_df = pd.DataFrame(counties_list)
#: Create shape key- name_Sx
self.all_shapes_df['shape_key'] = self.all_shapes_df['ID'] + '_S' + self.all_shapes_df['VERSION'].astype(str)
#: Create county key
self.all_shapes_df['county_key'] = self.all_shapes_df['ID'].apply(create_county_key)
def load_districts(self, districts_csv):
"""Read historical district info into districts_df.
Cleans name with clean_name(), parses dates, and calculates district
key from CountyName and Version fields.
Args:
districts_csv (str): Path to the historical districts data
"""
print(f'Loading districts from {districts_csv}...')
self.all_districts_df = pd.read_csv(districts_csv)
#: Create ID column to better match the county shapefile id
self.all_districts_df['county_key'] = self.all_districts_df['CountyName'].apply(create_county_key)
#: Clean up dates
self.all_districts_df['StartDate'] = pd.to_datetime(self.all_districts_df['StartDate'])
self.all_districts_df['EndDate'] = pd.to_datetime(self.all_districts_df['EndDate'])
#: Create district key- name_Dx
self.all_districts_df['district_key'] = (
self.all_districts_df['CountyName'].str.casefold() + '_D' + self.all_districts_df['Version'].astype(str)
)
def setup_counties(self):
"""Get a list of counties from districts_df and load them in as County objects.
"""
county_names = self.all_districts_df['county_key'].unique()
for name in county_names:
print(f'Setting up {name}...')
county = County(name)
county.setup(self.all_shapes_df, self.all_districts_df)
self.counties.append(county)
def verify_counties(self):
"""Run verification code against shapes and districts
"""
for county in self.counties:
print(f'\n--- {county.name} ---')
county.verify_county_districts()
county.verify_county_shapes()
def calc_counties(self):
"""Calculate unique change dates for each county
"""
for county in self.counties:
county.calc_change_dates()
def combine_change_dfs(self, out_path=None):
"""Combine all individual county change dates into one dataframe.
Args:
out_path (str, optional): Path to save master change dates
dataframe as csv if desired.
"""
self.combined_change_df = pd.DataFrame()
#!/usr/bin/python3
__author__ = "<NAME>"
import argparse, os, subprocess, sys
import pandas as pd
from joblib import Parallel, delayed
from datetime import datetime
import multiprocessing
# Mapping the field names between the submitted user metadata spreadsheet and the manifest file fields
spreadsheet_column_mapping = {'study_accession': 'study', 'sample_accession': 'sample', 'experiment_name': 'name', 'sequencing_platform': 'platform', 'sequencing_instrument': 'instrument', 'library_description': 'description'}
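# Hypothetical illustration only: one way this mapping can be applied is to rename
# the user-facing spreadsheet columns to the Webin-CLI manifest field names, e.g.
#     manifest_df = spreadsheet_df.rename(columns=spreadsheet_column_mapping)
# The script's actual handling of the mapping happens further down and may differ.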
def get_args():
"""
Handle script arguments
:return: Script arguments
"""
parser = argparse.ArgumentParser(prog='bulk_webincli.py', formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
+ =========================================================== +
| ENA Webin-CLI Bulk Submission Tool: |
| Python script to handle bulk submission of data through |
| Webin-CLI. |
+ =========================================================== +
""")
parser.add_argument('-w', '--webinCliPath', help='Full path to Webin-CLI jar file. Default: "/webin-cli.jar" (for Docker)', default="/webin-cli.jar", type=str)
parser.add_argument('-u', '--username', help='Webin submission account username (e.g. Webin-XXXXX)', type=str, required=True)
parser.add_argument('-p', '--password', help='password for Webin submission account', type=str, required=True)
parser.add_argument('-g', '--geneticContext', help='Context for submission, options: genome, transcriptome, sequence, reads, taxrefset', choices=['genome', 'transcriptome', 'sequence', 'reads', 'taxrefset'], nargs='?', required=True)
parser.add_argument('-s', '--spreadsheet', help='name of spreadsheet with metadata', type=str, required=True)
parser.add_argument('-d', '--directory', help='parent directory of data files', type=str, required=False)
parser.add_argument('-c', '--centerName', help='FOR BROKER ACCOUNTS ONLY - provide center name', type=str, required=False)
parser.add_argument('-m', '--mode', type=str, help='options for mode are validate/submit', choices=['validate', 'submit'], nargs='?', required=False)
parser.add_argument('-pc', '--parallel', help='Run submissions in parallel and specify the number of cores/threads to use, maximum cores/threads=10', type=int, required=False)
parser.add_argument('-t', '--test', help='specify usage of test submission services', action='store_true')
parser.add_argument('-a', '--ascp', help='Use Aspera (ascp needs to be in path) instead of FTP when uploading files.', action='store_true')
args = parser.parse_args()
if args.mode is None:
args.mode = "validate" # If no mode is provided, default to Webin-CLI validate mode
if args.directory is None:
args.directory=""
if args.centerName is None:
args.centerName=""
if args.parallel is None:
args.parallel = False
elif not 0 < args.parallel <= 10:
print('> ERROR: Invalid number of cores/threads provided. This value should be between 1 and 10 (inclusive).')
sys.exit()
if os.path.exists(args.webinCliPath) is False:
print('> ERROR: Cannot find the Webin CLI jar file. Please set the path to the Webin CLI jar file (--webinCliPath)')
sys.exit()
return args
def spreadsheet_format(spreadsheet_file):
"""
Open the spreadsheet depending on the file-type
:param spreadsheet_file: Path to spreadsheet
:return: spreadsheet: Spreadsheet as a data frame to be manipulated
"""
if spreadsheet_file.endswith(".xlsx") or spreadsheet_file.endswith(".xls"):
spreadsheet = pd.read_excel(spreadsheet_file, header=0, index_col=False)
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#DL_FP Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#parameters for timber plantation. Source: Khasanah et al. (2015)
tf = 201
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df2_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S2_Ac_7y')
df2_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S2_Ac_18y')
df2_Tgr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S2_Tgr_40y')
df2_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S2_Tgr_60y')
dfE2_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E2_Hbr_40y')
t = range(0,tf,1)
c_firewood_energy_S2_Ac7 = df2_Ac7['Firewood_other_energy_use'].values
c_firewood_energy_S2_Ac18 = df2_Ac18['Firewood_other_energy_use'].values
c_firewood_energy_S2_Tgr40 = df2_Tgr40['Firewood_other_energy_use'].values
c_firewood_energy_S2_Tgr60 = df2_Tgr60['Firewood_other_energy_use'].values
c_firewood_energy_E2_Hbr40 = dfE2_Hbr40['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfE2 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E2_Hbr_40y')
c_pellets_Hbr_40y = dfE2['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#S2_Ac_7y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S2_Ac_7y')
tf = 201
t = np.arange(tf)
def decomp_S2_Ac_7y(t,remainAGB_S2_Ac_7y):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S2_Ac_7y
#set zero matrix
output_decomp_S2_Ac_7y = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S2_Ac_7y in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S2_Ac_7y[i:,i] = decomp_S2_Ac_7y(t[:len(t)-i],remain_part_S2_Ac_7y)
print(output_decomp_S2_Ac_7y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S2_Ac_7y = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S2_Ac_7y[:,i] = np.diff(output_decomp_S2_Ac_7y[:,i])
i = i + 1
print(subs_matrix_S2_Ac_7y[:,:4])
print(len(subs_matrix_S2_Ac_7y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S2_Ac_7y = subs_matrix_S2_Ac_7y.clip(max=0)
print(subs_matrix_S2_Ac_7y[:,:4])
#make the results as absolute values
subs_matrix_S2_Ac_7y = abs(subs_matrix_S2_Ac_7y)
print(subs_matrix_S2_Ac_7y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S2_Ac_7y = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S2_Ac_7y)
subs_matrix_S2_Ac_7y = np.vstack((zero_matrix_S2_Ac_7y, subs_matrix_S2_Ac_7y))
print(subs_matrix_S2_Ac_7y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S2_Ac_7y = (tf,1)
decomp_tot_S2_Ac_7y = np.zeros(matrix_tot_S2_Ac_7y)
i = 0
while i < tf:
decomp_tot_S2_Ac_7y[:,0] = decomp_tot_S2_Ac_7y[:,0] + subs_matrix_S2_Ac_7y[:,i]
i = i + 1
print(decomp_tot_S2_Ac_7y[:,0])
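#NOTE: illustrative helper only (a refactoring sketch, not called by this script).
#The block above is repeated almost verbatim for every scenario below; the same
#steps can be written once, which also documents what each step does.
def yearly_decomp_emissions(remain_agb_values, a, b, tf=201):
    """Sketch: turn a C_remainAGB series into total yearly decomposition emissions.
    Mirrors the per-scenario blocks in this script: build the decomposition matrix,
    difference it year over year, keep only the losses, and sum across cohorts."""
    t = np.arange(tf)
    output = np.zeros((tf, len(remain_agb_values)))
    for i, remain in enumerate(remain_agb_values):
        output[i:, i] = (1 - (1 - np.exp(-a * t[:tf - i])) ** b) * remain
    subs = np.diff(output, axis=0)            # year-over-year change per cohort
    subs = np.abs(subs.clip(max=0))           # keep losses only, as positive values
    subs = np.vstack((np.zeros((1, len(remain_agb_values))), subs))  # re-align years
    return subs.sum(axis=1)                   # total yearly emissions, length tf
#e.g. for the scenario above, yearly_decomp_emissions(df['C_remainAGB'].values, a, b)
#is expected to reproduce decomp_tot_S2_Ac_7y[:, 0].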
#S2_Ac_18y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S2_Ac_18y')
tf = 201
t = np.arange(tf)
def decomp_S2_Ac_18y(t,remainAGB_S2_Ac_18y):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S2_Ac_18y
#set zero matrix
output_decomp_S2_Ac_18y = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S2_Ac_18y in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S2_Ac_18y[i:,i] = decomp_S2_Ac_18y(t[:len(t)-i],remain_part_S2_Ac_18y)
print(output_decomp_S2_Ac_18y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S2_Ac_18y = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S2_Ac_18y[:,i] = np.diff(output_decomp_S2_Ac_18y[:,i])
i = i + 1
print(subs_matrix_S2_Ac_18y[:,:4])
print(len(subs_matrix_S2_Ac_18y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S2_Ac_18y = subs_matrix_S2_Ac_18y.clip(max=0)
print(subs_matrix_S2_Ac_18y[:,:4])
#make the results as absolute values
subs_matrix_S2_Ac_18y = abs(subs_matrix_S2_Ac_18y)
print(subs_matrix_S2_Ac_18y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S2_Ac_18y = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S2_Ac_18y)
subs_matrix_S2_Ac_18y = np.vstack((zero_matrix_S2_Ac_18y, subs_matrix_S2_Ac_18y))
print(subs_matrix_S2_Ac_18y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S2_Ac_18y = (tf,1)
decomp_tot_S2_Ac_18y = np.zeros(matrix_tot_S2_Ac_18y)
i = 0
while i < tf:
decomp_tot_S2_Ac_18y[:,0] = decomp_tot_S2_Ac_18y[:,0] + subs_matrix_S2_Ac_18y[:,i]
i = i + 1
print(decomp_tot_S2_Ac_18y[:,0])
#S2_Tgr_40y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S2_Tgr_40y')
tf = 201
t = np.arange(tf)
def decomp_S2_Tgr_40y(t,remainAGB_S2_Tgr_40y):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S2_Tgr_40y
#set zero matrix
output_decomp_S2_Tgr_40y = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S2_Tgr_40y in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S2_Tgr_40y[i:,i] = decomp_S2_Tgr_40y(t[:len(t)-i],remain_part_S2_Tgr_40y)
print(output_decomp_S2_Tgr_40y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S2_Tgr_40y = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S2_Tgr_40y[:,i] = np.diff(output_decomp_S2_Tgr_40y[:,i])
i = i + 1
print(subs_matrix_S2_Tgr_40y[:,:4])
print(len(subs_matrix_S2_Tgr_40y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S2_Tgr_40y = subs_matrix_S2_Tgr_40y.clip(max=0)
print(subs_matrix_S2_Tgr_40y[:,:4])
#make the results as absolute values
subs_matrix_S2_Tgr_40y = abs(subs_matrix_S2_Tgr_40y)
print(subs_matrix_S2_Tgr_40y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S2_Tgr_40y = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S2_Tgr_40y)
subs_matrix_S2_Tgr_40y = np.vstack((zero_matrix_S2_Tgr_40y, subs_matrix_S2_Tgr_40y))
print(subs_matrix_S2_Tgr_40y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S2_Tgr_40y = (tf,1)
decomp_tot_S2_Tgr_40y = np.zeros(matrix_tot_S2_Tgr_40y)
i = 0
while i < tf:
decomp_tot_S2_Tgr_40y[:,0] = decomp_tot_S2_Tgr_40y[:,0] + subs_matrix_S2_Tgr_40y[:,i]
i = i + 1
print(decomp_tot_S2_Tgr_40y[:,0])
#S2_Tgr_60y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
tf = 201
t = np.arange(tf)
def decomp_S2_Tgr_60y(t,remainAGB_S2_Tgr_60y):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S2_Tgr_60y
#set zero matrix
output_decomp_S2_Tgr_60y = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S2_Tgr_60y in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S2_Tgr_60y[i:,i] = decomp_S2_Tgr_60y(t[:len(t)-i],remain_part_S2_Tgr_60y)
print(output_decomp_S2_Tgr_60y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S2_Tgr_60y = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S2_Tgr_60y[:,i] = np.diff(output_decomp_S2_Tgr_60y[:,i])
i = i + 1
print(subs_matrix_S2_Tgr_60y[:,:4])
print(len(subs_matrix_S2_Tgr_60y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S2_Tgr_60y = subs_matrix_S2_Tgr_60y.clip(max=0)
print(subs_matrix_S2_Tgr_60y[:,:4])
#make the results as absolute values
subs_matrix_S2_Tgr_60y = abs(subs_matrix_S2_Tgr_60y)
print(subs_matrix_S2_Tgr_60y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S2_Tgr_60y = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S2_Tgr_60y)
subs_matrix_S2_Tgr_60y = np.vstack((zero_matrix_S2_Tgr_60y, subs_matrix_S2_Tgr_60y))
print(subs_matrix_S2_Tgr_60y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S2_Tgr_60y = (tf,1)
decomp_tot_S2_Tgr_60y = np.zeros(matrix_tot_S2_Tgr_60y)
i = 0
while i < tf:
decomp_tot_S2_Tgr_60y[:,0] = decomp_tot_S2_Tgr_60y[:,0] + subs_matrix_S2_Tgr_60y[:,i]
i = i + 1
print(decomp_tot_S2_Tgr_60y[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E2_Hbr_40y')
tf = 201
t = np.arange(tf)
def decomp_E2_Hbr_40y(t,remainAGB_E2_Hbr_40y):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E2_Hbr_40y
#set zero matrix
output_decomp_E2_Hbr_40y = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E2_Hbr_40y in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E2_Hbr_40y[i:,i] = decomp_E2_Hbr_40y(t[:len(t)-i],remain_part_E2_Hbr_40y)
print(output_decomp_E2_Hbr_40y[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E2_Hbr_40y = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_E2_Hbr_40y[:,i] = np.diff(output_decomp_E2_Hbr_40y[:,i])
i = i + 1
print(subs_matrix_E2_Hbr_40y[:,:4])
print(len(subs_matrix_E2_Hbr_40y))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E2_Hbr_40y = subs_matrix_E2_Hbr_40y.clip(max=0)
print(subs_matrix_E2_Hbr_40y[:,:4])
#make the results as absolute values
subs_matrix_E2_Hbr_40y = abs(subs_matrix_E2_Hbr_40y)
print(subs_matrix_E2_Hbr_40y[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E2_Hbr_40y = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E2_Hbr_40y)
subs_matrix_E2_Hbr_40y = np.vstack((zero_matrix_E2_Hbr_40y, subs_matrix_E2_Hbr_40y))
print(subs_matrix_E2_Hbr_40y[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E2_Hbr_40y = (tf,1)
decomp_tot_E2_Hbr_40y = np.zeros(matrix_tot_E2_Hbr_40y)
i = 0
while i < tf:
decomp_tot_E2_Hbr_40y[:,0] = decomp_tot_E2_Hbr_40y[:,0] + subs_matrix_E2_Hbr_40y[:,i]
i = i + 1
print(decomp_tot_E2_Hbr_40y[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_S2_Ac_7y,label='Ac_7y')
plt.plot(t,decomp_tot_S2_Ac_18y,label='Ac_18y')
plt.plot(t,decomp_tot_S2_Tgr_40y,label='Tgr_40y')
plt.plot(t,decomp_tot_S2_Tgr_60y,label='Tgr_60y')
plt.plot(t,decomp_tot_E2_Hbr_40y,label='E_Hbr_40y')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df2_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S2_Ac_7y')
import logging
from typing import List, Union
import pandas as pd
from geopandas import GeoSeries
from sklearn.base import TransformerMixin
from tqdm import tqdm
from coord2vec.common.itertools import flatten
from coord2vec.common.parallel.multiproc_util import parmap
from coord2vec.feature_extraction.feature import Feature
from coord2vec.feature_extraction.feature_table import GEOM_WKT
from coord2vec.feature_extraction.feature_utils import load_features_using_geoms, save_features_to_db
from coord2vec.feature_extraction.osm.postgres_feature_factory import PostgresFeatureFactory
class FeaturesBuilder(TransformerMixin):
"""
A data class for choosing the desired features
"""
def __init__(self, features: List[Union[Feature, List[Feature]]], cache_table: str = None):
"""
features to be used in this builder
Args:
features: a list of features
cache_table: Optional, if specified will look/save calculated features in the cache
"""
self.features = flatten(features)
self.cache_table = cache_table
@property
def all_feat_names(self) -> List[str]:
return flatten([feat.feature_names for feat in self.features])
def transform(self, input_gs: GeoSeries, use_cache: bool = True) -> pd.DataFrame:
"""
extract the desired features on desired geometries
Args:
input_gs: a GeoSeries with the desired geometries
use_cache: if set and self.cache_table is filled will load/save the features to the cache
Returns:
a pandas dataframe, with columns as features, and rows as the geometries in input_gs
"""
assert len(input_gs.apply(lambda p: p.wkt).unique()) == len(
input_gs), "Shouldn't have duplicates when transform"
required_feats, loaded_feats_dfs = self.features, []
if use_cache:
logging.debug(f"Starting load from cache for {len(input_gs)} objects")
required_feats, loaded_feats_dfs = self.load_from_cache(self.features, input_gs)
if len(required_feats) == 0:
logging.debug("loaded all from cache!")
return pd.concat(loaded_feats_dfs, axis=1) # append by column
else:
logging.debug(f"loaded from cache {len(loaded_feats_dfs)}/{len(self.features)}")
else:
logging.debug(f"Don't load from cache")
feature_factory = PostgresFeatureFactory(required_feats, input_gs=input_gs)
with feature_factory:
features_gs_list = parmap(lambda feature: feature.extract(input_gs), feature_factory.features,
use_tqdm=True, desc=f"Calculating Features for {len(input_gs)} geoms", unit='feature', leave=False)
# TODO: if want, extract_object_set
all_features_df = pd.concat(features_gs_list + loaded_feats_dfs, axis=1)
import argparse
import datetime
import logging
import os
import re
import shutil
import subprocess
import time
import synapseclient
import synapseutils
import pandas as pd
from . import process_functions
from . import database_to_staging as db_to_staging
from . import create_case_lists
from . import dashboard_table_updater
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def storeFile(syn, filePath, parentId, anonymizeCenterDf,
genie_version, name=None):
#process.center_anon(filePath, anonymizeCenterDf)
if name is None:
name = os.path.basename(filePath)
return(syn.store(synapseclient.File(filePath, name=name, parent = parentId, versionComment=genie_version)))
#process.center_convert_back(filePath, anonymizeCenterDf)
#This is the only filter that returns mutation columns to keep
def commonVariantFilter(mafDf):
mafDf['FILTER'] = mafDf['FILTER'].fillna("")
toKeep = ["common_variant" not in i for i in mafDf['FILTER']]
mafDf = mafDf[toKeep]
return(mafDf)
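# Illustrative example (documentation only, not executed):
#   >>> demo = pd.DataFrame({'FILTER': ['PASS', 'common_variant;clustered_events'],
#   ...                      'Tumor_Sample_Barcode': ['S1', 'S2']})
#   >>> commonVariantFilter(demo)['Tumor_Sample_Barcode'].tolist()
#   ['S1']
# Rows whose FILTER string mentions "common_variant" are dropped; NaN FILTERs are kept.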
def consortiumToPublic(syn, processingDate, genie_version, releaseId, databaseSynIdMappingDf, publicReleaseCutOff=365, staging=False):
ANONYMIZE_CENTER = syn.tableQuery('SELECT * FROM syn10170510')
ANONYMIZE_CENTER_DF = ANONYMIZE_CENTER.asDataFrame()
CNA_PATH = os.path.join(db_to_staging.GENIE_RELEASE_DIR,"data_CNA_%s.txt" % genie_version)
CLINICAL_PATH = os.path.join(db_to_staging.GENIE_RELEASE_DIR,'data_clinical_%s.txt' % genie_version)
CLINICAL_SAMPLE_PATH = os.path.join(db_to_staging.GENIE_RELEASE_DIR,'data_clinical_sample_%s.txt' % genie_version)
CLINICAL_PATIENT_PATH = os.path.join(db_to_staging.GENIE_RELEASE_DIR,'data_clinical_patient_%s.txt' % genie_version)
DATA_GENE_PANEL_PATH = os.path.join(db_to_staging.GENIE_RELEASE_DIR,'data_gene_matrix_%s.txt' % genie_version)
MUTATIONS_PATH = os.path.join(db_to_staging.GENIE_RELEASE_DIR,'data_mutations_extended_%s.txt' % genie_version)
FUSIONS_PATH = os.path.join(db_to_staging.GENIE_RELEASE_DIR,'data_fusions_%s.txt' % genie_version)
SEG_PATH = os.path.join(db_to_staging.GENIE_RELEASE_DIR,'genie_public_data_cna_hg19_%s.seg' % genie_version)
COMBINED_BED_PATH = os.path.join(db_to_staging.GENIE_RELEASE_DIR,'genie_combined_%s.bed' % genie_version)
if not os.path.exists(db_to_staging.GENIE_RELEASE_DIR):
os.mkdir(db_to_staging.GENIE_RELEASE_DIR)
if not os.path.exists(db_to_staging.CASE_LIST_PATH):
os.mkdir(db_to_staging.CASE_LIST_PATH)
# if staging:
# #public release staging
# PUBLIC_RELEASE_PREVIEW = "syn7871696"
# PUBLIC_RELEASE_PREVIEW_CASELIST = "syn9689659"
# else:
#public release preview
PUBLIC_RELEASE_PREVIEW = databaseSynIdMappingDf['Id'][databaseSynIdMappingDf['Database'] == 'public'].values[0]
PUBLIC_RELEASE_PREVIEW_CASELIST = db_to_staging.find_caselistid(syn, PUBLIC_RELEASE_PREVIEW)
##############################################################################################################################
## Sponsored projects filter
##############################################################################################################################
## if before release date -> go into staging consortium
## if after date -> go into public
# sponsoredReleaseDate = syn.tableQuery('SELECT * FROM syn8545108')
# sponsoredReleaseDateDf = sponsoredReleaseDate.asDataFrame()
# sponsoredProjectSamples = syn.tableQuery('SELECT * FROM syn8545106')
# sponsoredProjectSamplesDf = sponsoredProjectSamples.asDataFrame()
# sponsoredProjectsDf = sponsoredProjectSamplesDf.merge(sponsoredReleaseDateDf, left_on="sponsoredProject", right_on="sponsoredProjects")
# dates = sponsoredProjectsDf['releaseDate'].apply(lambda date: datetime.datetime.strptime(date, '%b-%Y'))
# publicReleaseSamples = sponsoredProjectsDf['genieSampleId'][dates < processingDate]
##############################################################################################################################
# SEQ_DATE filter
# Jun-2015, given processing date (today) -> public release (processing date - Jun-2015 > 12 months)
consortiumReleaseWalk = synapseutils.walk(syn, releaseId)
consortiumRelease = next(consortiumReleaseWalk)
clinical = [syn.get(synid, followLink=True) for filename, synid in consortiumRelease[2] if filename == "data_clinical.txt"][0]
gene_matrix = [syn.get(synid, followLink=True) for filename, synid in consortiumRelease[2] if filename == "data_gene_matrix.txt"][0]
clinicalDf = pd.read_csv(clinical.path, sep="\t", comment="#")
gene_matrixdf = pd.read_csv(gene_matrix.path, sep="\t")
removeForPublicSamples = process_functions.seqDateFilter(clinicalDf,processingDate,publicReleaseCutOff)
#comment back in when public release filter back on
#publicReleaseSamples = publicReleaseSamples.append(keepForPublicSamples)
#Make sure all null oncotree codes are removed
clinicalDf = clinicalDf[~clinicalDf['ONCOTREE_CODE'].isnull()]
publicReleaseSamples = clinicalDf.SAMPLE_ID[~clinicalDf.SAMPLE_ID.isin(removeForPublicSamples)]
logger.info("SEQ_DATES for public release: " + ", ".join(set(clinicalDf.SEQ_DATE[clinicalDf.SAMPLE_ID.isin(publicReleaseSamples)].astype(str))))
#Clinical release scope filter
#If consortium -> Don't release to public
clinicalReleaseScope = syn.tableQuery("SELECT * FROM syn8545211 where releaseScope = 'public'")
publicRelease = clinicalReleaseScope.asDataFrame()
allClin = clinicalDf[clinicalDf['SAMPLE_ID'].isin(publicReleaseSamples)]
allClin.to_csv(CLINICAL_PATH, sep="\t", index=False)
gene_matrixdf = gene_matrixdf[gene_matrixdf['SAMPLE_ID'].isin(publicReleaseSamples)]
gene_matrixdf.to_csv(DATA_GENE_PANEL_PATH,sep="\t",index=False)
storeFile(syn, DATA_GENE_PANEL_PATH, PUBLIC_RELEASE_PREVIEW, ANONYMIZE_CENTER_DF, genie_version, name="data_gene_matrix.txt")
storeFile(syn, CLINICAL_PATH, PUBLIC_RELEASE_PREVIEW, ANONYMIZE_CENTER_DF, genie_version, name="data_clinical.txt")
create_case_lists.main(CLINICAL_PATH, DATA_GENE_PANEL_PATH, db_to_staging.CASE_LIST_PATH, "genie_public")
caseListFiles = os.listdir(db_to_staging.CASE_LIST_PATH)
caseListEntities = []
for casePath in caseListFiles:
casePath = os.path.join(db_to_staging.CASE_LIST_PATH, casePath)
caseListEntities.append(storeFile(syn, casePath, PUBLIC_RELEASE_PREVIEW_CASELIST, ANONYMIZE_CENTER_DF, genie_version))
#Grab mapping table to fill in clinical headers
mapping_table = syn.tableQuery('SELECT * FROM syn9621600')
mapping = mapping_table.asDataFrame()
genePanelEntities = []
for entName, entId in consortiumRelease[2]:
if "data_linear" in entName or "meta_" in entName:
continue
elif entName == "data_clinical.txt":
patientCols = publicRelease['fieldName'][publicRelease['level'] == "patient"].tolist()
sampleCols = ["PATIENT_ID"]
sampleCols.extend(publicRelease['fieldName'][publicRelease['level'] == "sample"].tolist())
#clinicalDf is defined on line 36
# clinicalDf['AGE_AT_SEQ_REPORT'] = [int(math.floor(int(float(i))/365.25)) if process.checkInt(i) else i for i in clinicalDf['AGE_AT_SEQ_REPORT']]
# clinicalDf['AGE_AT_SEQ_REPORT'][clinicalDf['AGE_AT_SEQ_REPORT'] == ">32485"] = ">89"
# clinicalDf['AGE_AT_SEQ_REPORT'][clinicalDf['AGE_AT_SEQ_REPORT'] == "<6570"] = "<18"
clinicalDf = clinicalDf[clinicalDf['SAMPLE_ID'].isin(publicReleaseSamples)]
#Delete columns that are private scope
# for private in privateRelease:
# del clinicalDf[private]
process_functions.addClinicalHeaders(clinicalDf, mapping, patientCols, sampleCols, CLINICAL_SAMPLE_PATH, CLINICAL_PATIENT_PATH)
storeFile(syn, CLINICAL_SAMPLE_PATH, PUBLIC_RELEASE_PREVIEW, ANONYMIZE_CENTER_DF, genie_version, name="data_clinical_sample.txt")
storeFile(syn, CLINICAL_PATIENT_PATH, PUBLIC_RELEASE_PREVIEW, ANONYMIZE_CENTER_DF, genie_version, name="data_clinical_patient.txt")
elif "mutation" in entName:
mutation = syn.get(entId, followLink=True)
mutationDf = pd.read_csv(mutation.path, sep="\t", comment="#")
mutationDf = commonVariantFilter(mutationDf)
mutationDf['FILTER'] = "PASS"
mutationDf = mutationDf[mutationDf['Tumor_Sample_Barcode'].isin(publicReleaseSamples)]
text = process_functions.removeFloat(mutationDf)
with open(MUTATIONS_PATH, 'w') as f:
f.write(text)
storeFile(syn, MUTATIONS_PATH, PUBLIC_RELEASE_PREVIEW, ANONYMIZE_CENTER_DF, genie_version, name="data_mutations_extended.txt")
elif "fusion" in entName:
fusion = syn.get(entId, followLink=True)
fusionDf = pd.read_csv(fusion.path, sep="\t")
#both data sets are used in this application
#import matplotlib
#import matplotlib.mlab as mlab
#import matplotlib.gridspec as gridspec
#from scipy.misc import electrocardiogram
#from scipy import stats
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
import pandas as pd
import numpy as np
from scipy.stats import kurtosis, skew
#Functions
#Mean
def media(lista):
return (sum(lista)/len(lista))
def comportamento(s, lista_dif, lista_dif_index, peaks):
    # amplitude and sample-index differences between consecutive peaks
    for i in range(len(s)-1):
        sub = abs(s[i]-s[i+1])
        sub_index = abs(peaks[i]-peaks[i+1])
        lista_dif.append(sub)
        lista_dif_index.append(sub_index)
    return lista_dif, lista_dif_index
def peaks(lista):
    # minimum peak separation = half the sampling frequency (i.e. at most ~2 peaks/s);
    # fs is assumed to be defined as the sampling rate before this function is called
    peaks, _ = find_peaks(lista, distance=fs//2)
    return peaks
def num_peaks(lista, peaks):
    # amplitude difference between consecutive peaks (despite the name, not a count)
num_peaks = np.zeros(len(peaks)-2)
for i in range(len(peaks)-2):
num_peaks[i]=lista[peaks[i]]-lista[peaks[i+1]]
return num_peaks
def plot_comport(lista, peaks):
    # behaviour of the peaks: returns the amplitude and index differences
    # between consecutive peaks (see comportamento above)
    s = lista[peaks]
    lista_dif = []
    lista_dif_index = []
    return comportamento(s, lista_dif, lista_dif_index, peaks)
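# --- Hedged usage sketch (not part of the original script) ---
# Exercises the helpers above on a synthetic signal; fs stands in for the
# sampling frequency (in Hz) that peaks() expects to find in scope.
# fs = 360
# t = np.arange(0, 10, 1 / fs)
# sinal = np.sin(2 * np.pi * 1.2 * t)               # ~72 "beats" per minute
# picos = peaks(sinal)                              # indices of detected peaks
# difs, difs_index = plot_comport(sinal, picos)     # amplitude / index gaps between peaks
# print(media(difs_index) / fs, "s mean peak-to-peak interval")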
# REFERENCE
# https://media.readthedocs.org/pdf/python-heart-rate-analysis-toolkit/latest/python-heart-rate-analysis-toolkit.pdf
# x = electrocardiogram()#[2000:4000]
data =
|
pd.read_csv('lista2.txt')
|
pandas.read_csv
|
# coding: utf-8
import pandas as pd
import numpy as np
import dateutil
import requests
import datetime
from matplotlib import pyplot as plt
def smape(actual, predicted):
a = np.abs(np.array(actual) - np.array(predicted))
b = np.array(actual) + np.array(predicted)
return 2 * np.mean(np.divide(a, b, out=np.zeros_like(a), where=b != 0, casting='unsafe'))
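# Hedged usage sketch of smape() above. Float inputs are used on purpose: with integer
# inputs the unsafe cast into np.zeros_like(a) would truncate the ratios to ints.
# actual = [100.0, 200.0, 300.0]
# predicted = [110.0, 190.0, 310.0]
# print('SMAPE:', smape(actual, predicted))   # roughly 0.06 for these values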
from dateutil.parser import parse
from datetime import date, timedelta
def date_add_hours(start_date, hours):
end_date = parse(start_date) + timedelta(hours=hours)
end_date = end_date.strftime('%Y-%m-%d %H:%M:%S')
return end_date
def date_add_days(start_date, days):
end_date = parse(start_date[:10]) + timedelta(days=days)
end_date = end_date.strftime('%Y-%m-%d')
return end_date
def diff_of_hours(time1, time2):
hours = (parse(time1) - parse(time2)).total_seconds() // 3600
return abs(hours)
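# Hedged sketch of the date helpers above, with illustrative values only.
# start = '2018-05-31 12:00:00'
# date_add_hours(start, -8)                      # -> '2018-05-31 04:00:00'
# date_add_days(start, 1)                        # -> '2018-06-01'
# diff_of_hours('2018-06-01 00:00:00', start)    # -> 12.0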
utc_date = date_add_hours(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), -8)
print('Current UTC time: {}'.format(utc_date))
print('{} hours remain until the forecast target time'.format(diff_of_hours(date_add_days(utc_date, 1), utc_date) + 1))
filename = "2018-05-31_ensemble_all_zhoujie"
day = "2018-05-31"
filepath = '../image/results/' + "".join(day.split("-")) + '/' + filename + '.csv' # 0514
# filepath = './0513commit/bag_55.csv' #0513
# filepath = './0513commit/api_concat_bag_55_6hours.csv' #0513api
result =
|
pd.read_csv(filepath)
|
pandas.read_csv
|
# core
import io
import os
import re
import gc
import time
import pytz
import glob
import zipfile
import datetime
# installed
import quandl
import pandas as pd
import requests as req
from requests.adapters import HTTPAdapter
from pytz import timezone
from concurrent.futures import ProcessPoolExecutor
import pandas_market_calendars as mcal
# custom
from file_utils import get_home_dir
DEFAULT_STORAGE = '/home/nate/Dropbox/data/eod_data/'
# get todays date for checking if files up-to-date
MTN = timezone('America/Denver')
TODAY = datetime.datetime.now(MTN)
WEEKDAY = TODAY.weekday()
HOUR = TODAY.hour
HOME_DIR = get_home_dir()
HEADERS = ['Ticker',
'Date',
'Open',
'High',
'Low',
'Close',
'Volume',
'Dividend',
'Split',
'Adj_Open',
'Adj_High',
'Adj_Low',
'Adj_Close',
'Adj_Volume']
Q_KEY = os.environ.get('quandl_api')
STOCKLIST = "../stockdata/goldstocks.txt"
quandl.ApiConfig.api_key = Q_KEY
def get_stocklist():
"""
"""
url = 'http://static.quandl.com/end_of_day_us_stocks/ticker_list.csv'
df = pd.read_csv(url)
return df
def download_all_stocks_fast_csv(write_csv=False):
"""
"""
zip_file_url = 'https://www.quandl.com/api/v3/databases/EOD/data?api_key=' + Q_KEY
r = req.get(zip_file_url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(path='../stockdata/')
headers = update_all_stocks(return_headers=True)
df = pd.read_csv('../stockdata/' + \
z.filelist[0].filename,
names=headers,
index_col=1,
parse_dates=True)
df.sort_index(inplace=True)
if write_csv:
# compression really slows it down...don't recommend
df.to_csv('../stockdata/all_stocks.csv.gzip', compression='gzip')
os.remove('../stockdata/' + z.filelist[0].filename)
return df
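# Hedged usage sketch (commented out: this downloads the full EOD zip, which is large).
# all_stocks = download_all_stocks_fast_csv(write_csv=False)
# print(all_stocks.index.min(), '->', all_stocks.index.max())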
def download_entire_db(storage_path=DEFAULT_STORAGE,
remove_previous=True,
return_df=False,
return_latest_date=False,
write=['feather']):
"""
downloads entire database and saves to .h5, replacing old file
:param storage_path: string, temporary location where to save the full csv file
:param remove_previous: removes previous instance of the EOD dataset
:param write: list of filetypes to write, can include 'feather' and 'hdf5'
hdf5 can be compressed for about 1GB filesize saving, but feather is
much faster and can be loaded in R as well
"""
# first check if we have the latest data
if not os.path.exists(storage_path):
splitpath = storage_path.split('/')[1:] # first entry is blank due to home dir /
for i, p in enumerate(splitpath, 1):
path = '/'.join(splitpath[:i])
if not os.path.exists(path):
os.mkdir(path)
zip_file_url = 'https://www.quandl.com/api/v3/databases/EOD/data?api_key=' + Q_KEY
s = req.Session()
s.mount('https', HTTPAdapter(max_retries=10))
r = s.get(zip_file_url)
# another possible way to deal with retries
# while True:
# try:
# r = req.get(zip_file_url, timeout=10)
# break
# except Exception as e:
# print(e)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(path=storage_path)
df = pd.read_csv(storage_path + \
z.filelist[0].filename,
names=HEADERS,
index_col=1,
parse_dates=True,
infer_datetime_format=True)
latest_date = df.index.max().date().strftime('%Y%m%d')
if 'hdf5' in write:
df.to_hdf(storage_path + 'EOD_' + latest_date + '.h5',
key='data',
complib='blosc',
complevel=9)
# also write feather file so can read into R
# have to reset the index because feather can't handle non-default index (maybe non-unique?)
df.reset_index(inplace=True)
if 'feather' in write:
df.to_feather(storage_path + 'EOD_' + latest_date + '.ft')
if remove_previous:
for ext in ['h5', 'ft']:
files = glob.glob(storage_path + 'EOD_*.' + ext)
files = [f for f in files if len(f.split('/')[-1]) == 15] # don't want any of the small files, only full DBs
print(sorted(files, key=os.path.getctime))
if len(files) > 1:
previous_file = sorted(files, key=os.path.getctime)[-2]
print('removing', previous_file)
os.remove(previous_file)
# delete downloaded zip file
os.remove(storage_path + z.filelist[0].filename)
if return_df:
# set index back to normal for return_df
df.set_index('Date', inplace=True)
return df
elif return_latest_date:
return pd.to_datetime(df['Date'].max().date())
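# Hedged usage sketch (commented out: triggers a full multi-GB download). Assumes the
# quandl_api environment variable and DEFAULT_STORAGE above are configured.
# latest = download_entire_db(storage_path=DEFAULT_STORAGE,
#                             remove_previous=True,
#                             return_latest_date=True,
#                             write=['feather'])
# print('EOD snapshot current through', latest)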
def check_market_status():
"""
Checks to see if market is open today.
Uses the pandas_market_calendars package as mcal
"""
today_ny = datetime.datetime.now(pytz.timezone('America/New_York'))
ndq = mcal.get_calendar('NASDAQ')
open_days = ndq.schedule(start_date=today_ny - pd.Timedelta('10 days'), end_date=today_ny)
if today_ny.date() in open_days.index:
return open_days
else:
return None
def daily_download_entire_db(storage_path=DEFAULT_STORAGE):
"""
checks if it is a trading day today, and downloads entire db after it has been updated
(930pm ET)
need to refactor -- this is messy and touchy. Have to start before midnight UTC
to work ideally
"""
latest_db_date = get_latest_db_date()
while True:
latest_close_date = get_latest_close_date()
if latest_db_date is None:
print('no database file exists, downloading...')
latest_db_date = download_entire_db(return_latest_date=True)
continue
today_utc = pd.to_datetime('now')
today_ny = datetime.datetime.now(pytz.timezone('America/New_York'))
pd_today_ny = pd.to_datetime(today_ny.date())
if latest_db_date.date() != latest_close_date.date():
if (latest_close_date.date() - latest_db_date.date()) >= pd.Timedelta('1D'):
if today_ny.hour > latest_close_date.hour:
print('db more than 1 day out of date, downloading...')
latest_db_date = download_entire_db(return_latest_date=True)
elif pd_today_ny.date() == latest_close_date.date(): # if the market is open and the db isn't up to date with today...
if today_ny.hour >= 22:
print('downloading db with update from today...')
latest_db_date = download_entire_db(return_latest_date=True)
print('sleeping 1h...')
time.sleep(3600)
# old code...don't think I need this anymore
# open_days = check_market_status()
# if open_days is not None:
# close_date = open_days.loc[today_utc.date()]['market_close']
# # TODO: add check if after closing time
# if today_utc.dayofyear > close_date.dayofyear or today_utc.year > close_date.year:
# if today_ny.hour > 10: # need to wait until it has been processed to download
# last_scrape = today_ny.date()
# print('downloading db...')
# download_entire_db()
# else:
# # need to make it wait number of hours until close
# print('waiting for market to close, waiting 1 hour...')
# time.sleep(3600)
# else:
# # need to wait till market will be open then closed next
# print('market closed today, waiting 1 hour...')
# time.sleep(3600) # wait 1 hour
# else:
# # need to make this more intelligent so it waits until the next day
# print('already scraped today, waiting 1 hour to check again...')
# time.sleep(3600)
def get_latest_db_date(storage_path=DEFAULT_STORAGE, filetype='feather'):
"""
gets the date of the last full scrape of the db
"""
if filetype == 'feather':
files = glob.glob(storage_path + 'EOD_*.ft')
elif filetype == 'hdf5':
files = glob.glob(storage_path + 'EOD_*.h5')
else:
print("filetype must be one of ['feather', 'hdf5']")
return None
if len(files) > 0:
files = [f for f in files if len(f.split('/')[-1]) == 15] # don't want any of the small files, only full DBs
latest_file = sorted(files, key=os.path.getctime)[-1]
last_date = pd.to_datetime(latest_file[-11:-3])
return last_date
return None
def get_latest_close_date(market='NASDAQ', return_time=False, last_close=False):
"""
gets the latest date the markets were open (NASDAQ), and returns the closing datetime
if last_close is True, gets last datetime that market has closed (not in the future)
"""
# today = datetime.datetime.now(pytz.timezone('America/New_York')).date()
# today_utc = pd.to_datetime('now').date()
today_ny = datetime.datetime.now(pytz.timezone('America/New_York'))
ndq = mcal.get_calendar(market)
open_days = ndq.schedule(start_date=today_ny - pd.Timedelta('10 days'), end_date=today_ny)
if last_close:
past = open_days[open_days['market_close'] <= pd.to_datetime('now').tz_localize('UTC')]
return past.iloc[-1]['market_close']
return open_days.iloc[-1]['market_close']
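# Hedged sketch: combine get_latest_db_date() and get_latest_close_date() to decide
# whether a refresh is due, mirroring the loop inside daily_download_entire_db().
# db_date = get_latest_db_date()
# last_close = get_latest_close_date(last_close=True)
# if db_date is None or db_date.date() < last_close.date():
#     print('local EOD snapshot is stale; call download_entire_db()')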
def check_market_status():
    """
    Checks to see if market is open today.
    Uses the pandas_market_calendars package as mcal.
    Note: this redefines the check_market_status() above, keyed on the UTC date
    rather than New York local time.
    """
# today = datetime.datetime.now(pytz.timezone('America/New_York')).date()
today_utc = pd.to_datetime('now').date()
ndq = mcal.get_calendar('NASDAQ')
open_days = ndq.schedule(start_date=today_utc - pd.Timedelta('10 days'), end_date=today_utc)
if today_utc in open_days.index:
return open_days
else:
return None
def update_all_stocks(return_headers=False, update_small_file=False):
"""
return_headers will just return the column names.
update_small_file will just update the small file that starts on 1/1/2000
"""
# 7-13-2017: 28788363 rows in full df
zip_file_url = 'https://www.quandl.com/api/v3/databases/EOD/download?api_key=' + \
Q_KEY + '&download_type=partial'
r = req.get(zip_file_url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(path='../stockdata/')
if return_headers:
df = pd.read_csv('../stockdata/' + z.filelist[0].filename, parse_dates=True)
df.set_index('Date', inplace=True)
new_c = [re.sub('.\s', '_', c) for c in df.columns]
return new_c
df = pd.read_csv('../stockdata/' + z.filelist[0].filename)
# it won't parse dates when it reads...
df['Date'] =
|
pd.to_datetime(df['Date'])
|
pandas.to_datetime
|
# Data Preprocessing
"""ML_Workflow template with required libraries and function calls.
@author:Varshtih
"""
import pandas as pd
import numpy as np
from autoimpute.imputations import MultipleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from scipy import stats
import sweetviz
import seaborn as sns
from pyod.models.feature_bagging import FeatureBagging
# Load Input Files
train_data = pd.read_csv(r"C:\Users\svvar\PycharmProjects\ml_workflow\Algorithims\Data Files\train.csv")
test_data = pd.read_csv(r"C:\Users\svvar\PycharmProjects\ml_workflow\Algorithims\Data Files\test.csv")
train_data.info()
test_data.info()
# Fill in required Inputs
x_train = train_data.iloc[:, list(range(3, 11))]
y_train = train_data.iloc[:, list(range(11,12))].values
x_train_num = train_data.iloc[:, list(range(3, 9))]
x_train_txt = train_data.iloc[:, list(range(9, 11))]
x_train_txt_encode_split = 2 # Split at Column Number
x_test = test_data.iloc[:, list(range(3, 11))]
x_test_num = test_data.iloc[:, list(range(3, 9))]
x_test_txt = test_data.iloc[:, list(range(9, 11))]
x_test_txt_encode_split = 2 # Split at Column Number
# Impute Missing values
# Numerical Imputer
imputer_num = MultipleImputer(strategy='stochastic', return_list=True, n=5, seed=101)
x_train_num_avg = imputer_num.fit_transform(x_train_num)
x_train_num_concat = x_train_num_avg[0][1]
for i in range(len(x_train_num_avg)-1):
x_train_num_concat =
|
pd.concat([x_train_num_concat,x_train_num_avg[i+1][1]], axis=1)
|
pandas.concat
|
# TODO: move away from this test-generator style, since we need to manage the generator file,
# which is no longer in this project workspace, as well as the output test file.
##                                                            ##
#                                                              #
#   THIS TEST WAS AUTOGENERATED BY groupby_test_generator.py   #
#                                                              #
##                                                            ##
# TODO: refactor this into table-driven tests using pytest.mark.parametrize, since each test body follows
# the same structure and a single test body with multiple test table entries will be more readable and
# flexible (a sketch follows below).
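# ----------------------------------------------------------------------------------
# Hedged sketch of the table-driven rewrite suggested above. It assumes the same
# groupby_unit_test_parameters module exposes groupby_everything, KEY_COLUMN_NAMES and
# VAL_COLUMN_NAMES; it is illustrative only -- the autogenerated class below remains
# the real test surface.
# ----------------------------------------------------------------------------------
import math
import pytest
import pandas as pd
import riptable as rt
from .groupby_unit_test_parameters import groupby_everything, KEY_COLUMN_NAMES, VAL_COLUMN_NAMES

@pytest.mark.parametrize(
    "val_cols, key_cols, symb_ratio, aggs",
    [
        (1, 1, 0.1, ['median']),
        (4, 1, 0.1, ['median']),
        (2, 2, 0.30, ['var', 'mean', 'max']),
    ],
)
def test_multikey_groupby_matches_pandas(val_cols, key_cols, symb_ratio, aggs):
    test_class = groupby_everything(val_cols, key_cols, symb_ratio, aggs)
    pd_out = (
        pd.DataFrame(test_class.data)
        .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
        .agg(test_class.aggregation_functions)
    )
    sf_out = (
        rt.Dataset(test_class.data)
        .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
        .agg(test_class.aggregation_functions)
    )
    for func in aggs:
        for column in VAL_COLUMN_NAMES[: test_class.val_columns]:
            for a, b in zip(pd_out[column][func], sf_out[func.title()][column]):
                if not (math.isnan(a) or math.isnan(b)):  # skip NaNs, like safe_assert below
                    assert a == pytest.approx(b, abs=1e-7)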
from .groupby_unit_test_parameters import *
import pandas as pd
import riptable as rt
import unittest
class autogenerated_gb_tests(unittest.TestCase):
def safe_assert(self, ary1, ary2):
for a, b in zip(ary1, ary2):
            if a == a and b == b:  # NaN != NaN, so pairs containing NaN are skipped
self.assertAlmostEqual(a, b, places=7)
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(1, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(4, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(7, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(2, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(5, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(1, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(4, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(7, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(2, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(5, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(1, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(4, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(7, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(2, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(5, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(1, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(4, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(7, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(2, 2, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(5, 2, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(1, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(4, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(7, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(2, 1, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(5, 1, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(1, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(4, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(7, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(2, 3, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(5, 3, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 2, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 2, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 1, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 1, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 3, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 3, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 2, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 2, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 1, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 1, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 3, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 3, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 2, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 2, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 1, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 1, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 2, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 2, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 2, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 3, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 3, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(1, 1, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(4, 1, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(7, 1, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(2, 2, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(5, 2, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(1, 3, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(4, 3, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(7, 3, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(2, 1, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(5, 1, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(1, 2, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(4, 2, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(7, 2, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(2, 3, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(5, 3, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['min', 'max']
test_class = groupby_everything(1, 1, 0.1, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['min', 'max']
test_class = groupby_everything(4, 1, 0.1, ['min', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['min', 'max']
test_class = groupby_everything(7, 1, 0.1, ['min', 'max'])
pd_out = (
|
pd.DataFrame(test_class.data)
|
pandas.DataFrame
|
# 14.5 Case Study: Multiple Linear Regression with the California Housing Dataset
# 14.5.1 Loading the Dataset
# Loading the Data
from sklearn.datasets import fetch_california_housing
california = fetch_california_housing()
# Displaying the Dataset’s Description
print(california.DESCR)
california.data.shape
california.target.shape
california.feature_names
# 14.5.2 Exploring the Data with Pandas
import pandas as pd
|
pd.set_option('precision', 4)
|
pandas.set_option
|
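# The case-study snippet above stops just before the pandas exploration step.
# A minimal, illustrative sketch of that step follows, assuming the same
# `california` Bunch returned by fetch_california_housing(); the column name
# 'MedHouseValue' for the target is an assumption, not taken from the source.
from sklearn.datasets import fetch_california_housing
import pandas as pd

california = fetch_california_housing()

# One column per feature, plus the regression target as an extra column.
california_df = pd.DataFrame(california.data, columns=california.feature_names)
california_df['MedHouseValue'] = pd.Series(california.target)

# Quick look at the data: first rows and per-column summary statistics.
print(california_df.head())
print(california_df.describe())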
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for data_frame_proto."""
from unittest.mock import patch
import json
import unittest
import numpy as np
import pandas as pd
import pytest
import streamlit.elements.data_frame_proto as data_frame_proto
from google.protobuf import json_format
from streamlit.proto.DataFrame_pb2 import AnyArray
from streamlit.proto.DataFrame_pb2 import CSSStyle
from streamlit.proto.DataFrame_pb2 import CellStyle
from streamlit.proto.DataFrame_pb2 import CellStyleArray
from streamlit.proto.DataFrame_pb2 import DataFrame
from streamlit.proto.DataFrame_pb2 import Index
from streamlit.proto.DataFrame_pb2 import Int32Array
from streamlit.proto.DataFrame_pb2 import Table
from streamlit.proto.Delta_pb2 import Delta
from streamlit.proto.VegaLiteChart_pb2 import VegaLiteChart
from streamlit.proto.NamedDataSet_pb2 import NamedDataSet
def _css_style(prop, value):
css_pb = CSSStyle()
css_pb.property = prop
css_pb.value = value
return css_pb
class DataFrameProtoTest(unittest.TestCase):
"""Test streamlit.data_frame_proto."""
def test_marshall_data_frame(self):
"""Test streamlit.data_frame_proto.marshall_data_frame.
"""
pass
def test_is_pandas_styler(self):
"""Test streamlit.data_frame_proto._is_pandas_styler.
Need to test the following:
* object is of type pandas.io.formats.style.Styler
"""
pass
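    # Hedged sketch of the stub above: it assumes _is_pandas_styler simply does a
    # type check against pandas.io.formats.style.Styler, as the docstring says;
    # the function's exact signature is not confirmed here, and the method name
    # below is illustrative.
    def test_is_pandas_styler_sketch(self):
        df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
        self.assertTrue(data_frame_proto._is_pandas_styler(df.style))
        self.assertFalse(data_frame_proto._is_pandas_styler(df))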
def test_marshall_styles(self):
"""Test streamlit.data_frame_proto._marshall_styles.
Need to test the following:
* styler is:
* None
* not None
* display_values is:
* None
* not None
"""
pass
def test_get_css_styles(self):
"""Test streamlit.data_frame_proto._get_css_styles.
Need to test the following:
* cell_selector_regex isn't found
* cell_style['props'] isn't a list
* the length of cell_style['props'] does not equal 2
* style has name and value
* style does not have name and value
"""
pass
def test_get_custom_display_values(self):
"""Test streamlit.data_frame_proto._get_custom_display_values.
Need to test the following:
* row_header regex is found
* we find row header more than once.
* cell_selector regex isn't found.
* has_custom_display_values
* true
* false
"""
pass
def test_marshall_index(self):
"""Test streamlit.data_frame_proto._marshall_index."""
df =
|
pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
|
pandas.DataFrame
|
#pylint: disable=line-too-long, too-many-public-methods, invalid-name
#pylint: disable=maybe-no-member, too-few-public-methods, no-member
from __future__ import absolute_import
from argparse import Namespace
from collections import OrderedDict
import filecmp
from io import StringIO
import os
import unittest
import numpy
import pandas as pd
from testfixtures import TempDirectory
import generollup.rollup as rollup
#TODO: cgates: How about we fix the chained assignments and get rid of this warning suppression.
pd.set_option('mode.chained_assignment', None)
def dataframe(input_data, sep="|", dtype=None):
return pd.read_csv(StringIO(input_data), sep=sep, header=0, dtype=dtype)
class MockFormatRule(object):
def __init__(self, format_df):
self.format_df = format_df
self.last_data_df = None
self.last_format_df = None
def format(self, data_df):
self.last_data_df = data_df
return self.format_df
def style(self, format_df):
self.last_format_df = format_df
return self.format_df
class GeneRollupTestCase(unittest.TestCase):
def setUp(self):
rollup._DBNSFP_COLUMN = 'dbNSFP_rollup_damaging'
rollup._EFFECT_COLUMN = 'SNPEFF_TOP_EFFECT_IMPACT'
rollup._GENE_SYMBOL = 'GENE_SYMBOL'
def test_create_df(self):
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tSNPEFF_TOP_EFFECT_IMPACT\tJQ_SUMMARY_SOM_COUNT|sampleA
1\t2\t3\t4\t5\t6\t7\t8'''
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)',
gene_column_name='GENE_SYMBOL',
tsv=False)
actual_df = rollup._create_df(StringIO(input_string), args)
self.assertEquals(["GENE_SYMBOL", "dbNSFP_rollup_damaging", "SNPEFF_TOP_EFFECT_IMPACT", "JQ_SUMMARY_SOM_COUNT|sampleA"],
list(actual_df.columns.values))
def test_create_df_missingDbnsfpAndSnpeffOkay(self):
input_string =\
'''GENE_SYMBOL\theaderA\theaderB\tJQ_SUMMARY_SOM_COUNT|sampleA
foo\t1\t2\t0'''
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)',
gene_column_name='GENE_SYMBOL',
tsv=False)
actual_df = rollup._create_df(StringIO(input_string), args)
self.assertEquals(["GENE_SYMBOL", "headerA", "headerB", "JQ_SUMMARY_SOM_COUNT|sampleA"],
list(actual_df.columns.values))
def test_create_df_missingDbNsfpOkay(self):
input_string =\
'''GENE_SYMBOL\tSNPEFF_TOP_EFFECT_IMPACT\tJQ_SUMMARY_SOM_COUNT|sampleA|TUMOR
1\t2\t3'''
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name='GENE_SYMBOL',
effect_column_name='SNPEFF_TOP_EFFECT_IMPACT',
tsv=False)
actual_df = rollup._create_df(StringIO(input_string), args)
self.assertEquals(["GENE_SYMBOL", "SNPEFF_TOP_EFFECT_IMPACT", "JQ_SUMMARY_SOM_COUNT|sampleA|TUMOR"],
list(actual_df.columns.values))
def test_create_df_missingEffectOkay(self):
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tJQ_SUMMARY_SOM_COUNT|sampleA|TUMOR
1\t2\t3'''
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name='GENE_SYMBOL',
dbnsfp_column_name='dbNSFP_rollup_damaging',
tsv=False)
actual_df = rollup._create_df(StringIO(input_string), args)
self.assertEquals(["GENE_SYMBOL", "dbNSFP_rollup_damaging", "JQ_SUMMARY_SOM_COUNT|sampleA|TUMOR"],
list(actual_df.columns.values))
def test_create_df_missingSamples(self):
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tSNPEFF_TOP_EFFECT_IMPACT
1\t2\t3
1\t2\t3'''
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(P.)\|TUMOR',
gene_column_name='GENE_SYMBOL',
dbnsfp_column_name='dbNSFP_rollup_damaging',
effect_column_name='SNPEFF_TOP_EFFECT_IMPACT',
tsv=False)
self.assertRaisesRegexp(rollup.UsageError,
"Cannot determine sample genotype columns with supplied regex.*",
rollup._create_df,
StringIO(input_string),
args)
#TODO: (jebene) I can't figure out how to initialize this as having null values
def xtest_create_df_removesIntergenicVariants(self):
input_string =\
'''GENE_SYMBOL\tSNPEFF_TOP_EFFECT_IMPACT\tJQ_SUMMARY_SOM_COUNT
BRCA1\t2\t3
0\t4\t5
6\t7\t8'''
input_string = input_string.replace("0", numpy.nan)
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name="GENE_SYMBOL",
tsv=False)
actual_df = rollup._create_df(StringIO(input_string), args)
self.assertEquals(["GENE_SYMBOL", "SNPEFF_TOP_EFFECT_IMPACT", "JQ_SUMMARY_SOM_COUNT"],
list(actual_df.columns.values))
self.assertEquals(["BRCA1", "6"], list(actual_df["GENE_SYMBOL"].values))
def test_sort_by_dbnsfp_rank(self):
input_string =\
'''gene_symbol\tJQ_SUMMARY_SOM_COUNT|P1|NORMAL\teffect_annotation|overall_effect_rank\tdbNSFP_annotation|overall_damaging_rank
BRCA1\th\t7\t2
EGFR\tm\t4\t3
SON\tm\t5\t1
BRCA2\tm\t5\t1
CREBBP\thhh\t6\t1'''
input_df = dataframe(input_string, sep="\t")
sorted_df = rollup._sort_by_dbnsfp_rank(input_df)
self.assertEquals(["BRCA2", "SON", "CREBBP", "BRCA1", "EGFR"], list(sorted_df["gene_symbol"].values))
self.assertEquals([1, 1, 1, 2, 3], list(sorted_df["dbNSFP_annotation|overall_damaging_rank"].values))
def test_combine_dfs(self):
summary_string =\
'''GENE_SYMBOL\tJQ_SUMMARY_SOM_COUNT|P1|NORMAL
BRCA1\t1
EGFR\t1
CREBBP\t1'''
summary_df = dataframe(summary_string, sep="\t")
summary_df = summary_df.set_index(["GENE_SYMBOL"])
dbNSFP_string =\
'''GENE_SYMBOL\tdbNSFP|P1|NORMAL
BRCA1\t2
CREBBP\t4'''
dbNSFP_df = dataframe(dbNSFP_string, sep="\t")
dbNSFP_df = dbNSFP_df.set_index(["GENE_SYMBOL"])
snpEff_string =\
'''GENE_SYMBOL\tsnpEff|P1|NORMAL
BRCA1\th
CREBBP\thh'''
snpEff_df = dataframe(snpEff_string, sep="\t")
snpEff_df = snpEff_df.set_index(["GENE_SYMBOL"])
dfs = OrderedDict()
dfs["summary"] = summary_df
dfs["dbNSFP"] = dbNSFP_df
dfs["snpEff"] = snpEff_df
actual = rollup._combine_dfs(dfs)
self.assertEquals(["BRCA1", "CREBBP"], list(actual.index.values))
def xtest_translate_to_excel(self):
with TempDirectory() as output_dir:
output_dir.write("output.xlsx", "")
output_file = os.path.join(output_dir.path, "output.xlsx")
data_string =\
'''gene symbol|PATIENT_A_SnpEff|PATIENT_A_dbNSFP|SnpEff_overall_impact_rank
MOD|mml|12|2
NULL1|||
HIGH|hhmlx|4|1'''
data_df = dataframe(data_string)
data_df.fillna("", inplace=True)
style_string = \
'''gene symbol|PATIENT_A_SnpEff|PATIENT_A_dbNSFP|SnpEff_overall_impact_rank
MOD|||
HIGH|||
NULL1|||'''
style_df = dataframe(style_string)
style_df["PATIENT_A_SnpEff"] = [{"font_size": "4", "bg_color": "#6699FF", "font_color": "#6699FF"},
"",
{"font_size": "4", "bg_color": "#003366", "font_color": "#003366"}]
style_df["PATIENT_A_dbNSFP"] = [{"font_size": "12", "bg_color": "#ffa500", "font_color": "#000000"},
"",
{"font_size": "12", "bg_color": "white", "font_color": "#003366"}]
style_df["SnpEff_overall_impact_rank"] = [{"font_size": "12", "bg_color": "white", "font_color": "#000000"},
"",
{"font_size": "12", "bg_color": "red", "font_color": "#000000"}]
style_df.fillna("", inplace=True)
writer = pd.ExcelWriter(output_file, engine="xlsxwriter")
rollup._translate_to_excel(data_df, style_df, writer)
script_dir = os.path.dirname(os.path.realpath(__file__))
expected_output = os.path.join(script_dir,
"functional_tests",
"translate_to_excel",
"expected_output.xlsx")
self.assertEquals(True, filecmp.cmp(expected_output, output_file))
def test_reset_style_gene_values(self):
data_string =\
'''gene_symbol|PATIENT_A_SnpEff
BRCA1|{"foo": "bar"}
TANK|{"foo": "bar"}
CREBBP|{"foo": "bar"}'''
data_df = dataframe(data_string)
actual = rollup._reset_style_gene_values(data_df)
actual = actual.applymap(str)
expected_string =\
'''gene_symbol|PATIENT_A_SnpEff
{}|{"foo": "bar"}
{}|{"foo": "bar"}
{}|{"foo": "bar"}'''
expected = dataframe(expected_string)
self.assertEquals(list(expected["gene_symbol"].values),
list(actual["gene_symbol"].values))
self.assertEquals(list(expected["PATIENT_A_SnpEff"].values),
list(actual["PATIENT_A_SnpEff"].values))
class dbNSFPTestCase(unittest.TestCase):
def setUp(self):
rollup._SAMPLENAME_REGEX = "JQ_SUMMARY_SOM_COUNT.*"
rollup._GENE_SYMBOL = "GENE_SYMBOL"
rollup._XLSX = False
def tearDown(self):
pass
def test_remove_unnecessary_columns(self):
FORMAT_DF = pd.DataFrame([[42] * 4] * 1)
formatRule = MockFormatRule(FORMAT_DF)
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name="GENE_SYMBOL",
tsv=False)
dbNSFP = rollup.dbNSFP([formatRule], args)
input_string = \
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tBAZ\tJQ_SUMMARY_SOM_COUNT|P1|TUMOR\tJQ_SUMMARY_SOM_COUNT|P2|TUMOR\tFOO\tBAR'''
input_df = dataframe(input_string, sep="\t")
actual = dbNSFP._remove_unnecessary_columns(input_df)
self.assertEquals(4, len(actual.columns))
self.assertNotIn("BAZ", actual.columns)
self.assertNotIn("FOO", actual.columns)
self.assertNotIn("BAR", actual.columns)
def test_remove_invalid_rows(self):
FORMAT_DF = pd.DataFrame([[42] * 4] * 2)
formatRule = MockFormatRule(FORMAT_DF)
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
)
dbNSFP = rollup.dbNSFP([formatRule], args)
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tJQ_SUMMARY_SOM_COUNT|P1|TUMOR\tJQ_SUMMARY_SOM_COUNT|P2|TUMOR
BRCA1\t2\t1\t1
BRCA1\t0\t.\t1
BRCA1\t\t1\t1
BRCA1\t.\t1\t1
CREBBP\t3\t0\t.'''
input_df = dataframe(input_string, sep="\t")
actual = dbNSFP._remove_invalid_rows(input_df)
self.assertEquals(["2", "3"], list(actual["dbNSFP_rollup_damaging"].values))
self.assertEquals(["1", "0"], list(actual["JQ_SUMMARY_SOM_COUNT|P1|TUMOR"].values))
self.assertEquals(["1", "."], list(actual["JQ_SUMMARY_SOM_COUNT|P2|TUMOR"].values))
def test_summarize_dataMatrix(self):
FORMAT_DF = pd.DataFrame([[42] * 4] * 2)
formatRule = MockFormatRule(FORMAT_DF)
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name=rollup._GENE_SYMBOL,
input_gene_column_name=rollup._GENE_SYMBOL,
tsv=False)
dbNSFP = rollup.dbNSFP([formatRule], args)
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tJQ_SUMMARY_SOM_COUNT|P1|TUMOR\tJQ_SUMMARY_SOM_COUNT|P2|TUMOR
BRCA1\t2\t1\t1
BRCA1\t5\t.\t1
CREBBP\t3\t0\t.'''
input_df = dataframe(input_string, sep="\t")
input_df = input_df.applymap(str)
(data_df, style_dfs) = dbNSFP.summarize(input_df)
self.assertEquals(1, len(style_dfs))
self.assertEquals(data_df.shape, style_dfs[0].shape)
data_df = data_df.applymap(str)
expected_string =\
'''GENE_SYMBOL\tdbNSFP_annotation|overall_damaging_rank\tdbNSFP_annotation|damaging_total\tdbNSFP_annotation|damaging_votes|P1\tdbNSFP_annotation|damaging_votes|P2
BRCA1\t1\t9\t2\t7
CREBBP\t2\t0\t\t'''
expected_df = dataframe(expected_string, sep="\t", dtype=str)
expected_df = expected_df.set_index(["GENE_SYMBOL"])
expected_df.fillna("", inplace=True)
expected_df = expected_df.applymap(str)
self.assertEquals('\t'.join(expected_df.columns.values), '\t'.join(data_df.columns.values))
self.assertEquals([list(i) for i in expected_df.values], [list(i) for i in data_df.values])
def test_summarize_dataMatrixIgnoresNullOrZeroDamagingCounts(self):
FORMAT_DF = pd.DataFrame([[42] * 4] * 1)
formatRule = MockFormatRule(FORMAT_DF)
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name=rollup._GENE_SYMBOL,
input_gene_column_name=rollup._GENE_SYMBOL,
tsv=False)
dbNSFP = rollup.dbNSFP([formatRule], args)
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tJQ_SUMMARY_SOM_COUNT|P1|TUMOR\tJQ_SUMMARY_SOM_COUNT|P2|TUMOR
BRCA1\t0\t1\t1
BRCA1\t1\t.\t1
CREBBP\t.\t0\t.'''
input_df = dataframe(input_string, sep="\t")
input_df = input_df.applymap(str)
(data_df, style_dfs) = dbNSFP.summarize(input_df)
self.assertEquals(1, len(style_dfs))
self.assertEquals(data_df.shape, style_dfs[0].shape)
data_df = data_df.applymap(str)
expected_string =\
'''GENE_SYMBOL\tdbNSFP_annotation|overall_damaging_rank\tdbNSFP_annotation|damaging_total\tdbNSFP_annotation|damaging_votes|P1\tdbNSFP_annotation|damaging_votes|P2
BRCA1\t1\t1\t\t1'''
expected_df = dataframe(expected_string, sep="\t", dtype=str)
expected_df = expected_df.set_index(["GENE_SYMBOL"])
expected_df.fillna("", inplace=True)
expected_df = expected_df.applymap(str)
self.assertEquals('\t'.join(expected_df.columns.values), '\t'.join(data_df.columns.values))
self.assertEquals([list(i) for i in expected_df.values], [list(i) for i in data_df.values])
def test_summarize_formatMatrix(self):
FORMAT_DF =
|
pd.DataFrame([[42] * 4] * 2)
|
pandas.DataFrame
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data =
|
pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
|
pandas.DataFrame
|
import tensorflow as tf
import numpy as np
import pandas as pd
class TFRecordWriter:
def __init__(self, **kwargs):
self.__input_size = kwargs['input_size']
self.__output_size = kwargs['output_size']
self.__train_file_path = kwargs['train_file_path']
self.__validate_file_path = kwargs['validate_file_path']
self.__test_file_path = kwargs['test_file_path']
self.__binary_train_file_path = kwargs['binary_train_file_path']
self.__binary_validation_file_path = kwargs['binary_validation_file_path']
self.__binary_test_file_path = kwargs['binary_test_file_path']
# read the text data from text files
def read_text_data(self):
self.__list_of_training_inputs = []
self.__list_of_training_outputs = []
self.__list_of_validation_inputs = []
self.__list_of_validation_outputs =[]
self.__list_of_validation_metadata = []
self.__list_of_test_inputs = []
self.__list_of_test_metadata = []
# Reading the training dataset.
train_df = pd.read_csv(self.__train_file_path, nrows=10)
float_cols = [c for c in train_df if train_df[c].dtype == "float64"]
float32_cols = {c: np.float32 for c in float_cols}
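        # Re-read the full file, downcasting the float64 columns detected above to float32 to save memory.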
train_df = pd.read_csv(self.__train_file_path, sep=" ", header=None, engine='c', dtype=float32_cols)
train_df = train_df.rename(columns={0: 'series'})
# Returns unique number of time series in the dataset.
series = pd.unique(train_df['series'])
# Construct input and output training tuples for each time series.
for ser in series:
one_series_df = train_df[train_df['series'] == ser]
inputs_df = one_series_df.iloc[:, range(1, (self.__input_size + 1))]
outputs_df = one_series_df.iloc[:, range((self.__input_size + 2), (self.__input_size + self.__output_size + 2))]
self.__list_of_training_inputs.append(np.ascontiguousarray(inputs_df, dtype=np.float32))
self.__list_of_training_outputs.append(np.ascontiguousarray(outputs_df, dtype=np.float32))
# Reading the validation dataset.
val_df = pd.read_csv(self.__validate_file_path, nrows=10)
float_cols = [c for c in val_df if val_df[c].dtype == "float64"]
float32_cols = {c: np.float32 for c in float_cols}
val_df = pd.read_csv(self.__validate_file_path, sep=" ", header=None, engine='c', dtype=float32_cols)
val_df = val_df.rename(columns={0: 'series'})
series = pd.unique(val_df['series'])
for ser in series:
one_series_df = val_df[val_df['series'] == ser]
inputs_df_test = one_series_df.iloc[:, range(1, (self.__input_size + 1))]
metadata_df = one_series_df.iloc[:, range((self.__input_size + self.__output_size + 3), one_series_df.shape[1])]
outputs_df_test = one_series_df.iloc[:, range((self.__input_size + 2), (self.__input_size + self.__output_size + 2))]
self.__list_of_validation_inputs.append(np.ascontiguousarray(inputs_df_test, dtype=np.float32))
self.__list_of_validation_outputs.append(np.ascontiguousarray(outputs_df_test, dtype=np.float32))
self.__list_of_validation_metadata.append(np.ascontiguousarray(metadata_df, dtype=np.float32))
# Reading the test file.
test_df = pd.read_csv(self.__test_file_path, nrows=10)
float_cols = [c for c in test_df if test_df[c].dtype == "float64"]
float32_cols = {c: np.float32 for c in float_cols}
test_df = pd.read_csv(self.__test_file_path, sep=" ", header=None, engine='c', dtype=float32_cols)
test_df = test_df.rename(columns={0: 'series'})
series =
|
pd.unique(test_df['series'])
|
pandas.unique
|
"""Contains functions for preprocessing data
Classes
-------
Person
Functions
----------
recurcive_append
create_pedigree
add_control
prepare_data
"""
import logging
import pandas as pd
import numpy as np
from pysnptools.snpreader import Bed
from bgen_reader import open_bgen, read_bgen
from config import nan_integer
class Person:
"""Just a simple data structure representing individuals
Args:
id : str
IID of the individual.
fid : str
FID of the individual.
pid : str
IID of the father of that individual.
mid : str
IID of the mother of that individual.
"""
def __init__(self, id, fid=None, pid=None, mid=None):
self.id = id
self.fid = fid
self.pid = pid
self.mid = mid
def recurcive_append(dictionary, index, element):
"""Adds an element to value of all the keys that can be reached from index with using get recursively.
Args:
dictionary : dict
            A dictionary mapping each key to a set of keys.
index
The start point
element
What should be added to values
"""
queue = {index}
seen_so_far = set()
while queue:
current_index = queue.pop()
seen_so_far.add(current_index)
dictionary[current_index].add(element)
queue = queue.union(dictionary[current_index])
queue = queue.difference(seen_so_far)
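# Editor's sketch (hypothetical data): every key reachable from the start index gets the
# element added to its set; values must be sets and every reachable id must itself be a key.
if __name__ == "__main__":
    relations = {"a": {"b"}, "b": {"c"}, "c": set(), "d": set()}
    recurcive_append(relations, "a", "d")
    print(relations)  # "d" is added to the sets of "a", "b" and "c" (and to "d" itself once it becomes reachable)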
def create_pedigree(king_address, agesex_address):
"""Creates pedigree table from agesex file and kinship file in KING format.
Args:
king_address : str
            Address of a kinship file in KING format. The kinship file is a '\t' separated csv with columns "FID1", "ID1", "FID2", "ID2", "InfType".
Each row represents a relationship between two individuals. InfType column states the relationship between two individuals.
The only relationships that matter for this script are full sibling and parent-offspring which are shown by 'FS' and 'PO' respectively.
This file is used in creating a pedigree file and can be generated using KING.
            As fids starting with '_' are reserved for controls, there should be no fids starting with '_'.
agesex_address : str
Address of the agesex file. This is a " " seperated CSV with columns "FID", "IID", "FATHER_ID", "MOTHER_ID", "sex", "age".
Each row contains the age and sex of one individual. Male and Female sex should be represented with 'M' and 'F'.
Age column is used for distinguishing between parent and child in a parent-offspring relationship inferred from the kinship file.
ID1 is a parent of ID2 if there is a 'PO' relationship between them and 'ID1' is at least 12 years older than ID2.
Returns:
pd.DataFrame:
A pedigree table with 'FID', 'IID', 'FATHER_ID', 'MOTHER_ID'. Each row represents an individual.
"""
kinship = pd.read_csv(king_address, delimiter="\t").astype(str)
logging.info("loaded kinship file")
agesex =
|
pd.read_csv(agesex_address, delim_whitespace=True)
|
pandas.read_csv
|
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from PIL import Image
import sys
import os
import random
import numpy as np
import pandas as pd
class UltrasoundDataset(object):
def __init__(self, data_path, val_size=0.2, random_seed=1):
self.data_path = data_path
self.dataset = os.path.basename(os.path.normpath(self.data_path))
self.dataset_table_path = self.dataset + '.csv'
self.make_dataset_table()
self.train_val_split(val_size, random_seed=random_seed)
def make_dataset_table(self):
        print('creating dataset csv table...')
data = []
if self.dataset not in ['Endocrinology', 'BUSI', 'BPUI']:
print('The folder with the dataset must have one of these names: Endocrinology, BUSI, BPUI')
sys.exit()
if self.dataset == 'Endocrinology':
classes_dict = {}
for _, row in pd.read_csv(self.data_path + os.sep + 'TIRADS.txt').iterrows():
if classes_dict.get(row['Patient']) is None:
classes_dict[row['Patient']] = {row['file']: row['TIRADS']}
else:
classes_dict[row['Patient']].update({row['file']: row['TIRADS']})
for patient_path in [os.path.join(self.data_path, patient_name)
for patient_name in sorted(os.listdir(self.data_path))
if os.path.isdir(os.path.join(self.data_path, patient_name))]:
images = [os.path.splitext(image_id)[0]
for image_id in sorted(os.listdir(os.path.join(patient_path, 'Images')))]
for image_id in images:
image_path = os.path.join(patient_path, 'Images', '{}.tif'.format(image_id))
mask_path = os.path.join(patient_path, 'Masks', '{}.labels.tif'.format(image_id))
mask = Image.open(mask_path)
frames = [frame for frame in range(mask.n_frames)]
classes = []
for frame in frames:
mask.seek(frame)
if not np.array(mask).any():
classes.append(0)
else:
classes.append(classes_dict[os.path.split(patient_path)[-1]][image_id])
image_paths = [image_path for _ in range(len(frames))]
mask_paths = [mask_path for _ in range(len(frames))]
data.append(np.array([image_paths, mask_paths, frames, classes]).T)
pd.DataFrame(np.concatenate(data),
columns=['image', 'mask', 'frame', 'class']).to_csv(self.dataset_table_path, index=False)
elif self.dataset == 'BUSI':
classes = sorted([name for name in os.listdir(self.data_path)])
classes_dict = dict((class_type, index) for index, class_type in enumerate(classes))
for class_type in classes:
class_type_path = os.path.join(self.data_path, class_type)
image_paths = sorted([os.path.join(class_type_path, name) for name in os.listdir(class_type_path)
if 'mask' not in name])
for image_path in image_paths:
mask_path = ''.join([os.path.splitext(image_path)[0], '_mask.png'])
data.append(np.array([[image_path, mask_path, classes_dict[class_type]]]))
pd.DataFrame(np.concatenate(data),
columns=['image', 'mask', 'class']).to_csv(self.dataset_table_path, index=False)
elif self.dataset == 'BPUI':
image_names = sorted([name for name in os.listdir(self.data_path) if 'mask' not in name])
for image_name in image_names:
image_path = os.path.join(self.data_path, image_name)
mask_path = os.path.join(self.data_path, ''.join([os.path.splitext(image_name)[0], '_mask.tif']))
data.append(np.array([[image_path, mask_path]]))
pd.DataFrame(np.concatenate(data), columns=['image', 'mask']).to_csv(self.dataset_table_path, index=False)
def train_val_split(self, val_size=0.2, random_seed=1):
dataset_table =
|
pd.read_csv(self.dataset_table_path)
|
pandas.read_csv
|
import json
import os
import unittest, pandas as pd
from prima.configuration import Experiment
from prima.engine import ExecutionMode, ExecutionContext, Executor
from testing.models import param_sweep, policy_aggregation
exp = Experiment()
sys_model_A_id = "sys_model_A"
exp.append_model(
model_id=sys_model_A_id,
sim_configs=param_sweep.sim_config,
initial_state=param_sweep.genesis_states,
env_processes=param_sweep.env_process,
partial_state_update_blocks=param_sweep.partial_state_update_blocks,
)
sys_model_B_id = "sys_model_B"
exp.append_model(
model_id=sys_model_B_id,
sim_configs=param_sweep.sim_config,
initial_state=param_sweep.genesis_states,
env_processes=param_sweep.env_process,
partial_state_update_blocks=param_sweep.partial_state_update_blocks,
)
sys_model_C_id = "sys_model_C"
exp.append_model(
model_id=sys_model_C_id,
sim_configs=policy_aggregation.sim_config,
initial_state=policy_aggregation.genesis_states,
partial_state_update_blocks=policy_aggregation.partial_state_update_block,
policy_ops=[lambda a, b: a + b, lambda y: y * 2], # Default: lambda a, b: a + b
)
simulation = 3
model_A_sweeps = len(param_sweep.sim_config)
model_B_sweeps = len(param_sweep.sim_config)
model_C_sweeps = 1
# total_sweeps = model_A_sweeps + model_B_sweeps
model_A_runs = param_sweep.sim_config[0]["N"]
model_B_runs = param_sweep.sim_config[0]["N"]
model_C_runs = policy_aggregation.sim_config["N"]
# total_runs = model_A_runs + model_B_runs
model_A_timesteps = len(param_sweep.sim_config[0]["T"])
model_B_timesteps = len(param_sweep.sim_config[0]["T"])
model_C_timesteps = len(policy_aggregation.sim_config["T"])
model_A_substeps = len(param_sweep.partial_state_update_blocks)
model_B_substeps = len(param_sweep.partial_state_update_blocks)
model_C_substeps = len(policy_aggregation.partial_state_update_block)
# total_substeps = model_A_substeps + model_B_substeps
model_A_init_rows = model_A_runs * model_A_sweeps
model_B_init_rows = model_B_runs * model_B_sweeps
model_C_init_rows = model_C_runs * 1
model_A_rows = model_A_init_rows + (
model_A_sweeps * (model_A_runs * model_A_timesteps * model_A_substeps)
)
model_B_rows = model_B_init_rows + (
model_B_sweeps * (model_B_runs * model_B_timesteps * model_B_substeps)
)
model_C_rows = model_C_init_rows + (
model_C_sweeps * (model_C_runs * model_C_timesteps * model_C_substeps)
)
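# Editor's note (hypothetical numbers): with 2 sweeps, 2 runs, 10 timesteps and 3 substeps,
# a model contributes 2*2 = 4 initial rows plus 2*(2*10*3) = 120 simulated rows, i.e. 124 rows in total.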
exec_mode = ExecutionMode()
local_mode_ctx = ExecutionContext(context=exec_mode.local_mode)
simulation = Executor(exec_context=local_mode_ctx, configs=exp.configs)
raw_results, _, _ = simulation.execute()
results_df =
|
pd.DataFrame(raw_results)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import PyEMD
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['font.serif'] = ['SimHei']
mpl.rcParams['axes.unicode_minus'] = False
import matplotlib.pyplot as plt
def dec_emds(series, dec_type='EMD'):
'''
    Time-series signal decomposition based on EMD-type methods (uses the EMD-signal library).
    series: the time-series signal data, a pd.Series.
    dec_type: which decomposition algorithm to use, one of 'EMD', 'EEMD' or 'CEEMDAN'.
    Returns modes: a pd.DataFrame of the decomposed components, one component per column;
    the first column is the highest-frequency component, the second-to-last column the lowest-frequency component, and the last column the decomposition residual.
'''
if dec_type == 'EMD':
method = PyEMD.EMD()
elif dec_type == 'EEMD':
method = PyEMD.EEMD()
elif dec_type == 'CEEMDAN':
method = PyEMD.CEEMDAN()
else:
        raise ValueError("dec_type must be one of 'EMD', 'EEMD' or 'CEEMDAN'!")
modes = method(np.array(series))
modes = pd.DataFrame(modes.transpose())
cols = ['imf_' + str(k) for k in range(1, modes.shape[1]+1)]
modes.columns = cols
modes.set_index(series.index, inplace=True)
modes['dec_res'] = series-modes.transpose().sum()
return modes
def merge_high_modes(modes, high_num=3):
'''
    Merge the high-frequency components of a decomposed time series.
    modes: pd.DataFrame with the decomposition result; high-frequency components are in the leading columns, low-frequency ones in the trailing columns.
    high_num: the number of high-frequency components to merge.
    Returns the IMFs after merging the high-frequency components, with the residual column (dec_res) removed; one component per column, one sample per row.
'''
IMFs = modes.copy()
merge_col_name = 'IMF1_' + str(high_num)
IMFs[merge_col_name] = IMFs.iloc[:, 0:high_num].transpose().sum()
IMFs.drop(list(IMFs.columns[0: high_num]), axis=1, inplace=True)
IMFs.insert(0, merge_col_name, IMFs.pop(merge_col_name))
if 'dec_res' in IMFs.columns:
IMFs.drop('dec_res', axis=1, inplace=True)
return IMFs
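# Editor's sketch (synthetic series, not from the original project): decompose a noisy sine
# wave with dec_emds and then merge its three highest-frequency IMFs with merge_high_modes.
if __name__ == "__main__":
    idx = pd.date_range('2021-01-01', periods=500, freq='H')
    sig = pd.Series(np.sin(np.linspace(0, 20 * np.pi, 500)) + 0.2 * np.random.randn(500), index=idx)
    modes = dec_emds(sig, dec_type='EMD')
    imfs = merge_high_modes(modes, high_num=3)
    print(modes.columns.tolist(), imfs.columns.tolist())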
def plot_modes(modes, n_xticks=6, figsize=(10, 10)):
'''
    Plot the decomposed modes for visual inspection.
    modes: pd.DataFrame with the decomposition result, one sample per row and one component per column.
    n_xticks sets the number of tick marks shown on the x axis.
'''
Nplots = modes.shape[1]
Nsamp = modes.shape[0]
plt.figure(figsize=figsize)
for k in range(0, Nplots):
curr_plot = plt.subplot(Nplots, 1, k+1)
curr_plot.plot(np.arange(Nsamp), modes.iloc[:,k])
x_pos = np.linspace(0, Nsamp-1, n_xticks).astype(int)
plt.xticks(x_pos, list(modes.index[x_pos]), rotation=0)
plt.tight_layout()
plt.show()
def SSD(series, lag=10):
    '''Decompose a time series via singular value decomposition (singular spectrum analysis).'''
    # embedding: build the lag-by-K trajectory matrix
seriesLen = len(series)
K = seriesLen - lag + 1
X = np.zeros((lag, K))
for i in range(K):
X[:, i] = series[i: i+lag]
    # SVD; np.linalg.svd returns the singular values in sigma in descending order
U, sigma, VT = np.linalg.svd(X, full_matrices=False)
for i in range(VT.shape[0]):
VT[i, :] *= sigma[i]
A = VT
    # reconstruction (diagonal averaging of each component)
rec = np.zeros((lag, seriesLen))
for i in range(lag):
for j in range(lag-1):
for m in range(j+1):
rec[i, j] += A[i, j-m] * U[m, i]
rec[i, j] /= (j+1)
for j in range(lag-1, seriesLen - lag + 1):
for m in range(lag):
rec[i, j] += A[i, j-m] * U[m, i]
rec[i, j] /= lag
for j in range(seriesLen - lag + 1, seriesLen):
for m in range(j-seriesLen+lag, lag):
rec[i, j] += A[i, j - m] * U[m, i]
rec[i, j] /= (seriesLen - j)
rec =
|
pd.DataFrame(rec)
|
pandas.DataFrame
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/dev-01-retrieval.ipynb (unless otherwise specified).
__all__ = ['query_API', 'dict_col_2_cols', 'clean_nested_dict_cols', 'set_dt_idx', 'create_df_dt_rng', 'clean_df_dts',
'retrieve_stream_df', 'check_streams', 'retrieve_streams_df', 'parse_A44_response', 'retreive_DAM_prices',
'parse_A75_response', 'retrieve_production']
# Cell
import json
import numpy as np
import pandas as pd
import os
import requests
import xmltodict
from datetime import date
from warnings import warn
from itertools import product
from dotenv import load_dotenv
from entsoe import EntsoePandasClient, EntsoeRawClient
# Cell
def query_API(start_date:str, end_date:str, stream:str, time_group='30m'):
"""
'Query API' makes the call to Electric Insights and returns the JSON response
Parameters:
start_date: Start date for data given as a string in the form '%Y-%m-%d'
end_date: End date for data given as a string in the form '%Y-%m-%d'
        stream: One of 'prices_ahead', 'prices', 'temperatures', 'emissions' or 'generation-mix'
time_group: One of '30m', '1h', '1d' or '7d'. The default is '30m'
"""
# Checking stream is an EI endpoint
possible_streams = ['prices_ahead', 'prices', 'temperatures', 'emissions', 'generation-mix']
assert stream in possible_streams, f"Stream must be one of {''.join([stream+', ' for stream in possible_streams])[:-2]}"
# Checking time_group will be accepted by API
possible_time_groups = ['30m', '1h', '1d', '7d']
assert time_group in possible_time_groups, f"Time group must be one of {''.join([time_group+', ' for time_group in possible_time_groups])[:-2]}"
# Formatting dates
format_dt = lambda dt: date.strftime(dt, '%Y-%m-%d') if isinstance(dt, date) else dt
start_date = format_dt(start_date)
end_date = format_dt(end_date)
# Running query and parsing response
response = requests.get(f'http://drax-production.herokuapp.com/api/1/{stream}?date_from={start_date}&date_to={end_date}&group_by={time_group}')
r_json = response.json()
return r_json
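# Editor's sketch (hypothetical dates): pull a week of half-hourly generation-mix data.
# Guarded because it performs a live HTTP request against the Electric Insights endpoint.
if __name__ == "__main__":
    r_json = query_API('2020-01-01', '2020-01-07', 'generation-mix')
    print(type(r_json))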
# Cell
def dict_col_2_cols(df:pd.DataFrame, value_col='value'):
"""Checks the `value_col`, if it contains dictionaries these are transformed into new columns which then replace it"""
## Checks the value col is found in the dataframe
if value_col not in df.columns:
return df
if isinstance(df.loc[0, value_col], dict):
df_values = pd.DataFrame(df[value_col].to_dict()).T
df[df_values.columns] = df_values
df = df.drop(columns=[value_col])
return df
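# Editor's sketch (toy data): a dictionary-valued column is expanded into one column per key.
if __name__ == "__main__":
    df_demo = pd.DataFrame({'start': ['2020-01-01T00:00Z'],
                            'value': [{'gas': 10.2, 'wind': 4.1}]})
    print(dict_col_2_cols(df_demo))  # 'value' is replaced by 'gas' and 'wind' columns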
# Cell
def clean_nested_dict_cols(df):
"""Unpacks columns contining nested dictionaries"""
# Calculating columns that are still dictionaries
s_types = df.iloc[0].apply(lambda val: type(val))
cols_with_dicts = s_types[s_types == dict].index
while len(cols_with_dicts) > 0:
for col_with_dicts in cols_with_dicts:
# Extracting dataframes from dictionary columns
df = dict_col_2_cols(df, col_with_dicts)
# Recalculating columns that are still dictionaries
s_types = df.iloc[0].apply(lambda val: type(val))
cols_with_dicts = s_types[s_types == dict].index
return df
# Cell
def set_dt_idx(df:pd.DataFrame, idx_name='local_datetime'):
"""
Converts the start datetime to UK local time, then sets it as the index and removes the original datetime columns
"""
idx_dt = pd.DatetimeIndex(pd.to_datetime(df['start'], utc=True)).tz_convert('Europe/London')
idx_dt.name = idx_name
df.index = idx_dt
df = df.drop(columns=['start', 'end'])
return df
def create_df_dt_rng(start_date, end_date, freq='30T', tz='Europe/London', dt_str_template='%Y-%m-%d'):
"""
Creates a dataframe mapping between local datetimes and electricity market dates/settlement periods
"""
# Creating localised datetime index
s_dt_rng = pd.date_range(start_date, end_date, freq=freq, tz=tz)
s_dt_SP_count = pd.Series(0, index=s_dt_rng).resample('D').count()
# Creating SP column
SPs = []
for num_SPs in list(s_dt_SP_count):
SPs += list(range(1, num_SPs+1))
# Creating datetime dataframe
df_dt_rng = pd.DataFrame(index=s_dt_rng)
df_dt_rng.index.name = 'local_datetime'
# Adding query call cols
df_dt_rng['SP'] = SPs
df_dt_rng['date'] = df_dt_rng.index.strftime(dt_str_template)
return df_dt_rng
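# Editor's sketch: build the local-datetime to settlement-period mapping across the 2021
# spring clock change; the short day should end up with 46 rather than 48 settlement periods.
if __name__ == "__main__":
    df_sp = create_df_dt_rng('2021-03-27', '2021-03-29')
    print(df_sp.groupby('date')['SP'].max())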
def clean_df_dts(df):
"""Cleans the datetime index of the passed DataFrame"""
df = set_dt_idx(df)
df = df[~df.index.duplicated()]
df_dt_rng = create_df_dt_rng(df.index.min(), df.index.max())
df = df.reindex(df_dt_rng.index)
df['SP'] = df_dt_rng['SP'] # Adding settlement period designation
return df
# Cell
def retrieve_stream_df(start_date:str, end_date:str, stream:str, time_group='30m', renaming_dict={}):
"""
Makes the call to Electric Insights and parses the response into a dataframe which is returned
Parameters:
start_date: Start date for data given as a string in the form '%Y-%m-%d'
end_date: End date for data given as a string in the form '%Y-%m-%d'
        stream: One of 'prices_ahead', 'prices', 'temperatures', 'emissions' or 'generation-mix'
time_group: One of '30m', '1h', '1d' or '7d'. The default is '30m'
renaming_dict: Mapping from old to new column names
"""
# Calling data and parsing into dataframe
r_json = query_API(start_date, end_date, stream, time_group)
df = pd.DataFrame.from_dict(r_json)
    # Handling entries which are dictionaries
df = clean_nested_dict_cols(df)
# Setting index as localised datetime, reindexing with all intervals and adding SP
df = clean_df_dts(df)
# Renaming value col
if 'value' in df.columns:
df = df.rename(columns={'value':stream})
if 'referenceOnly' in df.columns:
df = df.drop(columns=['referenceOnly'])
df = df.rename(columns=renaming_dict)
return df
# Cell
def check_streams(streams='*'):
"""
    Checks that the given streams are a list containing only valid stream names, or the wildcard '*' meaning all streams.
"""
possible_streams = ['prices_ahead', 'prices', 'temperatures', 'emissions', 'generation-mix']
if isinstance(streams, list):
unrecognised_streams = list(set(streams) - set(possible_streams))
if len(unrecognised_streams) == 0:
return streams
else:
unrecognised_streams_2_print = ''.join(["'"+stream+"', " for stream in unrecognised_streams])[:-2]
raise ValueError(f"Streams {unrecognised_streams_2_print} could not be recognised, must be one of: {', '.join(possible_streams)}")
elif streams=='*':
return possible_streams
else:
raise ValueError(f"Streams could not be recognised, must be one of: {', '.join(possible_streams)}")
# Cell
def retrieve_streams_df(start_date:str, end_date:str, streams='*', time_group='30m', renaming_dict={}):
"""
Makes the calls to Electric Insights for the given streams and parses the responses into a dataframe which is returned
Parameters:
start_date: Start date for data given as a string in the form '%Y-%m-%d'
end_date: End date for data given as a string in the form '%Y-%m-%d'
        streams: A list containing any of 'prices_ahead', 'prices', 'temperatures', 'emissions' or 'generation-mix', or '*' for all streams
time_group: One of '30m', '1h', '1d' or '7d'. The default is '30m'
"""
df = pd.DataFrame()
streams = check_streams(streams)
for stream in streams:
df_stream = retrieve_stream_df(start_date, end_date, stream, renaming_dict=renaming_dict)
df[df_stream.columns] = df_stream
return df
# Cell
def parse_A44_response(r, freq='H', tz='UTC'):
"""Extracts the price time-series"""
s_price = pd.Series(dtype=float)
parsed_r = xmltodict.parse(r.text)
for timeseries in parsed_r['Publication_MarketDocument']['TimeSeries']:
dt_rng = pd.date_range(timeseries['Period']['timeInterval']['start'], timeseries['Period']['timeInterval']['end'], freq=freq, tz=tz)[:-1]
s_dt_price = pd.DataFrame(timeseries['Period']['Point'])['price.amount'].astype(float)
s_dt_price.index = dt_rng
s_price = s_price.append(s_dt_price)
assert s_price.index.duplicated().sum() == 0, 'There are duplicate date indexes'
return s_price
# Cell
def retreive_DAM_prices(dt_pairs, domain='10Y1001A1001A63L'):
"""Retrieves and collates the day-ahead prices for the specified date ranges"""
params = {
'documentType': 'A44',
'in_Domain': domain,
'out_Domain': domain
}
s_price = pd.Series(dtype=float)
for dt_pair in track(dt_pairs):
start = pd.Timestamp(dt_pair[0], tz='UTC')
end = pd.Timestamp(dt_pair[1], tz='UTC')
try:
r = client._base_request(params=params, start=start, end=end)
s_price_dt_rng = parse_A44_response(r)
s_price = s_price.append(s_price_dt_rng)
except:
warn(f"{start.strftime('%Y-%m-%d')} - {end.strftime('%Y-%m-%d')} failed")
return s_price
# Cell
def parse_A75_response(r, freq='15T', tz='UTC', warn_on_failure=False):
"""Extracts the production data by fuel-type from the JSON response"""
psr_code_to_type = {
'A03': 'Mixed',
'A04': 'Generation',
'A05': 'Load',
'B01': 'Biomass',
'B02': 'Fossil Brown coal/Lignite',
'B03': 'Fossil Coal-derived gas',
'B04': 'Fossil Gas',
'B05': 'Fossil Hard coal',
'B06': 'Fossil Oil',
'B07': 'Fossil Oil shale',
'B08': 'Fossil Peat',
'B09': 'Geothermal',
'B10': 'Hydro Pumped Storage',
'B11': 'Hydro Run-of-river and poundage',
'B12': 'Hydro Water Reservoir',
'B13': 'Marine',
'B14': 'Nuclear',
'B15': 'Other renewable',
'B16': 'Solar',
'B17': 'Waste',
'B18': 'Wind Offshore',
'B19': 'Wind Onshore',
'B20': 'Other',
'B21': 'AC Link',
'B22': 'DC Link',
'B23': 'Substation',
'B24': 'Transformer'
}
parsed_r = xmltodict.parse(r.text)
columns = [f'B{str(fuel_idx).zfill(2)}' for fuel_idx in np.arange(1, 24)]
index = pd.date_range(
parsed_r['GL_MarketDocument']['time_Period.timeInterval']['start'],
parsed_r['GL_MarketDocument']['time_Period.timeInterval']['end'],
freq=freq, tz=tz)[:-1]
df_production = pd.DataFrame(dtype=float, columns=columns, index=index)
for timeseries in parsed_r['GL_MarketDocument']['TimeSeries']:
try:
psr_type = timeseries['MktPSRType']['psrType']
dt_rng = pd.date_range(timeseries['Period']['timeInterval']['start'], timeseries['Period']['timeInterval']['end'], freq=freq, tz=tz)[:-1]
s_psr_type = pd.DataFrame(timeseries['Period']['Point'])['quantity'].astype(float)
s_psr_type.index = dt_rng
df_production[psr_type] = s_psr_type
except:
if warn_on_failure == True:
warn(f"{timeseries['Period']['timeInterval']['start']}-{timeseries['Period']['timeInterval']['start']} failed for {psr_type}")
assert df_production.index.duplicated().sum() == 0, 'There are duplicate date indexes'
df_production = df_production.dropna(how='all').dropna(how='all', axis=1)
df_production = df_production.rename(columns=psr_code_to_type)
return df_production
def retrieve_production(dt_pairs, domain='10Y1001A1001A63L', warn_on_failure=False):
"""Retrieves and collates the production data for the specified date ranges"""
params = {
'documentType': 'A75',
'processType': 'A16',
'in_Domain': domain
}
df_production =
|
pd.DataFrame(dtype=float)
|
pandas.DataFrame
|
from __future__ import division
import pytest
import numpy as np
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat)
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
closed='right')
assert expected.equals(actual)
actual = Index([Interval(0, 1), Interval(1, 2)])
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
actual = Index(expected)
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
def test_constructors_other(self):
# all-nan
result = IntervalIndex.from_intervals([np.nan])
expected = np.array([np.nan], dtype=object)
|
tm.assert_numpy_array_equal(result.values, expected)
|
pandas.util.testing.assert_numpy_array_equal
|
"""
Get Retailer Statistics for Illinois
Cannabis Data Science Meetup Group
Saturday Morning Statistics
Copyright (c) 2021 Cannlytics
Authors: <NAME> <<EMAIL>>
Created: 11/17/2021
Updated: 11/27/2021
License: MIT License <https://opensource.org/licenses/MIT>
Data Sources:
- Licensed Adult Use Cannabis Dispensaries
<https://www.idfpr.com/LicenseLookup/AdultUseDispensaries.pdf>
- Illinois adult use cannabis monthly sales figures
<https://www.idfpr.com/Forms/AUC/2021%2011%2002%20IDFPR%20monthly%20adult%20use%20cannabis%20sales.pdf>
Resources:
- Fed Fred API Keys
<https://fred.stlouisfed.org/docs/api/api_key.html>
Objective:
Retrieve Illinois cannabis data, locked in public PDFs,
to save the data and calculate interesting statistics,
such as retailers per 100,000 people and sales per retailer.
You will need a Fed Fred API Key saved in a .env file
as a FRED_API_KEY variable. `data` and `figure` folders
are also expected.
You will also need to install various Python dependencies,
including fredapi and pdfplumber.
`pip install fredapi pdfplumber`
"""
# Standard imports.
from datetime import datetime
# External imports.
from dotenv import dotenv_values
from fredapi import Fred
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import pandas as pd
import pdfplumber
import requests
import statsmodels.api as sm
from statsmodels.graphics.regressionplots import abline_plot
# Internal imports.
from utils import (
end_of_period_timeseries,
format_thousands,
)
#-----------------------------------------------------------------------------
# Download and parse the retailer licensee data.
#-----------------------------------------------------------------------------
# Download the licensees PDF.
licensees_url = 'https://www.idfpr.com/LicenseLookup/AdultUseDispensaries.pdf'
filename = './data/illinois_retailers.pdf'
response = requests.get(licensees_url)
with open(filename, 'wb') as f:
f.write(response.content)
# Read the licensees PDF.
pdf = pdfplumber.open(filename)
# Get all of the table data.
table_data = []
for page in pdf.pages:
table = page.extract_table()
table_data += table
# Remove the header.
table_data = table_data[1:]
# Create a DataFrame from the table data.
licensee_columns = [
'organization',
'trade_name',
'address',
'medical',
'license_issue_date',
'license_number',
]
licensees = pd.DataFrame(table_data, columns=licensee_columns)
# Clean the organization names.
licensees['organization'] = licensees['organization'].str.replace('\n', '')
# Separate address into 'street', 'city', 'state', 'zip_code', 'phone_number'.
# FIXME: This could probably be done more elegantly and it's not perfect.
streets, cities, states, zip_codes, phone_numbers = [], [], [], [], []
for index, row in licensees.iterrows():
parts = row.address.split(' \n')
streets.append(parts[0])
phone_numbers.append(parts[-1])
locales = parts[1]
city_locales = locales.split(', ')
state_locales = city_locales[-1].split(' ')
cities.append(city_locales[0])
states.append(state_locales[0])
zip_codes.append(state_locales[-1])
licensees['street'] = pd.Series(streets)
licensees['city'] = pd.Series(cities)
licensees['state'] = pd.Series(states)
licensees['zip_code'] = pd.Series(zip_codes)
licensees['phone_number'] =
|
pd.Series(phone_numbers)
|
pandas.Series
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 23:24:11 2021
@author: rayin
"""
import os, sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
import re
import random
from collections import Counter
from pprint import pprint
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work")
case_gene_update = pd.read_csv("data/processed/variant_clean.csv", index_col=0)
aa_variant = list(case_gene_update['\\12_Candidate variants\\09 Protein\\'])
#pd.DataFrame(aa_variant).to_csv('aa_variant.csv')
#aa_variant_update = pd.read_csv("data/processed/aa_variant_update.csv", index_col=0)
#aa_variant_update = list(aa_variant_update['\\12_Candidate variants\\09 Protein\\'])
amino_acid = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K', 'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W', 'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M', 'TER': 'X'}
aa_3 = []
aa_1 = []
for i in amino_acid.keys():
aa_3.append(i)
aa_1.append(amino_acid[i])
for i in range(0, len(aa_variant)):
for j in range(len(aa_3)):
if isinstance(aa_variant[i], float):
break
aa_variant[i] = str(aa_variant[i].upper())
if aa_3[j] in aa_variant[i]:
aa_variant[i] = aa_variant[i].replace(aa_3[j], aa_1[j])
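# Editor's sketch (hypothetical variant): the same three-letter to one-letter substitution
# applied to a single HGVS-style protein notation, showing what the loop above produces.
example_variant = 'p.Arg117His'.upper()
for aa3, aa1 in amino_acid.items():
    example_variant = example_variant.replace(aa3, aa1)
print(example_variant)  # P.R117H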
#extracting aa properties from aaindex
#https://www.genome.jp/aaindex/
aa = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
#RADA880108
polarity = [-0.06, -0.84, -0.48, -0.80, 1.36, -0.73, -0.77, -0.41, 0.49, 1.31, 1.21, -1.18, 1.27, 1.27, 0.0, -0.50, -0.27, 0.88, 0.33, 1.09]
aa_polarity = pd.concat([pd.Series(aa), pd.Series(polarity)], axis=1)
aa_polarity = aa_polarity.rename(columns={0:'amino_acid', 1: 'polarity_value'})
#KLEP840101
net_charge = [0, 1, 0, -1, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
aa_net_charge = pd.concat([pd.Series(aa), pd.Series(net_charge)], axis=1)
aa_net_charge = aa_net_charge.rename(columns={0:'amino_acid', 1: 'net_charge_value'})
#CIDH920103
hydrophobicity = [0.36, -0.52, -0.90, -1.09, 0.70, -1.05, -0.83, -0.82, 0.16, 2.17, 1.18, -0.56, 1.21, 1.01, -0.06, -0.60, -1.20, 1.31, 1.05, 1.21]
aa_hydrophobicity = pd.concat([pd.Series(aa), pd.Series(hydrophobicity)], axis=1)
aa_hydrophobicity = aa_hydrophobicity.rename(columns={0:'amino_acid', 1: 'hydrophobicity_value'})
#FAUJ880103 -- Normalized van der Waals volume
normalized_vdw = [1.00, 6.13, 2.95, 2.78, 2.43, 3.95, 3.78, 0.00, 4.66, 4.00, 4.00, 4.77, 4.43, 5.89, 2.72, 1.60, 2.60, 8.08, 6.47, 3.00]
aa_normalized_vdw = pd.concat([pd.Series(aa), pd.Series(normalized_vdw)], axis=1)
aa_normalized_vdw = aa_normalized_vdw.rename(columns={0:'amino_acid', 1: 'normalized_vdw_value'})
#CHAM820101
polarizability = [0.046, 0.291, 0.134, 0.105, 0.128, 0.180, 0.151, 0.000, 0.230, 0.186, 0.186, 0.219, 0.221, 0.290, 0.131, 0.062, 0.108, 0.409, 0.298, 0.140]
aa_polarizability = pd.concat([pd.Series(aa), pd.Series(polarizability)], axis=1)
aa_polarizability = aa_polarizability.rename(columns={0:'amino_acid', 1: 'polarizability_value'})
#JOND750102
pK_COOH = [2.34, 1.18, 2.02, 2.01, 1.65, 2.17, 2.19, 2.34, 1.82, 2.36, 2.36, 2.18, 2.28, 1.83, 1.99, 2.21, 2.10, 2.38, 2.20, 2.32]
aa_pK_COOH = pd.concat([pd.Series(aa), pd.Series(pK_COOH)], axis=1)
aa_pK_COOH = aa_pK_COOH.rename(columns={0:'amino_acid', 1: 'pK_COOH_value'})
#FASG760104
pK_NH2 = [9.69, 8.99, 8.80, 9.60, 8.35, 9.13, 9.67, 9.78, 9.17, 9.68, 9.60, 9.18, 9.21, 9.18, 10.64, 9.21, 9.10, 9.44, 9.11, 9.62]
aa_pK_NH2 = pd.concat([pd.Series(aa), pd.Series(pK_NH2)], axis=1)
aa_pK_NH2 = aa_pK_NH2.rename(columns={0:'amino_acid', 1: 'pK_NH2_value'})
#ROBB790101 Hydration free energy
hydration = [-1.0, 0.3, -0.7, -1.2, 2.1, -0.1, -0.7, 0.3, 1.1, 4.0, 2.0, -0.9, 1.8, 2.8, 0.4, -1.2, -0.5, 3.0, 2.1, 1.4]
aa_hydration = pd.concat([pd.Series(aa), pd.Series(hydration)], axis=1)
aa_hydration = aa_hydration.rename(columns={0:'amino_acid', 1: 'hydration_value'})
#FASG760101
molecular_weight = [89.09, 174.20, 132.12, 133.10, 121.15, 146.15, 147.13, 75.07, 155.16, 131.17, 131.17, 146.19, 149.21, 165.19,
115.13, 105.09, 119.12, 204.24, 181.19, 117.15]
aa_molecular_weight = pd.concat([pd.Series(aa), pd.Series(molecular_weight)], axis=1)
aa_molecular_weight = aa_molecular_weight.rename(columns={0:'amino_acid', 1: 'molecular_weight_value'})
#FASG760103
optical_rotation = [1.80, 12.50, -5.60, 5.05, -16.50, 6.30, 12.00, 0.00, -38.50, 12.40, -11.00, 14.60, -10.00, -34.50, -86.20,
-7.50, -28.00, -33.70, -10.00, 5.63]
aa_optical_rotation = pd.concat([pd.Series(aa), pd.Series(optical_rotation)], axis=1)
aa_optical_rotation = aa_optical_rotation.rename(columns={0:'amino_acid', 1: 'optical_rotation_value'})
#secondary structure #LEVJ860101
#https://pybiomed.readthedocs.io/en/latest/_modules/CTD.html#CalculateCompositionSolventAccessibility
#SecondaryStr = {'1': 'EALMQKRH', '2': 'VIYCWFT', '3': 'GNPSD'}
# '1'stand for Helix; '2'stand for Strand, '3' stand for coil
secondary_structure = [1, 1, 3, 3, 2, 1, 1, 3, 1, 2, 1, 1, 1, 2, 3, 3, 2, 2, 2, 2]
aa_secondary_structure = pd.concat([pd.Series(aa), pd.Series(secondary_structure)], axis=1)
aa_secondary_structure = aa_secondary_structure.rename(columns={0:'amino_acid', 1: 'secondary_structure_value'})
#_SolventAccessibility = {'-1': 'ALFCGIVW', '1': 'RKQEND', '0': 'MPSTHY'}
# '-1'stand for Buried; '1'stand for Exposed, '0' stand for Intermediate
solvent_accessibility = [-1, 1, 1, 1, -1, 1, 1, -1, 0, -1, -1, 1, 0, -1, 0, 0, 0, -1, 0, -1]
aa_solvent_accessibility = pd.concat([pd.Series(aa), pd.Series(solvent_accessibility)], axis=1)
aa_solvent_accessibility = aa_solvent_accessibility.rename(columns={0:'amino_acid', 1: 'solvent_accessibility_value'})
############################################################################################################################################
#CHAM820102 Free energy of solution in water
free_energy_solution = [-0.368, -1.03, 0.0, 2.06, 4.53, 0.731, 1.77, -0.525, 0.0, 0.791, 1.07, 0.0, 0.656, 1.06, -2.24, -0.524, 0.0, 1.60, 4.91, 0.401]
aa_free_energy_solution = pd.concat([pd.Series(aa), pd.Series(free_energy_solution)], axis=1)
aa_free_energy_solution = aa_free_energy_solution.rename(columns={0:'amino_acid', 1: 'free_energy_solution_value'})
#FAUJ880109 Number of hydrogen bond donors
number_of_hydrogen_bond = [0, 4, 2, 1, 0, 2, 1, 0, 1, 0, 0, 2, 0, 0, 0, 1, 1, 1, 1, 0]
aa_number_of_hydrogen_bond = pd.concat([pd.Series(aa), pd.Series(number_of_hydrogen_bond)], axis=1)
aa_number_of_hydrogen_bond = aa_number_of_hydrogen_bond.rename(columns={0:'amino_acid', 1: 'number_of_hydrogen_bond_value'})
#PONJ960101 Average volumes of residues
volumes_of_residues = [91.5, 196.1, 138.3, 135.2, 114.4, 156.4, 154.6, 67.5, 163.2, 162.6, 163.4, 162.5, 165.9, 198.8, 123.4, 102.0, 126.0, 209.8, 237.2, 138.4]
aa_volumes_of_residues = pd.concat([pd.Series(aa), pd.Series(volumes_of_residues)], axis=1)
aa_volumes_of_residues = aa_volumes_of_residues.rename(columns={0:'amino_acid', 1: 'volumes_of_residues_value'})
#JANJ790102
transfer_free_energy = [0.3, -1.4, -0.5, -0.6, 0.9, -0.7, -0.7, 0.3, -0.1, 0.7, 0.5, -1.8, 0.4, 0.5, -0.3, -0.1, -0.2, 0.3, -0.4, 0.6]
aa_transfer_free_energy = pd.concat([pd.Series(aa), pd.Series(transfer_free_energy)], axis=1)
aa_transfer_free_energy = aa_transfer_free_energy.rename(columns={0:'amino_acid', 1: 'transfer_free_energy_value'})
#WARP780101 amino acid side-chain interactions in 21 proteins
side_chain_interaction = [10.04, 6.18, 5.63, 5.76, 8.89, 5.41, 5.37, 7.99, 7.49, 8.7, 8.79, 4.40, 9.15, 7.98, 7.79, 7.08, 7.00, 8.07, 6.90, 8.88]
aa_side_chain_interaction = pd.concat([pd.Series(aa),
|
pd.Series(side_chain_interaction)
|
pandas.Series
|
import pandas as pd
#import geopandas as gpd
import numpy as np
import os
#from sqlalchemy import create_engine
from scipy import stats
from sklearn.preprocessing import MinMaxScaler
import math
#from shapely import wkt
from datetime import datetime, timedelta, date
import time
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
import requests
from pyspark.sql import SparkSession
from pyspark.sql.functions import substring, length, col, expr
from pyspark.sql.types import *
import matplotlib.pyplot as plt
#import contextily as cx --> gives error?
spark = SparkSession \
.builder \
.getOrCreate()
def get_minio_herkomst_2020():
bucket = "gvb-gvb"
data_key = "*/*/*/Datalab_Reis_Herkomst_Uur_*.csv"
data_location = bucket + "/" + data_key
schema_herkomst = StructType([StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van vertrek)", StringType(), True),
StructField("VertrekHalteCode", StringType(), True),
StructField("VertrekHalteNaam", StringType(), True),
StructField("HerkomstLat", StringType(), True),
StructField("HerkomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_herkomst = ["Datum","UurgroepOmschrijving (van vertrek)","VertrekHalteCode","VertrekHalteNaam","AantalReizen"]
gvb_herkomst_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_herkomst, sep = ";").select(*cols_herkomst)
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.distinct()
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.toPandas()
return gvb_herkomst_raw_csv
def get_minio_bestemming_2020 ():
bucket = "gvb-gvb"
data_key = "topics/gvb/*/*/*/Datalab_Reis_Bestemming_Uur_*.csv"
data_location = f"s3a://{bucket}/{data_key}"
schema_bestemming = StructType(
[StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van aankomst)", StringType(), True),
StructField("AankomstHalteCode", StringType(), True),
StructField("AankomstHalteNaam", StringType(), True),
StructField("AankomstLat", StringType(), True),
StructField("AankomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_bestemming = ["Datum","UurgroepOmschrijving (van aankomst)","AankomstHalteCode","AankomstHalteNaam","AantalReizen"]
gvb_bestemming_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_bestemming, sep = ";").select(*cols_bestemming)
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.distinct()
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.toPandas()
return gvb_bestemming_raw_csv
def get_minio_herkomst_2021 ():
bucket = "gvb-gvb"
data_key = "topics/gvb/2021/*/*/Datalab_Reis_Herkomst_Uur_2021*.csv"
data_location = f"s3a://{bucket}/{data_key}"
schema_herkomst = StructType([StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van vertrek)", StringType(), True),
StructField("VertrekHalteCode", StringType(), True),
StructField("VertrekHalteNaam", StringType(), True),
StructField("HerkomstLat", StringType(), True),
StructField("HerkomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_herkomst = ["Datum","UurgroepOmschrijving (van vertrek)","VertrekHalteCode","VertrekHalteNaam","AantalReizen"]
gvb_herkomst_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_herkomst, sep =";").select(*cols_herkomst)
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.distinct()
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.toPandas()
return gvb_herkomst_raw_csv
def get_minio_bestemming_2021 ():
bucket = "gvb-gvb"
data_key = "topics/gvb/2021/*/*/Datalab_Reis_Bestemming_Uur_2021*.csv"
data_location = f"s3a://{bucket}/{data_key}"
schema_bestemming = StructType(
[StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van aankomst)", StringType(), True),
StructField("AankomstHalteCode", StringType(), True),
StructField("AankomstHalteNaam", StringType(), True),
StructField("AankomstLat", StringType(), True),
StructField("AankomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_bestemming = ["Datum","UurgroepOmschrijving (van aankomst)","AankomstHalteCode","AankomstHalteNaam","AantalReizen"]
gvb_bestemming_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_bestemming, sep = ";").select(*cols_bestemming)
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.distinct()
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.toPandas()
return gvb_bestemming_raw_csv
def read_csv_dir(dir):
read_csv_beta = pd.read_csv(dir,sep=';')
return read_csv_beta
def get_knmi_obs():
knmi_obs_schema = StructType([StructField("DD", StringType(), True),
StructField("DR", StringType(), True),
StructField("FF", StringType(), True),
StructField("FH", StringType(), True),
StructField("FX", StringType(), True),
StructField("IX", StringType(), True),
StructField("M", IntegerType(), True),
StructField("N", IntegerType(), True),
StructField("O", IntegerType(), True),
StructField("P", IntegerType(), True),
StructField("Q", IntegerType(), True),
StructField("R", IntegerType(), True),
StructField("RH", IntegerType(), True),
StructField("S", IntegerType(), True),
StructField("SQ", IntegerType(), True),
StructField("T", IntegerType(), True),
StructField("T10N", IntegerType(), True),
StructField("TD", IntegerType(), True),
StructField("U", IntegerType(), True),
StructField("VV", IntegerType(), True),
StructField("WW", IntegerType(), True),
StructField("Y", IntegerType(), True),
StructField("date", StringType(), True),
StructField("hour", IntegerType(), True),
StructField("station_code", IntegerType(), True)
])
knmi_obs = spark.read.format("json").option("header", "true").load("s3a://knmi-knmi/topics/knmi-observations/2021/*/*/*", schema=knmi_obs_schema)
return knmi_obs
def get_knmi_preds():
knmi_pred_schema = StructType([StructField("cape", IntegerType(), True),
StructField("cond", StringType(), True),
StructField("gr", StringType(), True),
StructField("gr_w", StringType(), True),
StructField("gust", StringType(), True),
StructField("gustb", StringType(), True),
StructField("gustkmh", StringType(), True),
StructField("gustkt", StringType(), True),
StructField("hw", StringType(), True),
StructField("ico", StringType(), True),
StructField("icoon", StringType(), True),
StructField("loc", StringType(), True),
StructField("luchtd", StringType(), True),
StructField("luchtdinhg", StringType(), True),
StructField("luchtdmmhg", StringType(), True),
StructField("lw", StringType(), True),
StructField("mw", StringType(), True),
StructField("neersl", StringType(), True),
StructField("offset", StringType(), True),
StructField("rv", StringType(), True),
StructField("samenv", IntegerType(), True),
StructField("temp", StringType(), True),
StructField("tijd", StringType(), True),
StructField("tijd_nl", StringType(), True),
StructField("tw", StringType(), True),
StructField("vis", StringType(), True),
StructField("windb", StringType(), True),
StructField("windkmh", StringType(), True),
StructField("windknp", StringType(), True),
StructField("windr", StringType(), True),
StructField("windrltr", StringType(), True),
StructField("winds", StringType(), True)
])
knmi_pred_cols = ('cape', 'cond', 'gr', 'gr_w', 'gust', 'gustb', 'gustkmh', 'gustkt',
'hw', 'ico', 'icoon', 'loc', 'luchtd', 'luchtdinhg', 'luchtdmmhg', 'lw',
'mw', 'neersl', 'offset', 'rv', 'samenv', 'temp', 'tijd', 'tijd_nl',
'tw', 'vis', 'windb', 'windkmh', 'windknp', 'windr', 'windrltr',
'winds')
knmi_pred = spark.read.format("json").option("header", "true").load("s3a://knmi-knmi/topics/knmi/2021/*/*/*.json.gz", schema=knmi_pred_schema).select(*knmi_pred_cols)
return knmi_pred
def get_prediction_df():
"""
Return the prediction dataframe (date- and hours only)
"""
this_year = date.today().isocalendar()[0]
this_week = date.today().isocalendar()[1]
firstdayofweek = datetime.strptime(f'{this_year}-W{int(this_week )}-1', "%Y-W%W-%w").date()
    prediction_date_range = pd.date_range(firstdayofweek, periods=8, freq='D')
prediction_date_range_hour = pd.date_range(prediction_date_range.min(), prediction_date_range.max(), freq='h').delete(-1)
return prediction_date_range_hour
def get_vacations():
"""
Retrieves vacations in the Netherlands from the Government of the Netherlands (Rijksoverheid) and returns
the list of dates that are vacation dates
"""
vacations_url = 'https://opendata.rijksoverheid.nl/v1/sources/rijksoverheid/infotypes/schoolholidays?output=json'
vacations_raw = requests.get(url = vacations_url).json()
df_vacations = pd.DataFrame(columns={'vacation', 'region', 'startdate', 'enddate'})
for x in range(0, len(vacations_raw)): # Iterate through all vacation years
for y in range(0, len(vacations_raw[0]['content'][0]['vacations'])): # number of vacations in a year
dates = pd.DataFrame(vacations_raw[x]['content'][0]['vacations'][y]['regions'])
dates['vacation'] = vacations_raw[x]['content'][0]['vacations'][y]['type'].strip() # vacation name
dates['school_year'] = vacations_raw[x]['content'][0]['schoolyear'].strip() # school year
df_vacations = df_vacations.append(dates)
filtered = df_vacations[(df_vacations['region']=='noord') | (df_vacations['region']=='heel Nederland')]
vacations_date_only = pd.DataFrame(columns={'date'})
for x in range(0, len(filtered)):
df_temporary = pd.DataFrame(data = {'date':pd.date_range(filtered.iloc[x]['startdate'], filtered.iloc[x]['enddate'], freq='D') + pd.Timedelta(days=1)})
vacations_date_only = vacations_date_only.append(df_temporary)
vacations_date_only['date'] = vacations_date_only['date'].apply(lambda x: x.date)
vacations_date_only['date'] = vacations_date_only['date'].astype('datetime64[ns]')
# Since the data from Rijksoverheid starts from school year 2019-2020, add the rest of 2019 vacations manually!
kerst_18 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 1, 1), periods = 6, freq='1d')})
voorjaar_19 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 2, 16), periods = 9, freq='1d')})
mei_19 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 4, 27), periods = 9, freq='1d')})
zomer_19 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 7, 13), periods = 7*6 + 2, freq='1d')})
vacations_date_only = vacations_date_only.append([kerst_18, voorjaar_19, mei_19, zomer_19])
return vacations_date_only
def get_events():
"""
Event data from static file. We can store events in the database in the near future. When possible, we can get it from an API.
"""
events = pd.read_excel('events_zuidoost.xlsx', sheet_name='Resultaat', header=1)
# Clean
events.dropna(how='all', inplace=True)
events.drop(events.loc[events['Datum']=='Niet bijzonder evenementen zijn hierboven niet meegenomen.'].index, inplace=True)
events.drop(events.loc[events['Locatie'].isna()].index, inplace=True)
events.drop(events.loc[events['Locatie']=='Overig'].index, inplace=True)
events['Datum'] = events['Datum'].astype('datetime64[ns]')
# Fix location names
events['Locatie'] = events['Locatie'].apply(lambda x: x.strip()) # Remove spaces
events['Locatie'] = np.where(events['Locatie'] == 'Ziggo dome', 'Ziggo Dome', events['Locatie'])
events['Locatie'] = np.where(events['Locatie'] == 'Ziggo Dome (2x)', 'Ziggo Dome', events['Locatie'])
# Get events from 2019 from static file
events = events[events['Datum'].dt.year>=2019].copy()
events.reset_index(inplace=True)
events.drop(columns=['index'], inplace=True)
# Add 2020-present events manually
events = events.append({'Datum':datetime(2020, 1, 19)}, ignore_index=True) # Ajax - Sparta
events = events.append({'Datum':datetime(2020, 2, 2)}, ignore_index=True) # Ajax - PSV
events = events.append({'Datum':datetime(2020, 2, 16)}, ignore_index=True) # Ajax - RKC
events = events.append({'Datum':datetime(2020, 1, 3)}, ignore_index=True) # Ajax - AZ
# Euro 2021
events = events.append({'Datum':datetime(2021, 6, 13)}, ignore_index=True) # EURO 2020 Nederland- Oekraïne
events = events.append({'Datum':datetime(2021, 6, 17)}, ignore_index=True) # EURO 2020 Nederland- Oostenrijk
events = events.append({'Datum':datetime(2021, 6, 21)}, ignore_index=True) # EURO 2020 Noord-Macedonië - Nederland
events = events.append({'Datum':datetime(2021, 6, 26)}, ignore_index=True) # EURO 2020 Wales - Denemarken
return events
def merge_csv_json(bestemming_csv, herkomst_csv, bestemming_json, herkomst_json):
bestemming = pd.concat([bestemming_csv, bestemming_json]).copy()
herkomst = pd.concat([herkomst_csv, herkomst_json]).copy()
return [bestemming, herkomst]
def merge_bestemming_herkomst(bestemming, herkomst):
bestemming.rename(columns={'AantalReizen':'Uitchecks',
'UurgroepOmschrijving (van aankomst)':'UurgroepOmschrijving',
'AankomstHalteCode':'HalteCode',
'AankomstHalteNaam':'HalteNaam'}, inplace=True)
herkomst.rename(columns={'AantalReizen':'Inchecks',
'UurgroepOmschrijving (van vertrek)':'UurgroepOmschrijving',
'VertrekHalteCode':'HalteCode',
'VertrekHalteNaam':'HalteNaam'}, inplace=True)
merged = pd.merge(left=bestemming, right=herkomst,
left_on=['Datum', 'UurgroepOmschrijving', 'HalteNaam'],
right_on=['Datum', 'UurgroepOmschrijving', 'HalteNaam'],
how='outer')
return merged
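# Editor's sketch (toy frames, hypothetical stop name): check-outs (bestemming) and
# check-ins (herkomst) for the same stop and hour are merged into a single row.
if __name__ == "__main__":
    best = pd.DataFrame({'Datum': ['2021-06-01'],
                         'UurgroepOmschrijving (van aankomst)': ['07:00 - 08:00'],
                         'AankomstHalteCode': ['X1'],
                         'AankomstHalteNaam': ['Centraal Station'],
                         'AantalReizen': [410]})
    herk = pd.DataFrame({'Datum': ['2021-06-01'],
                         'UurgroepOmschrijving (van vertrek)': ['07:00 - 08:00'],
                         'VertrekHalteCode': ['X1'],
                         'VertrekHalteNaam': ['Centraal Station'],
                         'AantalReizen': [380]})
    print(merge_bestemming_herkomst(best, herk))  # one row with both Uitchecks and Inchecks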
def preprocess_gvb_data_for_modelling(gvb_df, station):
df = gvb_df[gvb_df['HalteNaam']==station].copy()
# create datetime column
df['datetime'] = df['Datum'].astype('datetime64[ns]')
df['UurgroepOmschrijving'] = df['UurgroepOmschrijving'].astype(str)
df['hour'] = df['UurgroepOmschrijving'].apply(lambda x: int(x[:2]))
# add time indications
df['week'] = df['datetime'].dt.isocalendar().week
df['month'] = df['datetime'].dt.month
df['year'] = df['datetime'].dt.year
df['weekday'] = df['datetime'].dt.weekday
hours = pd.get_dummies(df['hour'], prefix='hour')
days = pd.get_dummies(df['weekday'], prefix='weekday')
df = pd.concat([df, hours, days], axis=1)
# drop duplicates and sort
df_ok = df.drop_duplicates()
# sort values and reset index
df_ok = df_ok.sort_values(by = 'datetime')
df_ok = df_ok.reset_index(drop = True)
# drop unnecessary columns
df_ok.drop(columns=['Datum', 'UurgroepOmschrijving', 'HalteNaam'], inplace=True)
# rename columns
df_ok.rename(columns={'Inchecks':'check-ins', 'Uitchecks':'check-outs'}, inplace=True)
return df_ok
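# Editor's sketch (toy data, hypothetical stop name): raw GVB rows are turned into a
# modelling frame with datetime features, hour/weekday dummies and check-ins / check-outs.
if __name__ == "__main__":
    demo = pd.DataFrame({'Datum': ['2021-06-01', '2021-06-01'],
                         'UurgroepOmschrijving': ['07:00 - 08:00', '08:00 - 09:00'],
                         'HalteNaam': ['Bijlmer ArenA', 'Bijlmer ArenA'],
                         'Inchecks': [120, 340],
                         'Uitchecks': [90, 410]})
    out = preprocess_gvb_data_for_modelling(demo, 'Bijlmer ArenA')
    print(out[['datetime', 'hour', 'weekday', 'check-ins', 'check-outs']])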
def preprocess_knmi_data_hour(df_raw):
"""
Prepare the raw knmi data for modelling.
    We rename columns and keep the data at its hourly resolution.
Also, we will create a proper timestamp.
Documentation: https://www.daggegevens.knmi.nl/klimatologie/uurgegevens
"""
# drop duplicates
df_raw = df_raw.drop_duplicates()
# rename columns
df = df_raw.rename(columns={"DD": "wind_direction", "FH": "wind_speed_h", "FF": "wind_speed", "FX": "wind_gust",
"T": "temperature", "T10N": "temperature_min", "TD": "dew_point_temperature",
"SQ": "radiation_duration", "Q": "global_radiation",
"DR": "precipitation_duration", "RH": "precipitation_h",
"P": "pressure", "VV": "sight", "N": "cloud_cover", "U": "relative_humidity",
"WW": "weather_code", "IX": "weather_index",
"M": "fog", "R": "rain", "S": "snow", "O": "thunder", "Y": "ice"
})
# get proper datetime column
df["datetime"] = pd.to_datetime(df['date'], format='%Y%m%dT%H:%M:%S.%f') + pd.to_timedelta(df["hour"] - 1, unit = 'hours')
df["datetime"] = df["datetime"].dt.tz_convert("Europe/Amsterdam")
df = df.sort_values(by = "datetime", ascending = True)
df = df.reset_index(drop = True)
df['date'] = df['datetime'].dt.date
df['date'] = df['date'].astype('datetime64[ns]')
df['hour'] -= 1
# drop unwanted columns
df = df.drop(['datetime', 'weather_code', 'station_code'], axis = 'columns')
df = df.astype({'wind_speed':'float64', 'wind_gust':'float64','temperature':'float64','temperature_min':'float64',
'dew_point_temperature':'float64','radiation_duration':'float64','precipitation_duration':'float64',
'precipitation_h':'float64','pressure':'float64'})
# divide some columns by ten (because using 0.1 degrees C etc. as units)
col10 = ["wind_speed", "wind_gust", "temperature", "temperature_min", "dew_point_temperature",
"radiation_duration", "precipitation_duration", "precipitation_h", "pressure"]
df[col10] = df[col10] / 10
return df
def preprocess_metpre_data(df_raw):
"""
To be filled
Documentation: https://www.meteoserver.nl/weersverwachting-API.php
"""
# rename columns
df = df_raw.rename(columns={"windr": "wind_direction", "rv": "relative_humidity", "luchtd": "pressure",
"temp": "temperature", "windb": "wind_force", "winds": "wind_speed",
"gust": "wind_gust", "vis": "sight_m", "neersl": "precipitation_h",
"gr": "global_radiation", "tw": "clouds"
})
# drop duplicates
df = df.drop_duplicates()
# get proper datetime column
df["datetime"] = pd.to_datetime(df['tijd'], unit='s', utc = True)
df["datetime"] = df["datetime"] + pd.to_timedelta(1, unit = 'hours') ## klopt dan beter, maar waarom?
df = df.sort_values(by = "datetime", ascending = True)
df = df.reset_index(drop = True)
df["datetime"] = df["datetime"].dt.tz_convert("Europe/Amsterdam")
# new column: forecast created on
df["offset_h"] = df["offset"].astype(float)
#df["datetime_predicted"] = df["datetime"] - pd.to_timedelta(df["offset_h"], unit = 'hours')
# select only data after starting datetime
#df = df[df['datetime'] >= start_ds] # @me: move this to query later
    # select the latest prediction  # makes sense for the prediction set, less so for the training set
df = df.sort_values(by = ['datetime', 'offset_h'])
df = df.drop_duplicates(subset = 'datetime', keep = 'first')
# drop unwanted columns
df = df.drop(['tijd', 'tijd_nl', 'loc',
'icoon', 'samenv', 'ico',
'cape', 'cond', 'luchtdmmhg', 'luchtdinhg',
'windkmh', 'windknp', 'windrltr', 'wind_force',
                  'gustb', 'gustkt', 'gustkmh', 'wind_gust', # these are missing before 14 June
'hw', 'mw', 'lw',
'offset', 'offset_h',
'gr_w'], axis = 'columns', errors = 'ignore')
# set datatypes of weather data to float
df = df.set_index('datetime')
df = df.astype('float64').reset_index()
    # rescale cloud cover to the observation scale (0-9) and sight_m to the sight scale; roughly comparable, not identical quantities
df['cloud_cover'] = df['clouds'] / 12.5
df['sight'] = df['sight_m'] / 333
    df = df.drop(['clouds', 'sight_m'], axis = 'columns')  # assign back, otherwise the helper columns are never dropped
    # resample to an hourly index, forward-filling gaps of at most 11 steps
df_hour = df.set_index('datetime').resample('1h').ffill(limit = 11)
    # maybe smooth later? does not seem to help the forecast for now
#df_smooth = df_15.apply(lambda x: savgol_filter(x,17,2))
#df_smooth = df_smooth.reset_index()
df_hour = df_hour.reset_index()
df_hour['date'] = df_hour['datetime'].dt.date
df_hour['date'] = df_hour['date'].astype('datetime64[ns]')
df_hour['hour'] = df_hour['datetime'].dt.hour
return df_hour # df_smooth
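# Illustrative sketch (not part of the original pipeline): what the
# resample('1h').ffill(limit=...) step above does when the forecast only has a
# value every few hours. The temperatures and timestamps are made up.
def _demo_forecast_resample():
    import pandas as pd

    forecast = pd.DataFrame(
        {"temperature": [15.0, 17.0]},
        index=pd.to_datetime(["2021-06-14 00:00", "2021-06-14 03:00"]),
    )
    # Each known value is carried forward for at most 11 hourly steps,
    # so 01:00 and 02:00 repeat the 00:00 value here.
    return forecast.resample("1h").ffill(limit=11)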
def preprocess_covid_data(df_raw):
    # Put data into a dataframe
    df_raw_unpack = df_raw.T['NLD'].dropna()
    df = pd.DataFrame.from_records(df_raw_unpack)
    # Add datetime column
    df['datetime'] = pd.to_datetime(df['date_value'])
    # Select columns
    df_sel = df[['datetime', 'stringency']]
    # Extend dataframe to 14 days in future (based on latest value)
    dates_future = pd.date_range(df['datetime'].iloc[-1], periods = 14, freq='1d')
    df_future = pd.DataFrame(data = {'datetime': dates_future,
                                     'stringency': df['stringency'].iloc[-1]})
    # Add together and set index
df_final = df_sel.append(df_future.iloc[1:])
df_final = df_final.set_index('datetime')
return df_final
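# Illustrative sketch (not part of the original code): the "extend the latest
# stringency value 14 days into the future" step from preprocess_covid_data on a
# tiny made-up series. pd.concat is used here instead of the DataFrame.append
# call above; the two are equivalent for this purpose.
def _demo_extend_stringency():
    import pandas as pd

    df_sel = pd.DataFrame({
        "datetime": pd.to_datetime(["2021-06-13", "2021-06-14"]),
        "stringency": [45.0, 47.5],
    })
    dates_future = pd.date_range(df_sel["datetime"].iloc[-1], periods=14, freq="1d")
    df_future = pd.DataFrame({"datetime": dates_future,
                              "stringency": df_sel["stringency"].iloc[-1]})
    # Skip the first future row because it duplicates the last observed day.
    return pd.concat([df_sel, df_future.iloc[1:]]).set_index("datetime")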
def preprocess_holiday_data(holidays):
df = pd.DataFrame(holidays, columns=['Date', 'Holiday'])
df['Date'] = df['Date'].astype('datetime64[ns]')
return df
def interpolate_missing_values(data_to_interpolate):
df = data_to_interpolate.copy()
    random_state_value = 1 # Ensure reproducibility
# Train check-ins interpolator
checkins_interpolator_cols = ['hour', 'year', 'weekday', 'month', 'stringency', 'holiday', 'check-outs']
checkins_interpolator_targets = ['check-ins']
X_train = df.dropna()[checkins_interpolator_cols]
y_train = df.dropna()[checkins_interpolator_targets]
checkins_interpolator = RandomForestRegressor(random_state=random_state_value)
checkins_interpolator.fit(X_train, y_train)
# Train check-outs interpolator
checkouts_interpolator_cols = ['hour', 'year', 'weekday', 'month', 'stringency', 'holiday', 'check-ins']
checkouts_interpolator_targets = ['check-outs']
X_train = df.dropna()[checkouts_interpolator_cols]
y_train = df.dropna()[checkouts_interpolator_targets]
checkouts_interpolator = RandomForestRegressor(random_state=random_state_value)
checkouts_interpolator.fit(X_train, y_train)
    # Keep only rows where at least one of check-ins/check-outs is present; rows missing both cannot be interpolated
df_to_interpolate = df.drop(df.loc[(df['check-ins'].isna()==True) & (df['check-outs'].isna()==True)].index)
# Interpolate check-ins
checkins_missing = df_to_interpolate[(df_to_interpolate['check-outs'].isna()==False) & (df_to_interpolate['check-ins'].isna()==True)].copy()
checkins_missing['stringency'] = checkins_missing['stringency'].replace(np.nan, 0)
checkins_missing['check-ins'] = checkins_interpolator.predict(checkins_missing[['hour', 'year', 'weekday', 'month', 'stringency', 'holiday', 'check-outs']])
# Interpolate check-outs
checkouts_missing = df_to_interpolate[(df_to_interpolate['check-ins'].isna()==False) & (df_to_interpolate['check-outs'].isna()==True)].copy()
checkouts_missing['stringency'] = checkouts_missing['stringency'].replace(np.nan, 0)
checkouts_missing['check-outs'] = checkouts_interpolator.predict(checkouts_missing[['hour', 'year', 'weekday', 'month', 'stringency', 'holiday', 'check-ins']])
# Insert interpolated values into main dataframe
for index, row in checkins_missing.iterrows():
df.loc[df.index==index, 'check-ins'] = row['check-ins']
for index, row in checkouts_missing.iterrows():
df.loc[df.index==index, 'check-outs'] = row['check-outs']
return df
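# Illustrative sketch (not part of the original code): the core idea of
# interpolate_missing_values on a toy frame -- fit a regressor on rows where the
# target is known, then predict it where only the other features are present.
# The feature subset and all numbers here are made up.
def _demo_model_based_interpolation():
    import numpy as np
    import pandas as pd
    from sklearn.ensemble import RandomForestRegressor

    df = pd.DataFrame({
        "hour": [8, 9, 10, 11],
        "check-outs": [120.0, 150.0, 90.0, 100.0],
        "check-ins": [110.0, 140.0, np.nan, 95.0],
    })
    known = df.dropna()
    model = RandomForestRegressor(n_estimators=10, random_state=1)
    model.fit(known[["hour", "check-outs"]], known["check-ins"])
    missing = df["check-ins"].isna()
    df.loc[missing, "check-ins"] = model.predict(df.loc[missing, ["hour", "check-outs"]])
    return df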
def get_crowd_last_week(df, row):
week_ago = row['datetime'] - timedelta(weeks=1)
subset_with_hour = df[(df['datetime']==week_ago) & (df['hour']==row['hour'])]
# If crowd from last week is not available at exact date- and hour combination, then get average crowd of last week.
subset_week_ago = df[(df['year']==row['year']) & (df['week']==row['week']) & (df['hour']==row['hour'])]
checkins_week_ago = 0
checkouts_week_ago = 0
if len(subset_with_hour) > 0: # return crowd from week ago at the same day/time (hour)
checkins_week_ago = subset_with_hour['check-ins'].mean()
checkouts_week_ago = subset_with_hour['check-outs'].mean()
elif len(subset_week_ago) > 0: # return average crowd the hour group a week ago
checkins_week_ago = subset_week_ago['check-ins'].mean()
checkouts_week_ago = subset_week_ago['check-outs'].mean()
return [checkins_week_ago, checkouts_week_ago]
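# Illustrative sketch (not part of the original code): get_crowd_last_week is
# typically applied row-wise to add lagged crowd features. The output column
# names below are hypothetical; df is assumed to have the datetime, hour, year,
# week, check-ins and check-outs columns the function expects.
def _demo_add_last_week_features(df):
    lagged = df.apply(lambda row: get_crowd_last_week(df, row), axis=1, result_type="expand")
    df["check-ins_week_ago"] = lagged[0]
    df["check-outs_week_ago"] = lagged[1]
    return df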
def get_train_test_split(df):
"""
Create train and test split for 1-week ahead models. This means that the last week of the data will be used
as a test set and the rest will be the training set.
"""
most_recent_date = df['datetime'].max()
last_week = pd.date_range(df.datetime.max()-pd.Timedelta(7, unit='D')+pd.DateOffset(1), df['datetime'].max())
train = df[df['datetime']<last_week.min()]
test = df[(df['datetime']>=last_week.min()) & (df['datetime']<=last_week.max())]
return [train, test]
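# Illustrative sketch (not part of the original code): what get_train_test_split
# does on a small synthetic frame -- the last 7 calendar days become the test
# set and everything before them the training set.
def _demo_train_test_split():
    import pandas as pd

    df = pd.DataFrame({"datetime": pd.date_range("2021-06-01", periods=21, freq="D"),
                       "check-ins": range(21)})
    train, test = get_train_test_split(df)
    # train covers 2021-06-01 .. 2021-06-14, test covers 2021-06-15 .. 2021-06-21
    return train, test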
def get_train_val_test_split(df):
"""
Create train, validation, and test split for 1-week ahead models. This means that the last week of the data will be used
    as the test set, the week before that as the validation set, and the rest as the training set.
"""
most_recent_date = df['datetime'].max()
last_week = pd.date_range(df.datetime.max()-pd.Timedelta(7, unit='D')+
|
pd.DateOffset(1)
|
pandas.DateOffset
|
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
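# Illustrative sketch, not part of the original test module: the NaT comparison
# semantics exercised by the class above, shown directly. NaT compares unequal
# to everything, including itself, mirroring np.nan.
def _demo_nat_comparison_semantics():
    ser = Series([Timestamp("2021-01-01"), NaT])
    assert list(ser == Timestamp("2021-01-01")) == [True, False]
    assert list(ser == NaT) == [False, False]
    assert NaT != NaT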
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
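# Illustrative sketch, not part of the original test module: the tz-awareness
# rule checked in the class above -- equality between tz-naive and tz-aware
# datetimes evaluates to all-False (and != to all-True), while ordering
# comparisons raise TypeError.
def _demo_tzawareness_comparison_rule():
    naive = date_range("2016-01-01", periods=3)
    aware = naive.tz_localize("US/Pacific")
    assert not (naive == aware).any()
    assert (naive != aware).all()
    with pytest.raises(TypeError):
        naive < aware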
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
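# Illustrative sketch, not part of the original test module: the basic
# datetime64 +/- timedelta-like arithmetic covered by the class above, shown on
# a plain DatetimeIndex.
def _demo_dt64_timedelta_arithmetic():
    dti = date_range("2000-01-01", periods=3, freq="D")
    assert (dti + Timedelta(hours=2))[0] == Timestamp("2000-01-01 02:00")
    assert (dti - np.timedelta64(1, "D"))[0] == Timestamp("1999-12-31")
    # subtracting two datetime-likes yields timedelta64
    assert (dti - dti[0]).dtype == "timedelta64[ns]"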
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
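# Illustrative sketch, not part of the original test module: one concrete case
# of the DateOffset arithmetic parametrized in the class above.
def _demo_dateoffset_arithmetic():
    dti = date_range("2014-01-01", periods=2, freq="AS")
    assert (dti + DateOffset(months=3, days=10))[0] == Timestamp("2014-04-11")
    assert (dti - DateOffset(months=3))[0] == Timestamp("2013-10-01")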
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
import os
import pandas
import numpy as np
import nibabel as ni
import itertools
from glob import glob
import statsmodels.distributions.empirical_distribution as ed
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from scipy import stats
from scipy.io import savemat,loadmat
from nilearn import input_data, image
from matplotlib import mlab
from sklearn.utils import resample
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_extraction.image import grid_to_graph
from statsmodels.sandbox.stats.multicomp import multipletests
#import matlab.engine
import sys
#eng = matlab.engine.start_matlab()
#eng.addpath('../',nargout=0)
def Extract_Values_from_Atlas(files_in, atlas,
mask = None, mask_threshold = 0,
blocking = 'one_at_a_time',
labels = [], sids = [],
output = None,):
'''
This function will extract mean values from a set of images for
each ROI from a given atlas. Returns a Subject x ROI pandas
DataFrame (and csv file if output argument is set to a path).
Use blocking argument according to memory capacity of your
computer vis-a-vis memory requirements of loading all images.
files_in: determines which images to extract values from. Input
can be any of the following:
-- a list of paths
-- a path to a directory containing ONLY files to extract from
-- a search string (with wild card) that would return all
desired images. For example, doing ls [files_in] in a terminal
would list all desired subjects
-- a 4D Nifti image
**NOTE** be aware of the order of file input, which relates to
other arguments
atlas: Path to an atlas, or a Nifti image or np.ndarry of desired
atlas. Or, if doing native space analysis, instead, supply a list
of paths to atlases that match each subject.
NOTE: In this case, The order of this list should be the same
order as subjects in files_in
mask: Path to a binary inclusive mask image. Script will set all
values to 0 for every image where mask voxels = 0. This process
is done before extraction. If doing a native space analysis,
instead, supply a list of paths to masks that match each subject
and each atlas.
mask_threshold: An integer that denotes the minimum acceptable
size (in voxels) of an ROI after masking. This is to prevent
tiny ROIs resulting from conservative masks that might have
spuriously high or low mean values due to the low amount of
information within.
blocking: loading all images to memory at once may not be possible
depending on your computer. Acceptable arguments are:
-- 'one_at_a_time': will extract values from each image
            independently. Recommended for machines with poor memory
capacity. Required for native space extraction.
-- 'all_at_once': loads all images into memory at once.
            Provides a slight speed up for faster machines over
one_at_a_time, but is probably not faster than batching (see
below). Only recommended for smaller datasets.
** WARNING ** Not recommended on very large datasets. Will
crash computers with poor memory capacity.
-- any integer: determines the number of images to be read to
memory at once. Recommended for large datasets.
labels: a list of string labels that represent the names of the
ROIs from atlas.
NOTE: ROIs are read consecutively from lowest to highest, and
labels *must* match that order
Default argument [] will use "ROI_x" for each ROI, where X
        corresponds to the actual ROI integer label
sids: a list of subject IDs in the same order as files_in. Default
argument [] will list subjects with consecutive integers.
output: if you wish the resulting ROI values to be written to file,
provide a FULL path. Otherwise, leave as None (matrix will be
returned)
'''
if type(blocking) == str and blocking not in ['all_at_once','one_at_a_time']:
        raise IOError('blocking only accepts integers or arguments of "all_at_once" or "one_at_a_time"')
if type(atlas) == list:
if blocking != 'one_at_a_time':
print('WARNING: you have passed a list of atlases but blocking is not set to one_at_a_time')
print('Lists of atlases are for native space situations where each subject has their own atlas')
print('If you want to test multiple atlases, run the script multiple times with different atlases')
raise IOError('you have passed a list of atlases but blocking is not set to one_at_a_time')
if type(mask) != type(None):
if type(atlas) != type(mask):
raise IOError('for masking, list of masks must be passed that equals length of atlas list')
elif type(mask) == list:
if len(atlas) != len(mask):
                    raise IOError('list of atlases (n=%s) and masks (n=%s) are unequal'%(len(atlas),
                                                                                         len(mask)))
if type(atlas) != list:
if type(atlas) == str:
try:
atl = ni.load(atlas).get_data()
except:
raise IOError('could not find an atlas at the specified location: %s'%atlas)
elif type(atlas) == ni.nifti1.Nifti1Image:
atl = atlas.get_data()
elif type(atlas) == np.ndarray:
atl = atlas
else:
print('could not recognize atlas filetype. Please provide a path, a NiftiImage object, or an numpy ndarray')
raise IOError('atlas type not recognized')
if blocking == 'all_at_once':
i4d = load_data(files_in, return_images=True).get_data()
if i4d.shape[:-1] != atl.shape:
raise IOError('image dimensions do not match atlas dimensions')
if type(mask) != type(None):
print('masking...')
mask_data = ni.load(mask).get_data()
mask_data = mask_atlas(mask_data, atl, mask_threshold)
i4d = mask_image_data(i4d, mask_data)
if len(sids) == 0:
sids = range(i4d.shape[-1])
print('extracting values from atlas')
roi_vals = generate_matrix_from_atlas(i4d, atl, labels, sids)
else:
image_paths = load_data(files_in, return_images = False)
if blocking == 'one_at_a_time':
catch = []
for i,image_path in enumerate(image_paths):
if len(sids) > 0:
sid = [sids[i]]
else:
sid = [i]
print('working on subject %s'%sid[0])
img = ni.load(image_path).get_data()
try:
assert img.shape == atl.shape, 'fail'
except:
print('dimensions for subject %s (%s) image did not match atlas dimensions (%s)'%(sid,
img.shape,
atl.shape))
print('skipping subject %s'%sid[0])
continue
if type(mask) != type(None):
mask_data = ni.load(mask).get_data()
mask_data = mask_atlas(mask_data, atl, mask_threshold)
img = mask_image_data(img, mask_data)
f_mat = generate_matrix_from_atlas(img, atl, labels, sid)
catch.append(f_mat)
roi_vals = pandas.concat(catch)
elif type(blocking) == int:
block_size = blocking
if len(image_paths)%block_size == 0:
blocks = int(len(image_paths)/block_size)
remainder = False
else:
blocks = int((len(image_paths)/blocking) + 1)
remainder = True
catch = []
count = 0
if type(mask) != type(None):
mask_data = ni.load(mask).get_data()
mask_data = mask_atlas(mask_data, atl, mask_threshold)
for block in range(blocks):
if block == (blocks - 1) and remainder:
print('working on final batch of subjects')
sub_block = image_paths[count:]
else:
                        print('working on batch %s of %s (%s subjects per batch)'%((block+1), blocks, block_size))
sub_block = image_paths[count:(count+block_size)]
i4d = load_data(sub_block, return_images = True).get_data()
if i4d.shape[:-1] != atl.shape:
                        raise IOError('image dimensions (%s) do not match atlas dimensions (%s)'%(i4d.shape[:-1],
                                                                                                  atl.shape))
if type(mask) != type(None):
if len(mask_data.shape) == 4:
tmp_mask = mask_data[:,:,:,:block_size]
else:
tmp_mask = mask_data
i4d = mask_image_data(i4d, tmp_mask)
if block == (blocks - 1) and remainder:
if len(sids) == 0:
sids_in = range(count,i4d.shape[-1])
else:
sids_in = sids[count:]
else:
if len(sids) == 0:
sids_in = range(count,(count+block_size))
else:
sids_in = sids[count:(count+block_size)]
f_mat = generate_matrix_from_atlas(i4d, atl, labels, sids_in)
catch.append(f_mat)
count += block_size
roi_vals = pandas.concat(catch)
else:
image_paths = load_data(files_in, return_images = False)
if len(atlas) != len(image_paths):
raise IOError('number of images (%s) does not match number of atlases (%s)'%(len(image_paths),
len(atlas)))
catch = []
for i,image_path in enumerate(image_paths):
            if len(sids) > 0:
                sid = [sids[i]]
            else:
                sid = [i]
            print('working on subject %s'%sid[0])
img = ni.load(image_path).get_data()
atl = ni.load(atlas[i]).get_data()
if type(mask) != type(None):
mask_data = ni.load(mask[i]).get_data()
mask_data = mask_atlas(mask_data, atl, mask_threshold)
img = mask_image_data(img,mask_data)
try:
assert img.shape == atl.shape, 'fail'
except:
print('dimensions for subject %s (%s) image did not match atlas dimensions (%s)'%(sid,
img.shape,
atl.shape
))
print('skipping subject %s'%sid)
continue
f_mat = generate_matrix_from_atlas(img, atl, labels, sid)
catch.append(f_mat)
roi_vals = pandas.concat(catch)
if output:
roi_vals.to_csv(output)
return roi_vals
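# --- Illustrative usage sketch (not part of the original pipeline) ---
# A hypothetical call to Extract_Values_from_Atlas using batched loading; every
# path below is a placeholder and must be replaced with real data.
def _example_extract_values_from_atlas():
    roi_df = Extract_Values_from_Atlas(files_in='/data/pet/*.nii.gz',
                                       atlas='/data/atlases/aal.nii.gz',
                                       mask='/data/masks/gm_mask.nii.gz',
                                       mask_threshold=10,
                                       blocking=20,   # read 20 images into memory at a time
                                       output=None)   # return the DataFrame instead of writing a csv
    return roi_df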
def generate_matrix_from_atlas(files_in, atl, labels, sids):
if len(files_in.shape) == 3:
x,y,z = files_in.shape
files_in = files_in.reshape(x,y,z,1)
atl = atl.astype(int)
if max(np.unique(atl)) != (len(np.unique(atl)) -1):
atl = fix_atlas(atl)
if len(labels) > 0:
cols = labels
else:
cols = ['roi_%s'%x for x in np.unique(atl) if x != 0]
f_mat = pandas.DataFrame(index = sids,
columns = cols)
tot = np.bincount(atl.flat)
for sub in range(files_in.shape[-1]):
mtx = files_in[:,:,:,sub]
sums = np.bincount(atl.flat, weights = mtx.flat)
rois = (sums/tot)[1:]
f_mat.loc[f_mat.index[sub]] = rois
return f_mat
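# --- Illustrative sketch (synthetic data; not from the original analysis) ---
# generate_matrix_from_atlas computes ROI means in a single pass by calling
# np.bincount twice: once for voxel counts per label and once with the image
# values as weights. The toy example below walks through that logic.
def _demo_bincount_roi_means():
    atl = np.array([[[0, 1], [1, 2]], [[2, 2], [1, 0]]])        # tiny 2x2x2 "atlas"
    img = np.arange(8, dtype=float).reshape(2, 2, 2)            # fake image values
    tot = np.bincount(atl.flat)                                 # voxels per label
    sums = np.bincount(atl.flat, weights=img.flat)              # value sums per label
    roi_means = (sums / tot)[1:]                                # drop background label 0
    return roi_means                                            # one mean per ROI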
def load_data(files_in, return_images):
fail = False
if type(files_in) == str:
if os.path.isdir(files_in):
print('It seems you passed a directory')
search = os.path.join(files_in,'*')
flz = glob(search)
num_f = len(flz)
if num_f == 0:
raise IOError('specified directory did not contain any files')
else:
print('found %s images!'%num_f)
if return_images:
i4d = ni.concat_images(flz)
elif '*' in files_in:
print('It seems you passed a search string')
flz = glob(files_in)
num_f = len(flz)
if num_f == 0:
raise IOError('specified search string did not result in any files')
else:
print('found %s images'%num_f)
if return_images:
i4d = ni.concat_images(flz)
else:
fail = True
elif type(files_in) == list:
flz = files_in
print('processing %s subjects'%len(files_in))
if return_images:
i4d = ni.concat_images(files_in)
elif type(files_in) == ni.nifti1.Nifti1Image:
print('processing %s subjects'%files_in.shape[-1])
i4d = files_in
else:
fail = True
if fail:
print('files_in not recognized.',
'Please enter a search string, valid directory, list of paths, or a Nifti object')
raise ValueError('I do not recognize the files_in input.')
if return_images:
return i4d
else:
return flz
def mask_image_data(image_data, mask_data):
if len(image_data.shape) == 3:
if mask_data.shape != image_data.shape:
raise ValueError('dimensions of mask (%s) and image (%s) do not match!'%(mask_data.shape,
image_data.shape))
image_data[mask_data==0] = 0
elif len(image_data.shape) == 4:
if len(mask_data.shape) == 4:
if mask_data.shape != image_data.shape:
raise ValueError('dimensions of mask (%s) and image (%s) do not match!'%(mask_data.shape,
image_data.shape))
else:
masker = mask_data
else:
if mask_data.shape != image_data.shape[:3]:
raise ValueError('dimensions of mask (%s) and image (%s) do not match!'%(mask_data.shape,
image_data.shape[:3]))
masker = np.repeat(mask_data[:, :, :, np.newaxis], image_data.shape[-1], axis=3)
image_data[masker==0] = 0
return image_data
def mask_atlas(mask_data, atlas_data, mask_threshold):
if len(mask_data.shape) == 4:
dim4 = mask_data.shape[-1]
mask_data = mask_data[:,:,:,0]
tfm_4d = True
else:
tfm_4d = False
if max(np.unique(atlas_data)) != (len(np.unique(atlas_data)) -1):
atlas_data = fix_atlas(atlas_data)
mask_atlas = np.array(atlas_data, copy=True)
new_mask = np.array(mask_data, copy=True)
mask_atlas[mask_data == 0] = 0
counts = np.bincount(mask_atlas.astype(int).flat)
labs_to_mask = [x for x in range(len(counts)) if counts[x] < mask_threshold]
for label in labs_to_mask:
new_mask[atlas_data==label] = 0
if tfm_4d:
new_mask = np.repeat(new_mask[:, :, :, np.newaxis], dim4, axis=3)
return new_mask
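# --- Illustrative sketch (tiny synthetic arrays, for clarity only) ---
# mask_atlas zeroes out mask voxels belonging to ROIs that fall below
# mask_threshold voxels after masking, so spuriously tiny ROIs are dropped.
def _demo_mask_atlas():
    atlas_data = np.array([0, 1, 1, 1, 2, 2, 2])        # background + two 3-voxel ROIs
    mask_data = np.array([0, 1, 1, 1, 1, 0, 0])         # masking leaves ROI 2 with 1 voxel
    new_mask = mask_atlas(mask_data, atlas_data, mask_threshold=2)
    return new_mask                                      # -> array([0, 1, 1, 1, 0, 0, 0])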
def fix_atlas(atl):
new_atl = np.zeros_like(atl)
atl_map = dict(zip(np.unique(atl),
range(len(np.unique(atl)))
))
for u in np.unique(atl):
new_atl[atl == u] = atl_map[u]
return new_atl
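# --- Illustrative sketch (made-up label values, for clarity only) ---
# fix_atlas remaps arbitrary, non-consecutive atlas labels onto 0..N so that
# np.bincount-based extraction works as expected.
def _demo_fix_atlas():
    atl = np.array([0, 10, 10, 37, 99, 0])       # labels are not consecutive
    relabelled = fix_atlas(atl)                   # -> array([0, 1, 1, 2, 3, 0])
    return relabelled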
def Convert_ROI_values_to_Probabilities(roi_matrix, norm_matrix = None,
models = None,
target_distribution = 'right',
outdir = False, fail_behavior = 'nan',
mixed_probability = False, mp_thresh = 0.05):
'''
Will take a Subject x ROI array of values and convert them to probabilities,
using ECDF (monomial distribution) or Gaussian Mixture models (binomial
distribution), with or without a reference sample with the same ROIs.
Returns a Subject x ROI matrix the same size as the input with probability
values. A report is also generated if an argument is passed for models. The
report details which model was selected for each ROI and notes any problems.
roi_matrix -- A subject x ROI matrix. can be pandas Dataframe or numpy array
norm_matrix -- A matrix with the same ROIs as roi_matrix. This sample will
be used to fit the distributions used to calculate the probabilities of
subject in roi_matrix. Norm_matrix and roi_matrix can have overlapping
subjects
if None (default), will use roi_matrix as norm_matrix
models -- a dict object pairing sklearn.gaussian models (values) with
labels describing the models (keys). If more than one model is passed,
for each ROI, model fit between all models will be evaluated and best model
(lowest BIC) will be selected for that ROI.
if None (default), probabilities will be calculated using ECDF.
        NOTE: Models with n_components=1 will calculate probabilities using
ECDF.
NOTE: This script does not currently support models with
n_distributions > 2
target_distribution -- Informs the script whether the target distribution is
expected to have lower values ('left', e.g. gray matter volume) or higher values
('right', e.g. tau-PET). The target distribution is the one for which
probabilities are generated. For example, passing a value of 'right' will give
the probability that a subject falls on the rightmost distribution of values for
a particular ROI.
outdir -- If the resulting probability matrix (and report) should be save to disk,
provide the path to an existing directory.
        WARNING: Will overwrite an already-existing output of this script if one
        already exists in the passed directory
fail_behavior -- Occasionally, two-component models will find distributions that
are not consistent with the hypothesis presented in target_distribution.
This argument tells the script what to do in such situations:
'nan' will return NaNs for all ROIs that fail
        'values' will return probability values from one of the distributions (selected
arbitrarily)
mixed_probability -- Experimental setting. If set to True, after calculating
probabilities, for rois with n_components > 1 only, will set all values <
mp_thresh to 0. Remaining values will be put through ECDF. This will create less
of a binarized distribution for n_components > 1 ROIs.
mp_thresh -- Threshold setting for mixed_probability. Must be a float between 0
and 1. Decides the arbitrary probability of "tau positivity". Default is 0.05.
'''
if target_distribution not in ['left','right']:
        raise IOError('target_distribution must be set to "left" or "right"')
if fail_behavior not in ['nan', 'values']:
raise IOError('fail_behavior must be set to "nan" or "values"')
if type(roi_matrix) == pandas.core.frame.DataFrame:
roi_matrix = pandas.DataFrame(roi_matrix,copy=True)
if type(roi_matrix) != pandas.core.frame.DataFrame:
if type(roi_matrix) == np.ndarray:
roi_matrix = np.array(roi_matrix,copy=True)
roi_matrix = pandas.DataFrame(roi_matrix)
else:
raise IOError('roi_matrix type not recognized. Pass pandas DataFrame or np.ndarray')
if mixed_probability:
holdout_mtx = pandas.DataFrame(roi_matrix, copy=True)
if type(norm_matrix) != type(None):
if type(norm_matrix) == pandas.core.frame.DataFrame:
norm_matrix = pandas.DataFrame(norm_matrix,copy=True)
if type(norm_matrix) != pandas.core.frame.DataFrame:
if type(norm_matrix) == np.ndarray:
norm_matrix = np.array(norm_matrix,copy=True)
norm_matrix = pandas.DataFrame(norm_matrix)
else:
                raise IOError('norm_matrix type not recognized. Pass pandas DataFrame or np.ndarray')
if norm_matrix.shape[-1] != roi_matrix.shape[-1]:
raise IOError('norm_matrix must have the same number of columns as roi_matrix')
elif all(norm_matrix.columns != roi_matrix.columns):
raise IOError('norm_matrix must have the same column labels as roi_matrix')
else:
norm_matrix = pandas.DataFrame(roi_matrix, copy=True)
results = pandas.DataFrame(index = roi_matrix.index, columns = roi_matrix.columns)
if type(models) == type(None):
for col in roi_matrix.columns:
if not all([x==0 for x in roi_matrix[col]]):
results.loc[:,col] = ecdf_tfm(roi_matrix[col], norm_matrix[col])
if target_distribution == 'left':
results.loc[:,col] = (1 - results.loc[:,col].values)
final_report = None
else:
results.loc[:,col] = [0 for x in range(len(roi_matrix[col]))]
elif type(models) == dict:
for label, model in models.items():
if not hasattr(model, 'predict_proba'):
raise AttributeError('Passed model %s requires the predict_proba attribute'%label)
if not hasattr(model, 'n_components'):
raise AttributeError('Passed model %s requires the n_components attribute'%label)
elif model.n_components > 2:
raise ValueError('Models with > 2 components currently not supported (%s, n=%s)'%(label,
model.n_components))
final_report = pandas.DataFrame(index = roi_matrix.columns,
columns = ['model','n_components','reversed',
'perc. positive','problem'])
for col in roi_matrix.columns:
if not all([x==0 for x in roi_matrix[col]]):
tfm, report_out = model_tfm(roi_matrix[col], norm_matrix[col], models,
target_distribution, fail_behavior)
results.loc[:,col] = tfm
final_report.loc[col,:] = pandas.DataFrame.from_dict(report_out,'index'
).T[final_report.columns].values
fails = len(final_report[final_report.problem!='False']['problem'].dropna())
else:
results.loc[:,col] = [0 for x in range(len(roi_matrix[col]))]
final_report.loc[col,:] = [np.nan for x in range(len(final_report.columns))]
if fails > 0:
print('%s ROIs showed unexpected fitting behavior. See report...'%fails)
else:
        raise ValueError('models must be a dict object or None (None uses ECDF). You passed a %s'%(type(models)))
if mixed_probability:
results = mixed_probability_transform(results, holdout_mtx, mp_thresh, final_report)
if type(final_report) == type(None):
if outdir:
results.to_csv(os.path.join(outdir, 'results.csv'))
return results
else:
if outdir:
results.to_csv(os.path.join(outdir, 'results.csv'))
final_report.to_csv(os.path.join(outdir, 'model_choice_report.csv'))
return results, final_report
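# --- Illustrative usage sketch (synthetic data and a hypothetical model dict) ---
# Converting ROI values to probabilities with one- and two-component Gaussian
# mixture models; per ROI, the model with the lowest BIC is chosen and ROIs with
# one component fall back to the ECDF.
def _example_convert_roi_values():
    rng = np.random.RandomState(0)
    fake_rois = pandas.DataFrame(rng.normal(1.2, 0.2, size=(100, 5)),
                                 columns=['roi_%s' % i for i in range(1, 6)])
    models = {'one_component': GaussianMixture(n_components=1),
              'two_component': GaussianMixture(n_components=2)}
    probs, report = Convert_ROI_values_to_Probabilities(fake_rois,
                                                        models=models,
                                                        target_distribution='right')
    return probs, report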
def ecdf_tfm(target_col, norm_col):
return ed.ECDF(norm_col.values)(target_col.values)
def model_tfm(target_col, norm_col, models, target_distribution, fail_behavior):
report = {}
if len(models.keys()) > 1:
model, label = compare_models(models,norm_col)
else:
model = models[list(models.keys())[0]]
label = list(models.keys())[0]
report.update({'model': label})
report.update({'n_components': model.n_components})
if model.n_components == 1:
tfm = ecdf_tfm(target_col, norm_col)
report.update({'reversed': 'False'})
report.update({'perc. positive': np.nan})
report.update({'problem': 'False'})
else:
fitted = model.fit(norm_col.values.reshape(-1,1))
labs = fitted.predict(target_col.values.reshape(-1,1))
d0_mean = target_col.values[labs==0].mean()
d1_mean = target_col.values[labs==1].mean()
numb = len([x for x in labs if x == 1])/len(target_col)
if target_distribution == 'right':
if d0_mean > d1_mean and numb > 0.5:
report.update({'reversed': 'True'})
report.update({'perc. positive': 1-numb})
report.update({'problem': 'False'})
tfm = fitted.predict_proba(target_col.values.reshape(-1,1))[:,0]
elif d0_mean < d1_mean and numb < 0.5:
report.update({'reversed': 'False'})
report.update({'perc. positive': numb})
report.update({'problem': 'False'})
tfm = fitted.predict_proba(target_col.values.reshape(-1,1))[:,1]
else:
report.update({'reversed': np.nan})
report.update({'perc. positive': np.nan})
report.update({'problem': 'mean of 0s = %s, mean of 1s = %s, perc of 1s = %s'%(
d0_mean, d1_mean, numb)})
if fail_behavior == 'nan':
tfm = [np.nan for x in range(len(target_col))]
elif fail_behavior == 'values':
tfm = fitted.predict_proba(target_col.values.reshape(-1,1))[:,1]
else:
if d0_mean < d1_mean and numb < 0.5:
report.update({'reversed': 'False'})
report.update({'perc. positive': numb})
report.update({'problem': 'False'})
tfm = fitted.predict_proba(target_col.values.reshape(-1,1))[:,0]
elif d0_mean > d1_mean and numb > 0.5:
report.update({'reversed': 'True'})
report.update({'perc. positive': 1-numb})
report.update({'problem': 'False'})
tfm = fitted.predict_proba(target_col.values.reshape(-1,1))[:,1]
else:
report.update({'reversed': np.nan})
report.update({'perc. positive': np.nan})
report.update({'problem': 'mean of 0s = %s, mean of 1s = %s, perc of 1s = %s'%(
d0_mean, d1_mean, numb)})
if fail_behavior == 'nan':
tfm = [np.nan for x in range(len(target_col))]
elif fail_behavior == 'values':
tfm = fitted.predict_proba(target_col.values.reshape(-1,1))[:,0]
return tfm, report
def compare_models(models, norm_col):
modz = []
labs = []
for lab, mod in models.items():
modz.append(mod)
labs.append(lab)
bix = []
for model in modz:
bic = model.fit(norm_col.values.reshape(-1,1)).bic(norm_col.values.reshape(-1,1))
bix.append(bic)
winner_id = np.argmin(bix)
winning_mod = modz[winner_id]
winning_label = labs[winner_id]
return winning_mod, winning_label
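# --- Illustrative sketch (synthetic data; not part of the original analysis) ---
# compare_models fits every candidate model to the normative column and keeps
# the one with the lowest BIC. With clearly bimodal data, the two-component
# model should win.
def _demo_compare_models():
    rng = np.random.RandomState(42)
    bimodal = np.concatenate([rng.normal(0.8, 0.1, 200), rng.normal(2.0, 0.1, 200)])
    norm_col = pandas.Series(bimodal)
    models = {'gmm1': GaussianMixture(n_components=1),
              'gmm2': GaussianMixture(n_components=2)}
    winning_model, winning_label = compare_models(models, norm_col)
    return winning_label    # expected to be 'gmm2' for bimodal input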
def mixed_probability_transform(p_matrix, original_matrix, mp_thresh, report):
for col in original_matrix.columns:
if report.loc[col,'n_components'] == 2:
newcol = pandas.Series(
[0 if p_matrix.loc[x, col] < mp_thresh else original_matrix.loc[x,col] for x in original_matrix.index]
)
if len(newcol[newcol>0]) > 0:
newcol[newcol>0] = ecdf_tfm(newcol[newcol>0], newcol[newcol>0])
p_matrix.loc[:,col] = newcol
return p_matrix
def Evaluate_Model(roi, models, bins=None):
'''
Given an array of values and a dictionary of models, this script
will generate a plot of the fitted distribution(s) from each
model (seperately) over the supplied data.
roi -- an array, series or list values
models -- a dict object of string label: (unfitted) sklearn.gaussian
model pairs
bins -- Number of bins for the histogram.
Passing None (default) sets bin to length(roi) / 2
'''
if type(roi) == np.ndarray or type(roi) == list:
roi = pandas.Series(roi)
plt.close()
if not bins:
bins = int(len(roi)/2)
for label,model in models.items():
mmod = model.fit(roi.values.reshape(-1,1))
if mmod.n_components == 2:
m1, m2 = mmod.means_
w1, w2 = mmod.weights_
c1, c2 = mmod.covariances_
            histdist = plt.hist(roi, bins, density=True)
plotgauss1 = lambda x: plt.plot(x,w1*stats.norm.pdf(x,m1,np.sqrt(c1))[0], linewidth=3, color="black", label="AB Negative")
plotgauss2 = lambda x: plt.plot(x,w2*stats.norm.pdf(x,m2,np.sqrt(c2))[0], linewidth=3, color="red", label="AB Positive")
plotgauss1(histdist[1])
plotgauss2(histdist[1])
elif mmod.n_components == 1:
m1 = mmod.means_
w1 = mmod.weights_
c1 = mmod.covariances_
            histdist = plt.hist(roi, bins, density=True)
plotgauss1 = lambda x: plt.plot(x,w1*stats.norm.pdf(x,m1,np.sqrt(c1))[0][0], linewidth=3, color="black")
plotgauss1(histdist[1])
plt.title(label, fontsize=18)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.legend()
plt.show()
def Plot_Probabilites(prob_matrix, col_order = [], ind_order = [],
vmin=None, vmax=None, figsize=(), cmap=None, ax=None, path=None):
'''
Given the output matrix of Convert_ROI_values_to_Probabilities, will plot
a heatmap of all probability values sorted in such a manner to demonstrate
a progression of values.
'''
## NOTE TO SELF: ADD ARGUMENT FOR FIGSIZE AND THRESHOLDING HEATMAP
## ALSO ARGUMENT TO SORT BY DIFFERENT COLUMNS OR ROWS
if type(prob_matrix) == np.ndarray:
prob_matrix = pandas.DataFrame(prob_matrix)
if len(figsize) == 0:
figsize = (14,6)
elif len(figsize) > 2:
raise IOError('figsize must be a tuple with two values (x and y)')
good_cols = [x for x in prob_matrix.columns if not all([x==0 for x in prob_matrix[x]])]
prob_matrix = prob_matrix[good_cols]
plt.close()
if len(ind_order) == 0:
sorter = pandas.DataFrame(prob_matrix,copy=True)
sorter.loc[:,'mean'] = prob_matrix.mean(axis=1)
ind_order = sorter.sort_values('mean',axis=0,ascending=True).index
if len(col_order) == 0:
sorter2 = pandas.DataFrame(prob_matrix,copy=True)
sorter2.loc['mean'] = prob_matrix.mean(axis=0)
col_order = sorter2.sort_values('mean',axis=1,ascending=False).columns
fig, ax = plt.subplots(figsize=figsize)
forplot = prob_matrix.loc[ind_order, col_order]
    g = sns.heatmap(forplot, vmin=vmin, vmax=vmax, cmap=cmap, ax=ax)
plt.xlabel('Regions (highest - lowest p)', fontsize=24)
plt.ylabel('Subjects (lowest - highest p)', fontsize=24)
if path != None:
plt.yticks([])
plt.tight_layout()
plt.savefig(path)
return [ind_order,col_order]
def Evaluate_Probabilities(prob_matrix, to_test, alpha_threshold = 0.05, FDR=None, info='medium'):
'''
This script will quickly calculate significant (as defined by user)
associations between all columns in a DataFrame or matrix and variables
passed by the user. The script will try to guess the appropriate test to
run. Depending on inputs, the script will display the number of
significant columns, which columns are significant and the alpha values;
for each passed variable.
Multiple comparisons correction is supported.
prob_matrix -- a Subject x ROI matrix or DataFrame
to_test -- a dict object of where values are columns, arrays or lists with
the same length as prob_matrix, and keys are string labels IDing them.
alpha_threshold -- determines what is significant. NOTE: If an argument is
passed for FDR, alpha_threshold refers to Q, otherwise, it refers to p.
FDR -- If no argument is passed (default), no multiple comparisons
correction is performed. If the user desires multiple comparisons correction,
the user can select the type by entering any of the string arguments described
here: http://www.statsmodels.org/0.8.0/generated/statsmodels.sandbox.stats.multicomp.multipletests.html
info -- Determines how much information the script will display upon
completion.
light: script will only display the number of significant regions
        medium: script will also display which regions were significant
heavy: script will also display the alpha value for each region
'''
if info not in ['light','medium','heavy']:
print('WARNING: a value of %s was passed for argument "info"'%(info))
print('Script will proceed with minimal information displayed')
print('in the future, please pass one of the following:')
print('"light", "medium", "heavy"')
info = 'light'
if type(prob_matrix) == np.ndarray:
prob_matrix = pandas.DataFrame(prob_matrix)
good_cols = [x for x in prob_matrix.columns if not all([x==0 for x in prob_matrix[x]])]
prob_matrix = prob_matrix[good_cols]
for label, var in to_test.items():
if type(var) == np.ndarray or type(var) == list:
var = pandas.Series(var)
ps = []
n_vals = len(np.unique(var))
if n_vals < 7:
vals = np.unique(var)
if n_vals == 2:
print('for %s, using t-test...'%(label))
for col in prob_matrix.columns:
p = stats.ttest_ind(prob_matrix.loc[var==vals[0]][col],
prob_matrix.loc[var==vals[1]][col])[-1]
ps.append(p)
elif n_vals == 3:
print('for %s, using ANOVA...'%(label))
for col in prob_matrix.columns:
p = stats.f_oneway(prob_matrix.loc[var==vals[0]][col],
prob_matrix.loc[var==vals[1]][col],
prob_matrix.loc[var==vals[2]][col])[-1]
ps.append(p)
elif n_vals == 4:
print('for %s, using ANOVA...'%(label))
for col in prob_matrix.columns:
p = stats.f_oneway(prob_matrix.loc[var==vals[0]][col],
prob_matrix.loc[var==vals[1]][col],
prob_matrix.loc[var==vals[2]][col],
prob_matrix.loc[var==vals[3]][col])[-1]
ps.append(p)
elif n_vals == 5:
print('for %s, using ANOVA...'%(label))
for col in prob_matrix.columns:
p = stats.f_oneway(prob_matrix.loc[var==vals[0]][col],
prob_matrix.loc[var==vals[1]][col],
prob_matrix.loc[var==vals[2]][col],
prob_matrix.loc[var==vals[3]][col],
prob_matrix.loc[var==vals[4]][col])[-1]
ps.append(p)
elif n_vals == 6:
print('for %s, using ANOVA...'%(label))
for col in prob_matrix.columns:
p = stats.f_oneway(prob_matrix.loc[var==vals[0]][col],
prob_matrix.loc[var==vals[1]][col],
prob_matrix.loc[var==vals[2]][col],
prob_matrix.loc[var==vals[3]][col],
prob_matrix.loc[var==vals[4]][col],
                                   prob_matrix.loc[var==vals[5]][col])[-1]
ps.append(p)
else:
print('for %s, using correlation...'%(label))
for col in prob_matrix.columns:
p = stats.pearsonr(prob_matrix[col],var)[-1]
ps.append(p)
if not FDR:
hits = [i for i in range(len(ps)) if ps[i] < alpha_threshold]
else:
correction = multipletests(ps,alpha_threshold,FDR)
hits = [i for i in range(len(ps)) if correction[0][i]]
print('=============%s============'%label)
print('for %s, %s regions were significant'%(label,len(hits)))
if info == 'medium':
print(prob_matrix.columns[hits])
if info == 'heavy':
if not FDR:
print([(prob_matrix.columns[i], ps[i]) for i in hits])
else:
print([(prob_matrix.columns[i], correction[1][i]) for i in hits])
print('\n\n')
return ps
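# --- Illustrative usage sketch (synthetic variables; column names are made up) ---
# Evaluate_Probabilities guesses the test per variable: t-test for 2 groups,
# ANOVA for 3-6 groups, Pearson correlation otherwise, with optional FDR correction.
def _example_evaluate_probabilities():
    rng = np.random.RandomState(1)
    probs = pandas.DataFrame(rng.uniform(0, 1, size=(60, 4)),
                             columns=['roi_%s' % i for i in range(1, 5)])
    to_test = {'diagnosis': rng.randint(0, 2, 60),      # 2 groups -> t-test
               'age': rng.normal(70, 8, 60)}            # continuous -> correlation
    pvals = Evaluate_Probabilities(probs, to_test,
                                   alpha_threshold=0.05,
                                   FDR='fdr_bh', info='medium')
    return pvals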
def Prepare_Inputs_for_ESM(prob_matrices, ages, output_dir, file_name,
conn_matrices = [], conn_mat_names = [],
conn_out_names = [], epicenters_idx = [],
sub_ids = [], visit_labels = [], roi_labels = [],
figure = True, betas0 = None, deltas0 = None):
'''
This script will convert data into a matfile compatible with
running the ESM, and will print outputs to be entered into
ESM launcher script. The script will also adjust connectomes
to accomodate missing (masked) ROIs.
prob_matrices -- a dict object matching string labels to
probability matrices (pandas DataFrames). These will be
converted into a matlab structure. Columns with all 0s will be
removed automatically.
        NOTE: All prob_matrices should have the same shape, and a
matching number of non-zero columns. If they do not, run the
script separately for these matrices.
ages -- an array the same length of prob_matrices that contains
the age of each subject.
output_dir -- an existing directory where all outputs will be
written to
file_name -- the name of the output matfile. Do not include a
file extension
conn_matrices -- a list of paths to matfiles or csvs containing
        connectomes that match the atlas used to initially extract data.
if your probability matrix does not have columns with 0s
(because, for example, you used a mask), this argument can be
left unchanged. Otherwise, the script will chop up the
connectomes so they match the dimensions of the non-zero columns
in the probability matrices.
NOTE: passing this argument requires passing an argument for
conn_out_names
    conn_mat_names -- a list the same length as conn_matrices that
        contains the string labels (the variable names used for the
        connectome inside each matfile)
'''
if type(prob_matrices) != dict:
raise IOError('prob_matrices must be a dict object')
col_lens = []
for lab, df in prob_matrices.items():
good_cols = [y for y in df.columns if not all([x==0 for x in df[y]])]
col_lens.append(len(good_cols))
prob_matrices.update({lab: df[good_cols].values.T})
if not all([x == col_lens[0] for x in col_lens]):
raise IOError('all probability matrices entered must have the same # of non-zero columns')
goodcols = [y for y in range(len(df.columns)) if not all([x==0 for x in df[df.columns[y]]])]
if len(conn_matrices) > 0:
if not len(conn_matrices) == len(conn_out_names):
raise ValueError('equal length lists must be passed for conn_matrices and out_names')
for i,mtx in enumerate(conn_matrices):
if mtx[-3:] == 'csv':
connmat = pandas.read_csv(mtx)
x,y = connmat.shape
if x < y:
connmat = pandas.read_csv(mtx,header=None)
if all(connmat.loc[:,connmat.columns[0]] == range(connmat.shape[0])):
connmat = pandas.read_csv(mtx, index_col=0).values
x,y = connmat.shape
if x < y:
connmat = pandas.read_csv(mtx, index_col=0, header=None).values
else:
connmat = connmat.values
jnk = {}
elif mtx[-3:] == 'mat':
jnk = loadmat(mtx)
connmat = jnk[conn_mat_names[i]]
newmat = np.array([thing[goodcols] for thing in connmat[goodcols]])
prob_matrices.update({conn_out_names[i]: newmat})
#jnk[file_name] = newmat
#savemat(os.path.join(output_dir,conn_out_names[i]), jnk)
print('new connectivity matrix size: for %s'%conn_out_names[i],newmat.shape)
if figure:
plt.close()
try:
sns.heatmap(newmat)
plt.show()
except:
sns.heatmap(newmat.astype(float))
plt.show()
if type(ages) == np.ndarray or type(ages) == list:
ages = pandas.Series(ages)
if len(ages.dropna()) != len(df):
raise ValueError('length mismatch between "ages" and prob_matrices. Does "ages" have NaNs?')
prob_matrices.update({'ages': ages.values})
elif type(ages) == dict:
for key, ages_list in ages.items():
ages_list = pandas.Series(ages_list)
if len(ages_list.dropna()) != len(df):
raise ValueError('length mismatch between "ages" and prob_matrices. Does "ages" have NaNs?')
prob_matrices.update({key: ages_list.values})
if type(sub_ids) == list:
prob_matrices.update({'sub_ids': sub_ids})
if type(visit_labels) == list:
prob_matrices.update({'visit_labels': visit_labels})
elif type(visit_labels) == dict:
for key, visit_list in visit_labels.items():
visit_list = pandas.Series(visit_list)
if len(visit_list.dropna()) != len(df):
raise ValueError('length mismatch between "visits" and prob_matrices. Does "visits" have NaNs?')
prob_matrices.update({key: visit_list.values})
if type(epicenters_idx) == list:
prob_matrices.update({'epicenters_idx': epicenters_idx})
if type(roi_labels) == list:
prob_matrices.update({'roi_labels': roi_labels})
if type(betas0) == list:
prob_matrices.update({'betas': betas0})
if type(deltas0) == list:
prob_matrices.update({'deltas': deltas0})
fl_out = os.path.join(output_dir,file_name)
savemat(fl_out,prob_matrices)
print('ESM input written to',fl_out)
print('===inputs:===')
for x in prob_matrices.keys():
print(x)
if len(conn_matrices) > 0:
print('===connectivity matrices===')
for i in range(len(conn_matrices)):
print(os.path.join(output_dir,conn_out_names[i]) + '.mat')
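# --- Illustrative usage sketch (placeholder paths; not part of the original code) ---
# Packaging a probability matrix and ages into a matfile the ESM launcher can
# read. conn_matrices / conn_mat_names / conn_out_names would be passed as
# matched lists if a connectome also needs to be trimmed to the non-zero ROIs.
def _example_prepare_inputs_for_esm():
    rng = np.random.RandomState(3)
    prob = pandas.DataFrame(rng.uniform(0, 1, size=(30, 10)),
                            columns=['roi_%s' % i for i in range(1, 11)])
    ages = rng.uniform(55, 90, 30)
    Prepare_Inputs_for_ESM(prob_matrices={'amyloid_probs': prob},
                           ages=ages,
                           output_dir='/tmp',            # placeholder output directory
                           file_name='esm_inputs_demo')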
def Evaluate_ESM_Results(results, sids, save=True,
labels = None, lit = False, plot = True):
'''
This script will load the matfile outputted from the ESM, will
display the main model results (r2, RMSE and "eval"), the
chosen epicenter(s) and will return the model outputs as a
pandas DataFrame if desired.
results -- a .mat file created using the ESM script
sids -- a list of subject IDs that matches the subjects input to
the ESM
save -- if True, will return a pandas DataFrame with model
results
labels -- ROI labels that match those from the ESM input matrix.
lit -- If only one epicenter was sent (for example, for
hypothesis testing), set this to True. Otherwise, leave as False.
plot -- If True, function will plot several charts to evaluate
ESM results on an ROI and subject level.
'''
mat = loadmat(results)
if not lit:
res = pandas.DataFrame(index = sids)
for i in range(len(mat['ref_pattern'][0])):
# Model fits
sid = sids[i]
r,p = stats.pearsonr(mat['ref_pattern'][:,i], mat['Final_solutions'][:,i])
res.loc[sid,'model_r'] = r
res.loc[sid,'model_r2'] = r**2
res.loc[:, 'model_RMSE'] = mat['Final_RMSEs'].flatten()
res.loc[:, 'model_eval'] = mat['Final_CORRs'].flatten()
if save:
# params
res.loc[:, 'beta'] = mat['Final_parameters'][0,:].flatten()
res.loc[:, 'delta'] = mat['Final_parameters'][1,:].flatten()
res.loc[:, 'sigma'] = mat['Final_parameters'][2,:].flatten()
# other
res.loc[:, 'ref_age'] = mat['AGEs'].flatten()
res.loc[:, 'times'] = mat['Final_times'].flatten()
res.loc[:, 'Onset_age'] = mat['ONSETS_est'].flatten()
print('average r2 = ', res.model_r2.mean())
print('average RMSE =', res.model_RMSE.mean())
print('average eval =', res.model_eval.mean())
if type(labels) != type(None):
if type(labels) == np.ndarray or type(labels) == list:
labels = pandas.Series(labels)
            print('model identified the following epicenters')
for l in mat['models'][0,0][0][0]:
print(labels.loc[labels.index[l-1]])
if plot:
plot_out = Plot_ESM_results(mat, labels, sids, lit)
if save:
if plot:
res = {'model_output': res, 'eval_output': plot_out}
return res
else:
res = pandas.DataFrame(index = sids)
for i in range(len(mat['ref_pattern'][0])):
# Model fits
sid = sids[i]
r,p = stats.pearsonr(mat['ref_pattern'][:,i], mat['model_solutions0'][:,i])
res.loc[sid,'model_r'] = r
res.loc[sid,'model_r2'] = r**2
res.loc[:, 'model_RMSE'] = mat['model_RMSEs0'].flatten()
res.loc[:, 'model_eval'] = mat['model_CORRs0'].flatten()
if save:
# params
res.loc[:, 'beta'] = mat['model_parameters0'][0,:].flatten()
res.loc[:, 'delta'] = mat['model_parameters0'][1,:].flatten()
res.loc[:, 'sigma'] = mat['model_parameters0'][2,:].flatten()
# other
res.loc[:, 'ref_age'] = mat['AGEs'].flatten()
res.loc[:, 'times'] = mat['model_times0'].flatten()
res.loc[:, 'Onset_age'] = mat['ONSETS_est'].flatten()
print('average r2 = ', res.model_r2.mean())
print('average RMSE =', res.model_RMSE.mean())
print('average eval =', res.model_eval.mean())
#if type(labels) != type(None):
# print('model identfied the following epicenters')
# for l in mat['models'][0,0][0][0]:
# print(labels.iloc[l-1]['label'])
if plot:
plot_out = Plot_ESM_results(mat, labels, sids, lit)
if save:
if plot:
res = {'model_output': res, 'eval_output': plot_out}
return res
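# --- Illustrative usage sketch (hypothetical file name and subject IDs) ---
# Reading an ESM output matfile and summarizing model fit per subject; the
# results path, subject IDs and ROI labels below are placeholders.
def _example_evaluate_esm_results():
    sids = ['sub-%02d' % i for i in range(1, 31)]
    out = Evaluate_ESM_Results('/results/esm_output.mat',
                               sids=sids,
                               save=True,
                               labels=['roi_%s' % i for i in range(1, 11)],
                               lit=False,
                               plot=False)
    # with plot=True, out is a dict holding both the model and eval outputs
    return out['model_output'] if isinstance(out, dict) else out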
def Plot_ESM_results(mat, labels, subids, lit):
if not lit:
mat.update({'model_solutions0': mat['Final_solutions']})
sheets = {}
# regional accuracy across subjects
plt.close()
sns.regplot(mat['ref_pattern'].mean(1), mat['model_solutions0'].mean(1))
plt.xlabel('Avg ROI Amyloid Probability Across Subjects')
plt.ylabel('Avg Predicted ROI Amyloid Probability Across Subjects')
plt.title('Regional accuracy across subjects')
plt.show()
r,p = stats.pearsonr(mat['ref_pattern'].mean(1), mat['model_solutions0'].mean(1))
    print('r2 = ', r**2, '\n')
fp = pandas.DataFrame(pandas.concat([pandas.Series(mat['ref_pattern'].mean(1)),
pandas.Series(mat['model_solutions0'].mean(1))
], axis = 1))
fp.columns = ['reference','predicted']
if type(labels) != type(None):
fp.loc[:,'labels'] = labels
sheets.update({'regional accuracy': fp})
# Average ROI values across subject
r2s = []
for i in range(mat['ref_pattern'].shape[0]):
r = stats.pearsonr(mat['ref_pattern'][i,:],mat['model_solutions0'][i,:])[0]
r2s.append(r**2)
if type(labels) == type(None):
labels = range(mat['ref_pattern'].shape[0])
roi_test = pandas.concat([pandas.Series(labels).astype(str),pandas.Series(r2s)],
axis=1)
roi_test.columns = ['label','r2']
plt.close()
g = sns.catplot(x='label', y='r2',data=roi_test, ci=None,
order = roi_test.sort_values('r2',ascending=False)['label'])
g.set_xticklabels(rotation=90)
g.fig.set_size_inches((14,6))
plt.title('ROI values across subjects')
plt.show()
print(roi_test.r2.mean())
sheets.update({'ROI_acc': roi_test})
# average subjects across ROIs
r2s = []
for i in range(mat['ref_pattern'].shape[-1]):
r2s.append(stats.pearsonr(mat['ref_pattern'][:,i], mat['model_solutions0'][:,i]
)[0]**2)
sub_test = pandas.concat([pandas.Series(subids).astype(str), pandas.Series(r2s)],
axis=1)
sub_test.columns = ['subid','model_r2']
plt.close()
#sns.set_context('notebook')
#g = sns.factorplot(x='subid', y='model_r2', data=sub_test, ci=None,
#order = sub_test.sort_values('model_r2',ascending=False)['subid'])
#g.set_xticklabels(rotation=90)
#g.fig.set_size_inches((14,6))
#plt.show()
#print(sub_test.model_r2.mean())
return sheets
def Plot_Individual(matrix, index, style='ROI', label = None):
'''
Plot a single ROI across subjects, or a single subject across
ROIs.
matrix -- a dict object representing ESM results
index -- the index of the ROI or subject to plot
style -- set to 'ROI' or 'subject'
label -- Title to put over the plot
'''
if style not in ['ROI', 'subject']:
raise IOError('style argument must be set to "ROI" or "subject"')
if 'Final_solutions' not in matrix.keys():
matrix.update({'Final_solutions': matrix['model_solutions0']})
if style == 'ROI':
x = matrix['ref_pattern'][index,:]
y = matrix['Final_solutions'][index,:]
else: # subject
x = matrix['ref_pattern'][:,index]
y = matrix['Final_solutions'][:,index]
plt.close()
sns.regplot(x,y)
plt.xlabel('Observed')
plt.ylabel('Predicted')
if label:
plt.title(label)
plt.show()
def Prepare_PET_Data(files_in, atlases, ref = None, msk = None, dimension_reduction = False,
ECDF_in = None, output_type = 'py', out_dir = './', out_name = 'PET_data',
save_matrix = False, save_ECDF = False, save_images = False, ref_index = [],
mx_model = 0, orig_atlas = None, esm2014method_py = False, orig_prob_method_matlab = False):
''' This is a function that will take several PET images and an atlas and will
return a subject X region matrix. If specified, the function will also calculate
probabilities (via ECDF) either voxelwise, or using a specified reference region
files_in = input can either be
- a path to a directory full of (only) nifti images OR
- a "search string" using wildcards
- a list of subject paths OR
- a subject X image matrix
atlas = multiple options:
- a path to a labeled regional atlas in the same space as the PET data
- if analysis was done in native space, a path to a list of labeled regional atlases
ref = multiple options:
- If None, no probabilities will be calculated, and script will simply extract
regional PET data using the atlas.
- If a path to a reference region mask, will calculate voxelwise probabilities
based on values within the reference region. Mask must be in the same space as
as PET data and atlas
- List of paths to reference region masks in native space. Voxelwise probabilities
will be calculated based on values within the reference region.
- If a list of integers, will combine these atlas labels with these integers to
make reference region
- if 'voxelwise', voxelwise (or atom-wise from dimension reduction) probabilities
        will be estimated. In other words, each voxel or atom will serve as its own
reference.
msk = multiple options:
- A path to a binary mask file in the same space as PET data and atlas. If None,
mask will be computed as a binary mask of the atlas.
** PLEASE NOTE: The mask will be used to mask the reference region! **
dimension_reduction = whether or not to first reduce dimensions of data using
    hierarchical clustering. This adds an initial step that will be very slow, but
    may result in an overall speedup for the script, though perhaps only if ref is set
to 'voxelwise'.
- If None, do not perform dimension reduction
- If integer, the number of atoms (clusters) to reduce to
ECDF_in = If the user wishes to apply an existing ECDF to the PET data instead of
    generating one de novo, that can be done here. This is crucial if the user wishes to
use multiple datasets. Think of it like scaling in machine learning.
- If None, will generate ECDF de novo.
- If np.array, will use this array to generate the ECDF.
        - If statsmodels ECDF object, will use this as the ECDF
- If a path, will use the
output_type = type of file to save final subject x region matrix into. multiple options:
-- 'py' will save matrix into a csv
-- 'mat' will save matrix into a matfile
out_dir = location to save output files. Defaults to current directory
out_name = the prefix for all output files
save_matrix = Whether to save or return subject x image matrix. Useful if running multiple
times, as this matrix can be set as files_in, bypassing the costly data import
-- if 'return', will return subject x image matrix to python environment
-- if 'save', will write subject x image matrix to file.
-- if None, matrix will not be stored
save_ECDF = whether to save the ECDF used to create the probabilities. This is crucial if
using multiple datasets. The resulting output can be used as input for the ECDF argument.
        -- if 'return', will return np.array to python environment
-- if 'save', will write array to file
-- if None, array will not be stored
'''
# Check input arguments
print('initiating...')
if output_type != 'py' and output_type != 'mat':
raise IOError('output_type must be set to py or mat')
# Initialize variables
# Load data
print('loading data...')
    i4d = load_data(files_in, return_images=True) # load PET data as a single 4D image
if save_matrix == 'save':
otpt = os.path.join(out_dir,'%s_4d_data'%out_name)
print('saving 4d subject x scan to nifti image: \n',otpt)
i4d.to_filename(otpt)
# load atlas
if type(atlases) != list:
if type(atlases) == str:
try:
atlas = ni.load(atlases).get_data().astype(int)
except:
                raise IOError('could not find an atlas at the specified location: %s'%atlases)
if orig_atlas == True:
orig_atlas = np.array(atlas, copy=True)
if atlas.shape != i4d.shape[:3]:
raise ValueError('atlas dimensions do not match PET data dimensions')
# load reference region
if type(ref) == str and ref != 'voxelwise':
print('looking for reference image...')
            if not os.path.isfile(ref):
raise IOError('Please enter a valid path for ref, or select a different option for this argument')
else:
ref_msk = ni.load(ref).get_data()
if ref_msk.shape != i4d.shape[:3]:
raise ValueError('ref region image dimensions do not match PET data dimensions')
elif type(ref) == list:
ref_msk = np.zeros_like(atlas)
for i in ref:
ref_msk[atlas == i] = 1
else:
ref_msk = None
# Mask data
print('masking data...')
if msk == None:
img_mask = np.array(atlas,copy=True)
img_mask[img_mask<1] = 0
img_mask[img_mask>0] = 1
else:
img_mask = ni.load(msk).get_data()
atlas[img_mask < 1] = 0
if type(ref_msk) != type(None):
ref_msk[img_mask < 1] = 0
mask_tfm = input_data.NiftiMasker(ni.Nifti1Image(img_mask,i4d.affine))
mi4d = mask_tfm.fit_transform(i4d)
# dimension reduction (IN BETA!)
if dimension_reduction:
print('reducing dimensions...')
shape = img_mask.shape
connectivity = grid_to_graph(n_x=shape[0], n_y=shape[1],
n_z=shape[2], mask=img_mask)
# main ECDF calculation (or mixture model calc)
skip = False
if ref != 'voxelwise':
if type(ECDF_in) != type(None):
print('generating ECDF...')
print('using user-supplied data...')
if type(ECDF_in) == ed.ECDF:
mi4d_ecdf, ecref = ecdf_simple(mi4d, ECDF_in, mx=mx_model)
input_distribution = 'not generated'
elif type(ECDF_in) == np.ndarray:
mi4d_ecdf, ecref = ecdf_simple(mi4d, ECDF_in, mx=mx_model)
input_distribution = ECDF_in
# elif # add later an option for importing an external object
else:
try:
mi4d_ecdf, ecref = ecdf_simple(mi4d, ECDF_in, mx=mx_model)
print('Could not understand ECDF input, but ECDF successful')
input_distribution = 'not generated'
except:
raise IOError(
'Invalid argument for ECDF in. Please enter an ndarray, an ECDF object, or a valid path')
else:
if type(ref_msk) != type(None):
print('generating ECDF...')
ref_tfm = input_data.NiftiMasker(ni.Nifti1Image(ref_msk,i4d.affine))
refz = ref_tfm.fit_transform(i4d)
mi4d_ecdf, ecref = ecdf_simple(mi4d, refz, mx=mx_model)
input_distribution = refz.flat
else:
print('skipping ECDF...')
skip = True
else:
print('generating voxelwise ECDF...')
mi4d_ecdf, ECDF_array = ecdf_voxelwise(mi4d, ref_index, save_ECDF, mx=mx_model)
input_distribution = 'not generated'
if not skip:
# if save_ECDF:
# create an array and somehow write it to a file
# transform back to image-space
print('transforming back into image space')
f_images = mask_tfm.inverse_transform(mi4d_ecdf)
else:
#if type(ECDF):
print('transforming back into image space')
f_images = mask_tfm.inverse_transform(mi4d)
# generate output matrix
print('generating final subject x region matrix')
if type(orig_atlas) == type(None):
f_mat = generate_matrix_from_atlas_old(f_images, atlas)
else:
f_mat = generate_matrix_from_atlas_old(f_images, orig_atlas)
else:
if len(atlases) != len(files_in):
raise IOError('number of images (%s) does not match number of atlases (%s)'%(len(files_in),
len(atlases)))
type_ref_int = True
if isinstance(ref, list):
if all(isinstance(x, int) for x in ref):
print("Passing in a list of integers to specify the reference region.")
elif all(isinstance(x, str) for x in ref):
if len(ref) == len(files_in):
type_ref_int = False
print("Passing in a list of paths to reference region masks in native space")
else:
                    raise IOError(
                        'number of images (%s) does not match number of ref region masks (%s)' % (len(files_in),
                                                                                                   len(ref)))
catch = []
for i in range(0, len(files_in)):
print(files_in[i])
i4d = ni.load(files_in[i])
atlas = ni.load(atlases[i]).get_data()
if len(atlas.shape) == 4:
atlas = np.reshape(atlas, atlas.shape[:3])
if atlas.shape != i4d.shape[:3]:
raise ValueError('atlas dimensions do not match PET data dimensions')
if type_ref_int:
                ref_msk = np.zeros_like(atlas)
                for lab in ref:
                    ref_msk[atlas == lab] = 1
else:
ref_msk = ni.load(ref[i]).get_data()
if len(ref_msk.shape) == 4:
ref_msk = np.reshape(ref_msk, ref_msk.shape[:3])
if ref_msk.shape != i4d.shape[:3]:
raise ValueError('ref region image dimensions do not match PET data dimensions')
if type(ref_msk) != type(None):
ref_msk[ref_msk < 1] = 0
ref_msk[ref_msk > 0] = 1
# Mask data
if msk == None:
img_mask = np.array(atlas, copy=True)
img_mask[img_mask < 1] = 0
img_mask[img_mask > 0] = 1
else:
img_mask = ni.load(msk).get_data()
atlas[img_mask < 1] = 0
mask_tfm = input_data.NiftiMasker(ni.Nifti1Image(img_mask, i4d.affine))
mi4d = mask_tfm.fit_transform(i4d)
#Calculate voxelwise ECDF with respect to ref region in native space
skip = False
if type(ECDF_in) == type(None):
if type(ref_msk) != type(None):
print('generating ECDF...')
ref_tfm = input_data.NiftiMasker(ni.Nifti1Image(ref_msk, i4d.affine))
refz = ref_tfm.fit_transform(i4d)
if esm2014method_py:
mi4d_ecdf, ecref = ecdf_voxelwise_bootstrapped_maxvalues_refregion(mi4d, refz)
elif orig_prob_method_matlab:
mi4d_ecdf = ecdf_voxelwise_bootstrapped_yasser2016(mi4d, refz)
else:
mi4d_ecdf, ecref = ecdf_simple(mi4d, refz, mx=mx_model)
input_distribution = refz.flat
else:
print('skipping ECDF...')
skip = True
if not skip:
print('transforming back into image space')
f_images = mask_tfm.inverse_transform(mi4d_ecdf)
else:
# if type(ECDF):
print('transforming back into image space')
f_images = mask_tfm.inverse_transform(mi4d)
# generate output matrix
print('generating final subject x region matrix')
if type(orig_atlas) == type(None):
f_mat_single = generate_matrix_from_atlas_old(f_images, atlas)
else:
f_mat_single = generate_matrix_from_atlas_old(f_images, orig_atlas)
#f_mat_single = ecdf_main(mi4d=mi4d, i4d=i4d, atlas=atlas, ref=ref, mask_tfm = mask_tfm)
catch.append(f_mat_single)
f_mat = pandas.concat(catch)
print('preparing outputs')
output = {}
if output_type == 'py':
f_mat.to_csv(os.path.join(out_dir, '%s_roi_data.csv' % out_name), index=False)
output.update({'roi_matrix': f_mat})
else:
output.update({'roi_matrix': f_mat.values})
output.update({'roi_matrix_columns': f_mat.columns})
if save_matrix == 'return':
output.update({'4d_image_matrix': i4d})
if save_ECDF == 'return':
if output_type == 'py':
output.update({'ECDF_function': ECDF_array})
else:
output.update({'input_distribution': input_distribution})
return output
def ecdf_main(mi4d, i4d, atlas, ref, mask_tfm, mx_model=0, ref_msk=None, save_ECDF=False, ECDF_in=None, ref_index=[],
skip=False, orig_atlas=None):
if ref != 'voxelwise':
if type(ECDF_in) != type(None):
print('generating ECDF...')
print('using user-supplied data...')
if type(ECDF_in) == ed.ECDF:
mi4d_ecdf, ecref = ecdf_simple(mi4d, ECDF_in, mx=mx_model)
input_distribution = 'not generated'
elif type(ECDF_in) == np.ndarray:
mi4d_ecdf, ecref = ecdf_simple(mi4d, ECDF_in, mx=mx_model)
input_distribution = ECDF_in
# elif # add later an option for importing an external object
else:
try:
mi4d_ecdf, ecref = ecdf_simple(mi4d, ECDF_in, mx=mx_model)
print('Could not understand ECDF input, but ECDF successful')
input_distribution = 'not generated'
except:
raise IOError(
'Invalid argument for ECDF in. Please enter an ndarray, an ECDF object, or a valid path')
else:
if type(ref_msk) != type(None):
print('generating ECDF...')
ref_tfm = input_data.NiftiMasker(ni.Nifti1Image(ref_msk,i4d.affine))
refz = ref_tfm.fit_transform(i4d)
mi4d_ecdf, ecref = ecdf_simple(mi4d, refz, mx=mx_model)
input_distribution = refz.flat
else:
print('skipping ECDF...')
skip = True
else:
print('generating voxelwise ECDF...')
mi4d_ecdf, ECDF_array = ecdf_voxelwise(mi4d, ref_index, save_ECDF, mx=mx_model)
input_distribution = 'not generated'
if not skip:
# if save_ECDF:
# create an array and somehow write it to a file
# transform back to image-space
print('transforming back into image space')
f_images = mask_tfm.inverse_transform(mi4d_ecdf)
else:
#if type(ECDF):
print('transforming back into image space')
f_images = mask_tfm.inverse_transform(mi4d)
# generate output matrix
print('generating final subject x region matrix')
if type(orig_atlas) == type(None):
f_mat = generate_matrix_from_atlas_old(f_images, atlas)
else:
f_mat = generate_matrix_from_atlas_old(f_images, orig_atlas)
return f_mat
def load_data_old(files_in):
fail = False
if type(files_in) == str:
if os.path.isdir(files_in):
print('It seems you passed a directory')
search = os.path.join(files_in,'*')
num_f = len(glob(search))
if num_f == 0:
raise IOError('specified directory did not contain any files')
else:
print('found %s images!'%num_f)
i4d = image.load_img(search)
elif '*' in files_in:
print('It seems you passed a search string')
num_f = len(glob(files_in))
if num_f == 0:
raise IOError('specified search string did not result in any files')
else:
print('found %s images'%num_f)
i4d = image.load_img(files_in)
else:
fail = True
elif type(files_in) == list:
print('processing %s subjects'%len(files_in))
i4d = ni.concat_images(files_in)
elif type(files_in) == ni.nifti1.Nifti1Image:
print('processing %s subjects'%files_in.shape[-1])
i4d = files_in
else:
fail = True
if fail:
print('files_in not recognized.',
'Please enter a search string, valid directory, list of subjects, or matrix')
raise ValueError('I do not recognize the files_in input.')
return i4d
def dim_reduction(mi4d, connectivity, dimension_reduction):
    # The first, smaller fit appears intended to warm the nilearn_cache so that
    # the second, full fit can reuse the cached ward tree.
    ward = FeatureAgglomeration(n_clusters=int(dimension_reduction/2),
                connectivity=connectivity, linkage='ward', memory='nilearn_cache')
    ward.fit(mi4d)
    ward = FeatureAgglomeration(n_clusters=int(dimension_reduction),
                connectivity=connectivity, linkage='ward', memory='nilearn_cache')
    ward.fit(mi4d)
mi4d = ward.transform(mi4d)
return mi4d
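# Illustrative sketch (hypothetical helper, not part of the original pipeline):
# a self-contained example of the FeatureAgglomeration step performed by
# dim_reduction(), run on synthetic data so it needs no imaging inputs.
def _demo_feature_agglomeration():
    import numpy as np
    from sklearn.cluster import FeatureAgglomeration
    rng = np.random.default_rng(0)
    X = rng.normal(size=(20, 100))                      # 20 subjects x 100 voxels
    agglo = FeatureAgglomeration(n_clusters=10, linkage='ward')
    X_reduced = agglo.fit_transform(X)                  # 20 subjects x 10 parcels
    return X_reduced.shape                              # (20, 10)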
def ecdf_voxelwise_bootstrapped_yasser2016(mi4d, refz):
mi4d_ecdf = eng.voxelwise_pet_prob_yasser2016(matlab.double([list(mi4d[0])]), matlab.double([list(refz[0])]))
return mi4d_ecdf
def ecdf_voxelwise_bootstrapped_maxvalues_refregion(mi4d, refz):
refz_max_values = []
for i in range(0, 40000):
resampled_refz = resample(refz.flatten(), n_samples=500, replace=True)
percentile_value = np.percentile(resampled_refz, 95)
refz_max_values.append(percentile_value)
refz_max_array = np.array(refz_max_values)
refz_max_array = np.reshape(refz_max_array, (1, len(refz_max_array)))
mi4d_ecdf, ecref = ecdf_simple(mi4d, refz_max_array, mx=0)
return mi4d_ecdf, ecref
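# Illustrative sketch (hypothetical helper): the resampling idea behind
# ecdf_voxelwise_bootstrapped_maxvalues_refregion(), shown on synthetic data --
# repeatedly resample a reference distribution and collect its 95th percentile.
def _demo_bootstrap_reference_percentile(n_boot=1000, n_samples=500):
    import numpy as np
    from sklearn.utils import resample
    rng = np.random.default_rng(0)
    refz = rng.normal(loc=1.0, scale=0.2, size=5000)    # stand-in reference-region values
    percentiles = [np.percentile(resample(refz, n_samples=n_samples, replace=True), 95)
                   for _ in range(n_boot)]
    return np.asarray(percentiles).reshape(1, -1)       # shape (1, n_boot), as above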
def ecdf_simple(mi4d, refz, mx=0):
if type(refz) == ed.ECDF:
ecref = refz
else:
if len(refz.shape) > 1:
ecref = ed.ECDF(refz.flat)
else:
ecref = ed.ECDF(refz)
print('transforming images...')
if mx == 0:
mi4d_ecdf = ecref(mi4d.flat).reshape(mi4d.shape[0],mi4d.shape[1])
else:
print('are you sure it makes sense to use a mixture model on reference region?')
mod = GaussianMixture(n_components=mx).fit(ecref)
mi4d_ecdf = mod.predict_proba(mi4d.flat)[:,-1].reshape(mi4d.shape[0],mi4d.shape[1])
return mi4d_ecdf, ecref
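# Illustrative sketch (hypothetical helper): how ecdf_simple() maps image
# intensities onto quantiles of a reference distribution using statsmodels' ECDF.
def _demo_ecdf_transform():
    import numpy as np
    from statsmodels.distributions.empirical_distribution import ECDF
    rng = np.random.default_rng(0)
    refz = rng.normal(size=1000)                        # reference-region values
    mi4d = rng.normal(size=(5, 20))                     # 5 subjects x 20 voxels
    ecref = ECDF(refz)
    mi4d_ecdf = ecref(mi4d.flat).reshape(mi4d.shape)    # probabilities in [0, 1]
    return mi4d_ecdf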
def ecdf_voxelwise(mi4d, ref_index, save_ECDF, mx=0):
X,y = mi4d.shape
if mx != 0:
mmod = GaussianMixture(n_components=mx)
if len(ref_index) == 0:
if not save_ECDF:
if mx == 0:
mi4d_ecdf = np.array([ed.ECDF(mi4d[:,x])(mi4d[:,x]) for x in range(y)]).transpose()
else:
mi4d_ecdf = np.array([mmod.fit(mi4d[:,x].reshape(-1,1)).predict_proba(mi4d[:,x].reshape(-1,1)
)[:,-1] for x in range(y)]).transpose()
ECDF_array = None
else:
if mx == 0:
ECDF_array = np.array([ed.ECDF(mi4d[:,x]) for x in range(y)]).transpose()
print('transforming data...')
mi4d_ecdf = np.array([ECDF_array[x](mi4d[:,x]) for x in range(y)]
).transpose()
else:
raise IOError('at this stage, cant save mixture model info....sorry...')
else:
if mx == 0:
                # if you don't want to include subjects used for reference, uncomment the next line,
                # comment out the line after it, and fix the "transpose" line so that the data gets back into the matrix properly
#good_ind = [x for x in list(range(X)) if x not in ref_index]
good_ind = range(X)
if not save_ECDF:
mi4d_ecdf = np.array([ed.ECDF(mi4d[ref_index,x])(mi4d[good_ind,x]) for x in range(y)]
).transpose()
ECDF_array = None
else:
ECDF_array = [ed.ECDF(mi4d[ref_index,x]) for x in range(y)]
print('transforming data...')
mi4d_ecdf = np.array([ECDF_array[x](mi4d[good_ind,x]) for x in range(y)]
).transpose()
else:
### COMING SOON!
raise IOError('have not yet set up implementation for mixture models and reg groups')
return mi4d_ecdf, ECDF_array
def generate_matrix_from_atlas_old(files_in, atlas):
files_in = files_in.get_data()
atlas = atlas.astype(int)
f_mat = pandas.DataFrame(index = range(files_in.shape[-1]),
columns = ['roi_%s'%x for x in np.unique(atlas) if x != 0])
tot = np.bincount(atlas.flat)
sorted_cols = []
for sub in range(files_in.shape[-1]):
mtx = files_in[:,:,:,sub]
sums = np.bincount(atlas.flat, weights = mtx.flat)
rois = (sums/tot)[1:]
for i in range(0, len(rois)):
col="roi_" + str(i+1)
sorted_cols.append(col)
if col in list(f_mat.columns):
f_mat.loc[f_mat.index[sub], col] = rois[i]
else:
f_mat.loc[f_mat.index[sub], col] = 0
f_mat = f_mat[sorted_cols]
return f_mat
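# Illustrative sketch (hypothetical helper): the bincount trick used by
# generate_matrix_from_atlas_old() to average an image within each atlas label.
def _demo_roi_means_with_bincount():
    import numpy as np
    atlas = np.array([0, 1, 1, 2, 2, 2])                # label 0 = background
    img = np.array([9.0, 2.0, 4.0, 1.0, 2.0, 3.0])
    tot = np.bincount(atlas)                            # voxel count per label -> [1, 2, 3]
    sums = np.bincount(atlas, weights=img)              # summed signal per label
    roi_means = (sums / tot)[1:]                        # drop background -> [3., 2.]
    return roi_means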
def W_Transform(roi_matrix, covariates, norm_index = [],
columns = [], verbose = False):
'''
Depending on inputs, this function will either regress selected
variables out of an roi_matrix, or will perform a W-transform on an
roi_matrix.
    The W-transform is computed as:
    (A - Pc) / SDrc
    where A is the actual value of the roi; Pc is the predicted value of
    the roi *based on the covariates of the norm sample*; and SDrc is the
    standard deviation of the residuals *of the norm sample*.
roi_matrix = a subjects x ROI array
covariates = a subject x covariates array
norm_index = index pointing exclusively to subjects to be used for
normalization. If norm index is passed, W-transformation will be
performed using these subjects as the norm_sample (see equation
above). If no norm_index is passed, covariates will simply be
regressed out of all ROIs.
    columns = the columns to use from the covariate matrix. If none are
    passed, all columns of the covariate matrix will be used.
verbose = If True, will notify upon the completion of each ROI
transformation.
'''
if type(roi_matrix) != pandas.core.frame.DataFrame:
raise IOError('roi_matrix must be a subjects x ROIs pandas DataFrame')
if type(covariates) != pandas.core.frame.DataFrame:
raise IOError('covariates must be a subjects x covariates pandas DataFrame')
covariates = clean_time(covariates)
roi_matrix = clean_time(roi_matrix)
if len(columns) > 0:
covs = pandas.DataFrame(covariates[columns], copy=True)
else:
covs = pandas.DataFrame(covariates, copy=True)
if covs.shape[0] != roi_matrix.shape[0]:
raise IOError('length of indices for roi_matrix and covariates must match')
else:
data = pandas.concat([roi_matrix, covs], axis=1)
output = pandas.DataFrame(np.zeros_like(roi_matrix.values),
index = roi_matrix.index,
columns = roi_matrix.columns)
if len(norm_index) == 0:
for roi in roi_matrix.columns:
eq = '%s ~'%roi
for i,col in enumerate(covs.columns):
if i != len(covs.columns) - 1:
eq += ' %s +'%col
else:
eq += ' %s'%col
mod = smf.ols(eq, data = data).fit()
output.loc[:,roi] = mod.resid
if verbose:
print('finished',roi)
else:
for roi in roi_matrix.columns:
eq = '%s ~'%roi
for i,col in enumerate(covs.columns):
if i != len(covs.columns) - 1:
eq += ' %s +'%col
else:
eq += ' %s'%col
mod = smf.ols(eq, data=data.loc[norm_index]).fit()
predicted = mod.predict(data)
w_score = (data.loc[:,roi] - predicted) / mod.resid.std()
output.loc[:,roi] = w_score
if verbose:
print('finished',roi)
return output
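# Illustrative sketch (hypothetical helper): the W-score computed by W_Transform()
# for a single ROI, written out on synthetic data. The model is fit on the norm
# sample only; scores are (actual - predicted) / SD of the norm-sample residuals.
def _demo_w_score_single_roi():
    import numpy as np
    import pandas as pd
    import statsmodels.formula.api as smf
    rng = np.random.default_rng(0)
    n = 100
    age = rng.uniform(50, 90, n)
    roi = 0.02 * age + rng.normal(scale=0.1, size=n)
    data = pd.DataFrame({'roi_1': roi, 'age': age})
    norm_index = data.index[:60]                        # pretend these are the controls
    mod = smf.ols('roi_1 ~ age', data=data.loc[norm_index]).fit()
    predicted = mod.predict(data)
    w_scores = (data['roi_1'] - predicted) / mod.resid.std()
    return w_scores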
def clean_time(df):
df = pandas.DataFrame(df, copy=True)
symbols = ['.','-',' ', ':', '/','&']
ncols = []
for col in df.columns:
for symbol in symbols:
if symbol in col:
col = col.replace(symbol,'_')
ncols.append(col)
df.columns = ncols
return df
def Weight_Connectome(base_cx, weight_cx, method = 'min', symmetric = True,
transform = MinMaxScaler(), transform_when = 'post',
illustrative = False, return_weight_mtx = False):
    if method not in ['min','mean','max']:
        raise IOError('a value of "min", "mean" or "max" must be passed for method argument')
    choices = ['pre','post','both','never']
if transform_when not in choices:
raise IOError('transform_when must be set to one of the following: %s'%choices)
if len(np.array(weight_cx.shape)) == 1 or np.array(weight_cx).shape[-1] == 1:
print('1D array passed. Transforming to 2D matrix using %s method'%method)
weight_cx = create_connectome_from_1d(weight_cx, method, symmetric)
if transform_when == 'pre' or transform_when == 'both':
weight_cx = transform.fit_transform(weight_cx)
if base_cx.shape == weight_cx.shape:
if illustrative:
plt.close()
sns.heatmap(base_cx)
plt.title('base_cx')
plt.show()
plt.close()
sns.heatmap(weight_cx)
plt.title('weight_cx')
plt.show()
weighted_cx = base_cx * weight_cx
if illustrative:
plt.close()
sns.heatmap(weighted_cx)
plt.title('final (weighted) cx')
plt.show()
else:
        raise ValueError('base_cx (%s) and weight_cx (%s) do not have the same shape'%(
base_cx.shape,
weight_cx.shape))
    if transform_when == 'post' or transform_when == 'both':
        weighted_cx = transform.fit_transform(weighted_cx)
if return_weight_mtx:
return weighted_cx, weight_cx
else:
return weighted_cx
def create_connectome_from_1d(cx, method, symmetric):
nans = [x for x in range(len(cx)) if not pandas.notnull(cx[x])]
    if len(nans) > 0:
raise ValueError('Values at indices %s are NaNs. Cannot compute'%nans)
weight_cx = np.zeros((len(cx),len(cx)))
if method == 'min':
if symmetric:
for i,j in list(itertools.product(range(len(cx)),repeat=2)):
weight_cx[i,j] = min([cx[i],cx[j]])
else:
for i,j in itertools.combinations(range(len(cx)),2):
weight_cx[i,j] = min([cx[i],cx[j]])
rotator = np.rot90(weight_cx, 2)
weight_cx = weight_cx + rotator
elif method == 'mean':
if symmetric:
for i,j in list(itertools.product(range(len(cx)),repeat=2)):
weight_cx[i,j] = np.mean([cx[i],cx[j]])
else:
for i,j in itertools.combinations(range(len(cx)),2):
weight_cx[i,j] = np.mean([cx[i],cx[j]])
rotator = np.rot90(weight_cx, 2)
weight_cx = weight_cx + rotator
elif method == 'max':
if symmetric:
for i,j in list(itertools.product(range(len(cx)),repeat=2)):
weight_cx[i,j] = max([cx[i],cx[j]])
else:
for i,j in itertools.combinations(range(len(cx)),2):
weight_cx[i,j] = max([cx[i],cx[j]])
rotator = np.rot90(weight_cx, 2)
weight_cx = weight_cx + rotator
return weight_cx
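# Illustrative sketch (hypothetical helper): what create_connectome_from_1d()
# produces for a short vector with method='min' and symmetric=True.
def _demo_connectome_from_1d():
    import itertools
    import numpy as np
    cx = np.array([0.2, 0.5, 0.9])
    weight_cx = np.zeros((len(cx), len(cx)))
    for i, j in itertools.product(range(len(cx)), repeat=2):
        weight_cx[i, j] = min(cx[i], cx[j])             # symmetric by construction
    return weight_cx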
def plot_best_epicenter_x_subs(output_files, subs_to_select=None, color="blue",title=None, plot=True, dataset="DIAN"):
clinical_df = pandas.read_csv("../../data/DIAN/participant_metadata/CLINICAL_D1801.csv")
pib_df = pandas.read_csv("../../data/DIAN/participant_metadata/pib_D1801.csv")
genetic_df = pandas.read_csv("../../data/DIAN/participant_metadata/GENETIC_D1801.csv")
output_files = sorted(output_files)
example_output = loadmat(output_files[0])
subs = list(example_output['sub_ids'])
visit_labels = example_output['visit_labels']
if dataset == "DIAN":
rois = list(x.rstrip()[5:] for x in example_output['roi_labels'][0:38])
elif dataset == "ADNI":
rois = list(x.rstrip()[5:] for x in example_output['roi_labels'][0:39])
pup_rois = ["precuneus", "superior frontal", "rostral middle frontal", "lateral orbitofrontal", "medial orbitofrontal",
"superior temporal", "middle temporal"]
composite_roi_list = []
for i,roi in enumerate(example_output['roi_labels']):
for roi2 in pup_rois:
if roi2 in roi.lower():
composite_roi_list.append(i)
sub_epicenter_df = pandas.DataFrame(columns=rois, index=subs)
sub_epicenter_df.loc[:, 'visit_label'] = visit_labels
all_rois = []
for i,f in enumerate(output_files):
mat = loadmat(f)
ref_pattern = mat['ref_pattern']
pred_pattern = mat['model_solutions0']
if dataset == "DIAN":
epicenter_idx = output_files[i].split("/")[-1].split("_")[8].split("-")[1]
elif dataset == "ADNI":
epicenter_idx = output_files[i].split("/")[-1].split("_")[2].split("-")[1]
if epicenter_idx.isdigit():
epicenter_name = rois[int(epicenter_idx)-1]
else:
epicenter_name = epicenter_idx
all_rois.append(epicenter_name)
for i, sub in enumerate(sub_epicenter_df.index):
r,p = stats.pearsonr(ref_pattern[:,i], pred_pattern[:,i])
r2 = r**2
sub_epicenter_df.loc[sub, epicenter_name] = r2
sub_epicenter_df.loc[sub, 'esm_idx'] = i
for i,sub in enumerate(sub_epicenter_df.index):
visit = sub_epicenter_df.loc[sub, "visit_label"]
sub_epicenter_df.loc[sub, "AB_Composite"] = np.mean(ref_pattern[composite_roi_list,i])
if dataset == "DIAN":
sub_epicenter_df.loc[sub, "DIAN_EYO"] = clinical_df[(clinical_df.IMAGID == sub) & (clinical_df.visit == visit)].DIAN_EYO.values[0]
ab_composite_bs = pib_df[(pib_df.IMAGID == sub) & (pib_df.visit == visit)].PIB_fSUVR_TOT_CORTMEAN.values[0] / pib_df[(pib_df.IMAGID == sub) & (pib_df.visit == visit)].PIB_fSUVR_TOT_BRAINSTEM.values[0]
sub_epicenter_df.loc[sub, 'AB_COMPOSITE_SUVR_BS'] = ab_composite_bs
if sub_epicenter_df.loc[sub, "AB_Composite"] > 0.1:
sub_epicenter_df.loc[sub, "AB_Positive"] = True
else:
sub_epicenter_df.loc[sub, "AB_Positive"] = False
if sub_epicenter_df.loc[sub, "AB_COMPOSITE_SUVR_BS"] > 0.79:
sub_epicenter_df.loc[sub, "AB_POSITIVE_SUVR"] = True
else:
sub_epicenter_df.loc[sub, "AB_POSITIVE_SUVR"] = False
sub_epicenter_df.loc[sub, 'mut_type'] = genetic_df[genetic_df.IMAGID == sub].MUTATIONTYPE.values[0]
if sub_epicenter_df.loc[sub, 'mut_type'] == 1:
sub_epicenter_df.loc[sub, 'mut_type_name'] = "PSEN1"
elif sub_epicenter_df.loc[sub, 'mut_type'] == 2:
sub_epicenter_df.loc[sub, 'mut_type_name'] = "PSEN2"
else:
sub_epicenter_df.loc[sub, 'mut_type_name'] = "APP"
sub_epicenter_df.loc[sub, 'CDR'] = clinical_df[(clinical_df.IMAGID == sub) & (clinical_df.visit == visit)].cdrglob.values[0]
for sub in sub_epicenter_df.index:
epicenter_vals = list(sub_epicenter_df.loc[sub, all_rois])
esm_idx = int(sub_epicenter_df.loc[sub, 'esm_idx'])
idx = epicenter_vals.index(max(epicenter_vals))
roi = all_rois[idx]
sub_epicenter_df.loc[sub, "Best_Epicenter"] = roi
sub_epicenter_df.loc[sub, "Best_Epicenter_R2"] = sub_epicenter_df.loc[sub, roi]
best_exp = loadmat(output_files[idx])
sub_epicenter_df.loc[sub, "BETAS_est"] = list(best_exp['BETAS_est'])[esm_idx]
sub_epicenter_df.loc[sub, "DELTAS_est"] = list(best_exp['DELTAS_est'])[esm_idx]
    if subs_to_select is not None:
        subs = subs_to_select
    if plot:
plt.figure(figsize=(20,10))
g = sns.countplot(sub_epicenter_df.loc[subs, "Best_Epicenter"],
color=color,
order = sub_epicenter_df.loc[subs, "Best_Epicenter"].value_counts().index)
g.set_xticklabels(g.get_xticklabels(), rotation=45, horizontalalignment='right',fontsize=16)
g.set_title(title, fontsize=20)
g.set_xlabel("Epicenter", fontsize=20)
g.set_ylabel("Count", fontsize=20)
g.set_yticklabels(g.get_yticks(), fontsize=16)
plt.show()
plt.close()
return sub_epicenter_df
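# Illustrative sketch (hypothetical helper): the per-subject fit statistic used
# above -- the squared Pearson correlation between a subject's observed and
# predicted regional patterns.
def _demo_subject_fit_r2():
    import numpy as np
    from scipy import stats
    rng = np.random.default_rng(0)
    ref_pattern = rng.uniform(size=38)                              # observed regional probabilities
    pred_pattern = ref_pattern + rng.normal(scale=0.05, size=38)    # model solution
    r, p = stats.pearsonr(ref_pattern, pred_pattern)
    return r ** 2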
def group_level_performance(output_files, subs_to_select=None, dataset="DIAN"):
output_files = sorted(output_files)
example_output = loadmat(output_files[0])
visit_labels = example_output['visit_labels']
if dataset == "DIAN":
rois = list(x.rstrip()[5:] for x in example_output['roi_labels'][0:38])
elif dataset == "ADNI":
rois = list(x.rstrip()[5:] for x in example_output['roi_labels'][0:39])
subs = list(example_output['sub_ids'].flatten())
# for i,roi in enumerate(example_output['roi_labels']):
# for roi2 in pup_rois:
# if roi2 in roi.lower():
# composite_roi_list.append(i)
global_performance_dict = {}
if dataset == "DIAN":
num_files = 38
elif dataset == "ADNI":
num_files = 39
for i,f in enumerate(output_files[0:num_files]):
mat = loadmat(f)
ref_pattern_df = pandas.DataFrame(index=subs, columns=example_output['roi_labels'])
ref_pattern_df.loc[:,:] = mat['ref_pattern'].transpose()
        pred_pattern_df = pandas.DataFrame(index=subs, columns=example_output['roi_labels'])
import datetime
import numpy as np
import pandas as pd
from dateutil.tz import tzutc
from datetime import date
import pytz
from lstm_load_forecasting import entsoe, weather
import json
def load_dataset(path=None, update_date=None, modules=None):
indicator_vars = {'bsl_1':10,'bsl_2':11,'bsl_3':12,'brn_1':13,'brn_2':14,'brn_3':15,'zrh_1':16,'zrh_2':17,
'zrh_3':18,'lug_1':19,'lug_2':20,'lug_3':21,'lau_1':22,'lau_2':23,'lau_3':24,'gen_1':25,
'gen_2':26,'gen_3':27,'stg_1':28,'stg_2':29,'stg_3':30,'luz_1':31,'luz_2':32,'luz_3':33,
'holiday':34,'weekday_0':35,'weekday_1':36,'weekday_2':37,'weekday_3':38,'weekday_4':39,'weekday_5':40,
                      'weekday_6':41,'hour_0':42, 'hour_1':43, 'hour_2':44, 'hour_3':45, 'hour_4':46, 'hour_5':47,
'hour_6':48, 'hour_7':49, 'hour_8':50,'hour_9':51, 'hour_10':52, 'hour_11':53, 'hour_12':54,
'hour_13':55, 'hour_14':56, 'hour_15':57, 'hour_16':58,'hour_17':59, 'hour_18':60, 'hour_19':61,
'hour_20':62, 'hour_21':63, 'hour_22':64, 'hour_23':65,'month_1':66, 'month_2':67, 'month_3':68,
'month_4':69, 'month_5':70, 'month_6':71, 'month_7':72, 'month_8':73,'month_9':74, 'month_10':75,
'month_11':76, 'month_12':77}
df = pd.read_csv(path, delimiter=';', parse_dates=[0], index_col = 0)
df[list(indicator_vars.keys())] = df[list(indicator_vars.keys())].astype('int')
df = df.tz_localize('utc')
df = df.sort_index()
if update_date:
last_actual_obs = df['actual'].last_valid_index()
last_obs = df.index[-1]
local_timezone = pytz.timezone('Europe/Zurich')
update_date = local_timezone.localize(update_date)
print('============================================')
if update_date - pd.DateOffset(hours=1) > last_actual_obs:
df, df_n = update_dataset(df, update_date)
df.to_csv('data/fulldataset.csv', sep=';')
print('Updated: {}'.format(df_n.shape))
print('New size: {}'.format(df.shape))
else:
print('Nothing to update')
columns = []
if 'actual' in modules or 'all' in modules:
columns.append('actual')
if 'entsoe' in modules or 'all' in modules:
columns.append('entsoe')
if 'weather' in modules or 'all' in modules:
columns.extend(['bsl_t','brn_t','zrh_t','lug_t','lau_t','gen_t','stg_t','luz_t',
'bsl_1', 'bsl_2','bsl_3','brn_1','brn_2','brn_3','zrh_1','zrh_2','zrh_3','lug_1','lug_2','lug_3',
'lau_1','lau_2','lau_3','gen_1','gen_2','gen_3','stg_1','stg_2','stg_3','luz_1','luz_2','luz_3'])
if 'calendar' in modules or 'all' in modules:
columns.extend(['holiday', 'weekday_0','weekday_1','weekday_2','weekday_3','weekday_4','weekday_5','weekday_6',
'hour_0', 'hour_1', 'hour_2', 'hour_3', 'hour_4', 'hour_5', 'hour_6', 'hour_7', 'hour_8',
'hour_9', 'hour_10', 'hour_11', 'hour_12', 'hour_13', 'hour_14', 'hour_15', 'hour_16',
'hour_17', 'hour_18', 'hour_19', 'hour_20', 'hour_21', 'hour_22', 'hour_23',
'month_1', 'month_2', 'month_3', 'month_4', 'month_5', 'month_6', 'month_7', 'month_8',
'month_9', 'month_10', 'month_11', 'month_12'])
df = df[columns]
return df
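# Illustrative sketch (hypothetical helper; an assumption about how the CSV was
# prepared): calendar indicator columns like weekday_*, hour_* and month_* can
# be derived from a datetime index with pandas get_dummies. The real dataset
# ships them precomputed in data/fulldataset.csv.
def _demo_calendar_dummies():
    import pandas as pd
    idx = pd.date_range('2017-01-01', periods=48, freq='60min', tz='utc')
    dummies = pd.concat([
        pd.get_dummies(idx.weekday, prefix='weekday'),
        pd.get_dummies(idx.hour, prefix='hour'),
        pd.get_dummies(idx.month, prefix='month'),
    ], axis=1)
    dummies.index = idx
    return dummies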
def update_dataset(df=None, to_date=None):
# Set up
df = df.sort_index()
last_obs = df.index[-1]
last_actual_obs = df['actual'].last_valid_index()
columns = df.columns
starting = last_actual_obs + pd.DateOffset(hours=1)
ending = to_date
starting = starting.replace(minute=0, second=0)
ending = ending.replace(minute=0, second=0)
fmt = '%Y%m%d%H%M'
starting = starting.tz_convert('utc')
ending = ending.astimezone(pytz.utc) - pd.DateOffset(hours=1)
    df_n = pd.DataFrame(index=pd.date_range(starting, ending, freq='60min'))
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
            raise ValueError(
                "Requested axis is larger than the number of argument dimensions"
            )
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
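# Illustrative sketch (hypothetical helper, not part of pandas): contrast the
# evenly weighted Rolling mean with the weighted Window mean selected through
# win_type (which needs scipy to build the window weights).
def _demo_weighted_vs_plain_rolling_mean():
    import pandas as pd
    s = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0])
    plain = s.rolling(3).mean()                         # every point weighted equally
    weighted = s.rolling(3, win_type='triang').mean()   # triangular weights
    return pd.DataFrame({'plain': plain, 'triang': weighted})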
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
        If it is an offset, then this will be the time period of each window. Each
        window will be variably sized based on the observations included in
        the time-period. This is only valid for datetimelike indexes. This is
        new in 0.19.0.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
        Provide validation for our window type and return the window
        that has already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
The %(name)s function's apply function.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in Series.std is different than the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
return self._apply(
"roll_var", "var", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs[
"skew"
] = """
Unbiased %(name)s skewness.
Parameters
----------
**kwargs
Keyword arguments to be passed into func.
"""
def skew(self, **kwargs):
return self._apply(
"roll_skew", "skew", check_minp=_require_min_periods(3), **kwargs
)
_shared_docs["kurt"] = dedent(
"""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.kurt : Equivalent method for Series.
DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
"""
)
def kurt(self, **kwargs):
return self._apply(
"roll_kurt", "kurt", check_minp=_require_min_periods(4), **kwargs
)
_shared_docs["quantile"] = dedent(
"""
Calculate the %(name)s quantile.
Parameters
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
**kwargs:
For compatibility with other %(name)s methods. Has no effect on
the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.quantile : Computes value at the given quantile over all data
in Series.
DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
2 2.0
3 3.0
dtype: float64
>>> s.rolling(2).quantile(.4, interpolation='midpoint')
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
"""
)
def quantile(self, quantile, interpolation="linear", **kwargs):
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return libwindow.roll_max(
arg, window, minp, index_as_array, self.closed
)
elif quantile == 0.0:
return libwindow.roll_min(
arg, window, minp, index_as_array, self.closed
)
else:
return libwindow.roll_quantile(
arg,
window,
minp,
index_as_array,
self.closed,
quantile,
interpolation,
)
return self._apply(f, "quantile", quantile=quantile, **kwargs)
_shared_docs[
"cov"
] = """
Calculate the %(name)s sample covariance.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
**kwargs
Keyword arguments to be passed into func.
"""
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype("float64")
Y = Y.astype("float64")
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
_shared_docs["corr"] = dedent(
"""
Calculate %(name)s correlation.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self.
pairwise : bool, default None
Calculate pairwise combinations of columns within a
DataFrame. If `other` is not specified, defaults to `True`,
otherwise defaults to `False`.
Not relevant for :class:`~pandas.Series`.
**kwargs
Unused.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the
%(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.corr : Equivalent method for Series.
DataFrame.corr : Equivalent method for DataFrame.
%(name)s.cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
The below example shows a rolling calculation with a window size of
four matching the equivalent function call using :meth:`numpy.corrcoef`.
>>> v1 = [3, 3, 3, 5, 8]
>>> v2 = [3, 4, 4, 4, 8]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> # numpy returns a 2X2 array, the correlation coefficient
>>> # is the number at entry [0][1]
>>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))
0.333333
>>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))
0.916949
>>> s1 = pd.Series(v1)
>>> s2 = pd.Series(v2)
>>> s1.rolling(4).corr(s2)
0 NaN
1 NaN
2 NaN
3 0.333333
4 0.916949
dtype: float64
The below example shows a similar rolling calculation on a
DataFrame using the pairwise option.
>>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
[46., 31.], [50., 36.]])
>>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
[[1. 0.6263001]
[0.6263001 1. ]]
>>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
[[1. 0.5553681]
[0.5553681 1. ]]
>>> df = pd.DataFrame(matrix, columns=['X','Y'])
>>> df
X Y
0 51.0 35.0
1 49.0 30.0
2 47.0 32.0
3 46.0 31.0
4 50.0 36.0
>>> df.rolling(4).corr(pairwise=True)
X Y
0 X NaN NaN
Y NaN NaN
1 X NaN NaN
Y NaN NaN
2 X NaN NaN
Y NaN NaN
3 X 1.000000 0.626300
Y 0.626300 1.000000
4 X 1.000000 0.555368
Y 0.555368 1.000000
"""
)
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
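# Illustrative sketch (hypothetical helper, not part of pandas): the identity
# used by _get_corr above -- a rolling correlation equals the rolling covariance
# divided by the product of the rolling standard deviations.
def _demo_rolling_corr_identity():
    import pandas as pd
    a = pd.Series([3.0, 3.0, 3.0, 5.0, 8.0])
    b = pd.Series([3.0, 4.0, 4.0, 4.0, 8.0])
    corr = a.rolling(4).corr(b)
    manual = a.rolling(4).cov(b) / (a.rolling(4).std() * b.rolling(4).std())
    return corr, manual                                 # the two Series agree up to floating point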
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(
self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
)
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError(
"invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on)
)
def validate(self):
super().validate()
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
self.window, (str, ABCDateOffset, timedelta)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented "
"for datetimelike and offset "
"based windows"
)
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
"closed only implemented for datetimelike " "and offset based windows"
)
def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be " "monotonic".format(formatted))
def _validate_freq(self):
"""
Validate & return window frequency.
"""
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window)
)
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.rolling
DataFrame.rolling
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="rolling")
@Appender(_shared_docs["count"])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply("roll_count", "count")
return super().count()
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_rolling_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_rolling_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_rolling_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show a rolling calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> import scipy.stats
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False)))
3.999946
>>> s = pd.Series(arr)
>>> s.rolling(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 3.999946
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="rolling")
@
|
Appender(_shared_docs["kurt"])
|
pandas.util._decorators.Appender
|
# coding: utf-8
__author__ = 'ZFTurbo: https://kaggle.com/zfturbo'
import datetime
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed; model_selection provides train_test_split
import xgboost as xgb
import random
from operator import itemgetter
import zipfile
from sklearn.metrics import roc_auc_score
import time
random.seed(2016)
def create_feature_map(features):
outfile = open('xgb.fmap', 'w')
for i, feat in enumerate(features):
outfile.write('{0}\t{1}\tq\n'.format(i, feat))
outfile.close()
def get_importance(gbm, features):
create_feature_map(features)
importance = gbm.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=itemgetter(1), reverse=True)
return importance
def intersect(a, b):
return list(set(a) & set(b))
def print_features_importance(imp):
for i in range(len(imp)):
print("# " + str(imp[i][1]))
print('output.remove(\'' + imp[i][0] + '\')')
def run_default_test(train, test, features, target, random_state=0):
eta = 0.1
max_depth = 5
subsample = 0.8
colsample_bytree = 0.8
start_time = time.time()
print('XGBoost params. ETA: {}, MAX_DEPTH: {}, SUBSAMPLE: {}, COLSAMPLE_BY_TREE: {}'.format(eta, max_depth, subsample, colsample_bytree))
params = {
"objective": "binary:logistic",
"booster" : "gbtree",
"eval_metric": "auc",
"eta": eta,
"max_depth": max_depth,
"subsample": subsample,
"colsample_bytree": colsample_bytree,
"silent": 1,
"seed": random_state
}
num_boost_round = 260
early_stopping_rounds = 20
test_size = 0.1
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
y_train = X_train[target]
y_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=True)
print("Validating...")
check = gbm.predict(xgb.DMatrix(X_valid[features]), ntree_limit=gbm.best_ntree_limit)
score = roc_auc_score(X_valid[target].values, check)
print('Check error value: {:.6f}'.format(score))
imp = get_importance(gbm, features)
print('Importance array: ', imp)
print("Predict test set...")
test_prediction = gbm.predict(xgb.DMatrix(test[features]), ntree_limit=gbm.best_ntree_limit)
print('Training time: {} minutes'.format(round((time.time() - start_time)/60, 2)))
return test_prediction.tolist(), score
def create_submission(score, test, prediction):
# Make Submission
now = datetime.datetime.now()
sub_file = 'submission_' + str(score) + '_' + str(now.strftime("%Y-%m-%d-%H-%M")) + '.csv'
print('Writing submission: ', sub_file)
f = open(sub_file, 'w')
f.write('id,probability\n')
total = 0
for id in test['id']:
str1 = str(id) + ',' + str(prediction[total])
str1 += '\n'
total += 1
f.write(str1)
f.close()
# print('Creating zip-file...')
# z = zipfile.ZipFile(sub_file + ".zip", "w", zipfile.ZIP_DEFLATED)
# z.write(sub_file)
# z.close()
def get_features(train, test):
trainval = list(train.columns.values)
testval = list(test.columns.values)
output = intersect(trainval, testval)
output.remove('itemID_1')
output.remove('itemID_2')
return output
def prep_train():
testing = 0
start_time = time.time()
types1 = {
'itemID_1': np.dtype(int),
'itemID_2': np.dtype(int),
'isDuplicate': np.dtype(int),
'generationMethod': np.dtype(int),
}
types2 = {
'itemID': np.dtype(int),
'categoryID': np.dtype(int),
'title': np.dtype(str),
'description': np.dtype(str),
'images_array': np.dtype(str),
'attrsJSON': np.dtype(str),
'price': np.dtype(float),
'locationID': np.dtype(int),
'metroID': np.dtype(float),
'lat': np.dtype(float),
'lon': np.dtype(float),
}
print("Load ItemPairs_train.csv")
pairs = pd.read_csv("../input/ItemPairs_train.csv", dtype=types1)
# Add 'id' column for easy merge
print("Load ItemInfo_train.csv")
items = pd.read_csv("../input/ItemInfo_train.csv", dtype=types2)
items.fillna(-1, inplace=True)
location = pd.read_csv("../input/Location.csv")
category =
|
pd.read_csv("../input/Category.csv")
|
pandas.read_csv
|
from util import plotClusterPairGrid, computeMetric
import numpy as np
from copy import deepcopy
from sklearn.ensemble import RandomForestClassifier
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
#from sklearn.linear_model import LogisticRegression
from selectFeatures import selectFeatures
from sklearn.metrics import silhouette_score
from sklearn.cluster import MeanShift
from scipy.stats import pointbiserialr
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
class Interpreter():
"""
This class builds and interprets the model
"""
def __init__(self,
df_X=None, # the predictors, a pandas dataframe
df_Y=None, # the responses, a pandas dataframe
out_p=None, # the path for saving graphs
use_forest=True, # use random forest or not
n_trees=200, # the number of trees for the forest (in the paper we use 1000)
logger=None):
df_X = deepcopy(df_X)
df_Y = deepcopy(df_Y)
# We need to run the random forest in the unsupervised mode
# Consider the original data as class 1
# Create a synthetic second class of the same size that will be labeled as class 2
# The synthetic class is created by sampling at random from the univariate distributions of the original data
n = len(df_X)
synthetic = {}
for c in df_X.columns:
synthetic[c] = df_X[c].sample(n=n, replace=True).values
df_synthetic =
|
pd.DataFrame(data=synthetic)
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import Index, MultiIndex, Series, date_range, isna
import pandas._testing as tm
@pytest.fixture(
params=[
"linear",
"index",
"values",
"nearest",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def nontemporal_method(request):
"""Fixture that returns an (method name, required kwargs) pair.
This fixture does not include method 'time' as a parameterization; that
method requires a Series with a DatetimeIndex, and is generally tested
separately from these non-temporal methods.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
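# Hypothetical usage sketch (not part of the original module): a test that
# consumes the fixture unpacks the (method, kwargs) pair and forwards it, e.g.
#   def test_interpolate_smoke(nontemporal_method):
#       method, kwargs = nontemporal_method
#       ser = Series([1.0, np.nan, 3.0])
#       ser.interpolate(method=method, **kwargs)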
@pytest.fixture(
params=[
"linear",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def interp_methods_ind(request):
"""Fixture that returns a (method name, required kwargs) pair to
be tested for various Index types.
This fixture does not include the methods 'time', 'index', 'nearest',
and 'values' as parameterizations.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
class TestSeriesInterpolateData:
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method="linear")
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series(
[d.toordinal() for d in datetime_series.index], index=datetime_series.index
).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method="time")
tm.assert_series_equal(time_interp, ord_ts)
def test_interpolate_time_raises_for_non_timeseries(self):
# When method='time' is used on a non-TimeSeries that contains a null
# value, a ValueError should be raised.
non_ts = Series([0, 1, 2, np.NaN])
msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
@td.skip_if_no_scipy
def test_interpolate_cubicspline(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
result = ser.reindex(new_index).interpolate(method="cubicspline")[1:3]
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(
Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
).astype(float)
interp_s = ser.reindex(new_index).interpolate(method="pchip")
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima")
tm.assert_series_equal(interp_s[1:3], expected)
# interpolate at new_index where `der` is a non-zero int
expected = Series(
[11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="from_derivatives")
tm.assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
tm.assert_series_equal(s.interpolate(**kwargs), s)
s = Series([], dtype=object).interpolate()
tm.assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method="index")
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]
)
tm.assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method="values")
tm.assert_series_equal(other_result, result)
tm.assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
"time-weighted interpolation only works on Series or DataFrames "
"with a DatetimeIndex"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="time")
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0.0, 1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])
tm.assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list("abcd"))
result = s.interpolate()
expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_quad(self):
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method="quadratic")
expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_scipy_basic(self):
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
result = s.interpolate(method="slinear")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="slinear", downcast="infer")
tm.assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method="nearest")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="nearest", downcast="infer")
tm.assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method="zero")
tm.assert_series_equal(result, expected.astype("float"))
result = s.interpolate(method="zero", downcast="infer")
tm.assert_series_equal(result, expected)
# quadratic
# GH #15662.
expected = Series([1, 3.0, 6.823529, 12.0, 18.058824, 25.0])
result = s.interpolate(method="quadratic")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="quadratic", downcast="infer")
tm.assert_series_equal(result, expected)
# cubic
expected = Series([1.0, 3.0, 6.8, 12.0, 18.2, 25.0])
result = s.interpolate(method="cubic")
tm.assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
result = s.interpolate(method="linear", limit=2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("limit", [-1, 0])
def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method, limit):
# GH 9217: make sure limit is greater than zero.
s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
with pytest.raises(ValueError, match="Limit must be greater than 0"):
s.interpolate(limit=limit, method=method, **kwargs)
def test_interpolate_invalid_float_limit(self, nontemporal_method):
# GH 9217: make sure limit is an integer.
s = Series([1, 2, np.nan, 4])
method, kwargs = nontemporal_method
limit = 2.0
with pytest.raises(ValueError, match="Limit must be an integer"):
s.interpolate(limit=limit, method=method, **kwargs)
@pytest.mark.parametrize("invalid_method", [None, "nonexistent_method"])
def test_interp_invalid_method(self, invalid_method):
s = Series([1, 3, np.nan, 12, np.nan, 25])
msg = f"method must be one of.* Got '{invalid_method}' instead"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method)
# When an invalid method and invalid limit (such as -1) are
# provided, the error message reflects the invalid method.
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method, limit=-1)
def test_interp_invalid_method_and_value(self):
# GH#36624
ser = Series([1, 3, np.nan, 12, np.nan, 25])
msg = "Cannot pass both fill_value and method"
with pytest.raises(ValueError, match=msg):
ser.interpolate(fill_value=3, method="pad")
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])
result = s.interpolate(method="linear", limit=2, limit_direction="forward")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="linear", limit=2, limit_direction="FORWARD")
tm.assert_series_equal(result, expected)
def test_interp_unlimited(self):
# these tests are for issue #16282: the default limit=None is unlimited
s = Series([np.nan, 1.0, 3.0, np.nan, np.nan, np.nan, 11.0, np.nan])
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
result = s.interpolate(method="linear", limit_direction="both")
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])
result = s.interpolate(method="linear", limit_direction="forward")
tm.assert_series_equal(result, expected)
expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, np.nan])
result = s.interpolate(method="linear", limit_direction="backward")
tm.assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
r"Invalid limit_direction: expecting one of \['forward', "
r"'backward', 'both'\], got 'abc'"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit=2, limit_direction="abc")
# raises an error even if no limit is specified.
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit_direction="abc")
# limit_area introduced GH #16284
def test_interp_limit_area(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 3.0, 4.0, 5.0, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(method="linear", limit_area="inside")
tm.assert_series_equal(result, expected)
expected = Series(
[np.nan, np.nan, 3.0, 4.0, np.nan, np.nan, 7.0, np.nan, np.nan]
)
result = s.interpolate(method="linear", limit_area="inside", limit=1)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, 4.0, np.nan, 6.0, 7.0, np.nan, np.nan])
result = s.interpolate(
method="linear", limit_area="inside", limit_direction="both", limit=1
)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0])
result = s.interpolate(method="linear", limit_area="outside")
tm.assert_series_equal(result, expected)
expected = Series(
[np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan]
)
result = s.interpolate(method="linear", limit_area="outside", limit=1)
tm.assert_series_equal(result, expected)
expected = Series([np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan])
result = s.interpolate(
method="linear", limit_area="outside", limit_direction="both", limit=1
)
tm.assert_series_equal(result, expected)
expected = Series([3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan])
result = s.interpolate(
method="linear", limit_area="outside", limit_direction="backward"
)
tm.assert_series_equal(result, expected)
# raises an error even if limit type is wrong.
msg = r"Invalid limit_area: expecting one of \['inside', 'outside'\], got abc"
with pytest.raises(ValueError, match=msg):
s.interpolate(method="linear", limit_area="abc")
@pytest.mark.parametrize(
"method, limit_direction, expected",
[
("pad", "backward", "forward"),
("ffill", "backward", "forward"),
("backfill", "forward", "backward"),
("bfill", "forward", "backward"),
("pad", "both", "forward"),
("ffill", "both", "forward"),
("backfill", "both", "backward"),
("bfill", "both", "backward"),
],
)
def test_interp_limit_direction_raises(self, method, limit_direction, expected):
# https://github.com/pandas-dev/pandas/pull/34746
s = Series([1, 2, 3])
msg = f"`limit_direction` must be '{expected}' for method `{method}`"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=method, limit_direction=limit_direction)
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1.0, 3.0, np.nan, 7.0, 9.0, 11.0])
result = s.interpolate(method="linear", limit=2, limit_direction="backward")
tm.assert_series_equal(result, expected)
expected = Series([1.0, 3.0, 5.0, np.nan, 9.0, 11.0])
result = s.interpolate(method="linear", limit=1, limit_direction="both")
tm.assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12, np.nan])
expected = Series([1.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0])
result = s.interpolate(method="linear", limit=2, limit_direction="both")
|
tm.assert_series_equal(result, expected)
|
pandas._testing.assert_series_equal
|
from pandas import DataFrame
import typing
from .normalise_data_frame import normalise_data_frame
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from .get_columns_labels import get_columns_labels
class KMeansAnalysisResult():
labels_mapping: DataFrame = None
labels_mapping_labels: typing.List[typing.List[str]] = None
centroids: DataFrame = None
def __init__(self, labels_mapping: DataFrame, labels_mapping_labels: typing.List[typing.List[str]], centroids: DataFrame):
self.labels_mapping = labels_mapping
self.labels_mapping_labels = labels_mapping_labels
self.centroids = centroids
def do_kmeans_analysis(data_frame: DataFrame, clusters_number: int) -> KMeansAnalysisResult:
k_means: KMeans = None
if clusters_number > 0:
k_means = KMeans(n_clusters=clusters_number)
else:
k_means = KMeans()
labels_mapping = normalise_data_frame(data_frame.iloc[:, : 3])
columns = labels_mapping.columns
k_means.fit_transform(labels_mapping)
labels_mapping['Label'] = MinMaxScaler().fit_transform(k_means.labels_.reshape(-1,1))
centroids =
|
DataFrame(data=k_means.cluster_centers_, columns=columns)
|
pandas.DataFrame
|
from __future__ import division
from unittest import TestCase
from nose_parameterized import parameterized
from numpy.testing import assert_allclose, assert_almost_equal
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
from .. import timeseries
from .. import utils
DECIMAL_PLACES = 8
class TestDrawdown(TestCase):
drawdown_list = np.array(
[100, 90, 75]
) / 10.
dt = pd.date_range('2000-1-3', periods=3, freq='D')
drawdown_serie = pd.Series(drawdown_list, index=dt)
@parameterized.expand([
(drawdown_serie,)
])
def test_get_max_drawdown_begins_first_day(self, px):
rets = px.pct_change()
drawdowns = timeseries.gen_drawdown_table(rets, top=1)
self.assertEqual(drawdowns.loc[0, 'net drawdown in %'], 25)
drawdown_list = np.array(
[100, 110, 120, 150, 180, 200, 100, 120,
160, 180, 200, 300, 400, 500, 600, 800,
900, 1000, 650, 600]
) / 10.
dt = pd.date_range('2000-1-3', periods=20, freq='D')
drawdown_serie = pd.Series(drawdown_list, index=dt)
@parameterized.expand([
(drawdown_serie,
pd.Timestamp('2000-01-08'),
pd.Timestamp('2000-01-09'),
pd.Timestamp('2000-01-13'),
50,
pd.Timestamp('2000-01-20'),
pd.Timestamp('2000-01-22'),
None,
40
)
])
def test_gen_drawdown_table_relative(
self, px,
first_expected_peak, first_expected_valley,
first_expected_recovery, first_net_drawdown,
second_expected_peak, second_expected_valley,
second_expected_recovery, second_net_drawdown
):
rets = px.pct_change()
drawdowns = timeseries.gen_drawdown_table(rets, top=2)
self.assertEqual(np.round(drawdowns.loc[0, 'net drawdown in %']),
first_net_drawdown)
self.assertEqual(drawdowns.loc[0, 'peak date'],
first_expected_peak)
self.assertEqual(drawdowns.loc[0, 'valley date'],
first_expected_valley)
self.assertEqual(drawdowns.loc[0, 'recovery date'],
first_expected_recovery)
self.assertEqual(np.round(drawdowns.loc[1, 'net drawdown in %']),
second_net_drawdown)
self.assertEqual(drawdowns.loc[1, 'peak date'],
second_expected_peak)
self.assertEqual(drawdowns.loc[1, 'valley date'],
second_expected_valley)
self.assertTrue(pd.isnull(drawdowns.loc[1, 'recovery date']))
px_list_1 = np.array(
[100, 120, 100, 80, 70, 110, 180, 150]) / 100. # Simple
px_list_2 = np.array(
[100, 120, 100, 80, 70, 80, 90, 90]) / 100. # Ends in drawdown
dt = pd.date_range('2000-1-3', periods=8, freq='D')
@parameterized.expand([
(pd.Series(px_list_1,
index=dt),
pd.Timestamp('2000-1-4'),
pd.Timestamp('2000-1-7'),
pd.Timestamp('2000-1-9')),
(pd.Series(px_list_2,
index=dt),
pd.Timestamp('2000-1-4'),
pd.Timestamp('2000-1-7'),
None)
])
def test_get_max_drawdown(
self, px, expected_peak, expected_valley, expected_recovery):
rets = px.pct_change().iloc[1:]
peak, valley, recovery = timeseries.get_max_drawdown(rets)
# Need to use isnull because the result can be NaN, NaT, etc.
self.assertTrue(
pd.isnull(peak)) if expected_peak is None else self.assertEqual(
peak,
expected_peak)
self.assertTrue(
pd.isnull(valley)) if expected_valley is None else \
self.assertEqual(
valley,
expected_valley)
self.assertTrue(
|
pd.isnull(recovery)
|
pandas.isnull
|
# -*- coding: UTF-8 -*-
"""
This module contains functions for calculating evaluation metrics for the generated service recommendations.
"""
import numpy
import pandas
runtime_metrics = ["Training time", "Overall testing time", "Individual testing time"]
quality_metrics = ["Recall", "Precision", "F1", "# of recommendations"]
def results_as_dataframe(user_actions, recommendations):
"""
Converts the recommendation results into a pandas dataframe for easier evaluation.
@param user_actions: A list of the actually performed user actions.
@param recommendations: For each of the performed actions the list of calculated service recommendations.
@return: A pandas dataframe that has as index the performed user actions (there is one row per action). The first
column contains for each action the highest scoring recommendation, the second column contains the second best
recommendation etc.
"""
results = pandas.DataFrame(recommendations, index=pandas.Index(user_actions, name="Actual action"))
results.columns = [(r+1) for r in range(len(results.columns))]
return results
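# Illustrative sketch (inputs are hypothetical): for
#   user_actions = ["lights_on", "tv_on"]
#   recommendations = [["lights_on", "radio_on"], ["radio_on", "tv_on"]]
# results_as_dataframe(user_actions, recommendations) produces a frame indexed
# by the actual action, with ranked recommendations in columns 1, 2, ...:
#                        1         2
#   Actual action
#   lights_on    lights_on  radio_on
#   tv_on         radio_on     tv_on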
class QualityMetricsCalculator():
"""
This is a utility class that contains a number of methods for calculating overall quality metrics for the produced
recommendations. In general these methods produce pandas dataframes with several rows, where each row corresponds
to one "cutoff" point. For example, a cutoff "4" means that the system cuts the number of recommendations at four,
i.e. the user is shown at most four recommendations. If some post-processing method was used (e.g. show fewer
recommendations if the recommendation conflict is low), then it can happen that fewer than four recommendations
are shown. For reference, the column "# of recommendations" lists the average of the number of recommendations that
were actually shown to the user.
"""
def __init__(self, actual_actions, recommendations):
"""
Initialize the calculation of the quality metrics.
@param actual_actions: A list of strings, each representing one actual user action.
@param recommendations: A list of lists of strings with the same length as actual_actions. Each list of
strings contains the calculated recommendations for the corresponding actual user action.
@return:
"""
self.results = results_as_dataframe(actual_actions, recommendations)
def __unique_actions__(self):
"""
It can happen that one potential user action never happened, but that the corresponding service was recommended.
To be able to count these false positives, we must calculate the list of all potential actions.
"""
occurring_actions = set(self.results.index.values)
occurring_services = pandas.melt(self.results).dropna()["value"]
occurring_services = set(occurring_services.unique())
return sorted(occurring_actions | occurring_services)
def true_positives(self, action):
"""
Counts how often the given action was recommended correctly (true positives, TP).
@param action: The name of the user action for which to count true positives.
@return: A pandas dataframe with column TP and several rows; the first row lists #TP at cutoff "1", the second row at
cutoff "2", etc.
"""
#get all rows where the actual action corresponds to the given action
r = self.results[self.results.index == action]
if len(r) == 0:
#if there are no such rows, then we have zero true positives, fill result dataframe with zeroes
true_positives = pandas.Series(0.0, index=self.results.columns)
else:
#if recommendation matches the action, set column to "1" (true positive), else set to "0" (false negative)
r = r.applymap(lambda col: 1 if col == action else 0).fillna(0)
#count how many true positives there are in each column
r = r.sum()
#if we have a true positive for the n-th recommendation, then we also have one for n+1, n+2, etc.
#-> calculate cumulative sum
true_positives = r.cumsum(axis=0).apply(float)
true_positives = pandas.DataFrame(true_positives, columns=["TP"])
true_positives.index.name = "cutoff"
return true_positives
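# Worked example (hypothetical data): suppose the action "a" occurred twice and
# was recommended at rank 1 in the first case and rank 3 in the second, with
# three recommendation columns. The per-column indicator sums are [1, 0, 1] and
# the cumulative sum gives TP = 1 at cutoff 1, 1 at cutoff 2, and 2 at cutoff 3.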
def true_positives_for_all(self):
"""
Create a matrix that contains information about true positives for all possible actions.
@return: A pandas dataframe with one column for each action; the first row lists #TP at cutoff "1", the second row at
cutoff "2", etc.
"""
tp = [self.true_positives(action) for action in self.__unique_actions__()]
tp = pandas.concat(tp, axis=1)
tp.columns = self.__unique_actions__()
return tp
def false_negatives(self, action):
"""
Counts how often the given action was not recommended correctly (false negatives, FN).
@param action: The name of the user action for which to count false negatives.
@return: A pandas dataframe with column FN and several rows; the first row lists #FN at cutoff "1", the second row at
cutoff "2", etc.
"""
#the amount of false negatives corresponds to the difference between the total number of occurrences of the
#action and the number of true positives
true_positives = self.true_positives(action)
total_occurrences = len(self.results[self.results.index == action])
total_occurrences = pandas.Series(total_occurrences, index=true_positives.index)
false_negatives = total_occurrences - true_positives["TP"]
false_negatives = pandas.DataFrame(false_negatives, columns=["FN"])
false_negatives.index.name = "cutoff"
return false_negatives
def false_positives(self, action):
"""
Counts how often the given action was recommended even though it didn't occur (false positives, FP).
@param action: The name of the user action for which to count false positives.
@return: A pandas dataframe with column FP and several rows; the first row lists #FP at cutoff "1", the second row at
cutoff "2", etc.
"""
#get all rows where the actual service does NOT correspond to the given action
r = self.results[self.results.index != action]
if len(r) == 0:
#if there are no such rows, then we have zero false positives, fill result dataframe with zeroes
false_positives = pandas.Series(0.0, index=self.results.columns)
else:
#if recommendation matches the action, set column to "1" (false positive), else set to "0" (true negative)
r = r.applymap(lambda col: 1 if col == action else 0)
#count how many false positives there are in each column
r = r.sum()
#if we have a false positive for the n-th recommendation, then we also have one for n+1, n+2, etc.
#-> calculate cumulative sum
false_positives = r.cumsum(axis=0).apply(float)
false_positives = pandas.DataFrame(false_positives, columns=["FP"])
false_positives.index.name = "cutoff"
return false_positives
@staticmethod
def precision(counts):
"""
Calculate the precision as (true positives)/(true positives + false positives).
@param counts: A dataframe that contains a column "TP" with true positives and "FP" with false positives.
@return: A pandas dataframe with one column "Precision". The first row lists the achieved precision at cutoff
"1", the second row at cutoff "2", etc.
"""
p = counts["TP"]/(counts["TP"] + counts["FP"])
p = pandas.DataFrame({"Precision": p}).fillna(0.0)
return p
@staticmethod
def recall(counts):
"""
Calculate the recall as (true positives)/(true positives + false negatives).
@param counts: A dataframe that contains a column "TP" with true positives and "FN" with false negatives.
@return: A pandas dataframe with one column "Recall". The first row lists the achieved recall at cutoff "1",
the second row at cutoff "2", etc.
"""
p = counts["TP"]/(counts["TP"] + counts["FN"])
p = pandas.DataFrame({"Recall": p}).fillna(0.0)
return p
@staticmethod
def f1(metrics):
"""
Calculate the F1 as the harmonic mean of precision and recall.
@param metrics: A dataframe with a column "Precision" and a column "Recall"
@return: A pandas dataframe with one column "F1". The first row lists the achieved F1 at cutoff "1", the second
row at cutoff "2", etc.
"""
f = (2.0*metrics["Precision"]*metrics["Recall"]) / (metrics["Precision"]+metrics["Recall"])
f = pandas.DataFrame({"F1": f}).fillna(0.0)
return f
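# Worked example (hypothetical counts): at some cutoff, TP=2, FP=1, FN=2 gives
#   precision = 2 / (2 + 1) ~= 0.667
#   recall    = 2 / (2 + 2)  = 0.5
#   F1        = 2 * 0.667 * 0.5 / (0.667 + 0.5) ~= 0.571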
def number_of_recommendations(self):
"""
Count how many recommendations the user was actually shown (e.g. when using a dynamic cutoff such as "show
fewer recommendations when recommendation conflict is low"). The number of recommendations is not a quality
metric but fits here conceptually.
@return: A pandas dataframe with one column "# of recommendations". The first row lists the # at cutoff "1", the
second row at cutoff "2", etc.
"""
n = (self.results.count(axis=0)/float(len(self.results))).cumsum()
n = pandas.DataFrame({"# of recommendations": n})
n.index.name = "cutoff"
return n
def calculate_for_action(self, action):
"""
Calculate precision, recall and F1 for one action (= one possible user action)
@param action: Which user action to calculate the metrics for.
@return: A pandas dataframe containing columns for "Precision", "Recall", "F1". The first row lists
calculated metrics at cutoff "1", the second row at cutoff "2", etc. A fourth column "action" simply lists the
action name in all rows, this column is necessary for later merging the metrics of all actions.
"""
#count how many true positives, false positives and false negatives occurred for this action
counts = pandas.concat([self.true_positives(action),
self.false_negatives(action),
self.false_positives(action)],
axis=1)
#use these counts to calculate the relevant metrics
metrics = pandas.concat([self.precision(counts),
self.recall(counts)],
axis=1)
metrics["F1"] = self.f1(metrics)["F1"]
#add column that contains name of the action in all rows, to prepare for merging the metrics for all actions
metrics["action"] = pandas.Series(action, index=metrics.index)
return metrics
def calculate(self):
"""
Performs the actual calculation of the weighted average of precision, recall and F1 over all actions and counts
the number of recommendations that were actually shown to the user.
@return: A pandas dataframe containing one column for each of the four quality metrics. The first row lists
calculated metrics at cutoff "1", the second row at cutoff "2", etc.
"""
#make one big matrix with the metrics for all actions
actions = self.__unique_actions__()
metrics = pandas.concat([self.calculate_for_action(action) for action in actions])
#count for each action how often the corresponding action actually occurred
occurrences =
|
pandas.TimeSeries(self.results.index.values)
|
pandas.TimeSeries
|
"""Tests for cell_img.common.df_style."""
from absl.testing import absltest
from cell_img.common import df_style
import numpy as np
import numpy.testing as npt
import pandas as pd
class DfStyleTest(absltest.TestCase):
def testColorizerWithInts(self):
values = [1, 2, 3, 4]
c = df_style.make_colorizer_from_series(pd.Series(values))
for v in values:
self.assertTrue(c(v).startswith('background-color: #'))
def testColorizerWithStrings(self):
values = ['a', 'b', 'cc', 'd']
c = df_style.make_colorizer_from_series(
|
pd.Series(values)
|
pandas.Series
|
import inspect
import numpy as np
from pandas._libs import reduction as libreduction
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_sequence,
)
from pandas.core.dtypes.generic import ABCSeries
def frame_apply(
obj,
func,
axis=0,
raw=False,
result_type=None,
ignore_failures=False,
args=None,
kwds=None,
):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(
obj,
func,
raw=raw,
result_type=result_type,
ignore_failures=ignore_failures,
args=args,
kwds=kwds,
)
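# For example: DataFrame.apply(func, axis=0) reaches this function with axis=0
# and dispatches to FrameRowApply (func is applied to each column), while
# axis=1 dispatches to FrameColumnApply (func is applied to each row).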
class FrameApply:
def __init__(self, obj, func, raw, result_type, ignore_failures, args, kwds):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
self.result_type = result_type
# curry if needed
if (kwds or args) and not isinstance(func, (np.ufunc, str)):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self):
return self.obj.dtypes
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
def get_result(self):
""" compute the results """
# dispatch to agg
if
|
is_list_like(self.f)
|
pandas.core.dtypes.common.is_list_like
|
import numpy as np
import pandas as pd
import sqlalchemy as sa
from dask.dataframe import from_delayed, from_pandas
from dask.delayed import delayed
from sqlalchemy import create_engine
from sqlalchemy import sql
from sqlalchemy.sql import elements
# Optimus plays defensive with the number of rows to be retrieved from the server so if a limit is not specified it will
# only retrieve the LIMIT value
from optimus.engines.dask.dataframe import DaskDataFrame
from optimus.engines.base.contants import NUM_PARTITIONS, LIMIT_TABLE
from optimus.engines.base.io.driver_context import DriverContext
from optimus.engines.base.io.factory import DriverFactory
from optimus.engines.spark.io.properties import DriverProperties
from optimus.helpers.core import val_to_list
from optimus.helpers.logger import logger
from optimus.helpers.raiseit import RaiseIt
class DaskBaseJDBC:
"""
Helper for JDBC connections and queries
"""
def __init__(self, host, database, user, password, port=None, driver=None, schema="public", oracle_tns=None,
oracle_service_name=None, oracle_sid=None, presto_catalog=None, cassandra_keyspace=None,
cassandra_table=None, bigquery_project=None, bigquery_dataset=None):
"""
Create the JDBC connection object
:return:
"""
if host is None:
host = "127.0.0.1"
# RaiseIt.value_error(host, "host must be specified")
if user is None:
user = "root"
# print("user",user)
# RaiseIt.value_error(user, "user must be specified")
if database is None:
database = ""
self.db_driver = driver
self.oracle_sid = oracle_sid
self.cassandra_keyspace = cassandra_keyspace
self.cassandra_table = cassandra_table
self.driver_context = DriverContext(DriverFactory.get(self.db_driver))
self.driver_properties = self.driver_context.properties()
if port is None:
self.port = self.driver_properties.value["port"]
else:
self.port = port
self.driver_option = self.driver_properties.value["java_class"]
self.uri = self.driver_context.uri(
user=user,
password=password,
driver=driver,
host=host,
port=str(self.port),
database=database,
schema=schema,
oracle_tns=oracle_tns,
oracle_sid=oracle_sid,
oracle_service_name=oracle_service_name,
presto_catalog=presto_catalog,
bigquery_project=bigquery_project,
bigquery_dataset=bigquery_dataset
)
self.database = database
self.user = user
self.password = password
self.schema = schema
print(self.uri)
logger.print(self.uri)
def tables(self, schema=None, database=None, limit=None):
"""
Return all the tables in a database
:return:
"""
# Override the schema used in the constructor
# if database is None:
# database = self.database
#
# if schema is None:
# schema = self.schema
# query = self.driver_context.table_names_query(schema=schema, database=database)
# df = self.execute(query, limit)
# return df.display(limit)
engine = create_engine(self.uri)
return engine.table_names()
@property
def table(self):
"""
Print n rows of every table in a database
:return: Table Object
"""
return Table(self)
def table_to_df(self, table_name: str, columns="*", limit=None):
"""
Return cols as Spark data frames from a specific table
:type table_name: object
:param columns:
:param limit: how many rows will be retrieved
"""
db_table = table_name
if limit == "all":
query = self.driver_context.count_query(db_table=db_table)
count = self.execute(query, "all").first()[0]
# We want to count the number of rows to warn the user how long it can take to bring the whole data set
print(str(int(count)) + " rows")
if columns == "*":
columns_sql = "*"
else:
columns = val_to_list(columns)
columns_sql = ",".join(columns)
query = "SELECT " + columns_sql + " FROM " + db_table
logger.print(query)
dfd = self.execute(query, limit)
# Bring the data to the local machine; otherwise, every time we call an action the data
# is going to be retrieved from the remote server
# dfd = dfd.run()
# dfd = dask_pandas_to_dask_cudf(dfd)
return DaskDataFrame(dfd)
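# Hypothetical usage sketch (all connection parameters are placeholders and the
# driver name depends on what DriverFactory has registered):
#   db = DaskBaseJDBC(host="localhost", database="shop", user="root",
#                     password="secret", driver="mysql")
#   df = db.table_to_df("orders", columns=["id", "total"], limit=1000)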
def execute(self, query, limit=None, num_partitions: int = NUM_PARTITIONS, partition_column: str = None,
table_name=None):
"""
Execute a SQL query
:param limit: default limit for the whole query. We play defensive here in case the result is a big chunk of data
:param num_partitions:
:param partition_column:
:param query: SQL query string
:param table_name:
:return:
"""
# play defensive with a select clause
# if self.db_driver == DriverProperties.ORACLE.value["name"]:
# query = "(" + query + ") t"
# elif self.db_driver == DriverProperties.PRESTO.value["name"]:
# query = "(" + query + ")"
# elif self.db_driver == DriverProperties.CASSANDRA.value["name"]:
# query = query
# else:
# query = "(" + query + ") AS t"
# df = dd.read_sql_table(table='test_data', uri=self.url, index_col='id')
# "SELECT table_name, table_rows FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'optimus'"
df = DaskBaseJDBC.read_sql_table(table_name=table_name, uri=self.uri, index_col=partition_column,
npartitions=num_partitions, query=query)
# print(len(df))
# conf = Spark.instance.spark.read \
# .format(
# "jdbc" if not self.db_driver == DriverProperties.CASSANDRA.value["name"] else
# DriverProperties.CASSANDRA.value["java_class"]) \
# .option("url", self.url) \
# .option("user", self.user) \
# .option("dbtable", query)
#
# # Password
# if self.db_driver != DriverProperties.PRESTO.value["name"] and self.password is not None:
# conf.option("password", self.password)
#
# # Driver
# if self.db_driver == DriverProperties.ORACLE.value["name"] \
# or self.db_driver == DriverProperties.POSTGRESQL.value["name"] \
# or self.db_driver == DriverProperties.PRESTO.value["name"]:
# conf.option("driver", self.driver_option)
#
# if self.db_driver == DriverProperties.CASSANDRA.value["name"]:
# conf.options(table=self.cassandra_table, keyspace=self.cassandra_keyspace)
# return self._limit(conf.load(), limit)
return df
@staticmethod
def read_sql_table(
table_name,
uri,
index_col=None,
divisions=None,
npartitions=None,
limits=None,
columns=None,
bytes_per_chunk=256 * 2 ** 20,
head_rows=5,
schema=None,
meta=None,
engine_kwargs=None,
query=None,
**kwargs
):
engine_kwargs = {} if engine_kwargs is None else engine_kwargs
engine = sa.create_engine(uri, **engine_kwargs)
m = sa.MetaData()
# print("table", table)
if isinstance(table_name, str):
table_name = sa.Table(table_name, m, autoload=True, autoload_with=engine, schema=schema)
columns = (
[(table_name.columns[c] if isinstance(c, str) else c) for c in columns]
if columns
else list(table_name.columns)
)
index = None
if index_col:
# raise ValueError("Must specify index column to partition on")
# else:
index = table_name.columns[index_col] if isinstance(index_col, str) else index_col
if not isinstance(index_col, (str, elements.Label)):
raise ValueError(
"Use label when passing an SQLAlchemy instance as the index (%s)" % index
)
if divisions and npartitions:
raise TypeError("Must supply either divisions or npartitions, not both")
if index_col not in columns:
columns.append(
table_name.columns[index_col] if isinstance(index_col, str) else index_col
)
if isinstance(index_col, str):
kwargs["index_col"] = index_col
else:
# function names get pandas auto-named
kwargs["index_col"] = index_col.name
parts = []
if meta is None:
# derive metadata from first few rows
# q = sql.select(columns).limit(head_rows).select_from(table)
head = pd.read_sql(query, engine, **kwargs)
# print("head", head)
# print("META", head.iloc[:0])
if head.empty:
# no results at all
# name = table_name.name
# schema = table_name.schema
# head = pd.read_sql_table(name, uri, schema=schema, index_col=index_col)
return from_pandas(head, npartitions=1)
bytes_per_row = (head.memory_usage(deep=True, index=True)).sum() / head_rows
meta = head.iloc[:0]
# print(list(head.columns.values))
else:
if divisions is None and npartitions is None:
raise ValueError(
"Must provide divisions or npartitions when using explicit meta."
)
# if divisions is None and index_col is not None:
if divisions is None:
# print(index)
# print("LIMITS",limits)
if index is not None and limits is None:
# calculate max and min for given index
q = sql.select([sql.func.max(index), sql.func.min(index)]).select_from(
table_name
)
minmax = pd.read_sql(q, engine)
maxi, mini = minmax.iloc[0]
dtype = minmax.dtypes["max_1"]
elif index is None and npartitions:
# Used for LIMIT/OFFSET partitioning
mini = 0
q = f"SELECT COUNT(*) AS count FROM ({query}) AS query"
maxi = pd.read_sql(q, engine)["count"][0]
limit = maxi / npartitions
dtype = pd.Series((mini, maxi,)).dtype
# Use for ntile calculation
# mini = 0
# maxi = npartitions
# print(pd.Series((mini, maxi,)))
# dtype = pd.Series((mini, maxi,)).dtype
# ntile_columns = ", ".join(list(head.columns.values))
else:
mini, maxi = limits
dtype = pd.Series(limits).dtype
if npartitions is None:
q = sql.select([sql.func.count(index)]).select_from(table_name)
count = pd.read_sql(q, engine)["count_1"][0]
npartitions = int(round(count * bytes_per_row / bytes_per_chunk)) or 1
if dtype.kind == "M":
divisions = pd.date_range(
start=mini,
end=maxi,
freq="%iS" % ((maxi - mini).total_seconds() / npartitions),
).tolist()
divisions[0] = mini
divisions[-1] = maxi
elif dtype.kind in ["i", "u", "f"]:
divisions = np.linspace(mini, maxi, npartitions + 1).tolist()
# else:
# print(dtype)
# raise TypeError(
# 'Provided index column is of type "{}". If divisions is not provided the '
# "index column type must be numeric or datetime.".format(dtype)
# )
lowers, uppers = divisions[:-1], divisions[1:]
for i, (lower, upper) in enumerate(zip(lowers, uppers)):
if index_col:
if i == len(lowers) - 1:
where = f" WHERE {index} > {lower} AND {index} <= {upper}"
else:
where = f" WHERE {index} >= {lower} AND {index} < {upper}"
q = query + where
else:
if i == len(lowers) - 1:
where = f" LIMIT {int(upper) - int(lower)} OFFSET {int(lower)}"
else:
where = f" LIMIT {int(limit)} OFFSET {int(lower)}"
q = query + where
# Ntile calculation
# print("Using Ntile query")
# ntile_column = "temp"
# ntile_sql = f"SELECT *, NTILE({npartitions}) OVER(ORDER BY id DESC) AS {ntile_column} FROM ({query}) AS t";
# q = f"SELECT {ntile_columns} FROM ({ntile_sql}) AS r"
# if i == len(lowers) - 1:
# where = f" WHERE {ntile_column} > {lower} AND {ntile_column} <= {upper}"
# else:
# where = f" WHERE {ntile_column} >= {lower} AND {ntile_column} < {upper}"
# q = q + where
# table = "test_data"
# q = f'SELECT {ntile_columns} FROM {table} WHERE NTILE({npartitions}) OVER (ORDER BY {ntile_column}) = i'
# When we do not have and index
parts.append(
delayed(DaskBaseJDBC._read_sql_chunk)(
q, uri, meta, engine_kwargs=engine_kwargs, **kwargs
)
)
else:
# JDBC._read_sql_chunk(q, uri, meta, engine_kwargs=engine_kwargs, **kwargs)
parts.append(
delayed(DaskBaseJDBC._read_sql_chunk)(
query, uri, meta, engine_kwargs=engine_kwargs, **kwargs
)
)
engine.dispose()
return from_delayed(parts, meta, divisions=divisions)
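# Partitioning sketch (illustrative numbers): with no index column, a query
# returning 1000 rows and npartitions=4 gives limit=250 and divisions
# [0, 250, 500, 750, 1000], so the chunk queries built above become
#   ... LIMIT 250 OFFSET 0
#   ... LIMIT 250 OFFSET 250
#   ... LIMIT 250 OFFSET 500
#   ... LIMIT 250 OFFSET 750   (the last chunk uses upper - lower rows)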
@staticmethod
def _read_sql_chunk(q, uri, meta, engine_kwargs=None, **kwargs):
import sqlalchemy as sa
engine_kwargs = engine_kwargs or {}
engine = sa.create_engine(uri, **engine_kwargs)
df =
|
pd.read_sql(q, engine, **kwargs)
|
pandas.read_sql
|
from rpyc import Service
from rpyc.utils.server import ThreadedServer
from common import *
import os
import math
import pandas as pd
import numpy as np
class NameNode(Service):
def __init__(self):
super().__init__()
def exposed_format(self):
format_command = 'rm -rf {}'.format(name_node_dir)
os.system(format_command)
return 'format successfully at {}'.format(name_node_dir)
def exposed_new_fat_item(self, dfs_path, file_size):
blk_nums = int(math.ceil(file_size / dfs_blk_size))
data_pd = pd.DataFrame(columns=['blk_no', 'host_name', 'blk_size'])
idx = 0
for i in range(blk_nums):
blk_no = i
hosts_name = np.random.choice(host_lists, dfs_replication, replace = True)
print(hosts_name)
blk_size = min(dfs_blk_size, file_size - i * dfs_blk_size)
for host_name in hosts_name:
data_pd.loc[idx] = [blk_no, host_name, blk_size]
idx = idx + 1
local_path = os.path.join(name_node_dir, dfs_path)
os.system(' mkdir -p {}'.format(os.path.dirname(local_path)))
data_pd.to_csv(local_path, index=False)
return data_pd.to_csv(index=False)
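# Worked example (values are hypothetical; dfs_blk_size, dfs_replication and
# host_lists come from common.py): file_size=100 and dfs_blk_size=64 give
# ceil(100/64)=2 blocks; with dfs_replication=2 the FAT gets 4 rows, e.g.
#   blk_no  host_name  blk_size
#   0       host1      64
#   0       host2      64
#   1       host1      36
#   1       host3      36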
def exposed_rm_fat_item(self, dfs_path):
local_path = os.path.join(name_node_dir, dfs_path)
response = pd.read_csv(local_path)
os.system('rm ' + local_path)
return response.to_csv(index=False)
def exposed_get_fat_item(self, dfs_path):
local_path = os.path.join(name_node_dir, dfs_path)
response = pd.read_csv(local_path)
return response.to_csv(index=False)
def exposed_ls(self, dfs_path):
local_path = os.path.join(name_node_dir, dfs_path)
if not os.path.exists(local_path):
response = 'No such file or directory at {}'.format(dfs_path)
elif os.path.isdir(local_path):
files = os.listdir(local_path)
response = str(files)
else:
response =
|
pd.read_csv(local_path)
|
pandas.read_csv
|
#!/usr/bin/env python
"""
#########################################################################################
#
# This function allows running a function on a large dataset with a set of parameters.
# Results are extracted and saved in a way that they can easily be compared with another set.
#
# Data should be organized as the following:
# (names of images can be changed but must be passed as parameters to this function)
#
# data/
# ......subject_name_01/
# ......subject_name_02/
# .................t1/
# .........................subject_02_anything_t1.nii.gz
# .........................some_landmarks_of_vertebral_levels.nii.gz
# .........................subject_02_manual_segmentation_t1.nii.gz
# .................t2/
# .........................subject_02_anything_t2.nii.gz
# .........................some_landmarks_of_vertebral_levels.nii.gz
# .........................subject_02_manual_segmentation_t2.nii.gz
# .................t2star/
# .........................subject_02_anything_t2star.nii.gz
# .........................subject_02_manual_segmentation_t2star.nii.gz
# ......subject_name_03/
# .
# .
# .
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: <NAME>, <NAME>
# Modified: 2015-09-30
#
# About the license: see the file LICENSE.TXT
#########################################################################################
usage:
sct_pipeline -f sct_a_tool -d /path/to/data/ -p \" sct_a_tool option \" -cpu-nb 8
"""
# TODO: remove compute duration which is now replaced with results.duration
# TODO: create a dictionary for param, such that results can display reduced param instead of full. Example: -param t1="blablabla",t2="blablabla"
# TODO: read_database: hard coded fields to put somewhere else (e.g. config file)
from __future__ import print_function, absolute_import
import sys, io, os, types, copy, copy_reg, time, itertools, glob, importlib, pickle
import platform
import signal
path_script = os.path.dirname(__file__)
sys.path.append(os.path.join(sct.__sct_dir__, 'testing'))
import concurrent.futures
if "SCT_MPI_MODE" in os.environ:
from mpi4py.futures import MPIPoolExecutor as PoolExecutor
__MPI__ = True
sys.path.insert(0, path_script)
else:
from concurrent.futures import ProcessPoolExecutor as PoolExecutor
__MPI__ = False
from multiprocessing import cpu_count
import numpy as np
import h5py
import pandas as pd
import sct_utils as sct
import spinalcordtoolbox.utils as utils
import msct_parser
import sct_testing
def _pickle_method(method):
"""
Author: <NAME> (author of argparse)
http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
"""
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
cls_name = ''
if func_name.startswith('__') and not func_name.endswith('__'):
cls_name = cls.__name__.lstrip('_')
if cls_name:
func_name = '_' + cls_name + func_name
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
"""
Author: <NAME>
http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
"""
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
def generate_data_list(folder_dataset, verbose=1):
"""
Construction of the data list from the data set
This function return a list of directory (in folder_dataset) in which the contrast is present.
:return data:
"""
list_subj = []
# each directory in folder_dataset should be a directory of a subject
for subject_dir in os.listdir(folder_dataset):
if not subject_dir.startswith('.') and os.path.isdir(os.path.join(folder_dataset, subject_dir)):
# data_subjects.append(os.path.join(folder_dataset, subject_dir))
list_subj.append(subject_dir)
if not list_subj:
        logger.error('ERROR: No subject data were found in ' + folder_dataset + '. '
                     'Please organize your data correctly or provide a correct dataset.')
return list_subj
def read_database(folder_dataset, specifications=None, fname_database='', verbose=1):
"""
Read subject database from xls file.
Parameters
----------
folder_dataset: path to database
specifications: field-based specifications for subject selection
fname_database: fname of XLS file that contains database
verbose:
Returns
-------
subj_selected: list of subjects selected
"""
# initialization
subj_selected = []
# if fname_database is empty, check if xls or xlsx file exist in the database directory.
if fname_database == '':
logger.info(' Looking for an XLS file describing the database...')
list_fname_database = glob.glob(os.path.join(folder_dataset, '*.xls*'))
if list_fname_database == []:
logger.warning('WARNING: No XLS file found. Returning empty list.')
return subj_selected
elif len(list_fname_database) > 1:
logger.warning('WARNING: More than one XLS file found. Returning empty list.')
return subj_selected
else:
fname_database = list_fname_database[0]
# sct.printv(' XLS file found: ' + fname_database, verbose)
# read data base file and import to panda data frame
    logger.info(' Reading XLS: ' + fname_database)
    try:
        data_base = pd.read_excel(fname_database)
    except Exception:
        logger.error('ERROR: File ' + fname_database + ' cannot be read. Please check format or get help from SCT forum.')
        raise
#
# correct some values and clean panda data base
# convert columns to int
    to_int = ['gm_model', 'PAM50', 'MS_mapping']
    for key in to_int:
        data_base[key] = data_base[key].fillna(0.0).astype(int)
#
for key in data_base.keys():
# remove 'unnamed' columns
if 'Unnamed' in key:
data_base = data_base.drop(key, axis=1)
# duplicate columns with lower case names and with space in names
else:
data_base[key.lower()] = data_base[key]
data_base['_'.join(key.split(' '))] = data_base[key]
#
## parse specifications
## specification format: "center=unf,twh:pathology=hc:sc_seg=t2"
list_fields = specifications.split(':')
dict_spec = {}
for f in list_fields:
field, value = f.split('=')
dict_spec[field] = value.split(',')
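    # e.g. "center=unf,twh:pathology=hc" is parsed into {'center': ['unf', 'twh'], 'pathology': ['hc']}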
#
## select subjects from specification
# type of field for which the subject should be selected if the field CONTAINS the requested value (as opposed to the field is equal to the requested value)
list_field_multiple_choice = ['contrasts', 'sc seg', 'gm seg', 'lesion seg']
list_field_multiple_choice_tmp = copy.deepcopy(list_field_multiple_choice)
for field in list_field_multiple_choice_tmp:
list_field_multiple_choice.append('_'.join(field.split(' ')))
#
data_selected = copy.deepcopy(data_base)
for field, list_val in dict_spec.items():
if field.lower() not in list_field_multiple_choice:
# select subject if field is equal to the requested value
data_selected = data_selected[data_selected[field].isin(list_val)]
else:
# select subject if field contains the requested value
data_selected = data_selected[data_selected[field].str.contains('|'.join(list_val)).fillna(False)]
#
## retrieve list of subjects from database
database_subj = ['_'.join([str(center), str(study), str(subj)]) for center, study, subj in zip(data_base['Center'], data_base['Study'], data_base['Subject'])]
## retrieve list of subjects from database selected
database_subj_selected = ['_'.join([str(center), str(study), str(subj)]) for center, study, subj in zip(data_selected['Center'], data_selected['Study'], data_selected['Subject'])]
# retrieve folders from folder_database
list_folder_dataset = [i for i in os.listdir(folder_dataset) if os.path.isdir(os.path.join(folder_dataset, i))]
# loop across folders
for ifolder in list_folder_dataset:
# check if folder is listed in database
if ifolder in database_subj:
# check if subject is selected
if ifolder in database_subj_selected:
subj_selected.append(ifolder)
# if not, report to user
else:
            logger.warning('WARNING: Subject ' + ifolder + ' is not listed in the database.')
return subj_selected
# <NAME> 2017-10-21
# def process_results(results, subjects_name, function, folder_dataset):
# try:
# results_dataframe = pd.concat([result for result in results])
# results_dataframe.loc[:, 'subject'] = pd.Series(subjects_name, index=results_dataframe.index)
# results_dataframe.loc[:, 'script'] = pd.Series([function] * len(subjects_name), index=results_dataframe.index)
# results_dataframe.loc[:, 'dataset'] = pd.Series([folder_dataset] * len(subjects_name), index=results_dataframe.index)
# # results_dataframe.loc[:, 'parameters'] = pd.Series([parameters] * len(subjects_name), index=results_dataframe.index)
# return results_dataframe
# except KeyboardInterrupt:
# return 'KeyboardException'
# except Exception as e:
# logger.error('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
# logger.exception(e)
# raise
def function_launcher(args):
# append local script to PYTHONPATH for import
sys.path.append(os.path.join(sct.__sct_dir__, "testing"))
# retrieve param class from sct_testing
param_testing = sct_testing.Param()
param_testing.function_to_test = args[0]
param_testing.path_data = args[1]
param_testing.args = args[2]
param_testing.test_integrity = args[3]
param_testing.redirect_stdout = True # create individual logs for each subject.
# load modules of function to test
module_testing = importlib.import_module('test_' + param_testing.function_to_test)
# initialize parameters specific to the test
param_testing = module_testing.init(param_testing)
try:
param_testing = sct_testing.test_function(param_testing)
except:
import traceback
logger.error('%s: %s' % ('test_' + args[0], traceback.format_exc()))
# output = (1, 'ERROR: Function crashed', 'No result')
from pandas import DataFrame
# TODO: CHECK IF ASSIGNING INDEX WITH SUBJECT IS NECESSARY
param_testing.results = DataFrame(index=[''], data={'status': int(1), 'output': 'ERROR: Function crashed.'})
# status_script = 1
# output_script = 'ERROR: Function crashed.'
# output = (status_script, output_script, DataFrame(data={'status': int(status_script), 'output': output_script}, index=['']))
# TODO: THE THING BELOW: IMPLEMENT INSIDE SCT_TESTING SUB-FUNCTION
# sys.stdout.close()
# sys.stdout = stdout_orig
# # write log file
# write_to_log_file(fname_log, output, mode='r+', prepend=True)
return param_testing.results
# return param_testing.results
# return script_to_be_run.test(*args[1:])
def init_worker():
signal.signal(signal.SIGINT, signal.SIG_IGN)
def get_list_subj(folder_dataset, data_specifications=None, fname_database=''):
"""
Generate list of eligible subjects from folder and file containing database
Parameters
----------
folder_dataset: path to database
data_specifications: field-based specifications for subject selection
fname_database: fname of XLS file that contains database
Returns
-------
list_subj: list of subjects
"""
if data_specifications is None:
list_subj = generate_data_list(folder_dataset)
else:
logger.info('Selecting subjects using the following specifications: ' + data_specifications)
list_subj = read_database(folder_dataset, specifications=data_specifications, fname_database=fname_database)
# logger.info(' Total number of subjects: ' + str(len(list_subj)))
# if no subject to process, raise exception
if len(list_subj) == 0:
raise Exception('No subject to process. Exit function.')
return list_subj
def run_function(function, folder_dataset, list_subj, list_args=[], nb_cpu=None, verbose=1, test_integrity=0):
"""
Run a test function on the dataset using multiprocessing and save the results
:return: results
# results are organized as the following: tuple of (status, output, DataFrame with results)
"""
# add full path to each subject
list_subj_path = [os.path.join(folder_dataset, subject) for subject in list_subj]
# All scripts that are using multithreading with ITK must not use it when using multiprocessing
os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = "1"
# create list that finds all the combinations for function + subject path + arguments. Example of one list element:
    # ('sct_propseg', os.path.join(path_sct, 'data', 'sct_test_function', '200_005_s2'), '-i ' + os.path.join("t2", "t2.nii.gz") + ' -c t2', 1)
list_func_subj_args = list(itertools.product(*[[function], list_subj_path, list_args, [test_integrity]]))
# data_and_params = itertools.izip(itertools.repeat(function), data_subjects, itertools.repeat(parameters))
logger.debug("stating pool with {} thread(s)".format(nb_cpu))
pool = PoolExecutor(nb_cpu)
compute_time = None
try:
compute_time = time.time()
count = 0
all_results = []
# logger.info('Waiting for results, be patient')
future_dirs = {pool.submit(function_launcher, subject_arg): subject_arg
for subject_arg in list_func_subj_args}
for future in concurrent.futures.as_completed(future_dirs):
count += 1
subject = os.path.basename(future_dirs[future][1])
arguments = future_dirs[future][2]
try:
result = future.result()
sct.no_new_line_log('Processing subjects... {}/{}'.format(count, len(list_func_subj_args)))
all_results.append(result)
except Exception as exc:
logger.error('{} {} generated an exception: {}'.format(subject, arguments, exc))
compute_time = time.time() - compute_time
# concatenate all_results into single Panda structure
results_dataframe = pd.concat(all_results)
except KeyboardInterrupt:
logger.warning("\nCaught KeyboardInterrupt, terminating workers")
for job in future_dirs:
job.cancel()
except Exception as e:
logger.error('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
logger.exception(e)
for job in future_dirs:
job.cancel()
raise
finally:
pool.shutdown()
return {'results': results_dataframe, "compute_time": compute_time}
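# Minimal usage sketch (assumes a local dataset folder './data' organized as described in the
# header docstring; the paths and arguments below are illustrative only):
#   list_subj = get_list_subj('./data')
#   out = run_function('sct_propseg', './data', list_subj,
#                      list_args=['-i t2/t2.nii.gz -c t2'], nb_cpu=4)
#   print(out['results'])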
def get_parser():
# Initialize parser
parser = msct_parser.Parser(__file__)
# Mandatory arguments
parser.usage.set_description("Run a specific SCT function in a list of subjects contained within a given folder. "
"Multiple parameters can be selected by repeating the flag -p as shown in the example below:\n"
"sct_pipeline -f sct_propseg -d PATH_TO_DATA -p \\\"-i t1/t1.nii.gz -c t1\\\" -p \\\"-i t2/t2.nii.gz -c t2\\\"")
parser.add_option(name="-f",
type_value="str",
description="Function to test.",
mandatory=True,
example="sct_propseg")
parser.add_option(name="-d",
type_value="folder",
description="Dataset directory.",
mandatory=True,
example="dataset_full/")
parser.add_option(name="-p",
type_value="str",
description="Arguments to pass to the function that is tested. Put double-quotes if there are "
"spaces in the list of parameters. Path to images are relative to the subject's folder. "
"Use multiple '-p' flags if you would like to test different parameters on the same"
"subjects.",
mandatory=False)
parser.add_option(name="-subj",
type_value="str",
description="Choose the subjects to process based on center, study, [...] to select the testing dataset\n"
"Syntax: field_1=val1,val2:field_2=val3:field_3=val4,val5",
example="center=unf,twh:gm_model=0:contrasts=t2,t2s",
mandatory=False)
parser.add_option(name="-subj-file",
type_value="file",
description="Excel spreadsheet containing database information (center, study, subject, demographics, ...). If this field is empty, it will search for an xls file located in the database folder. If no xls file is present, all subjects will be selected.",
default_value='',
mandatory=False)
parser.add_option(name="-j",
type_value="int",
description="Number of threads for parallel computing (one subject per thread)."
" By default, all available CPU cores will be used. Set to 0 for"
" no multiprocessing.",
mandatory=False,
example='42')
parser.add_option(name="-test-integrity",
type_value="multiple_choice",
description="Run (=1) or not (=0) integrity testing which is defined in test_integrity() function of the test_ script. See example here: https://github.com/neuropoly/spinalcordtoolbox/blob/master/testing/test_sct_propseg.py",
mandatory=False,
example=['0', '1'],
default_value='0') # TODO: this should have values True/False as defined in sct_testing, not 0/1
parser.usage.addSection("\nOUTPUT")
parser.add_option(name="-log",
type_value='multiple_choice',
description="Redirects Terminal verbose to log file.",
mandatory=False,
example=['0', '1'],
default_value='1')
parser.add_option(name="-pickle",
type_value='multiple_choice',
description="Output Pickle file.",
mandatory=False,
example=['0', '1'],
default_value='1')
parser.add_option(name='-email',
type_value=[[','], 'str'],
description="Email information to send results." \
" Fields are assigned with '=' and are separated with ',':\n" \
" - addr_to: address to send email to\n" \
" - addr_from: address to send email from (default: <EMAIL>)\n" \
" - login: SMTP login (use if different from email_from)\n"
" - passwd: <PASSWORD>\n"
" - smtp_host: SMTP server (default: 'smtp.gmail.com')\n"
" - smtp_port: port for SMTP server (default: 587)\n"
"Note: will always use TLS",
mandatory=False,
default_value='')
parser.add_option(name="-v",
type_value="multiple_choice",
description="Verbose. 0: nothing, 1: basic, 2: extended.",
mandatory=False,
example=['0', '1', '2'],
default_value='1')
return parser
# ====================================================================================================
# Start program
# ====================================================================================================
if __name__ == "__main__":
sct.init_sct()
parser = get_parser()
arguments = parser.parse(sys.argv[1:])
function_to_test = arguments["-f"]
path_data = os.path.abspath(arguments["-d"])
if "-p" in arguments:
# in case users used more than one '-p' flag, the output will be a list of all arguments (for each -p)
if isinstance(arguments['-p'], list):
list_args = arguments['-p']
else:
list_args = [arguments['-p']]
else:
list_args = []
data_specifications = None
if "-subj" in arguments:
data_specifications = arguments["-subj"]
if "-subj-file" in arguments:
fname_database = arguments["-subj-file"]
else:
fname_database = '' # if empty, it will look for xls file automatically in database folder
if "-j" in arguments:
jobs = arguments["-j"]
else:
jobs = cpu_count() # uses maximum number of available CPUs
test_integrity = int(arguments['-test-integrity'])
create_log = int(arguments['-log'])
output_pickle = int(arguments['-pickle'])
send_email = False
if '-email' in arguments:
create_log = True
send_email = True
# loop across fields
for i in arguments['-email']:
k, v = i.split("=")
if k == 'addr_to':
addr_to = v
if k == 'addr_from':
addr_from = v
if k == 'login':
login = v
if k == 'passwd':
passwd_from = v
if k == 'smtp_host':
smtp_host = v
if k == 'smtp_port':
smtp_port = int(v)
verbose = int(arguments["-v"])
# start timer
time_start = time.time()
# create single time variable for output names
output_time = time.strftime("%y%m%d%H%M%S")
# build log file name
if create_log:
# global log:
file_log = "_".join([output_time, function_to_test, sct.__get_branch().replace("/", "~")]).replace("sct_", "")
fname_log = file_log + '.log'
# handle_log = sct.ForkStdoutToFile(fname_log)
file_handler = sct.add_file_handler_to_logger(fname_log)
logger.info('Testing started on: ' + time.strftime("%Y-%m-%d %H:%M:%S"))
# fetch SCT version
logger.info('SCT version: {}'.format(sct.__version__))
# check OS
platform_running = sys.platform
    if platform_running.find('darwin') != -1:
        os_running = 'osx'
    elif platform_running.find('linux') != -1:
        os_running = 'linux'
    else:
        os_running = platform_running  # fall back to the raw platform string on other systems
    logger.info('OS: ' + os_running + ' (' + platform.platform() + ')')
# check hostname
logger.info('Hostname: {}'.format(platform.node()))
# Check number of CPU cores
logger.info('CPU Thread on local machine: {} '.format(cpu_count()))
logger.info(' Requested threads: {} '.format(jobs))
if __MPI__:
logger.info("Running in MPI mode with mpi4py.futures's MPIPoolExecutor")
else:
logger.info("Running with python concurrent.futures's ProcessPoolExecutor")
# check RAM
sct.checkRAM(os_running, 0)
# display command
logger.info('\nCommand(s):')
for args in list_args:
logger.info(' ' + function_to_test + ' ' + args)
logger.info('Dataset: ' + path_data)
logger.info('Test integrity: ' + str(test_integrity))
# test function
try:
# retrieve subjects list
list_subj = get_list_subj(path_data, data_specifications=data_specifications, fname_database=fname_database)
# during testing, redirect to standard output to avoid stacking error messages in the general log
if create_log:
# handle_log.pause()
sct.remove_handler(file_handler)
# run function
logger.debug("enter test fct")
tests_ret = run_function(function_to_test, path_data, list_subj, list_args=list_args, nb_cpu=jobs, verbose=1, test_integrity=test_integrity)
logger.debug("exit test fct")
results = tests_ret['results']
compute_time = tests_ret['compute_time']
# after testing, redirect to log file
if create_log:
logger.addHandler(file_handler)
# build results
|
pd.set_option('display.max_rows', 500)
|
pandas.set_option
|
from __future__ import print_function
import statistics
import time
from os.path import dirname, join
import pandas as pd
import os
import sys
import pickle
import dill
from math import inf
from math import log
from sklearn.preprocessing import Imputer
import numpy as np
from sklearn.preprocessing import StandardScaler
__all__ = ["load_data", "suppress_stdout_stderr", "Benchmark",
"check_name", "dev_model", "load_model", "aggregate_data",
"devmodel_to_array"]
"""
Salty is a toolkit for interacting with ionic liquid data from ILThermo
"""
class qspr_model():
def __init__(self, model, summary, descriptors):
self.Model = model
self.Summary = summary
self.Descriptors = descriptors
class dev_model():
def __init__(self, coef_data, data_summary, data):
self.Coef_data = coef_data
self.Data_summary = data_summary
self.Data = data
def devmodel_to_array(model_name, train_fraction=1):
    if isinstance(model_name, str):
model_outputs = len(model_name.split("_"))
pickle_in = open("../salty/data/MODELS/%s_devmodel.pkl" % model_name,
"rb")
devmodel = dill.load(pickle_in)
else:
model_outputs = -6 + model_name.Data_summary.shape[0]
devmodel = model_name
rawdf = devmodel.Data
rawdf = rawdf.sample(frac=1)
datadf = rawdf.select_dtypes(include=[np.number])
data = np.array(datadf)
n = data.shape[0]
d = data.shape[1]
d -= model_outputs
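    # d is now the number of descriptor (input) columns; the last model_outputs columns are the targets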
n_train = int(n * train_fraction) # set fraction for training
n_test = n - n_train
X_train = np.zeros((n_train, d)) # prepare train/test arrays
X_test = np.zeros((n_test, d))
Y_train = np.zeros((n_train, model_outputs))
Y_test = np.zeros((n_test, model_outputs))
X_train[:] = data[:n_train, :-model_outputs]
Y_train[:] = (data[:n_train, -model_outputs:].astype(float))
X_test[:] = data[n_train:, :-model_outputs]
Y_test[:] = (data[n_train:, -model_outputs:].astype(float))
return X_train, Y_train, X_test, Y_test
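# Minimal usage sketch (the model name below is hypothetical; any pickled dev_model under
# salty/data/MODELS/ would work):
#   X_train, Y_train, X_test, Y_test = devmodel_to_array("density", train_fraction=0.8)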
def aggregate_data(data, T=[0, inf], P=[0, inf], data_ranges=None,
merge="overlap", feature_type=None, impute=False):
"""
Aggregates molecular data for model training
Parameters
----------
data: list
density, cpt, and/or viscosity
T: array
desired min and max of temperature distribution
P: array
desired min and max of pressure distribution
data_ranges: array
desired min and max of property distribution(s)
merge: str
overlap or union, defaults to overlap. Merge type of property sets
feature_type: str
desired feature set, defaults to RDKit's 2D descriptor set
Returns
-----------
devmodel: dev_model obj
returns dev_model object containing scale/center information,
data summary, and the data frame
"""
data_files = []
for i, string in enumerate(data):
data_files.append(load_data("MODELS/%s_premodel.csv" % string))
if i == 0:
merged = data_files[0]
if i == 1:
merged = pd.merge(data_files[0], data_files[1], sort=False,
how='outer')
elif i > 1:
merged = pd.merge(merged, data_files[-1], sort=False, how='outer')
if merge == "overlap":
merged.dropna(inplace=True)
# select state variable and data ranges
merged = merged.loc[merged["Temperature, K"] < T[1]]
merged = merged.loc[merged["Temperature, K"] > T[0]]
merged = merged.loc[merged["Pressure, kPa"] < P[1]]
merged = merged.loc[merged["Pressure, kPa"] > P[0]]
for i in range(1, len(data) + 1):
merged = merged[merged.iloc[:, -i] != 0] # avoid log(0) error
if data_ranges:
merged = merged[merged.iloc[:, -i] < data_ranges[::-1][i - 1][1]]
merged = merged[merged.iloc[:, -i] > data_ranges[::-1][i - 1][0]]
merged.reset_index(drop=True, inplace=True)
# Create summary of dataset
unique_salts = merged["smiles-cation"] + merged["smiles-anion"]
unique_cations = repr(merged["smiles-cation"].unique())
unique_anions = repr(merged["smiles-anion"].unique())
actual_data_ranges = []
for i in range(1, len(data) + 3):
actual_data_ranges.append("{} - {}".format(
str(merged.iloc[:, -i].min()), str(merged.iloc[:, -i].max())))
a = np.array([len(unique_salts.unique()), unique_cations, unique_anions,
len(unique_salts)])
a = np.concatenate((a, actual_data_ranges))
cols1 = ["Unique salts", "Cations", "Anions", "Total datapoints"]
cols2 = ["Temperature range (K)", "Pressure range (kPa)"]
cols = cols1 + data[::-1] + cols2
data_summary = pd.DataFrame(a, cols)
# scale and center
metaDf = merged.select_dtypes(include=["object"])
dataDf = merged.select_dtypes(include=[np.number])
cols = dataDf.columns.tolist()
if impute:
imp = Imputer(missing_values='NaN', strategy="median", axis=0)
X = imp.fit_transform(dataDf)
dataDf =
|
pd.DataFrame(X, columns=cols)
|
pandas.DataFrame
|
import calendar
from ..utils import search_quote
from datetime import datetime, timedelta
from ..utils import process_dataframe_and_series
import rich
from jsonpath import jsonpath
from retry import retry
import pandas as pd
import requests
import multitasking
import signal
from tqdm import tqdm
from typing import (Dict,
List,
Union)
from ..shared import session
from ..common import get_quote_history as get_quote_history_for_stock
from ..common import get_history_bill as get_history_bill_for_stock
from ..common import get_today_bill as get_today_bill_for_stock
from ..common import get_realtime_quotes_by_fs
from ..utils import (to_numeric,
get_quote_id)
from .config import EASTMONEY_STOCK_DAILY_BILL_BOARD_FIELDS, EASTMONEY_STOCK_BASE_INFO_FIELDS
from ..common.config import (
FS_DICT,
MARKET_NUMBER_DICT,
EASTMONEY_REQUEST_HEADERS,
EASTMONEY_QUOTE_FIELDS
)
signal.signal(signal.SIGINT, multitasking.killall)
@to_numeric
def get_base_info_single(stock_code: str) -> pd.Series:
"""
    Get basic information for a single stock
    Parameters
    ----------
    stock_code : str
        Stock code
    Returns
    -------
    Series
        Basic information for the single stock
"""
fields = ",".join(EASTMONEY_STOCK_BASE_INFO_FIELDS.keys())
secid = get_quote_id(stock_code)
if not secid:
return pd.Series(index=EASTMONEY_STOCK_BASE_INFO_FIELDS.values())
params = (
('ut', 'fa5fd1943c7b386f172d6893dbfba10b'),
('invt', '2'),
('fltt', '2'),
('fields', fields),
('secid', secid),
)
url = 'http://push2.eastmoney.com/api/qt/stock/get'
json_response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
s = pd.Series(json_response['data']).rename(
index=EASTMONEY_STOCK_BASE_INFO_FIELDS)
return s[EASTMONEY_STOCK_BASE_INFO_FIELDS.values()]
def get_base_info_muliti(stock_codes: List[str]) -> pd.DataFrame:
"""
    Get basic information for multiple stocks
    Parameters
    ----------
    stock_codes : List[str]
        List of stock codes
    Returns
    -------
    DataFrame
        Basic information for the stocks
"""
@multitasking.task
@retry(tries=3, delay=1)
def start(stock_code: str):
s = get_base_info_single(stock_code)
dfs.append(s)
pbar.update()
pbar.set_description(f'Processing => {stock_code}')
    dfs: List[pd.Series] = []  # each element is the Series returned by get_base_info_single
pbar = tqdm(total=len(stock_codes))
for stock_code in stock_codes:
start(stock_code)
multitasking.wait_for_tasks()
df = pd.DataFrame(dfs)
df = df.dropna(subset=['股票代码'])
return df
@to_numeric
def get_base_info(stock_codes: Union[str, List[str]]) -> Union[pd.Series, pd.DataFrame]:
"""
Parameters
----------
    stock_codes : Union[str, List[str]]
        Stock code, or a list of stock codes
    Returns
    -------
    Union[Series, DataFrame]
        - ``Series`` : basic information for a single stock (when ``stock_codes`` is a string)
        - ``DataFrame`` : basic information for multiple stocks (when ``stock_codes`` is a list of strings)
    Raises
    ------
    TypeError
        If ``stock_codes`` is of an unsupported type
Examples
--------
>>> import efinance as ef
    >>> # Get basic info for a single stock
>>> ef.stock.get_base_info('600519')
股票代码 600519
股票名称 贵州茅台
市盈率(动) 39.38
市净率 12.54
所处行业 酿酒行业
总市值 2198082348462.0
流通市值 2198082348462.0
板块编号 BK0477
ROE 8.29
净利率 54.1678
净利润 13954462085.610001
毛利率 91.6763
dtype: object
    >>> # Get basic info for multiple stocks
>>> ef.stock.get_base_info(['600519','300715'])
股票代码 股票名称 市盈率(动) 市净率 所处行业 总市值 流通市值 板块编号 ROE 净利率 净利润 毛利率
0 300715 凯伦股份 42.29 3.12 水泥建材 9.160864e+09 6.397043e+09 BK0424 3.97 12.1659 5.415488e+07 32.8765
1 600519 贵州茅台 39.38 12.54 酿酒行业 2.198082e+12 2.198082e+12 BK0477 8.29 54.1678 1.395446e+10 91.6763
"""
if isinstance(stock_codes, str):
return get_base_info_single(stock_codes)
elif hasattr(stock_codes, '__iter__'):
return get_base_info_muliti(stock_codes)
raise TypeError(f'所给的 {stock_codes} 不符合参数要求')
def get_quote_history(stock_codes: Union[str, List[str]],
beg: str = '19000101',
end: str = '20500101',
klt: int = 101,
fqt: int = 1,
**kwargs) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""
    Get candlestick (K-line) data for one or more stocks
    Parameters
    ----------
    stock_codes : Union[str,List[str]]
        Stock code/name, or a list of stock codes/names
    beg : str, optional
        Start date, defaults to ``'19000101'`` (January 1, 1900)
    end : str, optional
        End date, defaults to ``'20500101'`` (January 1, 2050)
    klt : int, optional
        Candle interval, defaults to ``101``; available values include
        - ``1`` : 1 minute
        - ``5`` : 5 minutes
        - ``15`` : 15 minutes
        - ``30`` : 30 minutes
        - ``60`` : 60 minutes
        - ``101`` : daily
        - ``102`` : weekly
        - ``103`` : monthly
    fqt : int, optional
        Price-adjustment mode, defaults to ``1``; available values include
        - ``0`` : no adjustment
        - ``1`` : forward-adjusted
        - ``2`` : backward-adjusted
    Returns
    -------
    Union[DataFrame, Dict[str, DataFrame]]
        Candlestick data
        - ``DataFrame`` : when ``stock_codes`` is a ``str``
        - ``Dict[str, DataFrame]`` : when ``stock_codes`` is a ``List[str]``
Examples
--------
>>> import efinance as ef
    >>> # Get daily candlestick data for a single stock
>>> ef.stock.get_quote_history('600519')
股票名称 股票代码 日期 开盘 收盘 最高 最低 成交量 成交额 振幅 涨跌幅 涨跌额 换手率
0 贵州茅台 600519 2001-08-27 -89.74 -89.53 -89.08 -90.07 406318 1.410347e+09 -1.10 0.92 0.83 56.83
1 贵州茅台 600519 2001-08-28 -89.64 -89.27 -89.24 -89.72 129647 4.634630e+08 -0.54 0.29 0.26 18.13
2 贵州茅台 600519 2001-08-29 -89.24 -89.36 -89.24 -89.42 53252 1.946890e+08 -0.20 -0.10 -0.09 7.45
3 贵州茅台 600519 2001-08-30 -89.38 -89.22 -89.14 -89.44 48013 1.775580e+08 -0.34 0.16 0.14 6.72
4 贵州茅台 600519 2001-08-31 -89.21 -89.24 -89.12 -89.28 23231 8.623100e+07 -0.18 -0.02 -0.02 3.25
... ... ... ... ... ... ... ... ... ... ... ... ... ...
4756 贵州茅台 600519 2021-07-23 1937.82 1900.00 1937.82 1895.09 47585 9.057762e+09 2.20 -2.06 -40.01 0.38
4757 贵州茅台 600519 2021-07-26 1879.00 1804.11 1879.00 1780.00 98619 1.789436e+10 5.21 -5.05 -95.89 0.79
4758 贵州茅台 600519 2021-07-27 1803.00 1712.89 1810.00 1703.00 86577 1.523081e+10 5.93 -5.06 -91.22 0.69
4759 贵州茅台 600519 2021-07-28 1703.00 1768.90 1788.20 1682.12 85369 1.479247e+10 6.19 3.27 56.01 0.68
4760 贵州茅台 600519 2021-07-29 1810.01 1749.79 1823.00 1734.34 63864 1.129957e+10 5.01 -1.08 -19.11 0.51
    >>> # Get historical quotes for multiple stocks
>>> stock_df = ef.stock.get_quote_history(['600519','300750'])
>>> type(stock_df)
<class 'dict'>
>>> stock_df.keys()
dict_keys(['300750', '600519'])
>>> stock_df['600519']
股票名称 股票代码 日期 开盘 收盘 最高 最低 成交量 成交额 振幅 涨跌幅 涨跌额 换手率
0 贵州茅台 600519 2001-08-27 -89.74 -89.53 -89.08 -90.07 406318 1.410347e+09 -1.10 0.92 0.83 56.83
1 贵州茅台 600519 2001-08-28 -89.64 -89.27 -89.24 -89.72 129647 4.634630e+08 -0.54 0.29 0.26 18.13
2 贵州茅台 600519 2001-08-29 -89.24 -89.36 -89.24 -89.42 53252 1.946890e+08 -0.20 -0.10 -0.09 7.45
3 贵州茅台 600519 2001-08-30 -89.38 -89.22 -89.14 -89.44 48013 1.775580e+08 -0.34 0.16 0.14 6.72
4 贵州茅台 600519 2001-08-31 -89.21 -89.24 -89.12 -89.28 23231 8.623100e+07 -0.18 -0.02 -0.02 3.25
... ... ... ... ... ... ... ... ... ... ... ... ... ...
4756 贵州茅台 600519 2021-07-23 1937.82 1900.00 1937.82 1895.09 47585 9.057762e+09 2.20 -2.06 -40.01 0.38
4757 贵州茅台 600519 2021-07-26 1879.00 1804.11 1879.00 1780.00 98619 1.789436e+10 5.21 -5.05 -95.89 0.79
4758 贵州茅台 600519 2021-07-27 1803.00 1712.89 1810.00 1703.00 86577 1.523081e+10 5.93 -5.06 -91.22 0.69
4759 贵州茅台 600519 2021-07-28 1703.00 1768.90 1788.20 1682.12 85369 1.479247e+10 6.19 3.27 56.01 0.68
4760 贵州茅台 600519 2021-07-29 1810.01 1749.79 1823.00 1734.34 63864 1.129957e+10 5.01 -1.08 -19.11 0.51
"""
df = get_quote_history_for_stock(
stock_codes,
beg=beg,
end=end,
klt=klt,
fqt=fqt
)
if isinstance(df, pd.DataFrame):
df.rename(columns={'代码': '股票代码',
'名称': '股票名称'
},
inplace=True)
elif isinstance(df, dict):
for stock_code in df.keys():
df[stock_code].rename(columns={'代码': '股票代码',
'名称': '股票名称'
},
inplace=True)
    # NOTE extended interface: when this keyword is set, a single DataFrame is returned instead of a dict
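    # e.g. get_quote_history(['600519', '300750'], return_df=True) yields one concatenated DataFrame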
if kwargs.get('return_df'):
df: pd.DataFrame = pd.concat(df, axis=0, ignore_index=True)
return df
@process_dataframe_and_series(remove_columns_and_indexes=['市场编号'])
@to_numeric
def get_realtime_quotes(fs: Union[str, List[str]] = None) -> pd.DataFrame:
"""
    Get the latest quotes for one or more markets
    Parameters
    ----------
    fs : Union[str, List[str]], optional
        Market name or a list of market names; available values include
        - ``None``  Shanghai, Shenzhen and Beijing A-share quotes
        - ``'沪深A股'``  Shanghai/Shenzhen A-share quotes
        - ``'沪A'``  Shanghai A-share quotes
        - ``'深A'``  Shenzhen A-share quotes
        - ``北A``  Beijing A-share quotes
        - ``'可转债'``  Shanghai/Shenzhen convertible-bond quotes
        - ``'期货'``  futures quotes
        - ``'创业板'``  ChiNext (growth board) quotes
        - ``'美股'``  US stock quotes
        - ``'港股'``  Hong Kong stock quotes
        - ``'中概股'``  US-listed Chinese concept stock quotes
        - ``'新股'``  Shanghai/Shenzhen new-issue (IPO) quotes
        - ``'科创板'``  STAR Market quotes
        - ``'沪股通'``  Shanghai Stock Connect quotes
        - ``'深股通'``  Shenzhen Stock Connect quotes
        - ``'行业板块'``  industry-sector quotes
        - ``'概念板块'``  concept-sector quotes
        - ``'沪深系列指数'``  Shanghai/Shenzhen index-series quotes
        - ``'上证系列指数'``  SSE index-series quotes
        - ``'深证系列指数'``  SZSE index-series quotes
        - ``'ETF'``  ETF fund quotes
        - ``'LOF'``  LOF fund quotes
    Returns
    -------
    DataFrame
        Latest quotes for the requested market(s)
    Raises
    ------
    KeyError
        If ``fs`` contains an unknown market type
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_realtime_quotes()
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 688787 N海天 277.59 139.48 172.39 139.25 171.66 102.54 85.62 - 78.93 74519 1110318832.0 36.94 5969744000 1213908667 1.688787 沪A
1 301045 N天禄 149.34 39.42 48.95 39.2 48.95 23.61 66.66 - 37.81 163061 683878656.0 15.81 4066344240 964237089 0.301045 深A
2 300532 今天国际 20.04 12.16 12.16 10.69 10.69 2.03 8.85 3.02 -22.72 144795 171535181.0 10.13 3322510580 1989333440 0.300532 深A
3 300600 国瑞科技 20.02 13.19 13.19 11.11 11.41 2.2 18.61 2.82 218.75 423779 541164432.0 10.99 3915421427 3003665117 0.300600 深A
4 300985 致远新能 20.01 47.08 47.08 36.8 39.4 7.85 66.65 2.17 58.37 210697 897370992.0 39.23 6277336472 1488300116 0.300985 深A
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
4598 603186 华正新材 -10.0 43.27 44.09 43.27 43.99 -4.81 1.98 0.48 25.24 27697 120486294.0 48.08 6146300650 6063519472 1.603186 沪A
4599 688185 康希诺-U -10.11 476.4 534.94 460.13 530.0 -53.6 6.02 2.74 -2088.07 40239 1960540832.0 530.0 117885131884 31831479215 1.688185 沪A
4600 688148 芳源股份 -10.57 31.3 34.39 31.3 33.9 -3.7 26.07 0.56 220.01 188415 620632512.0 35.0 15923562000 2261706043 1.688148 沪A
4601 300034 钢研高纳 -10.96 43.12 46.81 42.88 46.5 -5.31 7.45 1.77 59.49 323226 1441101824.0 48.43 20959281094 18706911861 0.300034 深A
4602 300712 永福股份 -13.71 96.9 110.94 95.4 109.0 -15.4 6.96 1.26 511.21 126705 1265152928.0 112.3 17645877600 17645877600 0.300712 深A
>>> ef.stock.get_realtime_quotes(['创业板','港股'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 00859 中昌国际控股 49.02 0.38 0.38 0.26 0.26 0.125 0.08 86.85 -2.83 938000 262860.0 0.255 427510287 427510287 128.00859 None
1 01058 粤海制革 41.05 1.34 1.51 0.9 0.93 0.39 8.34 1.61 249.89 44878000 57662440.0 0.95 720945460 720945460 128.01058 None
2 00713 世界(集团) 27.94 0.87 0.9 0.68 0.68 0.19 1.22 33.28 3.64 9372000 7585400.0 0.68 670785156 670785156 128.00713 None
3 08668 瀛海集团 24.65 0.177 0.179 0.145 0.145 0.035 0.0 10.0 -9.78 20000 3240.0 0.142 212400000 212400000 128.08668 None
4 08413 亚洲杂货 24.44 0.28 0.28 0.25 0.25 0.055 0.01 3.48 -20.76 160000 41300.0 0.225 325360000 325360000 128.08413 None
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
5632 08429 冰雪集团 -16.75 0.174 0.2 0.166 0.2 -0.035 2.48 3.52 -21.58 11895000 2074645.0 0.209 83520000 83520000 128.08429 None
5633 00524 长城天下 -17.56 0.108 0.118 0.103 0.118 -0.023 0.45 15.43 -6.55 5961200 649171.0 0.131 141787800 141787800 128.00524 None
5634 08377 申酉控股 -17.71 0.395 0.46 0.39 0.46 -0.085 0.07 8.06 -5.07 290000 123200.0 0.48 161611035 161611035 128.08377 None
5635 00108 国锐地产 -19.01 1.15 1.42 1.15 1.42 -0.27 0.07 0.78 23.94 2376000 3012080.0 1.42 3679280084 3679280084 128.00108 None
5636 08237 华星控股 -25.0 0.024 0.031 0.023 0.031 -0.008 0.43 8.74 -2.01 15008000 364188.0 0.032 83760000 83760000 128.08237 None
>>> ef.stock.get_realtime_quotes(['ETF'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 513050 中概互联网ETF 4.49 1.444 1.455 1.433 1.452 0.062 6.71 0.92 - 12961671 1870845984.0 1.382 27895816917 27895816917 1.513050 沪A
1 513360 教育ETF 4.38 0.5 0.502 0.486 0.487 0.021 16.89 1.7 - 1104254 54634387.0 0.479 326856952 326856952 1.513360 沪A
2 159766 旅游ETF 3.84 0.974 0.988 0.95 0.95 0.036 14.46 1.97 - 463730 45254947.0 0.938 312304295 312304295 0.159766 深A
3 159865 养殖ETF 3.8 0.819 0.828 0.785 0.791 0.03 12.13 0.89 - 1405871 114254714.0 0.789 949594189 949594189 0.159865 深A
4 516670 畜牧养殖ETF 3.76 0.856 0.864 0.825 0.835 0.031 24.08 0.98 - 292027 24924513.0 0.825 103803953 103803953 1.516670 沪A
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
549 513060 恒生医疗ETF -4.12 0.861 0.905 0.86 0.902 -0.037 47.96 1.57 - 1620502 141454355.0 0.898 290926128 290926128 1.513060 沪A
550 515220 煤炭ETF -4.46 2.226 2.394 2.194 2.378 -0.104 14.39 0.98 - 2178176 487720560.0 2.330 3369247992 3369247992 1.515220 沪A
551 513000 日经225ETF易方达 -4.49 1.212 1.269 1.21 1.269 -0.057 5.02 2.49 - 25819 3152848.0 1.269 62310617 62310617 1.513000 沪A
552 513880 日经225ETF -4.59 1.163 1.224 1.162 1.217 -0.056 16.93 0.94 - 71058 8336846.0 1.219 48811110 48811110 1.513880 沪A
553 513520 日经ETF -4.76 1.2 1.217 1.196 1.217 -0.06 27.7 1.79 - 146520 17645828.0 1.260 63464640 63464640 1.513520 沪A
Notes
-----
    Regardless of whether the instruments are stocks, convertible bonds, futures or funds, the first column is always named ``股票代码``
"""
fs_list: List[str] = []
if fs is None:
fs_list.append(FS_DICT['stock'])
if isinstance(fs, str):
fs = [fs]
if isinstance(fs, list):
for f in fs:
if not FS_DICT.get(f):
raise KeyError(f'指定的行情参数 `{fs}` 不正确')
fs_list.append(FS_DICT[f])
    # if an empty list was given, fall back to the Shanghai/Shenzhen A-share market
if not fs_list:
fs_list.append(FS_DICT['stock'])
fs_str = ','.join(fs_list)
df = get_realtime_quotes_by_fs(fs_str)
df.rename(columns={'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_history_bill(stock_code: str) -> pd.DataFrame:
"""
    Get historical daily order-flow (capital inflow/outflow) data for a single stock
    Parameters
    ----------
    stock_code : str
        Stock code
    Returns
    -------
    DataFrame
        Historical order-flow data for the stock on the Shanghai/Shenzhen markets
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_history_bill('600519')
股票名称 股票代码 日期 主力净流入 小单净流入 中单净流入 大单净流入 超大单净流入 主力净流入占比 小单流入净占比 中单流入净占比 大单流入净占比 超大单流入净占比 收盘价 涨跌幅
0 贵州茅台 600519 2021-03-04 -3.670272e+06 -2282056.0 5.952143e+06 1.461528e+09 -1.465199e+09 -0.03 -0.02 0.04 10.99 -11.02 2013.71 -5.05
1 贵州茅台 600519 2021-03-05 -1.514880e+07 -1319066.0 1.646793e+07 -2.528896e+07 1.014016e+07 -0.12 -0.01 0.13 -0.19 0.08 2040.82 1.35
2 贵州茅台 600519 2021-03-08 -8.001702e+08 -877074.0 8.010473e+08 5.670671e+08 -1.367237e+09 -6.29 -0.01 6.30 4.46 -10.75 1940.71 -4.91
3 贵州茅台 600519 2021-03-09 -2.237770e+08 -6391767.0 2.301686e+08 -1.795013e+08 -4.427571e+07 -1.39 -0.04 1.43 -1.11 -0.27 1917.70 -1.19
4 贵州茅台 600519 2021-03-10 -2.044173e+08 -1551798.0 2.059690e+08 -2.378506e+08 3.343331e+07 -2.02 -0.02 2.03 -2.35 0.33 1950.72 1.72
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
97 贵州茅台 600519 2021-07-26 -1.564233e+09 13142211.0 1.551091e+09 -1.270400e+08 -1.437193e+09 -8.74 0.07 8.67 -0.71 -8.03 1804.11 -5.05
98 贵州茅台 600519 2021-07-27 -7.803296e+08 -10424715.0 7.907544e+08 6.725104e+07 -8.475807e+08 -5.12 -0.07 5.19 0.44 -5.56 1712.89 -5.06
99 贵州茅台 600519 2021-07-28 3.997645e+08 2603511.0 -4.023677e+08 2.315648e+08 1.681997e+08 2.70 0.02 -2.72 1.57 1.14 1768.90 3.27
100 贵州茅台 600519 2021-07-29 -9.209842e+08 -2312235.0 9.232964e+08 -3.959741e+08 -5.250101e+08 -8.15 -0.02 8.17 -3.50 -4.65 1749.79 -1.08
101 贵州茅台 600519 2021-07-30 -1.524740e+09 -6020099.0 1.530761e+09 1.147248e+08 -1.639465e+09 -11.63 -0.05 11.68 0.88 -12.51 1678.99 -4.05
"""
df = get_history_bill_for_stock(stock_code)
df.rename(columns={
'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_today_bill(stock_code: str) -> pd.DataFrame:
"""
    Get minute-level intraday order-flow data for a single stock on the latest trading day
    Parameters
    ----------
    stock_code : str
        Stock code
    Returns
    -------
    DataFrame
        Minute-level intraday order-flow data for the latest trading day
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_today_bill('600519')
股票代码 时间 主力净流入 小单净流入 中单净流入 大单净流入 超大单净流入
0 600519 2021-07-29 09:31 -3261705.0 -389320.0 3651025.0 -12529658.0 9267953.0
1 600519 2021-07-29 09:32 6437999.0 -606994.0 -5831006.0 -42615994.0 49053993.0
2 600519 2021-07-29 09:33 13179707.0 -606994.0 -12572715.0 -85059118.0 98238825.0
3 600519 2021-07-29 09:34 15385244.0 -970615.0 -14414632.0 -86865209.0 102250453.0
4 600519 2021-07-29 09:35 7853716.0 -970615.0 -6883104.0 -75692436.0 83546152.0
.. ... ... ... ... ... ... ...
235 600519 2021-07-29 14:56 -918956019.0 -1299630.0 920255661.0 -397127393.0 -521828626.0
236 600519 2021-07-29 14:57 -920977761.0 -2319213.0 923296987.0 -397014702.0 -523963059.0
237 600519 2021-07-29 14:58 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
238 600519 2021-07-29 14:59 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
239 600519 2021-07-29 15:00 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
"""
df = get_today_bill_for_stock(stock_code)
df.rename(columns={
'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_latest_quote(stock_codes: List[str]) -> pd.DataFrame:
"""
    Get realtime price changes for multiple stocks
    Parameters
    ----------
    stock_codes : List[str]
        List of stock codes
    Returns
    -------
    DataFrame
        Realtime price changes for stocks on the Shanghai/Shenzhen, Hong Kong and US markets
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_latest_quote(['600519','300750'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 市场类型
0 600519 贵州茅台 0.59 1700.04 1713.0 1679.0 1690.0 10.04 0.30 0.72 43.31 37905 6.418413e+09 1690.0 2135586507912 2135586507912 沪A
1 300750 宁德时代 0.01 502.05 529.9 480.0 480.0 0.05 1.37 1.75 149.57 277258 1.408545e+10 502.0 1169278366994 1019031580505 深A
Notes
-----
    When you need realtime quotes for many Shanghai/Shenzhen A-shares at once, prefer ``efinance.stock.get_realtime_quotes``
"""
if isinstance(stock_codes, str):
stock_codes = [stock_codes]
secids: List[str] = [get_quote_id(stock_code)
for stock_code in stock_codes]
columns = EASTMONEY_QUOTE_FIELDS
fields = ",".join(columns.keys())
params = (
('OSVersion', '14.3'),
('appVersion', '6.3.8'),
('fields', fields),
('fltt', '2'),
('plat', 'Iphone'),
('product', 'EFund'),
('secids', ",".join(secids)),
('serverVersion', '6.3.6'),
('version', '6.3.8'),
)
url = 'https://push2.eastmoney.com/api/qt/ulist.np/get'
json_response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
rows = jsonpath(json_response, '$..diff[:]')
    if rows is None:
        return pd.DataFrame(columns=columns.values()).rename(columns={
            '市场编号': '市场类型'
        })
df = pd.DataFrame(rows)[columns.keys()].rename(columns=columns)
df['市场类型'] = df['市场编号'].apply(lambda x: MARKET_NUMBER_DICT.get(str(x)))
del df['市场编号']
return df
@to_numeric
def get_top10_stock_holder_info(stock_code: str,
top: int = 4) -> pd.DataFrame:
"""
    Get the top-10 shareholders of a given Shanghai/Shenzhen stock
    Parameters
    ----------
    stock_code : str
        Stock code
    top : int, optional
        Number of most recent top-10 tradable-shareholder disclosures to fetch, defaults to ``4``
    Returns
    -------
    DataFrame
        Information about the stock's top-10 shareholders
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_top10_stock_holder_info('600519',top = 1)
股票代码 更新日期 股东代码 股东名称 持股数 持股比例 增减 变动率
0 600519 2021-03-31 80010298 中国贵州茅台酒厂(集团)有限责任公司 6.783亿 54.00% 不变 --
1 600519 2021-03-31 80637337 香港中央结算有限公司 9594万 7.64% -841.1万 -8.06%
2 600519 2021-03-31 80732941 贵州省国有资本运营有限责任公司 5700万 4.54% -182.7万 -3.11%
3 600519 2021-03-31 80010302 贵州茅台酒厂集团技术开发公司 2781万 2.21% 不变 --
4 600519 2021-03-31 80475097 中央汇金资产管理有限责任公司 1079万 0.86% 不变 --
5 600519 2021-03-31 80188285 中国证券金融股份有限公司 803.9万 0.64% -91 0.00%
6 600519 2021-03-31 78043999 深圳市金汇荣盛财富管理有限公司-金汇荣盛三号私募证券投资基金 502.1万 0.40% 不变 --
7 600519 2021-03-31 70400207 中国人寿保险股份有限公司-传统-普通保险产品-005L-CT001沪 434.1万 0.35% 44.72万 11.48%
8 600519 2021-03-31 005827 中国银行股份有限公司-易方达蓝筹精选混合型证券投资基金 432万 0.34% 新进 --
9 600519 2021-03-31 78083830 珠海市瑞丰汇邦资产管理有限公司-瑞丰汇邦三号私募证券投资基金 416.1万 0.33% 不变 --
"""
def gen_fc(stock_code: str) -> str:
"""
        Parameters
        ----------
        stock_code : str
            Stock code
        Returns
        -------
        str
            String in the required format
"""
_type, stock_code = get_quote_id(stock_code).split('.')
_type = int(_type)
        # Shenzhen market
        if _type == 0:
            return f'{stock_code}02'
        # Shanghai market
return f'{stock_code}01'
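    # e.g. gen_fc('000001') -> '00000102' (Shenzhen), gen_fc('600519') -> '60051901' (Shanghai)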
def get_public_dates(stock_code: str) -> List[str]:
"""
        Get the dates on which shareholder information for the given stock was disclosed
        Parameters
        ----------
        stock_code : str
            Stock code
        Returns
        -------
        List[str]
            List of disclosure dates
"""
quote_id = get_quote_id(stock_code)
stock_code = quote_id.split('.')[-1]
fc = gen_fc(stock_code)
data = {"fc": fc}
url = 'https://emh5.eastmoney.com/api/GuBenGuDong/GetFirstRequest2Data'
json_response = requests.post(
url, json=data).json()
dates = jsonpath(json_response, f'$..BaoGaoQi')
if not dates:
return []
return dates
fields = {
'GuDongDaiMa': '股东代码',
'GuDongMingCheng': '股东名称',
'ChiGuShu': '持股数',
'ChiGuBiLi': '持股比例',
'ZengJian': '增减',
'BianDongBiLi': '变动率',
}
quote_id = get_quote_id(stock_code)
stock_code = quote_id.split('.')[-1]
fc = gen_fc(stock_code)
dates = get_public_dates(stock_code)
dfs: List[pd.DataFrame] = []
empty_df = pd.DataFrame(columns=['股票代码', '日期']+list(fields.values()))
for date in dates[:top]:
data = {"fc": fc, "BaoGaoQi": date}
url = 'https://emh5.eastmoney.com/api/GuBenGuDong/GetShiDaLiuTongGuDong'
response = requests.post(url, json=data)
response.encoding = 'utf-8'
items: List[dict] = jsonpath(
response.json(), f'$..ShiDaLiuTongGuDongList[:]')
if not items:
continue
df = pd.DataFrame(items)
df.rename(columns=fields, inplace=True)
df.insert(0, '股票代码', [stock_code for _ in range(len(df))])
df.insert(1, '更新日期', [date for _ in range(len(df))])
del df['IsLink']
dfs.append(df)
if len(dfs) == 0:
return empty_df
return pd.concat(dfs, axis=0, ignore_index=True)
def get_all_report_dates() -> pd.DataFrame:
"""
    Get all report-period dates for Shanghai/Shenzhen stocks
    Returns
    -------
    DataFrame
        Report-period information for the Shanghai/Shenzhen markets
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_all_report_dates()
报告日期 季报名称
0 2021-06-30 2021年 半年报
1 2021-03-31 2021年 一季报
2 2020-12-31 2020年 年报
3 2020-09-30 2020年 三季报
4 2020-06-30 2020年 半年报
5 2020-03-31 2020年 一季报
6 2019-12-31 2019年 年报
7 2019-09-30 2019年 三季报
8 2019-06-30 2019年 半年报
9 2019-03-31 2019年 一季报
10 2018-12-31 2018年 年报
11 2018-09-30 2018年 三季报
12 2018-06-30 2018年 半年报
13 2018-03-31 2018年 一季报
14 2017-12-31 2017年 年报
15 2017-09-30 2017年 三季报
16 2017-06-30 2017年 半年报
17 2017-03-31 2017年 一季报
18 2016-12-31 2016年 年报
19 2016-09-30 2016年 三季报
20 2016-06-30 2016年 半年报
21 2016-03-31 2016年 一季报
22 2015-12-31 2015年 年报
24 2015-06-30 2015年 半年报
25 2015-03-31 2015年 一季报
26 2014-12-31 2014年 年报
27 2014-09-30 2014年 三季报
28 2014-06-30 2014年 半年报
29 2014-03-31 2014年 一季报
30 2013-12-31 2013年 年报
31 2013-09-30 2013年 三季报
32 2013-06-30 2013年 半年报
33 2013-03-31 2013年 一季报
34 2012-12-31 2012年 年报
35 2012-09-30 2012年 三季报
36 2012-06-30 2012年 半年报
37 2012-03-31 2012年 一季报
38 2011-12-31 2011年 年报
39 2011-09-30 2011年 三季报
"""
fields = {
'REPORT_DATE': '报告日期',
'DATATYPE': '季报名称'
}
params = (
('type', 'RPT_LICO_FN_CPD_BBBQ'),
('sty', ','.join(fields.keys())),
('p', '1'),
('ps', '2000'),
)
url = 'https://datacenter.eastmoney.com/securities/api/data/get'
response = requests.get(
url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
    if not items:
        return pd.DataFrame(columns=fields.values())
    df = pd.DataFrame(items)
df = df.rename(columns=fields)
df['报告日期'] = df['报告日期'].apply(lambda x: x.split()[0])
return df
@to_numeric
def get_all_company_performance(date: str = None) -> pd.DataFrame:
"""
    Get quarterly results for Shanghai/Shenzhen stocks
    Parameters
    ----------
    date : str, optional
        Report date; some options are listed below (defaults to ``None``)
        - ``None`` : latest quarterly report
        - ``'2021-06-30'`` : 2021 Q2 report
        - ``'2021-03-31'`` : 2021 Q1 report
    Returns
    -------
    DataFrame
        Quarterly results for Shanghai/Shenzhen stocks
Examples
---------
>>> import efinance as ef
    >>> # Get the latest quarterly results
>>> ef.stock.get_all_company_performance()
股票代码 股票简称 公告日期 营业收入 营业收入同比增长 营业收入季度环比 净利润 净利润同比增长 净利润季度环比 每股收益 每股净资产 净资产收益率 销售毛利率 每股经营现金流量
0 688981 中芯国际 2021-08-28 00:00:00 1.609039e+10 22.253453 20.6593 5.241321e+09 278.100000 307.8042 0.6600 11.949525 5.20 26.665642 1.182556
1 688819 天能股份 2021-08-28 00:00:00 1.625468e+10 9.343279 23.9092 6.719446e+08 -14.890000 -36.8779 0.7100 11.902912 6.15 17.323263 -1.562187
2 688789 宏华数科 2021-08-28 00:00:00 4.555604e+08 56.418441 6.5505 1.076986e+08 49.360000 -7.3013 1.8900 14.926761 13.51 43.011243 1.421272
3 688681 科汇股份 2021-08-28 00:00:00 1.503343e+08 17.706987 121.9407 1.664509e+07 -13.100000 383.3331 0.2100 5.232517 4.84 47.455511 -0.232395
4 688670 金迪克 2021-08-28 00:00:00 3.209423e+07 -63.282413 -93.1788 -2.330505e+07 -242.275001 -240.1554 -0.3500 3.332254 -10.10 85.308531 1.050348
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
3720 600131 国网信通 2021-07-16 00:00:00 2.880378e+09 6.787087 69.5794 2.171389e+08 29.570000 296.2051 0.1800 4.063260 4.57 19.137437 -0.798689
3721 600644 乐山电力 2021-07-15 00:00:00 1.257030e+09 18.079648 5.7300 8.379727e+07 -14.300000 25.0007 0.1556 3.112413 5.13 23.645137 0.200906
3722 002261 拓维信息 2021-07-15 00:00:00 8.901777e+08 47.505282 24.0732 6.071063e+07 68.320000 30.0596 0.0550 2.351598 2.37 37.047968 -0.131873
3723 601952 苏垦农发 2021-07-13 00:00:00 4.544138e+09 11.754570 47.8758 3.288132e+08 1.460000 83.1486 0.2400 3.888046 6.05 15.491684 -0.173772
3724 601568 北元集团 2021-07-09 00:00:00 6.031506e+09 32.543303 30.6352 1.167989e+09 61.050000 40.8165 0.3200 3.541533 9.01 27.879243 0.389860
    >>> # Get quarterly results for a specific report date
>>> ef.stock.get_all_company_performance('2020-03-31')
股票代码 股票简称 公告日期 营业收入 营业收入同比增长 营业收入季度环比 净利润 净利润同比增长 净利润季度环比 每股收益 每股净资产 净资产收益率 销售毛利率 每股经营现金流量
0 605033 美邦股份 2021-08-25 00:00:00 2.178208e+08 NaN NaN 4.319814e+07 NaN NaN 0.4300 NaN NaN 37.250416 NaN
1 301048 金鹰重工 2021-07-30 00:00:00 9.165528e+07 NaN NaN -2.189989e+07 NaN NaN NaN NaN -1.91 20.227118 NaN
2 001213 中铁特货 2021-07-29 00:00:00 1.343454e+09 NaN NaN -3.753634e+07 NaN NaN -0.0100 NaN NaN -1.400708 NaN
3 605588 冠石科技 2021-07-28 00:00:00 1.960175e+08 NaN NaN 1.906751e+07 NaN NaN 0.3500 NaN NaN 16.324650 NaN
4 688798 艾为电子 2021-07-27 00:00:00 2.469943e+08 NaN NaN 2.707568e+07 NaN NaN 0.3300 NaN 8.16 33.641934 NaN
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
4440 603186 华正新材 2020-04-09 00:00:00 4.117502e+08 -6.844813 -23.2633 1.763252e+07 18.870055 -26.3345 0.1400 5.878423 2.35 18.861255 0.094249
4441 002838 道恩股份 2020-04-09 00:00:00 6.191659e+08 -8.019810 -16.5445 6.939886e+07 91.601624 76.7419 0.1700 2.840665 6.20 22.575224 0.186421
4442 600396 金山股份 2020-04-08 00:00:00 2.023133e+09 0.518504 -3.0629 1.878432e+08 114.304022 61.2733 0.1275 1.511012 8.81 21.422393 0.085698
4443 002913 奥士康 2020-04-08 00:00:00 4.898977e+08 -3.883035 -23.2268 2.524717e+07 -47.239162 -58.8136 0.1700 16.666749 1.03 22.470020 0.552624
4444 002007 华兰生物 2020-04-08 00:00:00 6.775414e+08 -2.622289 -36.1714 2.472864e+08 -4.708821 -22.6345 0.1354 4.842456 3.71 61.408522 0.068341
Notes
-----
    If the supplied date is invalid, the list of selectable dates is printed instead.
    The selectable dates can also be obtained via ``efinance.stock.get_all_report_dates``
"""
    # TODO speed this up
fields = {
'SECURITY_CODE': '股票代码',
'SECURITY_NAME_ABBR': '股票简称',
'NOTICE_DATE': '公告日期',
'TOTAL_OPERATE_INCOME': '营业收入',
'YSTZ': '营业收入同比增长',
'YSHZ': '营业收入季度环比',
'PARENT_NETPROFIT': '净利润',
'SJLTZ': '净利润同比增长',
'SJLHZ': '净利润季度环比',
'BASIC_EPS': '每股收益',
'BPS': '每股净资产',
'WEIGHTAVG_ROE': '净资产收益率',
'XSMLL': '销售毛利率',
'MGJYXJJE': '每股经营现金流量'
# 'ISNEW':'是否最新'
}
dates = get_all_report_dates()['报告日期'].to_list()
if date is None:
date = dates[0]
if date not in dates:
rich.print('日期输入有误,可选日期如下:')
rich.print(dates)
return pd.DataFrame(columns=fields.values())
date = f"(REPORTDATE=\'{date}\')"
page = 1
dfs: List[pd.DataFrame] = []
while 1:
params = (
('st', 'NOTICE_DATE,SECURITY_CODE'),
('sr', '-1,-1'),
('ps', '500'),
('p', f'{page}'),
('type', 'RPT_LICO_FN_CPD'),
('sty', 'ALL'),
('token', '<KEY>'),
            # ! Shanghai/Shenzhen A-shares only
('filter',
f'(SECURITY_TYPE_CODE in ("058001001","058001008")){date}'),
)
url = 'http://datacenter-web.eastmoney.com/api/data/get'
response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
if not items:
break
df = pd.DataFrame(items)
dfs.append(df)
page += 1
if len(dfs) == 0:
df = pd.DataFrame(columns=fields.values())
return df
df = pd.concat(dfs, axis=0, ignore_index=True)
df = df.rename(columns=fields)[fields.values()]
return df
@to_numeric
def get_latest_holder_number(date: str = None) -> pd.DataFrame:
"""
    Get the latest disclosed changes in shareholder counts for Shanghai/Shenzhen A-shares;
    a specific report period can also be requested
    Parameters
    ----------
    date : str, optional
        Report-period date; some options are listed below
        - ``None``  latest report period
        - ``'2021-06-30'``  2021 interim (half-year) report
        - ``'2021-03-31'``  2021 Q1 report
    Returns
    -------
    DataFrame
        Latest (or requested-period) changes in shareholder counts for Shanghai/Shenzhen A-shares
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_latest_holder_number()
股票代码 股票名称 股东人数 股东人数增减 较上期变化百分比 股东户数统计截止日 户均持股市值 户均持股数量 总市值 总股本 公告日期
0 301029 怡合达 12021 -1.636527 -200.0 2021-09-30 00:00:00 2.790187e+06 33275.933783 3.354084e+10 400010000 2021-10-09 00:00:00
1 301006 迈拓股份 10964 -0.463005 -51.0 2021-09-30 00:00:00 3.493433e+05 12703.392922 3.830200e+09 139280000 2021-10-09 00:00:00
2 301003 江苏博云 11642 -2.658863 -318.0 2021-09-30 00:00:00 2.613041e+05 5004.867463 3.042103e+09 58266667 2021-10-09 00:00:00
3 300851 交大思诺 12858 -2.752987 -364.0 2021-09-30 00:00:00 2.177054e+05 6761.035931 2.799255e+09 86933400 2021-10-09 00:00:00
4 300830 金现代 34535 -16.670688 -6909.0 2021-09-30 00:00:00 2.001479e+05 12454.756045 6.912109e+09 430125000 2021-10-09 00:00:00
... ... ... ... ... ... ... ... ... ... ... ...
4435 600618 氯碱化工 45372 -0.756814 -346.0 2014-06-30 00:00:00 1.227918e+05 16526.491581 5.571311e+09 749839976 2014-08-22 00:00:00
4436 601880 辽港股份 89923 -3.589540 -3348.0 2014-03-31 00:00:00 9.051553e+04 37403.111551 8.139428e+09 3363400000 2014-04-30 00:00:00
4437 600685 中船防务 52296 -4.807325 -2641.0 2014-03-11 00:00:00 1.315491e+05 8384.263691 6.879492e+09 438463454 2014-03-18 00:00:00
4438 000017 深中华A 21358 -10.800200 -2586.0 2013-06-30 00:00:00 5.943993e+04 14186.140556 1.269518e+09 302987590 2013-08-24 00:00:00
4439 601992 金隅集团 66736 -12.690355 -9700.0 2013-06-30 00:00:00 2.333339e+05 46666.785918 1.557177e+10 3114354625 2013-08-22 00:00:00
>>> ef.stock.get_latest_holder_number(date='2021-06-30')
股票代码 股票名称 股东人数 股东人数增减 较上期变化百分比 股东户数统计截止日 户均持股市值 户均持股数量 总市值 总股本 公告日期
0 688768 容知日新 24 0.000000 0.0 2021-06-30 00:00:00 NaN 1.714395e+06 NaN 41145491 2021-08-31 00:00:00
1 688669 聚石化学 8355 -11.135929 -1047.0 2021-06-30 00:00:00 3.662956e+05 1.117096e+04 3.060400e+09 93333334 2021-08-31 00:00:00
2 688613 奥精医疗 8768 -71.573999 -22077.0 2021-06-30 00:00:00 1.380627e+06 1.520681e+04 1.210533e+10 133333334 2021-08-31 00:00:00
3 688586 江航装备 20436 -5.642257 -1222.0 2021-06-30 00:00:00 5.508121e+05 1.975653e+04 1.125640e+10 403744467 2021-08-31 00:00:00
4 688559 海目星 7491 -16.460355 -1476.0 2021-06-30 00:00:00 8.071019e+05 2.669871e+04 6.046000e+09 200000000 2021-08-31 00:00:00
... ... ... ... ... ... ... ... ... ... ... ...
4292 002261 拓维信息 144793 0.931290 1336.0 2021-06-30 00:00:00 7.731589e+04 7.602349e+03 1.119480e+10 1100766874 2021-07-15 00:00:00
4293 002471 中超控股 75592 1.026409 768.0 2021-06-30 00:00:00 4.864536e+04 1.677426e+04 3.677200e+09 1268000000 2021-07-12 00:00:00
4294 600093 *ST易见 52497 -2.118099 -1136.0 2021-06-30 00:00:00 1.267904e+05 2.138117e+04 6.656114e+09 1122447500 2021-07-06 00:00:00
4295 688091 上海谊众 25 0.000000 0.0 2021-06-30 00:00:00 NaN 3.174000e+06 NaN 79350000 2021-07-02 00:00:00
4296 301053 远信工业 10 0.000000 0.0 2021-06-30 00:00:00 NaN 6.131250e+06 NaN 61312500 2021-06-30 00:00:00
"""
dfs: List[pd.DataFrame] = []
if date is not None:
date: datetime = datetime.strptime(date, '%Y-%m-%d')
year = date.year
month = date.month
if month % 3 != 0:
month -= month % 3
        # TODO improve handling of dates whose month is valid but which are not the last day of that month
if month < 3:
year -= 1
            # NOTE corresponds to the last month of the previous year
month = 12
_, last_day = calendar.monthrange(year, month)
date: str = datetime.strptime(
f'{year}-{month}-{last_day}', '%Y-%m-%d').strftime('%Y-%m-%d')
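        # e.g. a requested date of '2021-05-20' is rounded down to the quarter end '2021-03-31'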
page = 1
fields = {
'SECURITY_CODE': '股票代码',
'SECURITY_NAME_ABBR': '股票名称',
'HOLDER_NUM': '股东人数',
'HOLDER_NUM_RATIO': '股东人数增减',
'HOLDER_NUM_CHANGE': '较上期变化百分比',
'END_DATE': '股东户数统计截止日',
'AVG_MARKET_CAP': '户均持股市值',
'AVG_HOLD_NUM': '户均持股数量',
'TOTAL_MARKET_CAP': '总市值',
'TOTAL_A_SHARES': '总股本',
'HOLD_NOTICE_DATE': '公告日期'
}
while 1:
params = [
('sortColumns', 'HOLD_NOTICE_DATE,SECURITY_CODE'),
('sortTypes', '-1,-1'),
('pageSize', '500'),
('pageNumber', page),
('columns', 'SECURITY_CODE,SECURITY_NAME_ABBR,END_DATE,INTERVAL_CHRATE,AVG_MARKET_CAP,AVG_HOLD_NUM,TOTAL_MARKET_CAP,TOTAL_A_SHARES,HOLD_NOTICE_DATE,HOLDER_NUM,PRE_HOLDER_NUM,HOLDER_NUM_CHANGE,HOLDER_NUM_RATIO,END_DATE,PRE_END_DATE'),
('quoteColumns', 'f2,f3'),
('source', 'WEB'),
('client', 'WEB'),
]
if date is not None:
            # NOTE the surrounding \' characters must not be omitted
params.append(('filter', f'(END_DATE=\'{date}\')'))
params.append(('reportName', 'RPT_HOLDERNUM_DET'))
else:
params.append(('reportName', 'RPT_HOLDERNUMLATEST'))
params = tuple(params)
url = 'http://datacenter-web.eastmoney.com/api/data/v1/get'
response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
if not items:
break
df = pd.DataFrame(items)
df = df.rename(columns=fields)[fields.values()]
page += 1
dfs.append(df)
if len(dfs) == 0:
df = pd.DataFrame(columns=fields.values())
return df
df = pd.concat(dfs, ignore_index=True)
return df
@to_numeric
@retry(tries=3)
def get_daily_billboard(start_date: str = None,
end_date: str = None) -> pd.DataFrame:
"""
    Get Dragon-Tiger List (daily billboard) details for a date range
    Parameters
    ----------
    start_date : str, optional
        Start date
        Some options are listed below
        - ``None``  most recent billboard publication date (default)
        - ``"2021-08-27"``  August 27, 2021
    end_date : str, optional
        End date
        Some options are listed below
        - ``None``  most recent billboard publication date (default)
        - ``"2021-08-31"``  August 31, 2021
    Returns
    -------
    DataFrame
        Billboard details
Examples
--------
>>> import efinance as ef
    >>> # Get the most recently published billboard data (an example for a date range follows below)
>>> ef.stock.get_daily_billboard()
股票代码 股票名称 上榜日期 解读 收盘价 涨跌幅 换手率 龙虎榜净买额 龙虎榜买入额 龙虎榜卖出额 龙虎榜成交额 市场总成交额 净买额占总成交比 成交额占总成交比 流通市值 上榜原因
0 000608 阳光股份 2021-08-27 卖一主卖,成功率48.36% 3.73 -9.9034 3.8430 -8.709942e+06 1.422786e+07 2.293780e+07 3.716565e+07 110838793 -7.858208 33.531268 2.796761e+09 日跌幅偏离值达到7%的前5只证券
1 000751 锌业股份 2021-08-27 主力做T,成功率18.84% 5.32 -2.9197 19.6505 -1.079219e+08 5.638899e+07 1.643109e+08 2.206999e+08 1462953973 -7.376984 15.085906 7.500502e+09 日振幅值达到15%的前5只证券
2 000762 西藏矿业 2021-08-27 北京资金买入,成功率39.42% 63.99 1.0741 15.6463 2.938758e+07 4.675541e+08 4.381665e+08 9.057206e+08 4959962598 0.592496 18.260633 3.332571e+10 日振幅值达到15%的前5只证券
3 000833 粤桂股份 2021-08-27 实力游资买入,成功率44.55% 8.87 10.0496 8.8263 4.993555e+07 1.292967e+08 7.936120e+07 2.086580e+08 895910429 5.573721 23.290046 3.353614e+09 连续三个交易日内,涨幅偏离值累计达到20%的证券
4 001208 华菱线缆 2021-08-27 1家机构买入,成功率40.43% 19.72 4.3386 46.1985 4.055258e+07 1.537821e+08 1.132295e+08 2.670117e+08 1203913048 3.368398 22.178651 2.634710e+09 日换手率达到20%的前5只证券
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
70 688558 国盛智科 2021-08-27 买一主买,成功率38.71% 60.72 1.6064 34.0104 1.835494e+07 1.057779e+08 8.742293e+07 1.932008e+08 802569300 2.287023 24.072789 2.321743e+09 有价格涨跌幅限制的日换手率达到30%的前五只证券
71 688596 正帆科技 2021-08-27 1家机构买入,成功率57.67% 26.72 3.1660 3.9065 -1.371039e+07 8.409046e+07 9.780085e+07 1.818913e+08 745137400 -1.839982 24.410438 4.630550e+09 有价格涨跌幅限制的连续3个交易日内收盘价格涨幅偏离值累计达到30%的证券
72 688663 新风光 2021-08-27 卖一主卖,成功率37.18% 28.17 -17.6316 32.2409 1.036460e+07 5.416901e+07 4.380440e+07 9.797341e+07 274732700 3.772613 35.661358 8.492507e+08 有价格涨跌幅限制的日收盘价格跌幅达到15%的前五只证券
73 688663 新风光 2021-08-27 卖一主卖,成功率37.18% 28.17 -17.6316 32.2409 1.036460e+07 5.416901e+07 4.380440e+07 9.797341e+07 274732700 3.772613 35.661358 8.492507e+08 有价格涨跌幅限制的日换手率达到30%的前五只证券
74 688667 菱电电控 2021-08-27 1家机构卖出,成功率49.69% 123.37 -18.8996 17.7701 -2.079877e+06 4.611216e+07 4.819204e+07 9.430420e+07 268503400 -0.774618 35.122163 1.461225e+09 有价格涨跌幅限制的日收盘价格跌幅达到15%的前五只证券
    >>> # Get billboard data for a specific date range
    >>> start_date = '2021-08-20' # start date
    >>> end_date = '2021-08-27' # end date
>>> ef.stock.get_daily_billboard(start_date = start_date,end_date = end_date)
股票代码 股票名称 上榜日期 解读 收盘价 涨跌幅 换手率 龙虎榜净买额 龙虎榜买入额 龙虎榜卖出额 龙虎榜成交额 市场总成交额 净买额占总成交比 成交额占总成交比 流通市值 上榜原因
0 000608 阳光股份 2021-08-27 卖一主卖,成功率48.36% 3.73 -9.9034 3.8430 -8.709942e+06 1.422786e+07 2.293780e+07 3.716565e+07 110838793 -7.858208 33.531268 2.796761e+09 日跌幅偏离值达到7%的前5只证券
1 000751 锌业股份 2021-08-27 主力做T,成功率18.84% 5.32 -2.9197 19.6505 -1.079219e+08 5.638899e+07 1.643109e+08 2.206999e+08 1462953973 -7.376984 15.085906 7.500502e+09 日振幅值达到15%的前5只证券
2 000762 西藏矿业 2021-08-27 北京资金买入,成功率39.42% 63.99 1.0741 15.6463 2.938758e+07 4.675541e+08 4.381665e+08 9.057206e+08 4959962598 0.592496 18.260633 3.332571e+10 日振幅值达到15%的前5只证券
3 000833 粤桂股份 2021-08-27 实力游资买入,成功率44.55% 8.87 10.0496 8.8263 4.993555e+07 1.292967e+08 7.936120e+07 2.086580e+08 895910429 5.573721 23.290046 3.353614e+09 连续三个交易日内,涨幅偏离值累计达到20%的证券
4 001208 华菱线缆 2021-08-27 1家机构买入,成功率40.43% 19.72 4.3386 46.1985 4.055258e+07 1.537821e+08 1.132295e+08 2.670117e+08 1203913048 3.368398 22.178651 2.634710e+09 日换手率达到20%的前5只证券
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
414 605580 恒盛能源 2021-08-20 买一主买,成功率33.33% 13.28 10.0249 0.4086 2.413149e+06 2.713051e+06 2.999022e+05 3.012953e+06 2713051 88.945937 111.054054 6.640000e+08 有价格涨跌幅限制的日收盘价格涨幅偏离值达到7%的前三只证券
415 688029 南微医学 2021-08-20 4家机构卖出,成功率55.82% 204.61 -18.5340 8.1809 -1.412053e+08 1.883342e+08 3.295394e+08 5.178736e+08 762045800 -18.529760 67.958326 9.001510e+09 有价格涨跌幅限制的日收盘价格跌幅达到15%的前五只证券
416 688408 中信博 2021-08-20 4家机构卖出,成功率47.86% 179.98 -0.0666 15.3723 -4.336304e+07 3.750919e+08 4.184550e+08 7.935469e+08 846547400 -5.122340 93.739221 5.695886e+09 有价格涨跌幅限制的日价格振幅达到30%的前五只证券
417 688556 高测股份 2021-08-20 上海资金买入,成功率60.21% 51.97 17.0495 10.6452 -3.940045e+07 1.642095e+08 2.036099e+08 3.678194e+08 575411600 -6.847351 63.922831 5.739089e+09 有价格涨跌幅限制的日收盘价格涨幅达到15%的前五只证券
418 688636 智明达 2021-08-20 2家机构买入,成功率47.37% 161.90 15.8332 11.9578 2.922406e+07 6.598126e+07 3.675721e+07 1.027385e+08 188330100 15.517464 54.552336 1.647410e+09 有价格涨跌幅限制的日收盘价格涨幅达到15%的前五只证券
"""
today = datetime.today().date()
mode = 'auto'
if start_date is None:
start_date = today
if end_date is None:
end_date = today
if isinstance(start_date, str):
mode = 'user'
start_date = datetime.strptime(start_date, '%Y-%m-%d')
if isinstance(end_date, str):
mode = 'user'
end_date = datetime.strptime(end_date, '%Y-%m-%d')
fields = EASTMONEY_STOCK_DAILY_BILL_BOARD_FIELDS
bar: tqdm = None
while 1:
dfs: List[pd.DataFrame] = []
page = 1
while 1:
params = (
('sortColumns', 'TRADE_DATE,SECURITY_CODE'),
('sortTypes', '-1,1'),
('pageSize', '500'),
('pageNumber', page),
('reportName', 'RPT_DAILYBILLBOARD_DETAILS'),
('columns', 'ALL'),
('source', 'WEB'),
('client', 'WEB'),
('filter',
f"(TRADE_DATE<='{end_date}')(TRADE_DATE>='{start_date}')"),
)
url = 'http://datacenter-web.eastmoney.com/api/data/v1/get'
response = session.get(url, params=params)
if bar is None:
pages = jsonpath(response.json(), '$..pages')
if pages and pages[0] != 1:
total = pages[0]
bar = tqdm(total=int(total))
if bar is not None:
bar.update()
items = jsonpath(response.json(), '$..data[:]')
if not items:
break
page += 1
df = pd.DataFrame(items).rename(columns=fields)[fields.values()]
dfs.append(df)
if mode == 'user':
break
if len(dfs) == 0:
start_date = start_date-timedelta(1)
end_date = end_date-timedelta(1)
if len(dfs) > 0:
break
if len(dfs) == 0:
df = pd.DataFrame(columns=fields.values())
return df
df = pd.concat(dfs, ignore_index=True)
df['上榜日期'] = df['上榜日期'].astype('str').apply(lambda x: x.split(' ')[0])
return df
def get_members(index_code: str) -> pd.DataFrame:
"""
Fetch index constituent (member) information.
Parameters
----------
index_code : str
Index name or index code
Returns
-------
DataFrame
Index constituent information
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_members('000300')
指数代码 指数名称 股票代码 股票名称 股票权重
0 000300 沪深300 600519 贵州茅台 4.77
1 000300 沪深300 601398 工商银行 3.46
2 000300 沪深300 601939 建设银行 3.12
3 000300 沪深300 600036 招商银行 2.65
4 000300 沪深300 601857 中国石油 2.37
.. ... ... ... ... ...
295 000300 沪深300 688126 沪硅产业 NaN
296 000300 沪深300 688169 石头科技 NaN
297 000300 沪深300 688036 传音控股 NaN
298 000300 沪深300 688009 中国通号 NaN
299 000300 沪深300 688008 澜起科技 NaN
>>> ef.stock.get_members('中证白酒')
指数代码 指数名称 股票代码 股票名称 股票权重
0 399997 中证白酒 600519 贵州茅台 49.25
1 399997 中证白酒 000858 五粮液 18.88
2 399997 中证白酒 600809 山西汾酒 8.45
3 399997 中证白酒 000568 泸州老窖 7.03
4 399997 中证白酒 002304 洋河股份 5.72
5 399997 中证白酒 000596 古井贡酒 2.76
6 399997 中证白酒 000799 酒鬼酒 1.77
7 399997 中证白酒 600779 水井坊 1.36
8 399997 中证白酒 603369 今世缘 1.26
9 399997 中证白酒 603198 迎驾贡酒 0.89
10 399997 中证白酒 603589 口子窖 0.67
11 399997 中证白酒 000860 顺鑫农业 0.59
12 399997 中证白酒 600559 老白干酒 0.44
13 399997 中证白酒 603919 金徽酒 0.39
14 399997 中证白酒 600197 伊力特 0.28
15 399997 中证白酒 600199 金种子酒 0.26
"""
fields = {
'IndexCode': '指数代码',
'IndexName': '指数名称',
'StockCode': '股票代码',
'StockName': '股票名称',
'MARKETCAPPCT': '股票权重'
}
qs = search_quote(index_code, count=10)
df = pd.DataFrame(columns=fields.values())
if not qs:
return df
for q in qs:
if q.security_typeName == '指数':
params = (
('IndexCode', f'{q.code}'),
('pageIndex', '1'),
('pageSize', '10000'),
('deviceid', '1234567890'),
('version', '6.9.9'),
('product', 'EFund'),
('plat', 'Iphone'),
('ServerVersion', '6.9.9'),
)
url = 'https://fundztapi.eastmoney.com/FundSpecialApiNew/FundSpecialZSB30ZSCFG'
json_response = requests.get(
url,
params=params,
headers=EASTMONEY_REQUEST_HEADERS).json()
items = json_response['Datas']
# NOTE: skip indices that rank high in the search results but expose no constituents, e.g. 980031 when searching for 白酒
if not items:
continue
df: pd.DataFrame = pd.DataFrame(items).rename(
columns=fields)[fields.values()]
df['股票权重'] = pd.to_numeric(df['股票权重'], errors='coerce')
return df
return df
def get_latest_ipo_info() -> pd.DataFrame:
"""
Fetch the IPO review status of companies.
Returns
-------
DataFrame
IPO review status of companies
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_latest_ipo_info()
发行人全称 审核状态 注册地 证监会行业 保荐机构 会计师事务所 更新日期 受理日期 拟上市地点
0 郑州众智科技股份有限公司 已问询 河南 电气机械和器材制造业 民生证券股份有限公司 信永中和会计师事务所(特殊普通合伙) 2021-10-09 00:00:00 2021-06-24 00:00:00 创业板
1 成都盛帮密封件股份有限公司 已问询 四川 橡胶和塑料制品业 国金证券股份有限公司 中审众环会计师事务所(特殊普通合伙) 2021-10-09 00:00:00 2020-12-08 00:00:00 创业板
2 恒勃控股股份有限公司 已问询 浙江 汽车制造业 中信建投证券股份有限公司 中汇会计师事务所(特殊普通合伙) 2021-10-08 00:00:00 2021-09-06 00:00:00 创业板
3 深圳英集芯科技股份有限公司 已问询 广东 计算机、通信和其他电子设备制造业 华泰联合证券有限责任公司 容诚会计师事务所(特殊普通合伙) 2021-10-08 00:00:00 2021-06-10 00:00:00 科创板
4 苏州长光华芯光电技术股份有限公司 上市委会议通过 江苏 计算机、通信和其他电子设备制造业 华泰联合证券有限责任公司 天衡会计师事务所(特殊普通合伙) 2021-10-08 00:00:00 2021-06-24 00:00:00 科创板
... ... ... .. ... ... ... ... ... ...
1376 澜起科技股份有限公司 注册生效 上海 计算机、通信和其他电子设备制造业 中信证券股份有限公司 瑞华会计师事务所(特殊普通合伙) 2019-06-26 00:00:00 2019-04-01 00:00:00 科创板
1377 浙江杭可科技股份有限公司 注册生效 浙江 专用设备制造业 国信证券股份有限公司 天健会计师事务所(特殊普通合伙) 2019-06-24 00:00:00 2019-04-15 00:00:00 科创板
1378 苏州天准科技股份有限公司 注册生效 江苏 专用设备制造业 海通证券股份有限公司 瑞华会计师事务所(特殊普通合伙) 2019-06-20 00:00:00 2019-04-02 00:00:00 科创板
1379 烟台睿创微纳技术股份有限公司 注册生效 山东 计算机、通信和其他电子设备制造业 中信证券股份有限公司 信永中和会计师事务所(特殊普通合伙) 2019-06-18 00:00:00 2019-03-22 00:00:00 科创板
1380 苏州华兴源创科技股份有限公司 注册生效 江苏 专用设备制造业 华泰联合证券有限责任公司 华普天健会计师事务所(特殊普通合伙) 2019-06-18 00:00:00 2019-03-27 00:00:00 科创板
"""
fields = {
# 'ORG_CODE':'发行人代码',
'ISSUER_NAME': '发行人全称',
'CHECK_STATUS': '审核状态',
'REG_ADDRESS': '注册地',
'CSRC_INDUSTRY': '证监会行业',
'RECOMMEND_ORG': '保荐机构',
'ACCOUNT_FIRM': '会计师事务所',
'UPDATE_DATE': '更新日期',
'ACCEPT_DATE': '受理日期',
'TOLIST_MARKET': '拟上市地点'
}
df = pd.DataFrame(columns=fields.values())
dfs: List[pd.DataFrame] = []
page = 1
while 1:
params = (
('st', 'UPDATE_DATE,SECURITY_CODE'),
('sr', '-1,-1'),
('ps', '500'),
('p', page),
('type', 'RPT_REGISTERED_INFO'),
('sty', 'ORG_CODE,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE'),
('token', '<KEY>'),
('client', 'WEB'),
)
url = 'http://datacenter-web.eastmoney.com/api/data/get'
json_response = requests.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
items = jsonpath(json_response, '$..data[:]')
if not items:
break
page += 1
df = pd.DataFrame(items)
import builtins
from io import StringIO
from itertools import product
from string import ascii_lowercase
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int'])
for attr in ['prod', 'cumprod']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(['int', 'float',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int',
'timedelta'])
for attr in ['cumsum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, 'baz']],
columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name='A')
expected_col = pd.MultiIndex(levels=[['B'],
['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max']],
codes=[[0] * 8, list(range(8))])
expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]],
index=expected_index,
columns=expected_col)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T])
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame(
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
], columns=['A', 'B', 'C'])
expected = DataFrame(
[[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby('A', as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby('A').cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby('A').cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [
'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
@pytest.mark.parametrize("method,data", [
('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]}),
('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'})
])
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}])
df['b'] = df.b.astype(dtype)
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df.groupby('a')
t = getattr(grpd, method)(*data['args'])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize("i", [
(Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448")),
(24650000000000001, 24650000000000002)
])
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1],
"args": [1]},
"count": {"expected": 2}}
for method, data in grp_exp.items():
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
@pytest.mark.parametrize("func, values", [
("idxmin", {'c_int': [0, 2], 'c_float': [1, 3], 'c_date': [1, 2]}),
("idxmax", {'c_int': [1, 3], 'c_float': [0, 2], 'c_date': [0, 3]})
])
def test_idxmin_idxmax_returns_int_types(func, values):
# GH 25444
df = pd.DataFrame({'name': ['A', 'A', 'B', 'B'],
'c_int': [1, 2, 3, 4],
'c_float': [4.02, 3.03, 2.04, 1.05],
'c_date': ['2019', '2018', '2016', '2017']})
df['c_date'] = pd.to_datetime(df['c_date'])
result = getattr(df.groupby('name'), func)()
expected = pd.DataFrame(values, index=Index(['A', 'B'], name="name"))
tm.assert_frame_equal(result, expected)
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(index=pd.MultiIndex.from_product(
[['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
columns=Index(
['1', '2'], name='id'))
df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
np.nan, 22, np.nan]
df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
np.nan, 44, np.nan]
expected = df.groupby(level=0, axis=0).fillna(method='ffill')
result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({'key': ['b'] * 100, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df['value'] = df['value'].astype(float)
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
def test_ops_general():
ops = [('mean', np.mean),
('median', np.median),
('std', np.std),
('var', np.var),
('sum', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
('count', np.size), ]
try:
from scipy.stats import sem
except ImportError:
pass
else:
ops.append(('sem', sem))
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op, )
raise
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby('Date')
r = gb[['File']].max()
e = gb['File'].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r['File'].isna().any()
def test_nlargest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series([
7, 5, 3, 10, 9, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
3, 2, 1, 3, 3, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
tm.assert_series_equal(gb.nlargest(3, keep='last'), e)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series([
1, 2, 3, 0, 4, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
0, 1, 1, 0, 1, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
tm.assert_series_equal(gb.nsmallest(3, keep='last'), e)
@pytest.mark.parametrize("func", [
'mean', 'var', 'std', 'cumprod', 'cumsum'
])
def test_numpy_compat(func):
# see gh-12811
df = pd.DataFrame({'A': [1, 2, 1], 'B': [1, 2, 3]})
g = df.groupby('A')
msg = "numpy operations are not valid with groupby"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(foo=1)
def test_cummin_cummax():
# GH 15048
num_types = [np.int32, np.int64, np.float32, np.float64]
num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min,
np.finfo(np.float32).min, np.finfo(np.float64).min]
num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max,
np.finfo(np.float32).max, np.finfo(np.float64).max]
base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2],
'B': [3, 4, 3, 2, 2, 3, 2, 1]})
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
for dtype, min_val, max_val in zip(num_types, num_mins, num_max):
df = base_df.astype(dtype)
# cummin
expected = pd.DataFrame({'B': expected_mins}).astype(dtype)
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummin w/ min value for dtype
df.loc[[2, 6], 'B'] = min_val
expected.loc[[2, 3, 6, 7], 'B'] = min_val
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# cummax
expected = pd.DataFrame({'B': expected_maxs}).astype(dtype)
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummax w/ max value for dtype
df.loc[[2, 6], 'B'] = max_val
expected.loc[[2, 3, 6, 7], 'B'] = max_val
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], 'B'] = np.nan
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 2,
np.nan, 3, np.nan, 1]})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummin())
.to_frame())
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 4,
np.nan, 3, np.nan, 3]})
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummax())
.to_frame())
tm.assert_frame_equal(result, expected)
# Test nan in entire column
base_df['B'] = np.nan
expected = pd.DataFrame({'B': [np.nan] * 8})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
expected = pd.Series(pd.to_datetime('2001'), index=[0], name='b')
for method in ['cummax', 'cummin']:
result = getattr(df.groupby('a')['b'], method)()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
result = df.groupby('a').b.cummax()
expected = pd.Series([2, 1, 2], name='b')
tm.assert_series_equal(result, expected)
df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
result = df.groupby('a').b.cummin()
expected = pd.Series([1, 2, 1], name='b')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1],
[True, False, False, True]),
# Test with inf vals
([1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_increasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_increasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# Also check result equal to manually taking x.is_monotonic_increasing.
expected = (
df.groupby(['B']).C.apply(lambda x: x.is_monotonic_increasing))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly decreasing (T), strictly increasing (F),
# abs val decreasing (F), non-strictly increasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1],
[True, False, False, True]),
# Test with inf vals
([np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_decreasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_decreasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# describe
# --------------------------------
def test_apply_describe_bug(mframe):
grouped = mframe.groupby(level='first')
grouped.describe() # it works!
def test_series_describe_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
tm.assert_series_equal(result['mean'], grouped.mean(),
check_names=False)
tm.assert_series_equal(result['std'], grouped.std(), check_names=False)
tm.assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single():
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
tm.assert_series_equal(result, expected)
def test_series_index_name(df):
grouped = df.loc[:, ['C']].groupby(df['A'])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == 'A'
def test_frame_describe_multikey(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
codes=[[0] * len(group.columns), range(len(group.columns))])
group = pd.DataFrame(group.values,
columns=group_col,
index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
codes=[[0, 0, 1, 1], range(len(expected.index))])
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex():
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
'y': [10, 20, 30, 40, 50] * 3,
'z': [100, 200, 300, 400, 500] * 3})
df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={'k': 'key'})
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
df1.groupby('k').describe()
with pytest.raises(ValueError, match=msg):
df2.groupby('key').describe()
def test_frame_describe_unstacked_format():
# GH 4792
prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
df = pd.DataFrame({'PRICE': prices,
'VOLUME': volumes})
result = df.groupby('PRICE').VOLUME.describe()
data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
expected = pd.DataFrame(data,
index=pd.Index([24990, 25499], name='PRICE'),
columns=['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# nunique
# --------------------------------
@pytest.mark.parametrize('n', 10 ** np.arange(2, 6))
@pytest.mark.parametrize('m', [10, 100, 1000])
@pytest.mark.parametrize('sort', [False, True])
@pytest.mark.parametrize('dropna', [False, True])
def test_series_groupby_nunique(n, m, sort, dropna):
def check_nunique(df, keys, as_index=True):
gr = df.groupby(keys, as_index=as_index, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, as_index=as_index, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
if not as_index:
right = right.reset_index(drop=True)
tm.assert_series_equal(left, right, check_names=False)
days = date_range('2015-08-23', periods=10)
frame = DataFrame({'jim': np.random.choice(list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
check_nunique(frame, ['jim'], as_index=False)
check_nunique(frame, ['jim', 'joe'], as_index=False)
def test_nunique():
df = DataFrame({
'A': list('abbacc'),
'B': list('abxacc'),
'C': list('abbacx'),
})
expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
result = df.groupby('A', as_index=False).nunique()
tm.assert_frame_equal(result, expected)
# as_index
expected.index = list('abc')
expected.index.name = 'A'
result = df.groupby('A').nunique()
tm.assert_frame_equal(result, expected)
# with na
result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
tm.assert_frame_equal(result, expected)
# dropna
expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
index=list('abc'))
expected.index.name = 'A'
result = df.replace({'x': None}).groupby('A').nunique()
tm.assert_frame_equal(result, expected)
def test_nunique_with_object():
# GH 11077
data = pd.DataFrame(
[[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[500, 5, 'Edith']],
columns=['amount', 'id', 'name']
)
result = data.groupby(['id', 'amount'])['name'].nunique()
index = MultiIndex.from_arrays([data.id, data.amount])
expected = pd.Series([1] * 5, name='name', index=index)
tm.assert_series_equal(result, expected)
def test_nunique_with_empty_series():
# GH 12553
data = pd.Series(name='name')
result = data.groupby(level=0).nunique()
expected = pd.Series(name='name', dtype='int64')
tm.assert_series_equal(result, expected)
def test_nunique_with_timegrouper():
# GH 13453
test = pd.DataFrame({
'time': [Timestamp('2016-06-28 09:35:35'),
Timestamp('2016-06-28 16:09:30'),
Timestamp('2016-06-28 16:46:28')],
'data': ['1', '2', '3']}).set_index('time')
result = test.groupby(pd.Grouper(freq='h'))['data'].nunique()
expected = test.groupby(
pd.Grouper(freq='h')
)['data'].apply(pd.Series.nunique)
tm.assert_series_equal(result, expected)
def test_nunique_preserves_column_level_names():
# GH 23222
test = pd.DataFrame([1, 2, 2],
columns=pd.Index(['A'], name="level_0"))
result = test.groupby([0, 0, 0]).nunique()
expected = pd.DataFrame([2], columns=test.columns)
tm.assert_frame_equal(result, expected)
# count
# --------------------------------
def test_groupby_timedelta_cython_count():
df = DataFrame({'g': list('ab' * 2),
'delt': np.arange(4).astype('timedelta64[ns]')})
expected = Series([
2, 2
], index=pd.Index(['a', 'b'], name='g'), name='delt')
result = df.groupby('g').delt.count()
tm.assert_series_equal(expected, result)
def test_count():
n = 1 << 15
dr = date_range('2015-08-30', periods=n // 10, freq='T')
df = DataFrame({
'1st': np.random.choice(
list(ascii_lowercase), n),
'2nd': np.random.randint(0, 5, n),
'3rd': np.random.randn(n).round(3),
'4th': np.random.randint(-10, 10, n),
'5th': np.random.choice(dr, n),
'6th': np.random.randn(n).round(3),
'7th': np.random.randn(n).round(3),
'8th': np.random.choice(dr, n) - np.random.choice(dr, 1),
'9th': np.random.choice(
list(ascii_lowercase), n)
})
for col in df.columns.drop(['1st', '2nd', '4th']):
df.loc[np.random.choice(n, n // 10), col] = np.nan
df['9th'] = df['9th'].astype('category')
for key in '1st', '2nd', ['1st', '2nd']:
left = df.groupby(key).count()
right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
tm.assert_frame_equal(left, right)
# GH5610
# count counts non-nulls
df = pd.DataFrame([[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, np.nan]],
columns=['A', 'B', 'C'])
count_as = df.groupby('A').count()
count_not_as = df.groupby('A', as_index=False).count()
expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
tm.assert_frame_equal(count_not_as, expected.reset_index())
tm.assert_frame_equal(count_as, expected)
count_B = df.groupby('A')['B'].count()
tm.assert_series_equal(count_B, expected['B'])
def test_count_object():
df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, 'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
3, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3,
'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
1, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
def test_count_cross_type():
# GH8169
vals = np.hstack((np.random.randint(0, 5, (100, 2)), np.random.randint(
0, 2, (100, 2))))
df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd'])
df[df == 2] = np.nan
expected = df.groupby(['c', 'd']).count()
for t in ['float32', 'object']:
df['a'] = df['a'].astype(t)
df['b'] = df['b'].astype(t)
result = df.groupby(['c', 'd']).count()
tm.assert_frame_equal(result, expected)
def test_lower_int_prec_count():
df = DataFrame({'a': np.array(
[0, 1, 2, 100], np.int8),
'b': np.array(
[1, 2, 3, 6], np.uint32),
'c': np.array(
[4, 5, 6, 8], np.int16),
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2],
'b': [2, 2],
'c': [2, 2]}, index=pd.Index(list('ab'),
name='grp'))
tm.assert_frame_equal(result, expected)
def test_count_uses_size_on_exception():
class RaisingObjectException(Exception):
pass
class RaisingObject(object):
def __init__(self, msg='I will raise inside Cython'):
super(RaisingObject, self).__init__()
self.msg = msg
def __eq__(self, other):
# gets called in Cython to check that raising calls the method
raise RaisingObjectException(self.msg)
df = DataFrame({'a': [RaisingObject() for _ in range(4)],
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2]}, index=pd.Index(
list('ab'), name='grp'))
tm.assert_frame_equal(result, expected)
# size
# --------------------------------
def test_size(df):
grouped = df.groupby(['A', 'B'])
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
grouped = df.groupby('A')
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
grouped = df.groupby('B')
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
df = DataFrame(np.random.choice(20, (1000, 3)), columns=list('abc'))
for sort, key in product((False, True), ('a', 'b', ['a', 'b'])):
left = df.groupby(key, sort=sort).size()
right = df.groupby(key, sort=sort)['c'].apply(lambda a: a.shape[0])
tm.assert_series_equal(left, right, check_names=False)
# GH11699
df = DataFrame(columns=['A', 'B'])
out = Series(dtype='int64', index=Index([], name='A'))
tm.assert_series_equal(df.groupby('A').size(), out)
def test_size_groupby_all_null():
# GH23050
# Assert no 'Value Error : Length of passed values is 2, index implies 0'
df = DataFrame({'A': [None, None]}) # all-null groups
result = df.groupby('A').size()
expected = Series(dtype='int64', index=Index([], name='A'))
tm.assert_series_equal(result, expected)
# quantile
# --------------------------------
@pytest.mark.parametrize("interpolation", [
"linear", "lower", "higher", "nearest", "midpoint"])
@pytest.mark.parametrize("a_vals,b_vals", [
# Ints
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]),
([1, 2, 3, 4], [4, 3, 2, 1]),
([1, 2, 3, 4, 5], [4, 3, 2, 1]),
# Floats
([1., 2., 3., 4., 5.], [5., 4., 3., 2., 1.]),
# Missing data
([1., np.nan, 3., np.nan, 5.], [5., np.nan, 3., np.nan, 1.]),
([np.nan, 4., np.nan, 2., np.nan], [np.nan, 4., np.nan, 2., np.nan]),
# Timestamps
([x for x in pd.date_range('1/1/18', freq='D', periods=5)],
[x for x in pd.date_range('1/1/18', freq='D', periods=5)][::-1]),
# All NA
([np.nan] * 5, [np.nan] * 5),
])
@pytest.mark.parametrize('q', [0, .25, .5, .75, 1])
def test_quantile(interpolation, a_vals, b_vals, q):
if interpolation == 'nearest' and q == 0.5 and b_vals == [4, 3, 2, 1]:
pytest.skip("Unclear numpy expectation for nearest result with "
"equidistant data")
a_expected = pd.Series(a_vals).quantile(q, interpolation=interpolation)
b_expected = pd.Series(b_vals).quantile(q, interpolation=interpolation)
df = DataFrame({
'key': ['a'] * len(a_vals) + ['b'] * len(b_vals),
'val': a_vals + b_vals})
expected = DataFrame([a_expected, b_expected], columns=['val'],
index=Index(['a', 'b'], name='key'))
result = df.groupby('key').quantile(q, interpolation=interpolation)
tm.assert_frame_equal(result, expected)
def test_quantile_raises():
df = pd.DataFrame([
['foo', 'a'], ['foo', 'b'], ['foo', 'c']], columns=['key', 'val'])
with pytest.raises(TypeError, match="cannot be performed against "
"'object' dtypes"):
df.groupby('key').quantile()
# pipe
# --------------------------------
def test_pipe():
# Test the pipe method of DataFrameGroupBy.
# Issue #17871
random_state = np.random.RandomState(1234567890)
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': random_state.randn(8),
'C': random_state.randn(8)})
def f(dfgb):
return dfgb.B.max() - dfgb.C.min().min()
def square(srs):
return srs ** 2
# Note that the transformations are
# GroupBy -> Series
# Series -> Series
# This then chains the GroupBy.pipe and the
# NDFrame.pipe methods
result = df.groupby('A').pipe(f).pipe(square)
index = Index(['bar', 'foo'], dtype='object', name='A')
expected = pd.Series([8.99110003361, 8.17516964785], name='B',
index=index)
tm.assert_series_equal(expected, result)
def test_pipe_args():
# Test passing args to the pipe method of DataFrameGroupBy.
# Issue #17871
df = pd.DataFrame({'group': ['A', 'A', 'B', 'B', 'C'],
'x': [1.0, 2.0, 3.0, 2.0, 5.0],
'y': [10.0, 100.0, 1000.0, -100.0, -1000.0]})
def f(dfgb, arg1):
return (dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False)
.groupby(dfgb.grouper))
def g(dfgb, arg2):
return dfgb.sum() / dfgb.sum().sum() + arg2
def h(df, arg3):
return df.x + df.y - arg3
result = (df
.groupby('group')
.pipe(f, 0)
.pipe(g, 10)
.pipe(h, 100))
# Assert the results here
index = pd.Index(['A', 'B', 'C'], name='group')
expected = pd.Series([-79.5160891089, -78.4839108911, -80],
index=index)
tm.assert_series_equal(expected, result)
import os
import re
#from tqdm import tqdm
import numpy as np
import pandas as pd
import preprocessor as tp
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 17 08:47:37 2020
@author: titou
"""
import numpy as np
import pandas as pd
from pathlib import Path
from pvtseg import features_3d as f3d
from pvtseg import data_preparation as dtp
from pvtseg import evaluation as ev
import os
import pickle
import shap
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
import matplotlib.pyplot as plt
def BuildDirectoryTree(path="My_data"):
""" Build the directory structure for the segmentation project.
Parameters
----------
path: string, optional
path to the project root
Returns
-------
path_s: Path
Path to the source directory
path_f: Path
Path to the features directory
path_r: Path
Path to the result directory
"""
if not(os.path.exists(path)):
Path(path).mkdir()
path_s = Path(path) / Path('Sources')
path_f = Path(path) / Path('Features')
path_r = Path(path) / Path('Results')
if not(os.path.exists(path_s)):
path_s.mkdir()
if not(os.path.exists(path_f)):
path_f.mkdir()
if not(os.path.exists(path_r)):
path_r.mkdir()
return path_s, path_f, path_r
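# Hedged usage sketch (not part of the original module): BuildDirectoryTree is
# idempotent, so calling it on an existing project root simply returns the three
# sub-directory Paths. "My_segmentation_project" is a made-up example name.
def _example_build_directory_tree():
    # Creates <root>/Sources, <root>/Features and <root>/Results if they are missing.
    path_s, path_f, path_r = BuildDirectoryTree(path="My_segmentation_project")
    return path_s, path_f, path_r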
def PointsSelection(annotations_file,
mask_file = "auto",
box_file_list = "auto",
vessel_prop = 0.5,
points = 2000,
):
"""
Build a set of points, balanced between classes and annotation areas, to be
used for model training.
The result of this method is passed to the BuildDataFrame points_set option.
Parameters
----------
annotations_file : str or Path
    Path to the annotation file: a 3d array where points of each class i
    have the value i and unlabelled points the value 0.
    The first class has to be the vessel annotations
    (or the object of interest to be segmented).
mask_file : str, optional
    Path to the mask file. The default is "auto" for a mask covering the whole image.
box_file_list : list, optional
    List of paths to box files, i.e. the boxes in which annotations have been performed.
    The default is "auto" for a single box covering the whole image.
vessel_prop : float, optional
    Vessel proportion. The default is 0.5.
points : int, optional
    Total number of selected points. The default is 2000.
Returns
-------
points_set : tuple
    A tuple of 3 numpy 1d arrays with the indices of the selected points,
    to be passed to the BuildDataFrame method.
"""
annotations = np.load(annotations_file)
if mask_file == "auto":
mask = np.ones(annotations.shape)
else:
mask = np.load(mask_file)
if box_file_list == "auto":
box_list = [np.ones(annotations.shape)]
else:
box_list = list(map(np.load, box_file_list))
# balance and split the data separately for each annotation box
splited_datas = {}
for i, box in enumerate(box_list):
annot = annotations*box*mask
seeds = dtp.Seeds(annot)
balanced = dtp.Balance(seeds, vessel_prop,
points/len(box_list))
splited_datas["box: " + str(i)] = balanced
# merging
points_set = dtp.Merge([
splited_datas["box: " + str(i)]
for i in range(len(box_list))
])
return points_set
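# Hedged usage sketch (assumption: "annotations.npy" is a hypothetical 3d label
# array saved with numpy; it is not shipped with this module). With the default
# "auto" mask and box, 2000 points are drawn, half of them from the vessel class.
def _example_points_selection():
    points_set = PointsSelection("annotations.npy", vessel_prop=0.5, points=2000)
    return points_set  # tuple of 3 index arrays, one per spatial axis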
def MultiFilesPointSelection(annotations_file_list,
mask_file_list = "auto",
box_file_list_list = "auto",
vessel_prop = 0.5,
points = 2000):
"""
Perform the PointsSelection method on multiple files.
Parameters
----------
annotations_file_list : list
    List of paths to the annotation files: 3d arrays where points of each
    class i have the value i and unlabelled points the value 0.
    The first class has to be the vessel annotations
    (or the object of interest to be segmented).
mask_file_list : list, optional
    List of paths to the mask files. The default is "auto" for a list of
    masks, each covering the whole image.
box_file_list_list : list, optional
    List of lists of paths to box files. Each box file is a mask on an area
    of one image. For one image, given a list of box files, the point
    selection is balanced between all boxes of the list.
    If "auto", the point selection is not balanced. The default is "auto".
vessel_prop : float, optional
    Proportion of vessel points (class 1) in the selection. The default is 0.5.
points : int, optional
    Number of points to be selected in each annotation file. The default is 2000.
Returns
-------
points_set_list : list
    A list of tuples of 3 numpy 1d arrays with the indices of the selected
    points, to be passed to the MultiFilesBuildDataFrame method.
"""
if mask_file_list == "auto":
mask_file_list=["auto" for i in range(len(annotations_file_list))]
if box_file_list_list == "auto":
box_file_list_list = ["auto" for i in range(len(annotations_file_list))]
points_set_list = [PointsSelection(annotations_file_list[i],
mask_file_list[i],
box_file_list_list[i],
vessel_prop = vessel_prop,
points=points
) for i in range(len(annotations_file_list))]
return points_set_list
def BuildDataFrame(path_featur_dir, annotations_file, points_set = None,
feature_list = None):
"""
Build a dataframe with, for each point, a row of features and a label. If
points_set is None, all annotated points are used to build the dataframe;
otherwise only the points in points_set are used.
Parameters
----------
path_featur_dir : str or Path
    Path to the feature directory in which features have been computed.
annotations_file : str or Path
    Path to the annotation file.
points_set : tuple, optional
    Set of points, as returned by the PointsSelection method. If None, all
    annotated points are used as a single set. The default is None.
feature_list : list, optional
    If None, all files in the feature directory are loaded as features;
    otherwise only the files in feature_list are loaded.
    The default is None.
Returns
-------
data : dataframe
    Dataframe with, for each point, a row of features and a label.
"""
annotations = np.load(annotations_file)
if points_set is None:
points_set = np.nonzero(annotations)
if feature_list is None:
feature_list = [feat.split(".npy")[0]
for feat in os.listdir(path_featur_dir)]
data = f3d.LoadFeaturesDir(path_featur_dir, points_set, feature_list)
data["Label"] = annotations[points_set]
return data
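# Hedged usage sketch combining PointsSelection and BuildDataFrame. The paths
# "Features" and "annotations.npy" are hypothetical placeholders for a feature
# directory filled beforehand and the matching annotation volume.
def _example_build_dataframe():
    points_set = PointsSelection("annotations.npy")
    data = BuildDataFrame("Features", "annotations.npy", points_set=points_set)
    return data  # one row per selected point: feature columns plus "Label"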
def MultiFilesBuildDataFrame(path_featur_dir_list, annotations_file_list,
points_set_list = None, feature_list = None):
"""
Build a dataframe with, for each point, a row of features and a label. If
points_set_list is None, all annotated points are used to build the
dataframe; otherwise only the points in the point sets are used.
Parameters
----------
path_featur_dir_list : list
    List of paths to the feature folders in which features have been
    computed for each 3d image.
annotations_file_list : list
    List of paths to the annotation files, one per image.
points_set_list : list, optional
    List of point sets, as returned by the MultiFilesPointSelection method.
    If None, all annotated points are used as a single set for each image.
    The default is None.
feature_list : list, optional
    If None, the feature list is set to all the feature names found in the
    first feature folder; otherwise only the files in feature_list are
    loaded from each feature folder.
    The default is None.
Returns
-------
data : dataframe
    Dataframe with, for each point, a row of features and a label.
"""
if points_set_list is None:
points_set_list = [None for i in range(len(path_featur_dir_list))]
if feature_list is None:
feature_list = [feat.split(".npy")[0]
for feat in os.listdir(path_featur_dir_list[0])]
data_list = [BuildDataFrame(path_featur_dir_list[i],
annotations_file_list[i],
points_set_list[i],
feature_list)
for i in range(len(path_featur_dir_list))]
data = pd.concat(data_list, axis = 0, ignore_index = True)
return data
def DFToXY(dataframe):
"""
Split a dataframe produced by the BuildDataFrame method into features
and labels.
Parameters
----------
dataframe : pandas DataFrame
    Dataframe with features and labels.
Returns
-------
x : pandas DataFrame
    Dataframe of features.
y : pandas Series
    Labels.
"""
y = dataframe["Label"]
x = dataframe.drop("Label", axis = 1)
return x, y
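# Hedged illustration of DFToXY on a tiny hand-made dataframe: every column
# except "Label" is treated as a feature and "Label" becomes the target series.
def _example_df_to_xy():
    demo = pd.DataFrame({"feat_a": [0.1, 0.2], "feat_b": [1.0, 2.0],
                         "Label": [1, 2]})
    x, y = DFToXY(demo)
    return x.shape, y.tolist()  # ((2, 2), [1, 2])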
def Optimise(data, optimiser = None):
"""
Find the best hyperparameters for a random forest classifier through a
scikit-learn search object.
Parameters
----------
data : dataframe
    The cross-validation set.
optimiser : optimiser object, optional
    A RandomizedSearchCV or GridSearchCV object used to find the best
    hyperparameters. If None, a default optimiser is constructed instead.
    The default is None.
Returns
-------
optimiser : optimiser object
    The optimiser fitted to the cross-validation data.
"""
x, y = DFToXY(data)
y[y!=1] = 2  # only two classes are used with this optimiser: vessel vs. not vessel
if optimiser is None:
param_grid = {
"criterion": ["gini", "entropy"],
'max_depth': [5, 10, 20, 50, 100],
'max_features': ['auto', 'sqrt'],
'min_samples_leaf': [1, 2, 4],
'min_samples_split': [2, 5, 10],
'n_estimators': [20, 50, 100, 250, 500]
}
classifier = RandomForestClassifier(n_jobs=6)
optimiser = RandomizedSearchCV(estimator = classifier,
param_distributions = param_grid,
n_iter = 100, cv = 5, verbose=2,
scoring = "roc_auc",
random_state=42, n_jobs = 6)
optimiser.fit(x, y)
return optimiser
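# Hedged usage sketch: the fitted search object exposes the standard
# scikit-learn attributes, so the selected hyperparameters can be forwarded to
# BuildModel. "data" is assumed to be a dataframe produced by BuildDataFrame.
def _example_optimise(data):
    optimiser = Optimise(data)  # RandomizedSearchCV over the default grid
    return optimiser.best_params_  # dict usable as BuildModel hyperparameters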
def BuildModel(data, hyperparameters = None, shuffle=False):
"""
From a dataframe, build a segmentation model.
Parameters
----------
data : dataframe
    Dataframe with, for each point, a row of features and a label.
hyperparameters : dict, optional
    Scikit-learn random forest classifier parameters. The default is None.
    If None, the following parameters are used:
    - "n_estimators": 100,
    - "criterion": 'gini',
    - "max_depth": None,
    - "min_samples_split": 2,
    - "min_samples_leaf": 1,
    - "max_features": 'auto'
shuffle : boolean, optional
    Whether the dataframe has to be shuffled. The default is False.
Returns
-------
model : dict
    A dictionary with:
    - "model" a trained scikit-learn random forest classifier.
    - "features" the list of the model features.
"""
data = data[sorted(data.columns)]
if shuffle:
data = data.sample(frac=1).reset_index(drop=True)
if hyperparameters is None:
hyperparameters = {"n_estimators": 100, "criterion":
'gini', "max_depth": None,
"min_samples_split": 2,
"min_samples_leaf": 1,
"max_features": 'auto'}
forest = RandomForestClassifier(
n_estimators = hyperparameters["n_estimators"],
max_depth = hyperparameters["max_depth"],
max_features = hyperparameters["max_features"],
min_samples_leaf = hyperparameters["min_samples_leaf"],
min_samples_split = hyperparameters["min_samples_split"],
criterion = hyperparameters["criterion"],
n_jobs=6
)
x_train, y_train = DFToXY(data)
forest.fit(x_train, y_train)
model = {"model": forest, "features": x_train.columns}
return model
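# Hedged usage sketch: train a model with explicit hyperparameters (for
# instance the best_params_ found by Optimise) and reuse its stored feature
# list to keep prediction-time columns in the same order. "data" is assumed to
# come from BuildDataFrame or MultiFilesBuildDataFrame.
def _example_build_model(data, best_params=None):
    model = BuildModel(data, hyperparameters=best_params, shuffle=True)
    x, _ = DFToXY(data[sorted(data.columns)])
    proba = model["model"].predict_proba(x[model["features"]])
    return proba  # class probabilities for the training points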
def CrossValidation(data, hyperparameters = None, cv=5, shuffle=False):
"""
From a dataframe, perform n-fold cross validation (default is 5-fold).
Parameters
----------
data : dataframe
    Dataframe with, for each point, a row of features and a label.
hyperparameters : dict, optional
    Random forest hyperparameters. The default is None.
cv : int, optional
    Number of folds for the cross-validation. The default is 5.
shuffle : boolean, optional
    Whether the dataframe has to be shuffled. The default is False.
Returns
-------
results_dict : dict
    A dictionary with the results of all cross-validation experiments,
    to be processed with Summarise.
"""
if shuffle:
data = data.sample(frac=1).reset_index(drop=True)
set_size = len(data)/cv
cross_validation_dataframes = [data.iloc[int(i*set_size):int((i+1)*set_size),:] for i
in range(cv)]
results_dict = { "models": [], "test" : {}, "train" : {},
"features": {"names":[]}}
set_num = len(cross_validation_dataframes)
results_dict["features"]["names"] = list(cross_validation_dataframes[0].columns)
results_dict["features"]["names"].remove('Label')
if hyperparameters is None:
hyperparameters = {"n_estimators": 100, "criterion":
'gini', "max_depth": None,
"min_samples_split": 2,
"min_samples_leaf": 1,
"max_features": 'auto'}
for i in range(set_num):
forest = RandomForestClassifier(
n_estimators = hyperparameters["n_estimators"],
max_depth = hyperparameters["max_depth"],
max_features = hyperparameters["max_features"],
min_samples_leaf = hyperparameters["min_samples_leaf"],
min_samples_split = hyperparameters["min_samples_split"],
criterion = hyperparameters["criterion"],
n_jobs=6
)
results_dict["test"][str(i)] = {}
test = cross_validation_dataframes.pop(0)
train = pd.concat(cross_validation_dataframes, axis = 0,
ignore_index = True)
x_test, y_test = DFToXY(test)
x_train, y_train = DFToXY(train)
model = forest.fit(x_train, y_train)
results_dict["models"].append({"model": model,
"features": x_train.columns})
pred_test = model.predict_proba(x_test)[:,0]
pred_train = model.predict_proba(x_train)[:,0]
eval_test = ev.Eval(y_test, pred_test, n_threshold = 50)
eval_train = ev.Eval(y_train, pred_train, n_threshold = 50)
metrics, curvs = ev.SummarisedMetrics(eval_test)
metrics_train, curvs_train = ev.SummarisedMetrics(eval_train)
results_dict["test"][str(i)] = {"metrics": metrics,
"curvs": curvs}
results_dict["train"][str(i)] = {"metrics": metrics_train,
"curvs": curvs_train}
cross_validation_dataframes.append(test)
return results_dict
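# Hedged usage sketch: the dict returned by CrossValidation is meant to be fed
# straight into Summarise (defined just below); per-fold metrics live under
# results["test"][str(i)]. "data" is assumed to be a BuildDataFrame output.
def _example_cross_validation(data):
    results = CrossValidation(data, cv=5, shuffle=True)
    tables = Summarise(results)
    return results["test"]["0"]["metrics"], tables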
def Summarise(result_dict, display=False):
"""
From the results of the CrossValidation method, build a human readable
summary.
Parameters
----------
result_dict : dict
    The dict returned by CrossValidation.
display : bool, optional
    If True, display the ROC curve of each test fold.
    The default is False.
Returns
-------
nice_dataframes : dict
    Dict of human readable and interpretable dataframes used to analyse
    cross-validation results.
"""
nice_dataframes = {}
if "features" in list(result_dict.keys()):
feat_dataframe = pd.DataFrame()
oob_ranking = pd.DataFrame()
for i in range(len(result_dict["models"])):
oob_ranking[str(i)] = np.argsort(
result_dict["models"][i]["model"].feature_importances_
)
oob_ranking.index=result_dict["features"]["names"]
feat_dataframe["oob mean rank"] = oob_ranking.mean(axis = 1)
feat_dataframe["oob rank std"] = oob_ranking.std(axis = 1)
feat_dataframe = feat_dataframe.T
feat_dataframe = feat_dataframe.sort_values(by = "oob mean rank"
, axis = 1)
nice_dataframes["feat_values"] = feat_dataframe
metric_dataframe_test = pd.DataFrame()
metric_dataframe_train = pd.DataFrame()
from collections import defaultdict
import copy
import json
import numpy as np
import pandas as pd
import pickle
import scipy
import seaborn as sb
import torch
from allennlp.common.util import prepare_environment, Params
from matplotlib import pyplot as plt
from pytorch_pretrained_bert import BertTokenizer, BertModel
from scipy.stats import entropy
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import accuracy_score, mean_squared_error
from probing.globals import *
from probing.helpers import _reg_r2
from probing.tasks import ProbingTask
class Analytics:
def __init__(self, workspace):
self.directories = {d: os.path.join(workspace, d) for d in ["out", "tasks", "datasets", "configs"]}
self.scalar_mixes = None
self.tokenizer = None
self.embedder = None
# === Data statistics
def task_statistics(self):
data = []
for task_id in sorted(os.listdir(self.directories["tasks"])):
config = ProbingTask.parse_id(task_id)
stats = json.load(open(os.path.join(self.directories["tasks"], task_id, "_stats.json")))
for split in stats:
c = copy.deepcopy(config)
c["sentences"] = stats[split]["total_sentences"]
c["instances"] = stats[split]["total_instances"]
c["labels"] = stats[split]["total_labels"]
c["split"] = split
data += [c]
return pd.DataFrame(data)
def dataset_statistics(self):
def _collect_stats(sentences):
num_tokens = 0
num_sentences = 0
num_predications = 0
roles_all = 0
roles_core = 0
for s, pp in sentences:
num_tokens += len(s.tokens())
num_sentences += 1
num_predications += len(pp)
for p in pp:
roles_all += len([a for a in p.arguments])
roles_core += len([a for a in p.arguments if not p.arguments[a]["pb"].startswith("AM")])
return {"tokens": num_tokens, "sentences": num_sentences, "predicates": num_predications,
"roles_all": roles_all, "roles_core": roles_core}
rows = []
for ds in os.listdir(self.directories["datasets"]):
ds = pickle.load(open(os.path.join(self.directories["datasets"], ds), "rb"))
for split in ds:
stats = _collect_stats(ds[split].values())
stats["split"] = split
stats["dataset"] = ds.name
rows += [stats]
df = pd.DataFrame(rows)
return df
# === Scalar mix analysis
def get_mixes(self):
if self.scalar_mixes is None:
self.scalar_mixes = self._parse_scalar_mixes()
return self.scalar_mixes
@staticmethod
# Extract a single scalar mix set by layer
def _parse_scalar_mix(th, kind, softmax=True):
mix_map = {"common": "bert_embedder._scalar_mix.scalar_parameters",
"src": "bert_embedder._scalar_mix_1.scalar_parameters",
"tgt": "bert_embedder._scalar_mix_2.scalar_parameters"}
device = torch.device('cpu')
data = torch.load(os.path.join(th), map_location=device)
layers = []
for layer in range(12): # FIXME num layers to global
layers += [data[f"{mix_map[kind]}.{layer}"].item()]
return kind, scipy.special.softmax(np.array(layers)) if softmax else np.array(layers)
@staticmethod
def center_of_gravity(x):
return sum(l * x[l] for l in range(len(x)))
def _parse_scalar_mixes(self):
data = []
for exp_id in os.listdir(self.directories["out"]):
config = ProbingTask.parse_id(exp_id)
task_name = config["name"]
try:
if config["ttype"] == "unary":
mix = [self._parse_scalar_mix(os.path.join(self.directories["out"], exp_id, "best.th"), "common")]
else:
mix = [self._parse_scalar_mix(os.path.join(self.directories["out"], exp_id, "best.th"), m) for m in ["src", "tgt"]]
for kind, m in mix:
task_mix_name = task_name
# prepend regression tasks with *
if config["mtype"] == "reg":
task_mix_name = "*"+task_mix_name
# add src-tgt mix distinction
if kind != "common":
task_mix_name += " " + kind
for layer in range(12):
c = copy.deepcopy(config)
c["name"] = task_mix_name
c["layer"] = layer
c["weight"] = m[layer]
data += [c]
except Exception:
print(f"No best weights for {exp_id} (yet?). Skipping.")
return pd.DataFrame(data)
def plot_scalar_mix_by_task(self, lang, task_order=None, cbar_max=None, show_cbar=True, ax=None):
mix_df = self.get_mixes()
pvt = mix_df[mix_df["language"] == lang].copy().pivot("name", "layer", "weight")
cog = {name: self.center_of_gravity(pvt.loc[name]) for name in pvt.index}
if task_order is None: # if no task order for display, order by center of gravity
pvt = pvt.reindex([x[0] for x in sorted(cog.items(), key=lambda y: y[1])])
else:
pvt = pvt.reindex(task_order)
# set maximum value for heatmap
if cbar_max is None:
cbar_max = pvt.values.max()
ax = sb.heatmap(pvt, cmap="Oranges", vmin=0.0, vmax=cbar_max,
cbar=show_cbar, square=False,
xticklabels=[],
yticklabels=[ix + f" [{round(cog[ix], 2)}]" for ix in pvt.index], ax=ax)
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
ax.set_ylabel("")
ax.set_xlabel(r'Layer $\rightarrow$')
ax.set_title(lang)
def plot_anchor_task_map(self, lang, target_tasks, anchor_tasks=None, ax=None, show_cbar=False):
mix_df = self.get_mixes()
pvt = mix_df[mix_df["language"] == lang].copy().pivot("name", "layer", "weight")
if anchor_tasks is None:
anchor_tasks = [a for a in mix_df["name"].unique() if a not in target_tasks]
kl_div = pd.DataFrame()
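# --- Hedged, standalone sketch (added for clarity; not part of the original module) ---
# Illustrates the scalar-mix "center of gravity" used above: with softmax-normalised
# layer weights it is the expected layer index, so a uniform mix over 12 layers
# gives (0 + 1 + ... + 11) / 12 = 5.5.
def _center_of_gravity_example():
    import numpy as np
    import scipy.special
    weights = scipy.special.softmax(np.zeros(12))  # uniform mix over 12 layers
    return sum(layer * weights[layer] for layer in range(len(weights)))  # -> 5.5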
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS, Predictive
import jax
import jax.numpy as np
from jax.random import PRNGKey
import numpy as onp
import pandas as pd
import matplotlib.pyplot as plt
from covid.compartment import SEIRDModel
import pdb
'''Utility to define access method for time varying fields'''
def getter(f):
def get(self, samples, forecast=False):
return samples[f + '_future'] if forecast else self.combine_samples(samples, f)
return get
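# --- Hedged, standalone sketch (added for clarity; not part of the original module) ---
# getter('z') builds an accessor that returns samples['z_future'] when forecast=True
# and otherwise the combined in-sample trajectory via self.combine_samples. A minimal
# stand-in object is used here instead of the real Model class.
def _getter_example():
    class _Stub:
        def combine_samples(self, samples, f):
            return samples[f]
        z = getter('z')
    return _Stub().z({'z': [1, 2], 'z_future': [3]}, forecast=True)  # -> [3]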
"""
************************************************************
Base class for models
************************************************************
"""
class Model():
names = {
'S': 'susceptible',
'I': 'infectious',
'R': 'removed',
'E': 'exposed',
'H': 'hospitalized',
'D': 'dead',
'C': 'cumulative infected',
'y': 'confirmed',
'z': 'deaths',
'dy': 'daily confirmed',
'dz': 'daily deaths',
'mean_dy': 'daily confirmed (mean)',
'mean_dz': 'daily deaths (mean)'
}
def __init__(self, data=None, mcmc_samples=None, **args):
self.mcmc_samples = mcmc_samples
self.data = data
self.args = args
@property
def obs(self):
'''Provide extra arguments for observations
Used during inference and forecasting
'''
return {}
"""
***************************************
Inference and sampling routines
***************************************
"""
def infer(self, num_warmup=1000, num_samples=1000, num_chains=1, rng_key=PRNGKey(1), **args):
'''Fit using MCMC'''
args = dict(self.args, **args)
kernel = NUTS(self, init_strategy = numpyro.infer.initialization.init_to_median())
mcmc = MCMC(kernel,
num_warmup=num_warmup,
num_samples=num_samples,
num_chains=num_chains)
mcmc.run(rng_key, **self.obs, **args)
mcmc.print_summary()
self.mcmc = mcmc
self.mcmc_samples = mcmc.get_samples()
return self.mcmc_samples
def prior(self, num_samples=1000, rng_key=PRNGKey(2), **args):
'''Draw samples from prior'''
predictive = Predictive(self, posterior_samples={}, num_samples=num_samples)
args = dict(self.args, **args) # passed args take precedence
self.prior_samples = predictive(rng_key, **args)
return self.prior_samples
def predictive(self, rng_key=PRNGKey(3), **args):
'''Draw samples from in-sample predictive distribution'''
if self.mcmc_samples is None:
raise RuntimeError("run inference first")
predictive = Predictive(self, posterior_samples=self.mcmc_samples)
args = dict(self.args, **args)
return predictive(rng_key, **args)
def forecast(self, num_samples=1000, rng_key=PRNGKey(4), **args):
'''Draw samples from forecast predictive distribution'''
if self.mcmc_samples is None:
raise RuntimeError("run inference first")
predictive = Predictive(self, posterior_samples=self.mcmc_samples)
args = dict(self.args, **args)
return predictive(rng_key, **self.obs, **args)
def resample(self, low=0, high=90, rw_use_last=1, **kwargs):
'''Resample MCMC samples by growth rate'''
# TODO: hard-coded for SEIRDModel. Would also
# work for SEIR, but not SIR
beta = self.mcmc_samples['beta']
gamma = self.mcmc_samples['gamma']
sigma = self.mcmc_samples['sigma']
beta_end = beta[:,-rw_use_last:].mean(axis=1)
growth_rate = SEIRDModel.growth_rate((beta_end, sigma, gamma))
low = int(low/100 * len(growth_rate))
high = int(high/100 * len(growth_rate))
sorted_inds = onp.argsort(growth_rate)
selection = onp.random.randint(low, high, size=(1000))
inds = sorted_inds[selection]
new_samples = {k: v[inds, ...] for k, v in self.mcmc_samples.items()}
self.mcmc_samples = new_samples
return new_samples
"""
***************************************
Data access and plotting
***************************************
"""
def combine_samples(self, samples, f, use_future=False):
'''Combine fields like x0, x, x_future into a single array'''
f0, f_future = f + '0', f + '_future'
data = np.concatenate((samples[f0][:,None], samples[f]), axis=1)
if f_future in samples and use_future:
data = np.concatenate((data, samples[f_future]), axis=1)
return data
def get(self, samples, c, **kwargs):
forecast = kwargs.get('forecast', False)
if c in self.compartments:
x = samples['x_future'] if forecast else self.combine_samples(samples, 'x')
j = self.compartments.index(c)
return x[:,:,j]
else:
return getattr(self, c)(samples, **kwargs) # call method named c
def horizon(self, samples, **kwargs):
'''Get time horizon'''
y = self.y(samples, **kwargs)
return y.shape[1]
'''These are methods e.g., call self.z(samples) to get z'''
#z = getter('z')
#y = getter('y')
mean_y = getter('mean_y')
mean_z = getter('mean_z')
z = mean_z
y = mean_y
# These are only available in some models but easier to define here
dz = getter('dz')
dy = getter('dy')
mean_dy = getter('mean_dy')
mean_dz = getter('mean_dz')
def plot_samples(self,
samples,
plot_fields=['y'],
start='2020-03-04',
T=None,
ax=None,
legend=True,
forecast=False,
n_samples=0,
intervals=[50, 80, 95]):
'''
Plotting method for SIR-type models.
'''
ax = plt.axes(ax)
T_data = self.horizon(samples, forecast=forecast)
T = T_data if T is None else min(T, T_data)
fields = {f: 0.0 + self.get(samples, f, forecast=forecast)[:,:T] for f in plot_fields}
names = {f: self.names[f] for f in plot_fields}
medians = {names[f]: onp.median(v, axis=0) for f, v in fields.items()}
t = pd.date_range(start=start, periods=T, freq='D')
ax.set_prop_cycle(None)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Plot medians
df = pd.DataFrame(index=t, data=medians)
df.plot(ax=ax, legend=legend)
median_max = df.max().values
# Plot samples if requested
if n_samples > 0:
for i, f in enumerate(fields):
df = pd.DataFrame(index=t, data=fields[f][:n_samples,:].T)
df.plot(ax=ax, legend=False, alpha=0.1)
# Plot prediction intervals
pi_max = 10
handles = []
for interval in intervals:
low=(100.-interval)/2
high=100.-low
pred_intervals = {names[f]: onp.percentile(v, (low, high), axis=0) for f, v in fields.items()}
for i, pi in enumerate(pred_intervals.values()):
h = ax.fill_between(t, pi[0,:], pi[1,:], alpha=0.1, color=colors[i], label=interval)
handles.append(h)
pi_max = onp.maximum(pi_max, onp.nanmax(pi[1,:]))
return median_max, pi_max
# def get_medians(self,
# samples,
# plot_fields=['y'],
# start='2020-03-04',
# T=None,
# legend=True,
# forecast=False,
# n_samples=0,
# intervals=[50, 80, 95]):
# T_data = self.horizon(samples, forecast=forecast)
# T = T_data if T is None else min(T, T_data)
# fields = {f: 0.0 + self.get(samples, f, forecast=forecast)[:,:T] for f in plot_fields}
# names = {f: self.names[f] for f in plot_fields}
# medians = {names[f]: onp.median(v, axis=0) for f, v in fields.items()}
# return medians
def plot_forecast(self,
variable,
post_pred_samples,
forecast_samples=None,
start='2020-03-04',
T_future=7*4,
ax=None,
obs=None,
scale='lin',
**kwargs):
ax = plt.axes(ax)
# Plot posterior predictive for observed times
median_max1, pi_max1 = self.plot_samples(post_pred_samples, ax=ax, start=start, plot_fields=[variable])
# Plot forecast
T = self.horizon(post_pred_samples)
obs_end = pd.to_datetime(start) + pd.Timedelta(T-1, "d")
forecast_start = obs_end + pd.Timedelta("1d")
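# --- Hedged, standalone sketch (added for clarity; not part of the original module) ---
# Shows what Model.combine_samples does: prepend the initial value f0 as the first
# time step and, when requested, append f_future along the time axis. Shapes and
# values below are made up.
def _combine_samples_example():
    import numpy as onp
    samples = {"y0": onp.array([1.0, 2.0]),                # (num_samples,)
               "y": onp.array([[1.1, 1.2], [2.1, 2.2]]),   # (num_samples, T)
               "y_future": onp.array([[1.3], [2.3]])}      # (num_samples, T_future)
    data = onp.concatenate((samples["y0"][:, None], samples["y"]), axis=1)
    data = onp.concatenate((data, samples["y_future"]), axis=1)
    return data.shape  # (2, 4)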
import os
import io
import locale
import datetime
import numpy as np
import pandas as pd
import requests
import urllib.parse
import urllib.request as ur
from bs4 import BeautifulSoup
from zipfile import ZipFile
import xml.etree.ElementTree as ET
def get_sectors():
url = 'http://bvmf.bmfbovespa.com.br/cias-listadas/empresas-listadas/' + \
'BuscaEmpresaListada.aspx?opcao=1&indiceAba=1&Idioma=pt-br'
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
url = soup.find("a", string="Download").get('href')
# Unzip
filehandle, _ = ur.urlretrieve(url)
with ZipFile(filehandle, 'r') as zf:
fn = zf.namelist()[0]
df = pd.read_excel(
io.BytesIO(zf.read(fn)),
skiprows=7, skipfooter=18,
names=['SETOR', 'SUBSETOR', 'NM_PREGAO', 'BTICKER', 'CD_GOVERN']
)
df['SEGMENTO'] = np.where(
df['BTICKER'].isnull(), df['NM_PREGAO'], np.NaN)
for col in ['SETOR', 'SUBSETOR', 'SEGMENTO']:
df[col] = df[col].fillna(method='ffill')
df['CD_GOVERN'] = df['CD_GOVERN'].fillna('')
df = df.dropna(subset=['BTICKER'])
df = df[df['BTICKER'] != 'CÓDIGO']
df = df[df['SUBSETOR'] != 'SUBSETOR']
df['NM_PREGAO'] = df['NM_PREGAO'].str.strip()
df = df.reset_index(drop=True)
return df[['BTICKER', 'NM_PREGAO', 'SETOR', 'SUBSETOR', 'SEGMENTO', 'CD_GOVERN']]
def get_listed_codes():
url = 'http://bvmf.bmfbovespa.com.br/cias-listadas/empresas-listadas/' + \
'BuscaEmpresaListada.aspx?idioma=pt-br'
r = requests.post(url, {'__EVENTTARGET': 'ctl00:contentPlaceHolderConteudo:BuscaNomeEmpresa1:btnTodas'})
soup = BeautifulSoup(r.text, 'html.parser')
anchors = soup.find_all('a')
df = pd.DataFrame({
'CD_CVM': [a['href'] for a in anchors],
'NM_PREGAO': [a.text for a in anchors]
})
df['NM_PREGAO'] = df['NM_PREGAO'].str.strip()
df = df[df['CD_CVM'].str[:28] == 'ResumoEmpresaPrincipal.aspx?']
df['CD_CVM'] = df['CD_CVM'].str.split('=', expand=True).loc[:,1]
df = df[df.index % 2 == 0].reset_index(drop=True)
return df
def get_index_composition(index_name='IBRA'):
url = 'http://bvmf.bmfbovespa.com.br/indices/ResumoCarteiraTeorica.aspx?' + \
f'Indice={index_name}'
acoes = pd.read_html(url)[0]
acoes.columns = ['TICKER', 'NM_PREGAO', 'TIPO', 'QTDE', 'PESO']
acoes['NM_PREGAO'] = acoes['NM_PREGAO'].str.strip()
acoes['PESO'] = acoes['PESO'] / 1000
acoes['TIPO'] = acoes['TIPO'].str.split(' ', expand=True).loc[:,0]
acoes = acoes[acoes['PESO'] != 100]
acoes = acoes.sort_values('PESO', ascending=False)
return acoes.reset_index(drop=True)
def get_num_shares():
numshares = pd.read_html(
'http://bvmf.bmfbovespa.com.br/CapitalSocial/',
decimal=',', thousands='.')[0]
numshares = numshares[['Código', 'Qtde Ações Ordinárias',
'Qtde Ações Preferenciais']]
numshares.columns = ['BTICKER', 'QTDE_ON', 'QTDE_PN']
numshares = numshares.groupby('BTICKER').sum().reset_index()
return numshares.reset_index(drop=True)
def cache_data(fn, fun, *args, **kwargs):
if not os.path.isdir('cache'):
os.mkdir('cache')
fn = os.path.join('cache', fn)
if os.path.exists(fn):
print(f'{fn} exists, using cached version')
return pd.read_csv(fn)
else:
print(f'{fn} does not exist, creating file')
df = fun(*args, **kwargs)
df.to_csv(fn, index=False)
return df
def get_companies():
codigos = cache_data('codigos.csv', get_listed_codes)
setores = cache_data('setores.csv', get_sectors)
numshares = cache_data('num_shares.csv', get_num_shares)
df = pd.merge(codigos, setores, on='NM_PREGAO', how='inner')
df = df.merge(numshares, on='BTICKER')
return df.reset_index(drop=True)
def get_cvm_zip(year, doc_type, accounts=None, companies=None, rmzero=True):
#
fn = f'{doc_type.lower()}_cia_aberta_{year}'
print('Downloading ' + fn)
url = 'http://dados.cvm.gov.br/dados/CIA_ABERTA/DOC/'
if doc_type.lower() != 'itr':
url = url + 'DFP/'
url = url + doc_type.upper() + '/DADOS/' + fn + '.zip'
#
filehandle, _ = ur.urlretrieve(url)
with ZipFile(filehandle, 'r') as zf:
flist = zf.namelist()
flist = [f for f in flist if 'con' in f]
if fn + '.csv' in flist:
flist.remove(fn + '.csv')
df = pd.concat([
pd.read_csv(io.BytesIO(zf.read(fn)), delimiter=';',
encoding='latin1')
for fn in flist
])
#
if companies is not None:
df = df[df['CD_CVM'].isin(companies)]
if accounts is not None:
df = df[df['CD_CONTA'].isin(accounts)]
if rmzero:
df = df[df['VL_CONTA'] != 0]
#
df['VL_CONTA'] = df['VL_CONTA'] * 10 ** \
np.where(df['ESCALA_MOEDA'] == 'UNIDADE', 1, 3)
#
cols = df.columns
cols = cols[cols.isin(['DT_REFER', 'VERSAO', 'CD_CVM',
'DT_INI_EXERC', 'DT_FIM_EXERC', 'CD_CONTA',
'DS_CONTA', 'VL_CONTA', 'COLUNA_DF'])]
return df[cols].reset_index(drop=True)
def get_cvm_all(years, doc_types=['dre', 'bpa', 'bpp'],
accounts=None, companies=None):
doc_types.append('itr')
df = (
pd.concat([
get_cvm_zip(year, doc_type, accounts, companies)
for doc_type in doc_types
for year in years
], ignore_index=True)
.sort_values(['CD_CVM', 'CD_CONTA', 'DT_FIM_EXERC', 'DT_REFER',
'VERSAO'])
.drop_duplicates(['CD_CVM', 'CD_CONTA', 'DT_FIM_EXERC'], keep='last')
.assign(VL_CONTA=lambda x: x['VL_CONTA'] / 1000000)
.rename(columns={'VL_CONTA': 'VL_CONTA_YTD'})
.reset_index(drop=True)
)
df['VL_CONTA'] = np.where(
df['CD_CONTA'].str[:1].isin(['1', '2']),
df['VL_CONTA_YTD'],
df['VL_CONTA_YTD'] -
(df.groupby(['CD_CVM', 'CD_CONTA', 'DT_INI_EXERC'])['VL_CONTA_YTD']
.shift(fill_value=0))
)
return df
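# --- Hedged, standalone sketch (added for clarity; not part of the original module) ---
# Illustrates the YTD-to-quarterly conversion in get_cvm_all: income-statement
# accounts are reported cumulatively within a fiscal year, so the quarterly flow
# is the YTD value minus the previous quarter's YTD value, grouped by company,
# account and fiscal-year start. Values below are made up.
def _ytd_to_quarterly_example():
    import pandas as pd
    df = pd.DataFrame({'CD_CVM': [1, 1, 1],
                       'CD_CONTA': ['3.01'] * 3,
                       'DT_INI_EXERC': ['2019-01-01'] * 3,
                       'VL_CONTA_YTD': [100.0, 250.0, 450.0]})
    df['VL_CONTA'] = df['VL_CONTA_YTD'] - df.groupby(
        ['CD_CVM', 'CD_CONTA', 'DT_INI_EXERC'])['VL_CONTA_YTD'].shift(fill_value=0)
    return df  # quarterly flows: 100, 150, 200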
def get_quotes(tickers):
url = 'http://bvmf.bmfbovespa.com.br/cotacoes2000/' + \
'FormConsultaCotacoes.asp?strListaCodigos=' + '|'.join(tickers)
page = requests.get(url)
xml = ET.fromstring(page.text)
df = pd.DataFrame([p.attrib for p in xml.findall('Papel')])
df = df[['Codigo', 'Data', 'Ultimo']]
df.columns = ['ticker', 'data', 'cotacao']
df['cotacao'] = pd.to_numeric(df['cotacao'].str.replace(',','.'))
return df
def get_mktcap():
url = "http://www.b3.com.br/pt_br/market-data-e-indices/" + \
"servicos-de-dados/market-data/consultas/mercado-a-vista/" + \
"valor-de-mercado-das-empresas-listadas/bolsa-de-valores/"
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
url = soup.find("a", string="Histórico diário").get('href')
url = "http://www.b3.com.br/" + url.replace('../', '')
df = (
pd.read_excel(url, skiprows=7, skipfooter=5)
.dropna(axis=1, how="all")
.rename(columns={"Empresa": "NM_PREGAO", "R$ (Mil)": "MarketCap"})
.assign(
MarketCap=lambda x: x['MarketCap'] / 1000,
NM_PREGAO=lambda x: x['NM_PREGAO'].str.strip()
)
[["NM_PREGAO", "MarketCap"]]
)
return df
def get_pib():
url = "https://sidra.ibge.gov.br/geratabela?format=us.csv&" + \
"name=tabela6613.csv&terr=N&rank=-&query=" + \
"t/6613/n1/all/v/all/p/all/c11255/90687,90691,90696,90707/" + \
"d/v9319%202/l/t,v%2Bc11255,p"
df = (
pd.read_csv(url, skiprows=5, skipfooter=11,
names=['DT_FIM_EXERC', 'PIB_AGRO', 'PIB_IND', 'PIB_SERV', 'PIB'])
.assign(
DT_FIM_EXERC=lambda x: pd.to_datetime({
'year': x['DT_FIM_EXERC'].str[-4:],
'month': x['DT_FIM_EXERC'].str[0].astype(int) * 3,
'day': 1
})
)
.set_index('DT_FIM_EXERC')
.resample('Q').last()
.reset_index()
)
return df
def get_ipca():
locale.setlocale(locale.LC_ALL,'pt_BR.UTF-8')
url = "https://sidra.ibge.gov.br/geratabela?format=us.csv&" + \
"name=tabela1737.csv&terr=N&rank=-&query=" + \
"t/1737/n1/all/v/2266/p/all/d/v2266%2013/l/t%2Bv,,p"
df = (
pd.read_csv(url, skiprows=4, skipfooter=13,
names=['DT_FIM_EXERC', 'IPCA'])
.assign(
IPCA=lambda x: x['IPCA'] / x['IPCA'].iloc[-1],
DT_FIM_EXERC=lambda x: pd.to_datetime(
x['DT_FIM_EXERC'],
format="%B %Y"
)
)
.set_index('DT_FIM_EXERC')
.resample('Q').last()
.reset_index()
)
return df
def bcb_sgs(beg_date, end_date, **kwargs):
return pd.concat([
pd.read_json(f"http://api.bcb.gov.br/dados/serie/bcdata.sgs.{v}" +
f"/dados?formato=json&dataInicial={beg_date}&" +
f"dataFinal={end_date}",
convert_dates=False)
.assign(DT_FIM_EXERC=lambda x: pd.to_datetime(x.data, dayfirst=True))
.set_index('DT_FIM_EXERC')
.rename(columns={'valor': k}) for k, v in kwargs.items()
], axis=1)
def get_usd():
df = (
bcb_sgs('01/01/1996', '31/03/2020', USD=3697)
.resample('Q')
['USD']
.agg(['last', 'mean'])
.reset_index()
.rename(columns={
'last': 'USD_EOP',
'mean': 'USD_AVG'
})
)
return df
def last_friday():
current_time = datetime.datetime.now()
return str(
current_time.date()
- datetime.timedelta(days=current_time.weekday())
+ datetime.timedelta(days=4, weeks=-1))
def get_focus_quarterly(column='PIB Total', date=None):
if date is None:
date = last_friday()
url = \
"https://olinda.bcb.gov.br/olinda/servico/Expectativas/versao/v1/" + \
"odata/ExpectativasMercadoTrimestrais?$top=100&$format=text/csv" + \
"&$filter=Indicador%20eq%20'" + urllib.parse.quote(column) + \
"'%20and%20Data%20eq%20'" + date + "'"
dfq = (
pd.read_csv(url, decimal=',')
[['DataReferencia', 'Media', 'Mediana', 'Minimo', 'Maximo']]
.assign(
DataReferencia=lambda x: pd.to_datetime({
'year': x['DataReferencia'].str[-4:],
'month': x['DataReferencia'].str[0].astype(int) * 3,
'day': 1
})
)
.set_index("DataReferencia")
.resample('Q').last()
)
return dfq
def get_focus_monthly(column="IPCA", date=None):
if date is None:
date = last_friday()
url = \
"https://olinda.bcb.gov.br/olinda/servico/Expectativas/versao/v1/" + \
"odata/ExpectativaMercadoMensais?$top=100&$format=text/csv" + \
"&$filter=(Indicador%20eq%20'" + urllib.parse.quote(column) + \
"')%20and%20" + \
"Data%20eq%20'" + date + "'%20and%20baseCalculo%20eq%200"
dfm = (
pd.read_csv(url, decimal=',')
[['DataReferencia', 'Media', 'Mediana', 'Minimo', 'Maximo']]
.assign(
DataReferencia=lambda x: pd.to_datetime({
'year': x['DataReferencia'].str[-4:],
'month': x['DataReferencia'].str[:2],
'day': 1
})
)
.set_index('DataReferencia')
.resample('M').last()
)
return dfm
def get_focus_yearly(column="IPCA", date=None):
if date is None:
date = last_friday()
url = \
"https://olinda.bcb.gov.br/olinda/servico/Expectativas/versao/v1/" + \
"odata/ExpectativasMercadoAnuais?$top=100&$format=text/csv" + \
"&$filter=Indicador%20eq%20'" + urllib.parse.quote(column) + \
"'%20and%20Data%20eq%20'" + date + "'"
dfy = (
pd.read_csv(url, decimal=",")
[['DataReferencia', 'Media', 'Mediana', 'Minimo',
'Maximo']]
.assign(
DataReferencia=lambda x: pd.to_datetime({
'year': x['DataReferencia'],
'month': 12,
'day': 1
})
)
.set_index('DataReferencia')
.resample('Y').last()
)
return dfy
def get_focus(historicals, date=None):
if date is None:
date = last_friday()
pd.set_option('display.max_rows', None)
##############################################################
#----------------------Libraries------------------------------
##############################################################
# The tkinter libraries are used to build the views
# The pandas library is used to generate Excel files
from tkinter import *  # import the interface
from tkinter import messagebox  # pop-up messages
from tkinter import filedialog  # save files
from tkinter.filedialog import asksaveasfile, askdirectory  # download files
import tkinter as tk  # start the interface
from PIL import ImageTk, Image
import pandas as pd  # generate Excel files
import numpy as np  # matrix library
import xlsxwriter  # write Excel files
import math  # library for mathematical functions
import os  # access the main functions of the PC - browse the PC's files
from shutil import copyfile  # copy and move files on the PC
##############################################################
#----------------------Global variables-----------------------
##############################################################
##############################################################
#----------------------Functions------------------------------
##############################################################
# The guardar (save) function generates an Excel file where the results are shown together with the formulas
def guardar():
#----------------------global variables------------------------------
global a
file_excel=asksaveasfile(defaultextension=".xlsx", initialfile="Resultados.xlsx", title="Guardar",)
dic={}
lista=[]
for i in range(20):
lista.append(i)
diametro_2=round(pulgadas,0)
angulo=float(a.get())
l0=["","","","","","","","","","","","","","",""]
l1=["","","Parte I:","","","","","","","","","","","",""]
l2=["","","DIÁMETRO DE TUBO","","DIAMETRO DE TUBO","","Area=","Diámetro=","","LONG. DE TRANSICION","","T1=","T2=","Lt1=","Lt2="]
l3=["","","Y LONG. DE ","","","",f"{area} m2",f"{diametro} m","","","",T1,T2,LT,LT2]
l4=["","","TRANSICIÓN","","","","",f"» {pulgadas} plg ≈ {diametro_2} plg","","","","» b+(2yz)","Diametro (m)","Ang°","4*Diametro"]
l5=["------------------------------------","","Parte II:","","","","","","","",""]
l6=["------------------------------------","","COTAS","","NIVEL DE AGUA","","Cota de fondo 1=","Nivel Agua 1=","","COTA DE FONDO 2",""]
l7=["------------------------------------","","","","","",f" {cota_fondo_1} m.s.n.m",f" {nivel_agua_1} m.s.n.m","","", ""]
l8=["------------------------------------","","","","","","» Cota inicial-(Long e*Escala)","» Cota1 + y","","",""]
l9=["","","","","","","",""]
l10=["v1=","vt=","hte=","1.5Hv(1)=","1.5Hv(2)=","1.5Hv=","Hv=","Cota de fondo 2="]
l11=[v1,vt,hte,hv1,hv2,hv15,hv,f" {cota_fondo_2} m.s.n.m"]
l12=["» Q/((T2^2)*Pi/4)","» (y*z+b)*y/2","» T2/cos(Angº)","» (v1^2)/(2*g)","» (vt^2)/(2*g)","» 1.5Hv(1) - 1.5Hv(2)","» 1.5Hv * 1.5","» Nivel Agua 1 -(Hte + Hv)"]
l13=["","","","","","","","","","","","","","","",""]
l14=["","COTA DE FONDO 3","","h=","Cota de fondo 3=","","COTA DE FONDO 4","","h4=","Cota de fondo 4=","","COTA DE FONDO 5","","h=","Cota de fondo 5=",""]
l15=["","","",h,f" {cota_fondo_3} m.s.n.m","","","",h4,f" {cota_fondo_4} m.s.n.m","","","",h5, f" {cota_fondo_5} m.s.n.m",""]
l16=["","","","» Angº *5","» Cota2 - h","","","","» Lth* Escala","» Cota3 - h4","","","","» Angº *4","» Cota4 + h",""]
l17=["------------------------------------","","Parte III:","","","","","","","","","","","","",""]
l18=["------------------------------------","","VALOR P,","","VALOR P","","P de entrada=","P de salida=","Valor P=","","INCLINACION DE LOS","","Entrada x=","Entrada y=","Inclinacion=","","Salida x="]
l19=["------------------------------------","","CARGA HIDRÁULICA","","","",p_entrada,p_salida,p,"","TUBOS DOBLADOS","",x_entrada,y_entrada,inclinacion_entrada,"",x_salida]
l20=["------------------------------------","","Y PERDIDAS DE CARGA","","","","» 3/4 T2","» 1/2 T2","» Cota Final - Cota5","","","","» h(cota3)/Tang(Angº)","» h(cota3)","» Entrada x/Entrada Y","","» h(cota5)/Tang(Angº)"]
l21=["","","","","","","","","","","","","","","","","",""]
l22=["Salida y=","Inclinacion=","","CARGA HIDRÁULICA","","Cota 1 + tirante=","Cota 6 + tirante=","Carga disponible=","","CALCULO DE","","Entrada=","Salida=","Friccion=","Codos=","PERDIDA TOTAL=","10% Seguridad="]
l23=[y_salida,inclinacion_salida,"","DISPONIBLE","",f" {c1_t} m.s.n.m",f" {c6_t} m.s.n.m",f" {carga} m","","LAS PERDIDAS","",entrada,salida,friccion,codos,perdida,porcentaje]
l24=["» h(cota5)","» Salida x/Salida Y","","","","» Cota1 +y","» Cota Final +y (2)","» (1)-(2)","","DE CARGA","","» P entrada *0.0938","» P Salida *0.0938","» 0.025*(longitud/Diametro)*1.5Hv(1)","» 2*(0.25*(Angº^(1/2))/(90º *1.5Hv(1))","»Entrada+Salida+Friccion+Codo","» Perdida*(1+10%)"]
l25=["","","","","","",""]
l26=["","∆ Cota 1 y Cota 2=","Altura de sumergencia=","","LONGITUD DE","","Lp="]
l27=["",cotas,altura,"","PROTECCION CON","",longitud]
l28=["","» Cota1-Cota2","»y+(∆c1 y c2)-hte","","ENRROCADO","","»3*T2"]
dic['']=l1 + l5 +l9 +l13+l17+l21+l25
dic['Diseño ']=l2+l6+l10+l14+l18+l22+l26
dic['hidráulico ']=l3+l7+l11+l15+l19+l23+l27
dic['del sifón']=l4+l8+l12+l16+l20+l24+l28
df1=pd.DataFrame(dic)
columnas=len(df1.columns)
# here the Excel file is generated with the format we define
writer = pd.ExcelWriter(file_excel.name, engine='xlsxwriter')
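# --- Hedged, standalone sketch (added for clarity; not part of the original script) ---
# Shows the dict-of-columns -> DataFrame -> Excel pattern used by guardar(): each
# dict key becomes a column header and xlsxwriter writes the sheet.
# 'Resultados_demo.xlsx' and the demo columns are hypothetical, for this sketch only.
def _excel_export_example():
    import pandas as pd
    demo = {'Diseño ': ['a', 'b'], 'hidráulico ': [1, 2]}
    df_demo = pd.DataFrame(demo)
    with pd.ExcelWriter('Resultados_demo.xlsx', engine='xlsxwriter') as writer_demo:
        df_demo.to_excel(writer_demo, sheet_name='Resultados', index=False)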
import argparse
import os
import pandas as pd
import azureml.train.automl.runtime._hts.hts_runtime_utilities as hru
from azureml.core import Run
from azureml.core.dataset import Dataset
# Parse the arguments.
args = {
"step_size": "--step-size",
"step_number": "--step-number",
"time_column_name": "--time-column-name",
"time_series_id_column_names": "--time-series-id-column-names",
"out_dir": "--output-dir",
}
parser = argparse.ArgumentParser("Parsing input arguments.")
for argname, arg in args.items():
parser.add_argument(arg, dest=argname, required=True)
parsed_args, _ = parser.parse_known_args()
step_number = int(parsed_args.step_number)
step_size = int(parsed_args.step_size)
# Create the working directory to store the temporary csv files.
working_dir = parsed_args.out_dir
os.makedirs(working_dir, exist_ok=True)
# Set input and output
script_run = Run.get_context()
input_dataset = script_run.input_datasets["training_data"]
X_train = input_dataset.to_pandas_dataframe()
# Split the data.
for i in range(step_number):
file_name = os.path.join(working_dir, "backtest_{}.csv".format(i))
if parsed_args.time_series_id_column_names:
dfs = []
for _, one_series in X_train.groupby([parsed_args.time_series_id_column_names]):
one_series = one_series.sort_values(
by=[parsed_args.time_column_name], inplace=False
)
dfs.append(one_series.iloc[: len(one_series) - step_size * i])
pd.concat(dfs, sort=False, ignore_index=True)
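# --- Hedged, standalone sketch (added for clarity; not part of the original script) ---
# Illustrates the backtest split above: for step i, each series keeps all but its
# last step_size * i rows, producing progressively shorter training sets.
def _backtest_split_example():
    import pandas as pd
    series = pd.DataFrame({"date": pd.date_range("2021-01-01", periods=6, freq="D"),
                           "y": range(6)})
    step_size, step_number = 2, 3
    folds = [series.iloc[: len(series) - step_size * i] for i in range(step_number)]
    return [len(f) for f in folds]  # -> [6, 4, 2]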
import re
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers import (
CategoricalTransformer, LabelEncodingTransformer, OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test__get_intervals(self):
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert result[0] == expected_intervals
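# Hedged note (added for clarity; not part of the original tests): the expected
# intervals follow from the category shares in the 6-element series. 'foo' covers
# 3/6 = 0.5 of the data -> interval [0, 0.5], mean 0.25, std 0.5/6; 'bar' covers
# 2/6 -> [0.5, 0.8333...], mean 0.6666..., std (2/6)/6; 'tar' covers 1/6 ->
# [0.8333..., ~1.0], mean 0.9166..., std (1/6)/6.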
def test_fit(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = np.array(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer.fit(data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert transformer.intervals == expected_intervals
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.fuzzy = False
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.25
@patch('scipy.stats.norm.rvs')
def test__get_value_fuzzy(self, rvs_mock):
# setup
rvs_mock.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test_reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = np.array(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer.fit(data)
result = transformer.reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows
Output:
- the output of `_transform_by_category`
Side effects:
- `_transform_by_category` will be called once
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer.transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
"""Test the `_transform_by_category` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 5 rows
Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
def test__transform_by_row_called(self):
"""Test that the `_transform_by_row` method is called.
When the number of rows is less than or equal to the number of categories,
expect that the `_transform_by_row` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 4 rows
Output:
- the output of `_transform_by_row`
Side effects:
- `_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer.transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
"""Test the `_transform_by_row` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 4 rows
Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_row(data)
# Asserts
expected = np.array([0.875, 0.625, 0.375, 0.125])
assert (transformed == expected).all()
@patch('psutil.virtual_memory')
def test__reverse_transfrom_by_matrix_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_matrix` method is called.
When there is enough virtual memory, expect that the
`_reverse_transform_by_matrix` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_matrix`
Side effects:
- `_reverse_transform_by_matrix` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
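# Hedged note (added for clarity; not part of the original tests): 4 * 4 * 8 * 3 + 1
# appears to model 4 rows x 4 categories x 8 bytes x 3 intermediate arrays, plus one
# extra byte so the available memory sits just above the matrix method's estimated need.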
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
def test__reverse_transfrom_by_matrix(self, psutil_mock):
"""Test the _reverse_transform_by_matrix method with numerical data
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories and means. Also patch
the `psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform_by_matrix(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_category` method is called.
When there is not enough virtual memory and the number of rows is greater than the
number of categories, expect that the `_reverse_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 5 rows
Output:
- the output of `_reverse_transform_by_category`
Side effects:
- `_reverse_transform_by_category` will be called once
"""
# Setup
transform_data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = transform_data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(
categorical_transformer_mock, transform_data)
# Asserts
categorical_transformer_mock._reverse_transform_by_category.assert_called_once_with(
transform_data)
assert reverse == categorical_transformer_mock._reverse_transform_by_category.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category(self, psutil_mock):
"""Test the _reverse_transform_by_category method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 5 rows
Output:
- the original data
"""
data = pd.Series([1, 3, 3, 2, 1])
transformed = pd.Series([0.875, 0.375, 0.375, 0.625, 0.875])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
reverse = transformer._reverse_transform_by_category(transformed)
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_row` method is called.
When there is not enough virtual memory and the number of rows is less than or equal
to the number of categories, expect that the `_reverse_transform_by_row` method
is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_row`
Side effects:
- `_reverse_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock.starts = pd.DataFrame(
[0., 0.25, 0.5, 0.75], index=[4, 3, 2, 1], columns=['category'])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_row.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_row.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row(self, psutil_mock):
"""Test the _reverse_transform_by_row method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means, starts,
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.starts = pd.DataFrame(
[4, 3, 2, 1], index=[0., 0.25, 0.5, 0.75], columns=['category'])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer.reverse_transform(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
class TestOneHotEncodingTransformer:
def test__prepare_data_empty_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[], [], []]
# Assert
with pytest.raises(ValueError):
ohet._prepare_data(data)
def test__prepare_data_nested_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[[]]]
# Assert
with pytest.raises(ValueError):
ohet._prepare_data(data)
def test__prepare_data_list_of_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = [['a'], ['b'], ['c']]
out = ohet._prepare_data(data)
# Assert
expected = np.array(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test__prepare_data_pandas_series(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
out = ohet._prepare_data(data)
# Assert
expected = pd.Series(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test_fit_no_nans(self):
"""Test the ``fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be activated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b', 'c'])
np.testing.assert_array_equal(ohet.decoder, ['a', 'b', 'c'])
assert ohet.dummy_encoded
assert not ohet.dummy_na
def test_fit_no_nans_numeric(self):
"""Test the ``fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, 3])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, [1, 2, 3])
np.testing.assert_array_equal(ohet.decoder, [1, 2, 3])
assert not ohet.dummy_encoded
assert not ohet.dummy_na
def test_fit_nans(self):
"""Test the ``fit`` method with nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
and NA should be activated.
Input:
- Series containing nan values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', None])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b'])
np.testing.assert_array_equal(ohet.decoder, ['a', 'b', np.nan])
assert ohet.dummy_encoded
assert ohet.dummy_na
def test_fit_nans_numeric(self):
"""Test the ``fit`` method with nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated and NA activated.
Input:
- Series containing nan values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, np.nan])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, [1, 2])
np.testing.assert_array_equal(ohet.decoder, [1, 2, np.nan])
assert not ohet.dummy_encoded
assert ohet.dummy_na
def test_fit_single(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'a', 'a'])
ohet.fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a'])
def test__transform_no_nan(self):
"""Test the ``_transform`` method without nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation.
Input:
- Series with values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet.dummies = ['a', 'b', 'c']
ohet.num_dummies = 3
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_no_nan_categorical(self):
"""Test the ``_transform`` method without nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch.
Input:
- Series with categorical values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet.dummies = ['a', 'b', 'c']
ohet.indexer = [0, 1, 2]
ohet.num_dummies = 3
ohet.dummy_encoded = True
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans(self):
"""Test the ``_transform`` method with nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation. Null
values should be represented by the same encoding.
Input:
- Series with values containing nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([np.nan, None, 'a', 'b'])
ohet.dummies = ['a', 'b']
ohet.dummy_na = True
ohet.num_dummies = 2
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans_categorical(self):
"""Test the ``_transform`` method with nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation using
the categorical branch. Null values should be
represented by the same encoding.
Input:
- Series with categorical values containing nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([np.nan, None, 'a', 'b'])
ohet.dummies = ['a', 'b']
ohet.indexer = [0, 1]
ohet.dummy_na = True
ohet.num_dummies = 2
ohet.dummy_encoded = True
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_single(self):
"""Test the ``_transform`` with one category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
where it should be a single column.
Input:
- Series with a single category
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet.dummies = ['a']
ohet.num_dummies = 1
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_single_categorical(self):
"""Test the ``_transform`` with one category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch where it should
be a single column.
Input:
- Series with a single category
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet.dummies = ['a']
ohet.indexer = [0]
ohet.num_dummies = 1
ohet.dummy_encoded = True
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_zeros(self):
"""Test the ``_transform`` with unknown category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
where it should be a column of zeros.
Input:
- Series with unknown values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
pd.Series(['a'])
ohet.dummies = ['a']
ohet.num_dummies = 1
# Run
out = ohet._transform(pd.Series(['b', 'b', 'b']))
# Assert
expected = np.array([
[0],
[0],
[0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_zeros_categorical(self):
"""Test the ``_transform`` with unknown category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch where it should
be a column of zeros.
Input:
- Series with categorical and unknown values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
pd.Series(['a'])
ohet.dummies = ['a']
ohet.indexer = [0]
ohet.num_dummies = 1
ohet.dummy_encoded = True
# Run
out = ohet._transform(pd.Series(['b', 'b', 'b']))
# Assert
expected = np.array([
[0],
[0],
[0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_unknown_nan(self):
"""Test the ``_transform`` with unknown and nans.
This is an edge case for ``_transform`` where
unknowns should be zeros and nans should be
the last entry in the column.
Input:
- Series with unknown and nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
pd.Series(['a'])
ohet.dummies = ['a']
ohet.dummy_na = True
ohet.num_dummies = 1
# Run
out = ohet._transform(pd.Series(['b', 'b', np.nan]))
# Assert
expected = np.array([
[0, 0],
[0, 0],
[0, 1]
])
np.testing.assert_array_equal(out, expected)
def test_transform_no_nans(self):
"""Test the ``transform`` without nans.
In this test ``transform`` should return an identity
matrix representing each item in the input.
Input:
- Series with categorical values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet.fit(data)
# Run
out = ohet.transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test_transform_nans(self):
"""Test the ``transform`` with nans.
In this test ``transform`` should return an identity matrix
representing each item in the input as well as nans.
Input:
- Series with categorical values and nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', None])
ohet.fit(data)
# Run
out = ohet.transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test_transform_single(self):
"""Test the ``transform`` on a single category.
In this test ``transform`` should return a column
filled with ones.
Input:
- Series with a single categorical value
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet.fit(data)
# Run
out = ohet.transform(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test_transform_unknown(self):
"""Test the ``transform`` with unknown data.
In this test ``transform`` should raise an error
due to the attempt of transforming data with previously
unseen categories.
Input:
- Series with unknown categorical values
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a'])
ohet.fit(data)
# Assert
with np.testing.assert_raises(ValueError):
ohet.transform(['b'])
def test_transform_numeric(self):
"""Test the ``transform`` on numeric input.
In this test ``transform`` should return a matrix
representing each item in the input as one-hot encodings.
Input:
- Series with numeric input
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([1, 2])
ohet.fit(data)
expected = np.array([
[1, 0],
[0, 1],
])
# Run
out = ohet.transform(data)
# Assert
assert not ohet.dummy_encoded
np.testing.assert_array_equal(out, expected)
def test_reverse_transform_no_nans(self):
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet.fit(data)
# Run
transformed = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
out = ohet.reverse_transform(transformed)
# Assert
expected = pd.Series(['a', 'b', 'c'])
pd.testing.assert_series_equal(out, expected)
def test_reverse_transform_nans(self):
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', None])
ohet.fit(data)
# Run
transformed = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
out = ohet.reverse_transform(transformed)
# Assert
expected = pd.Series(['a', 'b', None])
from dataclasses import dataclass
import pandas as pd
import streamlit as st
from streamlit_drawable_canvas import st_canvas
from helpers import (
download_data,
visualize_pitch,
get_field_lines,
get_converted_positional_data,
VoronoiPitch,
PitchDraw,
)
from pitch import FootballPitch
tags = {
"Direct opponent of pass sender @ Pre pass": "#00fff1",
"Intended pass receiver @ Pre pass": "#00ffff2",
"Interception candidate @ Pre pass": "#00fff3",
"Ball @ Start pass": "#a52a2a",
"Direct opponent of pass sender @ Start pass": "#a52a2b",
"Intended pass receiver @ Start pass": "#a52a2c",
"Interception candidate @ Start pass": "#a52a2d",
"Body orientation visual line @ Start pass": "#a52a2e",
"Body orientation point 1 @ Start pass": "#FFFFF5",
"Body orientation point 2 @ Start pass": "#FFFFF6",
"Ball @ End pass": "#FFFFF1",
"Intended Pass receiver @ End pass": "#FFFFF3",
"Interception candidate @ End pass": "#FFFFF4",
"Hypothetical pass end location @ End pass": "#FFFFF2",
}
columns_of_interest = [
"team",
"x",
"y",
"player_name",
"pass_duration",
"player_role",
"situation_id",
"facing_passing_line",
"nationality",
]
st.set_option("deprecation.showfileUploaderEncoding", False)
st.beta_set_page_config(page_title="BirdsPyView", layout="wide")
@dataclass
class SessionState:
positional_data = pd.DataFrame(columns=columns_of_interest)
@st.cache(allow_output_mutation=True)
def fetch_session():
session = SessionState()
return session
session = fetch_session()
st.title("Upload Image or Video")
uploaded_file = st.file_uploader(
"Select Image file to open:", type=["png", "jpg", "mp4"]
)
pitch = FootballPitch()
if uploaded_file:
snapshot = visualize_pitch(uploaded_file, pitch)
st.title("Pitch lines")
lines_expander = st.beta_expander(
"Draw pitch lines on selected image (2 horizontal lines, then 2 vertical lines)",
expanded=True,
)
with lines_expander:
col1, col2, col_, col3 = st.beta_columns([2, 1, 0.5, 1])
canvas_image, hlines, vlines = get_field_lines(
pitch, snapshot, col1, col2, col3
)
if canvas_image.json_data is not None:
n_lines = len(canvas_image.json_data["objects"])
with col3:
st.write(
f"You have drawn {n_lines} lines. Use the Undo button to delete lines."
)
if n_lines >= 4:
snapshot.set_info(
pd.json_normalize(canvas_image.json_data["objects"]), hlines + vlines
)
with lines_expander:
st.write("Converted image:")
st.image(snapshot.conv_im)
st.title("Annotate positional data")
st.write(
"Draw rectangle over players on image. "
+ "The player location is assumed to the middle of the base of the rectangle."
)
p_col1, p_col2, p_col3 = st.beta_columns([2, 1, 1])
with p_col2:
team_color = st.selectbox(
"Select Player to annotate position: ", list(tags.keys())
)
stroke_color = tags[team_color]
is_facing_the_passingline = st.checkbox(
"Is the pass interception candidate facing the passing line?",
value=True,
)
original = True # st.checkbox('Select on original image', value=True)
situation_id = st.text_input("Situation ID (e.g. 1)", value="1")
player_name = st.text_input(
"Interception candidate player name", value="NaN"
)
player_role = st.selectbox(
"Interception candidate role",
[
"Direct opponent of pass sender",
"Direct opponent of pass receiver",
"any other",
],
index=0,
)
nationality = st.selectbox(
"Nationality of the interception candidate", ["NL", "BIH", "ITA"]
)
pass_duration = st.text_input(
"Pass duration in seconds and fraction of second (e.g. 0.50 for a 500ms pass)",
max_chars=4,
value="0.50",
)
if 0 < len(pass_duration) < 3:
st.warning(
"Pass duration has to be indicated with the format S.FF (e.g. 0.50 s)"
)
update = st.button("Update data")
if team_color == "Body orientation visual line @ Start pass":
body_orientation_lines = True
else:
body_orientation_lines = False
image2 = snapshot.get_image(original)
height2 = image2.height
width2 = image2.width
with p_col1:
canvas_converted = st_canvas(
fill_color="rgba(255, 165, 0, 0.3)",
stroke_width=2,
stroke_color=stroke_color,
background_image=image2,
drawing_mode="line" if body_orientation_lines else "rect",
update_streamlit=update,
height=height2,
width=width2,
key="canvas2",
)
if canvas_converted.json_data is not None:
if len(canvas_converted.json_data["objects"]) > 0:
dfCoords = get_converted_positional_data(
tags, snapshot, original, canvas_converted
)
# Add metadata to dataframe
dfCoords["situation_id"] = situation_id
dfCoords["pass_duration"] = pass_duration
dfCoords["player_name"] = player_name
dfCoords["pass_duration"] = pass_duration
dfCoords["player_role"] = player_role
dfCoords["facing_passing_line"] = is_facing_the_passingline
dfCoords["nationality"] = nationality
session.positional_data = pd.concat(
[session.positional_data, dfCoords[columns_of_interest]],
axis=0,
)
session.positional_data.drop_duplicates(
keep="last",
ignore_index=True,
inplace=True,
)
session.positional_data.drop_duplicates(
keep="first", ignore_index=True, inplace=True
)
st.title("Overlay of positional data of current frame")
voronoi = VoronoiPitch(dfCoords)
sensitivity = 10
player_circle_size = 2
player_opacity = 100
draw = PitchDraw(snapshot, original=True)
for pid, coord in dfCoords.iterrows():
draw.draw_circle(
coord[["x", "y"]].values,
"black",
player_circle_size,
player_opacity,
)
st.image(draw.compose_image(sensitivity))
def empty_uploaded_cache():
global uploaded_file
uploaded_file = None
if "dfCoords" in globals():
st.title("Inspect raw dataframe")
st.dataframe(session.positional_data)
st.title("Downloda data")
download_data(session.positional_data)
if st.button("Clear all cached data"):
session.positional_data =
|
pd.DataFrame(columns=session.positional_data.columns)
|
pandas.DataFrame
|
import model_age as mdl
import pathos.pools as pp
from pathlib import Path
import varcontrol
import time
import pandas as pd
import matplotlib.pyplot as plt
def prepare_jobs(step_size,policy_,output_lst,out_path):
# WARNING: policy_ is now a dict, not a dataframe
steps_day = step_size
initial_time = 90
# writing the files for different measures and different durations of the measures
    # sd: social distancing, sq: self quarantine, sqd: social distancing AND self quarantine
# code by <NAME> and <NAME>
job_list = []
for measure_ in ['sd', 'sq', 'sqd']:
for start_day in range(initial_time, 270+steps_day, steps_day):
for duration in range(steps_day, 180+steps_day, steps_day):
end_day = start_day + duration
if end_day <= 270:
if measure_ == 'sd':
for group in varcontrol.age_groups:
policy_['self quarantine policy SWITCH self %s' % group] = 0
policy_['social distancing policy SWITCH self %s' % group] = 1
policy_['social distancing start %s' % group] = start_day
policy_['social distancing end %s' % group] = end_day
elif measure_ == 'sq':
for group in varcontrol.age_groups:
policy_['self quarantine policy SWITCH self %s' % group] = 1
policy_['social distancing policy SWITCH self %s' % group] = 0
policy_['self quarantine start %s' % group] = start_day
policy_['self quarantine end %s' % group] = end_day
else:
for group in varcontrol.age_groups:
policy_['self quarantine policy SWITCH self %s' % group] = 1
policy_['social distancing policy SWITCH self %s' % group] = 1
policy_['social distancing start %s' % group] = start_day
policy_['social distancing end %s' % group] = end_day
policy_['self quarantine start %s' % group] = start_day
policy_['self quarantine end %s' % group] = end_day
# we should write the policy file into a more readable version
# getting the names of the variables could also be hardcoded but if we expand this, this is going to be easier
row_names = list(policy_.keys())
row_names = [name.rsplit(' ',1)[0] for name in row_names]
row_names = list(set(row_names))
pol_df = pd.DataFrame(index=row_names,columns=varcontrol.age_groups)
for key,val in policy_.items():
row,col = key.rsplit(' ',1)
                        pol_df.loc[row, col] = val
pol_df.to_csv(out_path / 'policy_{0}_{1}_{2}.csv'.format(measure_, start_day-initial_time, end_day-initial_time))
item = (policy_.copy(),(measure_,start_day-initial_time,end_day-initial_time),output_lst)
job_list.append(item)
print('number of jobs:', len(job_list))
return job_list
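# A minimal sketch of the policy-dict pivot performed above, using two
# hypothetical age groups ('0-29', '30-59') in place of varcontrol.age_groups:
#
#   policy = {'social distancing start 0-29': 90,
#             'social distancing start 30-59': 90,
#             'social distancing end 0-29': 150,
#             'social distancing end 30-59': 150}
#   rows = sorted({k.rsplit(' ', 1)[0] for k in policy})
#   pol_df = pd.DataFrame(index=rows, columns=['0-29', '30-59'])
#   for key, val in policy.items():
#       row, col = key.rsplit(' ', 1)
#       pol_df.loc[row, col] = val
#   # pol_df now has one row per policy variable and one column per age group.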
def worker(args):
from pathlib import Path
import model_age as mdl
model = mdl.setup_model()[0]
results_out_path = Path.cwd() / 'full_results_output'
pol_out = mdl.run_policy(model,args[0],args[2])
pol_out = pol_out.reset_index(drop=True)
pol_out.to_csv(results_out_path / 'results_{0}_{1}_{2}.csv'.format(args[1][0], args[1][1], args[1][2]))
print('Result: {0}_{1}_{2}'.format(args[1][0], args[1][1], args[1][2]))
def check_result(path):
"""
from pathlib import Path
results_out_path = Path.cwd() / 'full_results_output'
import create_output_for_web
create_output_for_web.check_result(results_out_path)
"""
file_list = list(path.glob('*'))[:20]
sum_df = pd.DataFrame()
for file in file_list:
df =
|
pd.read_csv(file,index_col=0)
|
pandas.read_csv
|
import argparse
import pandas as pd
import torch
import configure as conf
parser = argparse.ArgumentParser()
parser.add_argument('model', choices=['GATENet', 'LSTMNet'])
parser.add_argument('pool', choices=['GlobalAddPool', 'NodeAttentionPool', 'StructureAttentionPool'])
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--ckpt', type=str, default="")
parser.add_argument('--fold', type=int, default=-1)
args = parser.parse_args()
class Inferencer(torch.nn.Module):
def __init__(self, gnn, pool):
super(Inferencer, self).__init__()
self.gnn = gnn
self.pool = pool
def forward(self, batch):
nembed = self.gnn(batch)
return nembed
conf.fold = args.fold if args.fold >= 0 else None
conf.setup_dataset()
gnn, pool, _ = conf.setup_gnn_logger(args.model, args.pool, log=False)
model = Inferencer(gnn, pool).to(args.device)
if args.ckpt:
ckpt = torch.load(args.ckpt, map_location=args.device)
model.load_state_dict(ckpt['state_dict'])
metrics = conf.dset[2].evaluate_val(model, args.device, 'euclidean', 'plain').T
eval_list = conf.dset[2].names
pos_pair = conf.dset[2].pos_pair
pdblst = [i for i in eval_list if i in pos_pair]
print(len(pdblst), metrics.shape)
k = [1, 5, 10, 20, 50, 100]
roc, prc = metrics[0], metrics[1]
hitk = metrics[2:8]
d1 = {f'hit@top{n}': h for n, h in zip(k, hitk)}
prec = metrics[8:14]
d2 = {f'prec@top{n}': p for n, p in zip(k, prec)}
recall = metrics[14:20]
d3 = {f'recall@top{n}': r for n, r in zip(k, recall)}
print(len(pdblst), len(roc), len(prc))
df =
|
pd.DataFrame({'name': pdblst, 'auroc': roc, 'auprc': prc, **d1, **d2, **d3})
|
pandas.DataFrame
|
#import AYS_Environment as ays_env
import c_global.cG_LAGTPKS_Environment as c_global
import numpy as np
import pandas as pd
import sys,os
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredText
pars=dict( Sigma = 1.5 * 1e8,
Cstar=5500,
a0=0.03,
aT=3.2*1e3,
l0=26.4,
lT=1.1*1e6,
delta=0.01,
m=1.5,
g=0.02,
p=0.04,
Wp=2000,
q0=20,
qP=0.,
b=5.4*1e-7,
yE=120,
wL=0.,
eB=4*1e10,
eF=4*1e10,
i=0.25,
k0=0.1,
aY=0.,
aB=1.5e4,
aF=2.7e5,
aR=9e-15,
sS=1./50.,
sR=1.,
ren_sub=.5,
carbon_tax=.5 ,
i_DG=0.1,
L0=0.3*2480
)
ics=dict( L=2480.,
A=830.0,
G=1125,
T=5.053333333333333e-6,
P=6e9,
K=5e13,
S=5e11
)
dt=1
reward_type='PB'
my_Env=c_global.cG_LAGTPKS_Environment(dt=dt,pars=pars, reward_type=reward_type, ics=ics, plot_progress=True)
def read_trajectories(learner_type, reward_type, basin, policy='epsilon_greedy', episode=0):
runs=[]
# 0_path_[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]_episode0 limit=150
limit=150
parameters=['time','L', 'A', 'G', 'T', 'P', 'K', 'S' , 'action' , 'Reward' ]
for i in range(limit):
file_name=('./'+learner_type+'/' + policy +'/' +reward_type + '/DQN_Path/' +
basin+ '/' + str(i)+'_path_[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]_episode' + str(episode)+'.txt')
if os.path.isfile(file_name):
            tmp_file= pd.read_csv(file_name, sep=r'\s+', header=None, names=parameters, skiprows=2, index_col=False)  # skiprows=2, since we calculate derived variables first!
runs.append(tmp_file)
# print(file_name)
# For not too many files
if len(runs) > 100:
break
print(len(runs))
return runs
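# Example call (learner type, reward type and basin taken from the defaults
# used by action_timeline below; the folder layout is assumed to exist):
#   runs = read_trajectories('ddqn_per_is_duel', 'PB', 'GREEN_FP', episode=0)
#   print(len(runs), runs[0].columns.tolist())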
def get_LAGTPKS(learning_progress):
time=learning_progress['time']
L=learning_progress['L']
A=learning_progress['A']
G=learning_progress['G']
T=learning_progress['T']
P=learning_progress['P']
K=learning_progress['K']
S=learning_progress['S']
actions= learning_progress['action']
return time, L,A,G,T,P,K,S,actions
def management_distribution_part(learning_progress_arr, savepath, start_time=0, end_time=20, only_long_times=False):
tot_my_actions=pd.DataFrame(columns=['action'])
for learning_progress in learning_progress_arr:
time, L_comp, A_comp, G_comp, T_comp, P_comp, K_comp, S_comp, actions = get_LAGTPKS(learning_progress)
end_time_simulation=time.iloc[-1]
        if only_long_times:
            if end_time_simulation <= 100:
                continue  # skip short simulations
            print(end_time_simulation)
        my_actions = pd.DataFrame(actions[start_time:end_time])
        tot_my_actions = pd.concat([tot_my_actions, my_actions]).reset_index(drop=True)
tot_my_actions=tot_my_actions.to_numpy()
d = np.diff(np.unique(tot_my_actions)).min()
left_of_first_bin = tot_my_actions.min() - float(d)/2
right_of_last_bin = tot_my_actions.max() + float(d)/2
#print(d, left_of_first_bin, right_of_last_bin)
right_of_last_bin = 7.5
fig, ax= plt.subplots(figsize=(8,5))
plt.hist(tot_my_actions, np.arange(left_of_first_bin, right_of_last_bin + d, d),density=True, edgecolor='grey',rwidth=0.9)
plt.xlabel("Action number", fontsize=15)
plt.ylabel("Probability",fontsize=15)
#plt.xlim([0,7])
box_text=''
for i in range(len(c_global.cG_LAGTPKS_Environment.management_options)):
box_text+=str(i ) + ": " + c_global.cG_LAGTPKS_Environment.management_options[i] +"\n"
at = AnchoredText(box_text, prop=dict(size=14), frameon=True,
loc='lower left', bbox_to_anchor=(1.0, .02),bbox_transform=ax.transAxes
)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
#ax.axis('off')
ax.add_artist(at)
fig.tight_layout()
fig.savefig(savepath)
@np.vectorize
def std_err_bin_coefficient(n,p):
#print(n,p)
    variance = p * (1 - p)
    return np.sqrt(variance / n)
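# Quick sanity check of the vectorized standard error sqrt(p*(1-p)/n);
# the inputs are illustrative only:
#   std_err_bin_coefficient(np.array([100, 400]), np.array([0.5, 0.5]))
#   # -> array([0.05 , 0.025])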
def action_timeline(savepath, learner_type='ddqn_per_is_duel', reward_type='PB', basin='GREEN_FP',
start_time=0, end_time=100):
time_arr= np.arange(0,end_time)
action_names=['default', 'Sub' , 'Tax','NP' , 'Sub+Tax', 'Sub+NP', 'Tax+NP', 'Sub+Tax+NP' ]
tot_average_actions=pd.DataFrame(columns=action_names)
tot_average_actions_errors=
|
pd.DataFrame(columns=action_names)
|
pandas.DataFrame
|
from qc_time_estimator.config import config
import numpy as np
np.random.seed(config.SEED)
import logging
from pprint import pprint
from time import time
from sklearn.metrics import make_scorer
from sklearn.model_selection import RandomizedSearchCV
from qc_time_estimator.pipeline import qc_time_nn
from qc_time_estimator.metrics import mape, percentile_rel_90
from qc_time_estimator.processing.data_management import load_dataset, get_train_test_split
from sklearn.model_selection import KFold
import pandas as pd
from datetime import datetime
from scipy.stats import uniform, randint, loguniform
# from sklearn.utils.fixes import loguniform
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
# Num of iter (samples/model) to try
n_iter = 300
parameters = {
'nn_model__input_dim': [22,],
'nn_model__nodes_per_layer': [(10, 5), (10, 10, 5), (10, 10, 7, 5)],
'nn_model__dropout': uniform(0, 0.3), # max 0.3 is ok
'nn_model__batch_size': [64, 128, 256, 512],
'nn_model__epochs': randint(50, 400),
'nn_model__optimizer': ['adam'],
'nn_model__learning_rate': loguniform(1e-4, 1e-1), # search in smaller nums, < 0.1
}
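# Note: loguniform is used for the learning rate so that samples are spread
# evenly across orders of magnitude rather than clustering near the upper bound.
# Illustrative draw (exact values depend on the random state):
#   loguniform(1e-4, 1e-1).rvs(size=5, random_state=0)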
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected block
    # change nrows, None means all data
data = load_dataset(file_name=config.TRAINING_DATA_FILE, nrows=None)
X_train, X_test, y_train, y_test = get_train_test_split(data, test_size=0.2)
random_search = RandomizedSearchCV(qc_time_nn,
param_distributions=parameters,
scoring={
'percentile99': make_scorer(percentile_rel_90, greater_is_better=False),
'MAPE': make_scorer(mape, greater_is_better=False),
},
refit='percentile99',
n_jobs=-1, # -2 to use all CPUs except one
return_train_score=True,
n_iter=n_iter,
                                       cv=KFold(n_splits=2, shuffle=True, random_state=0),
verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in qc_time_nn.steps])
print("parameters:")
pprint(parameters)
t0 = time()
random_search.fit(X_train, y_train)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % random_search.best_score_)
print("Best parameters set:")
best_parameters = random_search.best_params_
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
df =
|
pd.DataFrame(random_search.cv_results_)
|
pandas.DataFrame
|
""" test fancy indexing & misc """
from datetime import datetime
import re
import weakref
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.api import Float64Index
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy:
"""pure get/set item & fancy indexing"""
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
# invalid
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
result = df.loc[df.index[2:6], "bar"]
expected = Series(
[2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar"
)
tm.assert_series_equal(result, expected)
def test_setitem_ndarray_1d_2(self):
# GH5508
# dtype getting changed?
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
def test_getitem_ndarray_3d(
self, index, frame_or_series, indexer_sli, using_array_manager
):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
msgs = []
if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]:
msgs.append(r"Wrong number of dimensions. values.ndim > ndim \[3 > 1\]")
if using_array_manager:
msgs.append("Passed array should be 1-dimensional")
if frame_or_series is Series or indexer_sli is tm.iloc:
msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)")
if using_array_manager:
msgs.append("indexer should be 1-dimensional")
if indexer_sli is tm.loc or (
frame_or_series is Series and indexer_sli is tm.setitem
):
msgs.append("Cannot index with multidimensional key")
if frame_or_series is DataFrame and indexer_sli is tm.setitem:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)):
msgs.append("Data must be 1-dimensional")
if len(index) == 0 or isinstance(index, pd.MultiIndex):
msgs.append("positional indexers are out-of-bounds")
msg = "|".join(msgs)
potential_errors = (IndexError, ValueError, NotImplementedError)
with pytest.raises(potential_errors, match=msg):
idxr[nd3]
def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
if indexer_sli is tm.iloc:
err = ValueError
msg = f"Cannot set values with ndim > {obj.ndim}"
else:
err = ValueError
msg = "|".join(
[
r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
"Cannot set values with ndim > 1",
"Index data must be 1-dimensional",
"Data must be 1-dimensional",
"Array conditional must be same shape as self",
]
)
with pytest.raises(err, match=msg):
idxr[nd3] = 0
def test_getitem_ndarray_0d(self):
# GH#24924
key = np.array(0)
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[key]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
ser = Series([1, 2])
result = ser[key]
assert result == 1
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
assert df["c"].dtype == np.float64
df.loc[0, "c"] = "foo"
expected = DataFrame(
[{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [3.14, "wxyz"])
def test_setitem_dtype_upcast2(self, val):
# GH10280
df = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3),
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left = df.copy()
left.loc["a", "bar"] = val
right = DataFrame(
[[0, val, 2], [3, 4, 5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left["foo"])
assert is_integer_dtype(left["baz"])
def test_setitem_dtype_upcast3(self):
left = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3) / 10.0,
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left.loc["a", "bar"] = "wxyz"
right = DataFrame(
[[0, "wxyz", 0.2], [0.3, 0.4, 0.5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_float_dtype(left["foo"])
assert is_float_dtype(left["baz"])
def test_dups_fancy_indexing(self):
# GH 3455
df = tm.makeCustomDataframe(10, 3)
df.columns = ["a", "a", "b"]
result = df[["b", "a"]].columns
expected = Index(["b", "a", "a"])
tm.assert_index_equal(result, expected)
def test_dups_fancy_indexing_across_dtypes(self):
# across dtypes
df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa"))
df.head()
str(df)
result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]])
result.columns = list("aaaaaaa")
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
def test_dups_fancy_indexing_not_in_order(self):
# GH 3561, dups not in selected order
df = DataFrame(
{"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")},
index=["A", "A", "B", "C"],
)
rows = ["C", "B"]
expected = DataFrame(
{"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows
)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ["C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
# see GH5553, make sure we use the right indexer
rows = ["F", "G", "H", "C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
def test_dups_fancy_indexing_only_missing_label(self):
# List containing only missing label
dfnu = DataFrame(np.random.randn(5, 3), index=list("AABCD"))
with pytest.raises(
KeyError,
match=re.escape(
"\"None of [Index(['E'], dtype='object')] are in the [index]\""
),
):
dfnu.loc[["E"]]
# ToDo: check_index_type can be True after GH 11497
@pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")])
def test_dups_fancy_indexing_missing_label(self, vals):
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": vals})
with pytest.raises(KeyError, match="not in index"):
df.loc[[0, 8, 0]]
def test_dups_fancy_indexing_non_unique(self):
# non unique with non unique selector
df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"])
with pytest.raises(KeyError, match="not in index"):
df.loc[["A", "A", "E"]]
def test_dups_fancy_indexing2(self):
# GH 5835
# dups on index and missing values
df = DataFrame(np.random.randn(5, 5), columns=["A", "B", "B", "B", "A"])
with pytest.raises(KeyError, match="not in index"):
df.loc[:, ["A", "B", "C"]]
def test_dups_fancy_indexing3(self):
# GH 6504, multi-axis indexing
df = DataFrame(
np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=["a", "b"]
)
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ["a", "b"]]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_duplicate_int_indexing(self, indexer_sl):
# GH 17347
ser = Series(range(3), index=[1, 1, 3])
expected = Series(range(2), index=[1, 1])
result = indexer_sl(ser)[[1]]
tm.assert_series_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame(
{"a": {1: "aaa", 2: "bbb", 3: "ccc"}, "b": {1: 111, 2: 222, 3: 333}}
)
# this works, new column is created correctly
df["test"] = df["a"].apply(lambda x: "_" if x == "aaa" else x)
# this does not work, ie column test is not changed
idx = df["test"] == "_"
temp = df.loc[idx, "a"].apply(lambda x: "-----" if x == "aaa" else x)
df.loc[idx, "test"] = temp
assert df.iloc[0, 2] == "-----"
def test_multitype_list_index_access(self):
# GH 10610
df = DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23])
with pytest.raises(KeyError, match=re.escape("'[26, -8] not in index'")):
df[[22, 26, -8]]
assert df[21].shape[0] == df.shape[0]
def test_set_index_nan(self):
# GH 3586
df = DataFrame(
{
"PRuid": {
17: "nonQC",
18: "nonQC",
19: "nonQC",
20: "10",
21: "11",
22: "12",
23: "13",
24: "24",
25: "35",
26: "46",
27: "47",
28: "48",
29: "59",
30: "10",
},
"QC": {
17: 0.0,
18: 0.0,
19: 0.0,
20: np.nan,
21: np.nan,
22: np.nan,
23: np.nan,
24: 1.0,
25: np.nan,
26: np.nan,
27: np.nan,
28: np.nan,
29: np.nan,
30: np.nan,
},
"data": {
17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006,
},
"year": {
17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986,
},
}
).reset_index()
result = (
df.set_index(["year", "PRuid", "QC"])
.reset_index()
.reindex(columns=df.columns)
)
tm.assert_frame_equal(result, df)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame(
{
"FC": ["a", "b", "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": list(range(6)),
"col2": list(range(6, 12)),
}
)
df.iloc[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isna()
cols = ["col1", "col2"]
dft = df2 * 2
dft.iloc[3, 3] = np.nan
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": Series([0, 1, 4, 6, 8, 10]),
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
# frame on rhs
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
# coerces to float64 because values has float64 dtype
# GH 14001
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0],
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
df2 = df.copy()
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
def test_multi_assign_broadcasting_rhs(self):
# broadcasting on the rhs is required
df = DataFrame(
{
"A": [1, 2, 0, 0, 0],
"B": [0, 0, 0, 10, 11],
"C": [0, 0, 0, 10, 11],
"D": [3, 4, 5, 6, 7],
}
)
expected = df.copy()
mask = expected["A"] == 0
for col in ["A", "B"]:
expected.loc[mask, col] = df["D"]
df.loc[df["A"] == 0, ["A", "B"]] = df["D"]
tm.assert_frame_equal(df, expected)
# TODO(ArrayManager) setting single item with an iterable doesn't work yet
# in the "split" path
@td.skip_array_manager_not_yet_implemented
def test_setitem_list(self):
# GH 6043
# iloc with a list
df = DataFrame(index=[0, 1], columns=[0])
df.iloc[1, 0] = [1, 2, 3]
df.iloc[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.iloc[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
        # dtype should properly raise KeyError
df = DataFrame([1], Index([pd.Timestamp("2011-01-01")], dtype=object))
assert df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="'2011'"):
df.loc["2011", 0]
def test_string_slice_empty(self):
# GH 14424
df = DataFrame()
assert not df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="^0$"):
df.loc["2011", 0]
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame(
[["1", "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, "A"] = df.loc[:, "A"].astype(np.int64)
expected = DataFrame(
[[1, "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ["B", "C"]] = df.loc[:, ["B", "C"]].astype(np.int64)
expected = DataFrame(
[["1", 2, 3, ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
def test_astype_assignment_full_replacements(self):
# full replacements / no nans
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.iloc[:, 0] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.loc[:, "A"] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.getitem, tm.loc])
def test_index_type_coercion(self, indexer):
# GH 11836
# if we have an index type and set it with something that looks
        # to numpy like the same, but is actually not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:
assert s.index.is_integer()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
for s in [Series(range(5), index=np.arange(5.0))]:
assert s.index.is_floating()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
class TestMisc:
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df["a"] = 10
expected = DataFrame({0.0: df[0.0], 1.0: df[1.0], "a": [10] * 10})
tm.assert_frame_equal(expected, df)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df.loc[df.index[:2]] = 1
expected = DataFrame({"a": [1, 1, 3], "b": [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
def test_loc_setitem_fullindex_views(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right_loc, right_iloc):
# label, index, slice
lbl_one, idx_one, slice_one = list("bcd"), [1, 2, 3], slice(1, 4)
lbl_two, idx_two, slice_two = ["joe", "jolie"], [1, 2], slice(1, 3)
left = df.copy()
left.loc[lbl_one, lbl_two] = rhs
tm.assert_frame_equal(left, right_loc)
left = df.copy()
left.iloc[idx_one, idx_two] = rhs
tm.assert_frame_equal(left, right_iloc)
left = df.copy()
left.iloc[slice_one, slice_two] = rhs
tm.assert_frame_equal(left, right_iloc)
xs = np.arange(20).reshape(5, 4)
cols = ["jim", "joe", "jolie", "joline"]
df = DataFrame(xs, columns=cols, index=list("abcde"), dtype="int64")
        # right hand side; permute the indices and multiply by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right_iloc = df.copy()
right_iloc["joe"] = [1, 14, 10, 6, 17]
right_iloc["jolie"] = [2, 13, 9, 5, 18]
right_iloc.iloc[1:4, 1:3] *= -2
right_loc = df.copy()
right_loc.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right_loc, right_iloc)
# make frames multi-type & re-run tests
for frame in [df, rhs, right_loc, right_iloc]:
frame["joe"] = frame["joe"].astype("float64")
frame["jolie"] = frame["jolie"].map("@{}".format)
right_iloc["joe"] = [1.0, "@-28", "@-20", "@-12", 17.0]
right_iloc["jolie"] = ["@2", -26.0, -18.0, -10.0, "@18"]
run_tests(df, rhs, right_loc, right_iloc)
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
for idx in [_mklbl("A", 20), np.arange(20) + 100, np.linspace(100, 150, 20)]:
idx = Index(idx)
ser = Series(np.arange(20), index=idx)
tm.assert_indexing_slices_equivalent(ser, SLC[idx[9] :: -1], SLC[9::-1])
tm.assert_indexing_slices_equivalent(ser, SLC[: idx[9] : -1], SLC[:8:-1])
tm.assert_indexing_slices_equivalent(
ser, SLC[idx[13] : idx[9] : -1], SLC[13:8:-1]
)
tm.assert_indexing_slices_equivalent(
ser, SLC[idx[9] : idx[13] : -1], SLC[:0]
)
def test_slice_with_zero_step_raises(self, indexer_sl, frame_or_series):
obj = frame_or_series(np.arange(20), index=_mklbl("A", 20))
with pytest.raises(ValueError, match="slice step cannot be zero"):
indexer_sl(obj)[::0]
def test_loc_setitem_indexing_assignment_dict_already_exists(self):
index = Index([-5, 0, 5], name="z")
df = DataFrame({"x": [1, 2, 6], "y": [2, 2, 8]}, index=index)
expected = df.copy()
rhs = {"x": 9, "y": 99}
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
# GH#38335 same thing, mixed dtypes
df = DataFrame({"x": [1, 2, 6], "y": [2.0, 2.0, 8.0]}, index=index)
df.loc[5] = rhs
expected = DataFrame({"x": [1, 2, 9], "y": [2.0, 2.0, 99.0]}, index=index)
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_indexing_dtypes_on_empty(self):
# Check that .iloc returns correct dtypes GH9983
df = DataFrame({"a": [1, 2, 3], "b": ["b", "b2", "b3"]})
df2 = df.iloc[[], :]
assert df2.loc[:, "a"].dtype == np.int64
tm.assert_series_equal(df2.loc[:, "a"], df2.iloc[:, 0])
@pytest.mark.parametrize("size", [5, 999999, 1000000])
def test_loc_range_in_series_indexing(self, size):
# range can cause an indexing error
# GH 11652
s = Series(index=range(size), dtype=np.float64)
s.loc[range(1)] = 42
tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
def test_partial_boolean_frame_indexing(self):
# GH 17170
df = DataFrame(
np.arange(9.0).reshape(3, 3), index=list("abc"), columns=list("ABC")
)
index_df = DataFrame(1, index=list("ab"), columns=list("AB"))
result = df[index_df.notnull()]
expected = DataFrame(
np.array([[0.0, 1.0, np.nan], [3.0, 4.0, np.nan], [np.nan] * 3]),
index=list("abc"),
columns=list("ABC"),
)
tm.assert_frame_equal(result, expected)
def test_no_reference_cycle(self):
df =
|
DataFrame({"a": [0, 1], "b": [2, 3]})
|
pandas.DataFrame
|
# This file is part of Patsy
# Copyright (C) 2012-2013 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.
# There are a number of unit tests in build.py, but this file contains more
# thorough tests of the overall design matrix building system. (These are
# still not exhaustive end-to-end tests, though -- for that see
# test_highlevel.py.)
from __future__ import print_function
import six
import numpy as np
from nose.tools import assert_raises
from patsy import PatsyError
from patsy.util import (atleast_2d_column_default,
have_pandas, have_pandas_categorical)
from patsy.desc import Term, INTERCEPT
from patsy.build import *
from patsy.categorical import C
from patsy.user_util import balanced, LookupFactor
from patsy.design_info import DesignMatrix
if have_pandas:
import pandas
def assert_full_rank(m):
m = atleast_2d_column_default(m)
if m.shape[1] == 0:
return True
u, s, v = np.linalg.svd(m)
rank = np.sum(s > 1e-10)
assert rank == m.shape[1]
def test_assert_full_rank():
assert_full_rank(np.eye(10))
assert_full_rank([[1, 0], [1, 0], [1, 0], [1, 1]])
assert_raises(AssertionError,
assert_full_rank, [[1, 0], [2, 0]])
assert_raises(AssertionError,
assert_full_rank, [[1, 2], [2, 4]])
assert_raises(AssertionError,
assert_full_rank, [[1, 2, 3], [1, 10, 100]])
# col1 + col2 = col3
assert_raises(AssertionError,
assert_full_rank, [[1, 2, 3], [1, 5, 6], [1, 6, 7]])
def make_termlist(*entries):
terms = []
for entry in entries:
terms.append(Term([LookupFactor(name) for name in entry]))
return terms
def check_design_matrix(mm, expected_rank, termlist, column_names=None):
assert_full_rank(mm)
assert set(mm.design_info.terms) == set(termlist)
if column_names is not None:
assert mm.design_info.column_names == column_names
assert mm.ndim == 2
assert mm.shape[1] == expected_rank
def make_matrix(data, expected_rank, entries, column_names=None):
termlist = make_termlist(*entries)
def iter_maker():
yield data
builders = design_matrix_builders([termlist], iter_maker)
matrices = build_design_matrices(builders, data)
matrix = matrices[0]
assert (builders[0].design_info.term_slices
== matrix.design_info.term_slices)
assert (builders[0].design_info.column_names
== matrix.design_info.column_names)
assert matrix.design_info.builder is builders[0]
check_design_matrix(matrix, expected_rank, termlist,
column_names=column_names)
return matrix
def test_simple():
data = balanced(a=2, b=2)
x1 = data["x1"] = np.linspace(0, 1, len(data["a"]))
x2 = data["x2"] = data["x1"] ** 2
m = make_matrix(data, 2, [["a"]], column_names=["a[a1]", "a[a2]"])
assert np.allclose(m, [[1, 0], [1, 0], [0, 1], [0, 1]])
m = make_matrix(data, 2, [[], ["a"]], column_names=["Intercept", "a[T.a2]"])
assert np.allclose(m, [[1, 0], [1, 0], [1, 1], [1, 1]])
m = make_matrix(data, 4, [["a", "b"]],
column_names=["a[a1]:b[b1]", "a[a2]:b[b1]",
"a[a1]:b[b2]", "a[a2]:b[b2]"])
assert np.allclose(m, [[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]])
m = make_matrix(data, 4, [[], ["a"], ["b"], ["a", "b"]],
column_names=["Intercept", "a[T.a2]",
"b[T.b2]", "a[T.a2]:b[T.b2]"])
assert np.allclose(m, [[1, 0, 0, 0],
[1, 0, 1, 0],
[1, 1, 0, 0],
[1, 1, 1, 1]])
m = make_matrix(data, 4, [[], ["b"], ["a"], ["b", "a"]],
column_names=["Intercept", "b[T.b2]",
"a[T.a2]", "b[T.b2]:a[T.a2]"])
assert np.allclose(m, [[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 0, 1, 0],
[1, 1, 1, 1]])
m = make_matrix(data, 4, [["a"], ["x1"], ["a", "x1"]],
column_names=["a[a1]", "a[a2]", "x1", "a[T.a2]:x1"])
assert np.allclose(m, [[1, 0, x1[0], 0],
[1, 0, x1[1], 0],
[0, 1, x1[2], x1[2]],
[0, 1, x1[3], x1[3]]])
m = make_matrix(data, 3, [["x1"], ["x2"], ["x2", "x1"]],
column_names=["x1", "x2", "x2:x1"])
assert np.allclose(m, np.column_stack((x1, x2, x1 * x2)))
def test_R_bugs():
data = balanced(a=2, b=2, c=2)
data["x"] = np.linspace(0, 1, len(data["a"]))
# For "1 + a:b", R produces a design matrix with too many columns (5
# instead of 4), because it can't tell that there is a redundancy between
# the two terms.
make_matrix(data, 4, [[], ["a", "b"]])
# For "0 + a:x + a:b", R produces a design matrix with too few columns (4
# instead of 6), because it thinks that there is a redundancy which
# doesn't exist.
make_matrix(data, 6, [["a", "x"], ["a", "b"]])
# This can be compared with "0 + a:c + a:b", where the redundancy does
# exist. Confusingly, adding another categorical factor increases the
# baseline dimensionality to 8, and then the redundancy reduces it to 6
# again, so the result is the same as before but for different reasons. (R
# does get this one right, but we might as well test it.)
make_matrix(data, 6, [["a", "c"], ["a", "b"]])
def test_redundancy_thoroughly():
# To make sure there aren't any lurking bugs analogous to the ones that R
# has (see above), we check that we get the correct matrix rank for every
# possible combination of 2 categorical and 2 numerical factors.
data = balanced(a=2, b=2, repeat=5)
data["x1"] = np.linspace(0, 1, len(data["a"]))
data["x2"] = data["x1"] ** 2
def all_subsets(l):
if not l:
yield tuple()
else:
obj = l[0]
for subset in all_subsets(l[1:]):
yield tuple(sorted(subset))
yield tuple(sorted((obj,) + subset))
all_terms = list(all_subsets(("a", "b", "x1", "x2")))
all_termlist_templates = list(all_subsets(all_terms))
print(len(all_termlist_templates))
# eliminate some of the symmetric versions to speed things up
redundant = [[("b",), ("a",)],
[("x2",), ("x1",)],
[("b", "x2"), ("a", "x1")],
[("a", "b", "x2"), ("a", "b", "x1")],
[("b", "x1", "x2"), ("a", "x1", "x2")]]
count = 0
for termlist_template in all_termlist_templates:
termlist_set = set(termlist_template)
for dispreferred, preferred in redundant:
if dispreferred in termlist_set and preferred not in termlist_set:
break
else:
expanded_terms = set()
for term_template in termlist_template:
numeric = tuple([t for t in term_template if t.startswith("x")])
rest = [t for t in term_template if not t.startswith("x")]
for subset_rest in all_subsets(rest):
expanded_terms.add(frozenset(subset_rest + numeric))
# Because our categorical variables have 2 levels, each expanded
# term corresponds to 1 unique dimension of variation
expected_rank = len(expanded_terms)
if termlist_template in [(), ((),)]:
# No data dependence, should fail
assert_raises(PatsyError,
make_matrix,
data, expected_rank, termlist_template)
else:
make_matrix(data, expected_rank, termlist_template)
count += 1
print(count)
test_redundancy_thoroughly.slow = 1
def test_data_types():
basic_dict = {"a": ["a1", "a2", "a1", "a2"],
"x": [1, 2, 3, 4]}
# On Python 2, this is identical to basic_dict:
basic_dict_bytes = dict(basic_dict)
basic_dict_bytes["a"] = [s.encode("ascii") for s in basic_dict_bytes["a"]]
# On Python 3, this is identical to basic_dict:
basic_dict_unicode = {"a": ["a1", "a2", "a1", "a2"],
"x": [1, 2, 3, 4]}
basic_dict_unicode = dict(basic_dict)
basic_dict_unicode["a"] = [six.text_type(s) for s in basic_dict_unicode["a"]]
structured_array_bytes = np.array(list(zip(basic_dict["a"],
basic_dict["x"])),
dtype=[("a", "S2"), ("x", int)])
structured_array_unicode = np.array(list(zip(basic_dict["a"],
basic_dict["x"])),
dtype=[("a", "U2"), ("x", int)])
recarray_bytes = structured_array_bytes.view(np.recarray)
recarray_unicode = structured_array_unicode.view(np.recarray)
datas = [basic_dict, structured_array_bytes, structured_array_unicode,
recarray_bytes, recarray_unicode]
if have_pandas:
df_bytes = pandas.DataFrame(basic_dict_bytes)
datas.append(df_bytes)
df_unicode = pandas.DataFrame(basic_dict_unicode)
datas.append(df_unicode)
for data in datas:
m = make_matrix(data, 4, [["a"], ["a", "x"]],
column_names=["a[a1]", "a[a2]", "a[a1]:x", "a[a2]:x"])
assert np.allclose(m, [[1, 0, 1, 0],
[0, 1, 0, 2],
[1, 0, 3, 0],
[0, 1, 0, 4]])
def test_build_design_matrices_dtype():
data = {"x": [1, 2, 3]}
def iter_maker():
yield data
builder = design_matrix_builders([make_termlist("x")], iter_maker)[0]
mat = build_design_matrices([builder], data)[0]
assert mat.dtype == np.dtype(np.float64)
mat = build_design_matrices([builder], data, dtype=np.float32)[0]
assert mat.dtype == np.dtype(np.float32)
if hasattr(np, "float128"):
mat = build_design_matrices([builder], data, dtype=np.float128)[0]
assert mat.dtype == np.dtype(np.float128)
def test_return_type():
data = {"x": [1, 2, 3]}
def iter_maker():
yield data
builder = design_matrix_builders([make_termlist("x")], iter_maker)[0]
# Check explicitly passing return_type="matrix" works
mat = build_design_matrices([builder], data, return_type="matrix")[0]
assert isinstance(mat, DesignMatrix)
# Check that nonsense is detected
assert_raises(PatsyError,
build_design_matrices, [builder], data,
return_type="asdfsadf")
def test_NA_action():
initial_data = {"x": [1, 2, 3], "c": ["c1", "c2", "c1"]}
def iter_maker():
yield initial_data
builder = design_matrix_builders([make_termlist("x", "c")], iter_maker)[0]
# By default drops rows containing either NaN or None
mat = build_design_matrices([builder],
{"x": [10.0, np.nan, 20.0],
"c": np.asarray(["c1", "c2", None],
dtype=object)})[0]
assert mat.shape == (1, 3)
assert np.array_equal(mat, [[1.0, 0.0, 10.0]])
# NA_action="a string" also accepted:
mat = build_design_matrices([builder],
{"x": [10.0, np.nan, 20.0],
"c": np.asarray(["c1", "c2", None],
dtype=object)},
NA_action="drop")[0]
assert mat.shape == (1, 3)
assert np.array_equal(mat, [[1.0, 0.0, 10.0]])
# And objects
from patsy.missing import NAAction
# allows NaN's to pass through
NA_action = NAAction(NA_types=[])
mat = build_design_matrices([builder],
{"x": [10.0, np.nan],
"c": np.asarray(["c1", "c2"],
dtype=object)},
NA_action=NA_action)[0]
assert mat.shape == (2, 3)
# According to this (and only this) function, NaN == NaN.
np.testing.assert_array_equal(mat, [[1.0, 0.0, 10.0], [0.0, 1.0, np.nan]])
# NA_action="raise"
assert_raises(PatsyError,
build_design_matrices,
[builder],
{"x": [10.0, np.nan, 20.0],
"c": np.asarray(["c1", "c2", None],
dtype=object)},
NA_action="raise")
def test_NA_drop_preserves_levels():
# Even if all instances of some level are dropped, we still include it in
# the output matrix (as an all-zeros column)
data = {"x": [1.0, np.nan, 3.0], "c": ["c1", "c2", "c3"]}
def iter_maker():
yield data
builder = design_matrix_builders([make_termlist("x", "c")], iter_maker)[0]
assert builder.design_info.column_names == ["c[c1]", "c[c2]", "c[c3]", "x"]
mat, = build_design_matrices([builder], data)
assert mat.shape == (2, 4)
assert np.array_equal(mat, [[1.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 3.0]])
def test_return_type_pandas():
if not have_pandas:
return
data = pandas.DataFrame({"x": [1, 2, 3],
"y": [4, 5, 6],
"a": ["a1", "a2", "a1"]},
index=[10, 20, 30])
def iter_maker():
yield data
int_builder, = design_matrix_builders([make_termlist([])], iter_maker)
(y_builder, x_builder) = design_matrix_builders([make_termlist("y"),
make_termlist("x")],
iter_maker)
(x_a_builder,) = design_matrix_builders([make_termlist("x", "a")],
iter_maker)
(x_y_builder,) = design_matrix_builders([make_termlist("x", "y")],
iter_maker)
# Index compatibility is always checked for pandas input, regardless of
# whether we're producing pandas output
assert_raises(PatsyError,
build_design_matrices,
[x_a_builder], {"x": data["x"], "a": data["a"][::-1]})
assert_raises(PatsyError,
build_design_matrices,
[y_builder, x_builder],
{"x": data["x"], "y": data["y"][::-1]})
# And we also check consistency between data.index and value indexes
# Creating a mismatch between these is a bit tricky. We want a data object
# such that isinstance(data, DataFrame), but data["x"].index !=
# data.index.
class CheatingDataFrame(pandas.DataFrame):
def __getitem__(self, key):
if key == "x":
return
|
pandas.DataFrame.__getitem__(self, key)
|
pandas.DataFrame.__getitem__
|
## Generate Twitter pre-trained Word2Vec and self-trained Word2Vec embeddings
## Word2Vec
import os
os.chdir("C:/Users/dordo/Dropbox/Capstone Project")
import pandas as pd
import pickle
from gensim import corpora
from gensim.models import Word2Vec
import gensim.downloader as api
##---------------------------------------------------------------------------##
## Define function to get embeddings from memory
def get_wv(model, dicts):
""" Get word embeddings in memory"""
w2v_embed = {}
missing = []
for val in dicts.values():
try:
it = model.wv[val]
        except KeyError:
missing.append(val)
it = None
w2v_embed[val] = it
return w2v_embed, missing
##---------------------------------------------------------------------------##
## Reading in pre processed data
with open('Data/Twitter/ProcessedTwitter.pkl', 'rb') as input:
txt_end = pickle.load(input)
## Create dictionary
dicts = corpora.Dictionary(txt_end)
len(dicts)
## Filter by appeareance in documents
dicts.filter_extremes(no_below=40, no_above=0.5, keep_n=None, keep_tokens=None)
len(dicts)
##--------------------------------------------------------------------------##
## PreTrained Word2vec
path = "C:/Users/dordo/Documents/Daniel/LSE/Capstone/Modelo/GoogleNews-vectors-negative300.bin"
model = Word2Vec(txt_end, size = 300, min_count = 40)
model.intersect_word2vec_format(path,
lockf=1.0,
binary=True)
model.train(txt_end, total_examples=model.corpus_count, epochs=25)
embeds_1 = get_wv(model, dicts)
## How many words of our corpus appear in the pre-trained embeddings?
##---------------------------------------------------------------------------##
## Self Trained Word2Vec
model_t = Word2Vec(txt_end, window=5, min_count=40, workers=4, size = 50)
model_t.train(txt_end, epochs=50, total_words = model_t.corpus_total_words,
total_examples = model_t.corpus_count)
embeds_2 = get_wv(model_t, dicts)
##---------------------------------------------------------------------------##
## Pre Trained GLOVE
model_g = api.load("glove-twitter-50")
embeds_3 = get_wv(model_g, dicts)
embeds_3df = pd.DataFrame(embeds_3[0])
## These are the embeddings that are actually available in GloVe
embeds_3df.T[~embeds_3df.T[1].isnull()]
##---------------------------------------------------------------------------##
## Saving
pretrained_embed = pd.DataFrame(embeds_1[0])
trained_embed = pd.DataFrame(embeds_2[0])
glove_embed =
|
pd.DataFrame(embeds_3df)
|
pandas.DataFrame
|
'''
Created on Nov 12, 2018
@author: <NAME> (<EMAIL>)
'''
import os
import glob
import argparse
import time
import pandas as pd
import numpy as np
import scipy.io as io
from keras.models import Model
from keras.layers import GRU, Dense, Dropout, Input
from keras import optimizers
from keras.utils import multi_gpu_model
import keras
import ipyparallel as ipp
# Constant.
MODEL_FILE_NAME = 'yaw_misalignment_calibrator.h5'
RESULT_FILE_NAME = 'ymc_result.csv'
dt = pd.Timedelta(10.0, 'm')
testTimeRanges = [(pd.Timestamp('2018-05-19'), pd.Timestamp('2018-05-26') - dt)
, (pd.Timestamp('2018-05-26'), pd.Timestamp('2018-06-02') - dt)
, (
|
pd.Timestamp('2018-06-02')
|
pandas.Timestamp
|
'''Assesses instances using cases'''
# TODO: Add output file
import common.constants as cn
from common.data_provider import DataProvider
import common_python.constants as ccn
from common_python.classifier.feature_set import \
FeatureVector
from tools.shared_data import SharedData
import argparse
import collections
import multiprocessing
import numpy as np
import os
import pandas as pd
INSTANCE = "instance"
MAX_SL = 0.001
REPORT_INTERVAL = 25 # Computations between reports
NUM_FSET = 100 # Number of feature sets examined
Arguments = collections.namedtuple("Arguments",
"state df num_fset")
INDEX = "index"
def _runState(arguments):
"""
Does case evaluation for all instances for a single state.
Run in multiple proceses concurrently.
Parameters
----------
state: int
df_instance: pd.DataFrame
Instances of feature vectors
num_fset: int
Return
------
pd.DataFrame
FEATURE_VECTOR
SIGLVL: significance level of FRAC
STATE: state analyzed
INSTANCE: from data feature vector
COUNT: number of cases
FRAC: fraction of positive cases
"""
state = arguments.state
df_instance = arguments.df
num_fset = arguments.num_fset
#
shared_data = SharedData()
fset_selector = lambda f: True
dfs = []
for instance in df_instance.index:
ser_X = df_instance.loc[instance, :]
collection = shared_data.collection_dct[state]
df = collection.getFVEvaluations(ser_X,
fset_selector=fset_selector, num_fset=num_fset,
max_sl=MAX_SL)
if len(df) > 0:
df[cn.STATE] = state
df[INSTANCE] = instance
dfs.append(df)
df_result = pd.concat(dfs)
df_result.index = range(len(df_result.index))
# Augment the dataframe with gene descriptions
provider = DataProvider()
provider.do()
df_go = provider.df_go_terms
descriptions = []
for stg in df_result[ccn.FEATURE_VECTOR]:
if not isinstance(stg, str):
descriptions.append("")
else:
feature_vector = FeatureVector.make(stg)
features = feature_vector.fset.set
description = []
for feature in features:
df_sub = df_go[df_go[cn.GENE_ID] == feature]
this_desc = ["%s: %s " % (feature, f)
for f in df_sub[cn.GO_TERM]]
description.extend(this_desc)
description = "\n".join(description)
descriptions.append(description)
#
df_result[cn.GENE_DESCRIPTION] = descriptions
return df_result
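# A minimal sketch (assumed here, not necessarily the exact dispatch used in
# run() below) of evaluating several states concurrently with _runState:
#
#   # 'states' and 'df_instance' are assumed to be available at this point.
#   arguments = [Arguments(state=s, df=df_instance, num_fset=NUM_FSET)
#                for s in states]
#   with multiprocessing.Pool() as pool:
#       dfs = pool.map(_runState, arguments)
#   df_all = pd.concat(dfs)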
def run(input_fd, output_fd, num_fset=NUM_FSET):
"""
    Processes the instances in the input CSV and writes case-evaluation results to the output file.
Parameters
----------
input_fd: File Descriptor
Input CSV file
output_fd: File Descriptor
Output file
num_fset: int
Number of FeatureSets considered
Returns
-------
None.
"""
# Initializations
df_instance =
|
pd.read_csv(input_fd)
|
pandas.read_csv
|
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("/content/IIITH_Codemixed_new.txt",sep="\t",names = ["Speech", "Labels"]).dropna()
df1 = pd.read_csv("/content/train_new.txt",sep = "\t",names = ["Speech", "Labels"]).dropna()
df_new =
|
pd.concat([df,df1], ignore_index=True)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 16:01:32 2018
@author: Adam
"""
import os
import glob
from numbers import Number
import numpy as np
import pandas as pd
def sub_dire(base, dire, fname=None):
""" Build path to a base/dire. Create if does not exist."""
if base is None:
raise ValueError(f"base={base} is not valid")
else:
path = os.path.join(base, dire)
if not os.path.exists(path):
os.makedirs(path)
if fname is not None:
path = os.path.join(path, fname)
return path
def ls(dire, regex="*", full_output=True, report=False):
""" List the contents of dire.
e.g., to list pickle files in the cache,
ls(h5.cache_dire, regex="*.pkl")
"""
# folder
if dire is None:
raise Exception("Cannot read from None directory.")
# check exists
if not os.path.isdir(dire):
raise Exception(f"{dire} does not exist.")
fils = glob.glob(os.path.join(dire, regex))
if report:
print(f"Found {len(fils)} matches to {regex} in {dire}.")
if full_output:
return fils
fnames = [os.path.split(f)[1] for f in fils]
return fnames
def to_pickle(obj, dire, fname, overwrite=False, **kwargs):
""" save `obj` as [dire]/[fname].pkl
args:
obj python object to save
dire directory
fname file name
overwrite=False
kwargs:
[passed to pandas.to_pickle()]
"""
fname, _ = os.path.splitext(fname)
fname += ".pkl"
fil = os.path.join(dire, fname)
# checks
if not os.path.isdir(dire):
raise OSError(f"{dire} not found")
elif os.path.exists(fil) and not overwrite:
raise OSError(f"{fil} already exists. Use overwrite=True")
else:
pd.to_pickle(obj, fil, **kwargs)
def read_pickle(dire, fname, **kwargs):
""" read `obj` from [dire]/[fname].pkl
args:
dire directory
fname file name.
kwargs:
[passed to pandas.read_pickle()]
"""
fname, _ = os.path.splitext(fname)
fname += ".pkl"
fil = os.path.join(dire, fname)
# checks
if not os.path.exists(fil):
raise OSError(f"{fil} not found")
else:
        return pd.read_pickle(fil, **kwargs)
def t_index(time, dt=1.0, t0=0.0):
""" Convert time to index using dt [and t0].
"""
if isinstance(time, Number):
return int(round((time - t0) / dt))
elif isinstance(time, tuple):
return tuple([int(round((t - t0) / dt)) for t in time])
elif isinstance(time, list):
return list([int(round((t - t0) / dt)) for t in time])
elif isinstance(time, np.ndarray):
return np.array([int(round((t - t0) / dt)) for t in time])
else:
raise TypeError("time must be a number or list of numbers.")
def utf8_attrs(info):
""" Convert bytes to utf8
args:
info dict()
return:
info dict() (decoded to utf8)
"""
for key, val in info.items():
if isinstance(val, bytes):
info[key] = val.decode("utf8")
return info
def add_level(df, label, position="first"):
""" Add a level to pd.MultiIndex columns.
This can be useful when joining DataFrames with / without multiindex
columns.
>>> st = statistics(df, groupby="squid") # MultiIndex DataFrame
>>> add_level(h5.var, "VAR").join(st)
args:
df object to add index level to pd.DataFrame()
label= value(s) of the added level(s) str() / list(str)
        position="first"
position of level to add "first", "last" or int
return:
df.copy() with pd.MultiIndex()
"""
df2 = df.copy()
# multiple labels?
if isinstance(label, str):
# ..nope...
label = [label]
# reverse the label list (more intuitve behaviour?)
label = label[::-1]
# position is first?
if position == "first":
position = 0
# if df is Series then convert to DataFrame
if isinstance(df2, pd.Series):
df2 = pd.DataFrame(df2)
for lbl in label:
# add a level for each label
df2_columns = df2.columns.tolist()
new_columns = []
for col in df2_columns:
if not isinstance(col, tuple):
col = (col,)
col = list(col)
if position == "last":
col.append(lbl)
else:
col.insert(position, lbl)
new_columns.append(tuple(col))
df2.columns = pd.MultiIndex.from_tuples(new_columns)
return df2
def df_from_dict_of_tuples(data, names=("value", "error")):
""" Construct a DataFrame with MultiIndex columns
from a dict of tuples.
args:
data : dict
Of the form {row_i : {col_i: (item_i, ...), ...}, ...}
names=("value", "error") : tuple
Names of the items in each entry.
return:
pandas.DataFrame
+-------+-----------------+-----------------+
| | col_1 | col_2 |
| | name_1 | name_2 | name_1 | name_2 |
|-------+-----------------+-----------------+
| row_1 | item_1 | item_2 | item_1 | item_2 |
| row_2 | item_1 | item_2 | item_1 | item_2 |
"""
tmp = pd.DataFrame.from_dict(data, orient="index")
df = pd.DataFrame()
num = len(names)
for c in tmp.columns:
df[list(zip([c] * num, names))] = tmp[c].apply(pd.Series)
df.columns =
|
pd.MultiIndex.from_tuples(df.columns)
|
pandas.MultiIndex.from_tuples
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Load Data, Fill NA with column means and Standardize
dat = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data",header=None,sep="\t")[0]
X=pd.DataFrame(np.array(list(dat.str.split())))
for i in X.columns.values:
X[i]=X[i].replace("?",np.nan).astype(float)
X[i].fillna(np.nanmean(X[i]),inplace=True)
Y=np.array(X[0])
X=X[[i for i in X.columns.values if i>0]]
X=(X-np.mean(X))/np.std(X)
X['int']=1
X=np.array(X)
#Initialize 600 chains of 30,000 iterations to evaluate 8 model coefficients, with regressions scored by the R-squared.
draws = 600
w0=np.random.normal(0,.01,(X.shape[1],draws))
score0=1-np.sum((Y.reshape(len(Y),1)-X@w0)**2,axis=0)/sum((Y-np.mean(Y))**2)
delta=np.zeros((X.shape[1],draws))
stepsize=.0001
updates = 0
while updates < 30000:
w1=w0+np.random.normal(delta,stepsize)
score1=1-np.sum((Y.reshape(len(Y),1)-X@w1)**2,axis=0)/sum((Y-np.mean(Y))**2)
delta = np.where(score1>score0,w1-w0,delta)
w0=np.where(score1>score0,w1,w0)
print(sum(np.where(score1>score0,1,0)))
score0=score1
updates+=1
coef_est=np.round(np.mean(w0,axis=1),2)
print(coef_est)
coef_actual=np.round(np.linalg.inv(X.T@X)@(X.T@Y),2)
print(coef_actual)
def dist_plot(i, fontsize=12):
plt.hist(w0[i],bins=30,label='est. distribution')
plt.bar(coef_actual[i],100,color='.1',width=1,alpha=.5,label='true coef')
plt.ylim((0,60))
plt.legend()
plt.title("feat_"+str(i)+"; Mean: "+str(coef_est[i])+", Exact: "+str(coef_actual[i]))
fig = plt.figure(figsize=(10,15))
ax1 = fig.add_subplot(4,2,1)
ax1 = dist_plot(0, fontsize=12)
ax2 = fig.add_subplot(4,2,2)
ax2 = dist_plot(1, fontsize=12)
ax3 = fig.add_subplot(4,2,3)
ax3 = dist_plot(2, fontsize=12)
ax4 = fig.add_subplot(4,2,4)
ax4 = dist_plot(3, fontsize=12)
ax5 = fig.add_subplot(4,2,5)
ax5 = dist_plot(4, fontsize=12)
ax6 = fig.add_subplot(4,2,6)
ax6 = dist_plot(5, fontsize=12)
ax7 = fig.add_subplot(4,2,7)
ax7 = dist_plot(6, fontsize=12)
ax8 = fig.add_subplot(4,2,8)
ax8 = dist_plot(7, fontsize=12)
plt.show()
#KNN
import pandas as pd
import random
from sklearn.neighbors import KNeighborsClassifier
dat = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/00519/heart_failure_clinical_records_dataset.csv")
|
pd.set_option("display.max_columns",500)
|
pandas.set_option
|
import nose
import unittest
import os
import sys
import warnings
from datetime import datetime
import numpy as np
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
from pandas.io.pytables import HDFStore, get_store, Term, IncompatibilityWarning
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
class TestHDFStore(unittest.TestCase):
path = '__test__.h5'
scratchpath = '__scratch__.h5'
def setUp(self):
self.store = HDFStore(self.path)
def tearDown(self):
self.store.close()
os.remove(self.path)
def test_factory_fun(self):
try:
with get_store(self.scratchpath) as tbl:
raise ValueError('blah')
except ValueError:
pass
with get_store(self.scratchpath) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.scratchpath) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
os.remove(self.scratchpath)
def test_keys(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.assertEquals(len(self.store), 5)
self.assert_(set(self.store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
repr(self.store)
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.store.append('e', tm.makePanel())
repr(self.store)
str(self.store)
def test_contains(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
self.store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in self.store)
self.assert_('b' in self.store)
self.assert_('c' not in self.store)
self.assert_('foo/bar' in self.store)
self.assert_('/foo/bar' in self.store)
self.assert_('/foo/b' not in self.store)
self.assert_('bar' not in self.store)
def test_versioning(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
self.assert_(self.store.root.a._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.b._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.df1._v_attrs.pandas_version == '0.10')
# write a file and wipe its versioning
self.store.remove('df2')
self.store.append('df2', df)
self.store.get_node('df2')._v_attrs.pandas_version = None
self.store.select('df2')
self.store.select('df2', [ Term('index','>',df.index[2]) ])
def test_meta(self):
raise nose.SkipTest('no meta')
meta = { 'foo' : [ 'I love pandas ' ] }
s = tm.makeTimeSeries()
s.meta = meta
self.store['a'] = s
self.assert_(self.store['a'].meta == meta)
df = tm.makeDataFrame()
df.meta = meta
self.store['b'] = df
self.assert_(self.store['b'].meta == meta)
# this should work, but because slicing doesn't propagate meta it doesn't
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
results = self.store['df1']
#self.assert_(getattr(results,'meta',None) == meta)
# no meta
df = tm.makeDataFrame()
self.store['b'] = df
self.assert_(hasattr(self.store['b'],'meta') == False)
def test_reopen_handle(self):
self.store['a'] = tm.makeTimeSeries()
self.store.open('w', warn=False)
self.assert_(self.store.handle.isopen)
self.assertEquals(len(self.store), 0)
def test_flush(self):
self.store['a'] = tm.makeTimeSeries()
self.store.flush()
def test_get(self):
self.store['a'] = tm.makeTimeSeries()
left = self.store.get('a')
right = self.store['a']
tm.assert_series_equal(left, right)
left = self.store.get('/a')
right = self.store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, self.store.get, 'b')
def test_put(self):
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
self.store['a'] = ts
self.store['b'] = df[:10]
self.store['foo/bar/bah'] = df[:10]
self.store['foo'] = df[:10]
self.store['/foo'] = df[:10]
self.store.put('c', df[:10], table=True)
# not OK, not a table
self.assertRaises(ValueError, self.store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
self.assertRaises(ValueError, self.store.put, 'f', df[10:], append=True)
# OK
self.store.put('c', df[10:], append=True)
# overwrite table
self.store.put('c', df[:10], table=True, append=False)
tm.assert_frame_equal(df[:10], self.store['c'])
def test_put_string_index(self):
index = Index([ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(20), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + [ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(21), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
def test_put_compression(self):
df = tm.makeTimeDataFrame()
self.store.put('c', df, table=True, compression='zlib')
tm.assert_frame_equal(self.store['c'], df)
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='blosc')
self.store.put('c', df, table=True, compression='blosc')
tm.assert_frame_equal(self.store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_append(self):
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
tm.assert_frame_equal(self.store['df1'], df)
self.store.remove('df2')
self.store.put('df2', df[:10], table=True)
self.store.append('df2', df[10:])
tm.assert_frame_equal(self.store['df2'], df)
self.store.remove('df3')
self.store.append('/df3', df[:10])
self.store.append('/df3', df[10:])
tm.assert_frame_equal(self.store['df3'], df)
# this is allowed, but you almost always don't want to do it
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
self.store.remove('/df3 foo')
self.store.append('/df3 foo', df[:10])
self.store.append('/df3 foo', df[10:])
tm.assert_frame_equal(self.store['df3 foo'], df)
warnings.filterwarnings('always', category=tables.NaturalNameWarning)
# panel
wp = tm.makePanel()
self.store.remove('wp1')
self.store.append('wp1', wp.ix[:,:10,:])
self.store.append('wp1', wp.ix[:,10:,:])
tm.assert_panel_equal(self.store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:])
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using axis labels
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=['items','major_axis','minor_axis'])
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
self.store.remove('p4d2')
self.store.append('p4d2', p4d2, axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d2'], p4d2)
# test using different order of items on the non-index axes
self.store.remove('wp1')
wp_append1 = wp.ix[:,:10,:]
self.store.append('wp1', wp_append1)
wp_append2 = wp.ix[:,10:,:].reindex(items = wp.items[::-1])
self.store.append('wp1', wp_append2)
tm.assert_panel_equal(self.store['wp1'], wp)
def test_append_frame_column_oriented(self):
# column oriented
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df.ix[:,:2], axes = ['columns'])
self.store.append('df1', df.ix[:,2:])
tm.assert_frame_equal(self.store['df1'], df)
result = self.store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(Exception, self.store.select, 'df1', ('columns=A', Term('index','>',df.index[4])))
# selection on the non-indexable
result = self.store.select('df1', ('columns=A', Term('index','=',df.index[0:4])))
expected = df.reindex(columns=['A'],index=df.index[0:4])
tm.assert_frame_equal(expected, result)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i,idx in enumerate(indexers):
self.assert_(getattr(getattr(self.store.root,key).table.description,idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# same as above, but try to append with different axes
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['labels','items','major_axis'])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# pass incorrect number of axes
self.store.remove('p4d')
self.assertRaises(Exception, self.store.append, 'p4d', p4d.ix[:,:,:10,:], axes=['major_axis','minor_axis'])
# different than default indexables #1
indexers = ['labels','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# different than default indexables #2
indexers = ['major_axis','labels','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# partial selection
result = self.store.select('p4d',['labels=l1'])
expected = p4d.reindex(labels = ['l1'])
tm.assert_panel4d_equal(result, expected)
# partial selection2
result = self.store.select('p4d',[
|
Term('labels=l1')
|
pandas.io.pytables.Term
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 17 10:56:59 2020
Author: <NAME>
1. Use parallel computing to find the probability distribution of rate of
change of a parameter.
2. Use parallel computing to find points that fit the above probability
distribution.
3. Read one file per parameter and generate one final file containing rate of
change of each parameter.
Expected input 1:
TimeStamp paramX rate
06/08/2015 12:33:00 199.84 0
06/08/2015 12:33:30 199.14 -0.023333333
06/08/2015 12:34:00 199.96 0.027333333
06/08/2015 12:34:30 200.14 0.006
...
Expected input 2:
TimeStamp paramY rate
06/08/2015 12:41:00 199.18 -0.047666667
06/08/2015 12:41:30 199 -0.006
06/08/2015 12:42:00 200.06 0.035333333
06/08/2015 12:42:30 199.88 -0.006
...
Expected Output:
paramX paramY
-0.014455046 0.005159816
-0.010470647 -0.027910691
-0.008445245 -0.00039344
0.028559856 0.022433843
...
Running on Python 3.7.5.
"""
import time
from os import path
from multiprocessing import Pool
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde
from git import Repo
repo = Repo('.', search_parent_directories=True)
# to calculate total running time
start_time = time.time()
if __name__ == '__main__':
__spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"
filenames = ["resources/paramX_rate.csv",
] # add more files here
# initialise variables
random_rate, random_values = [], []
samples = 10000 # number of rate samples required
rate_df = pd.DataFrame()
pool = Pool(processes=1) # define number of parallel processes required
# Generate time series
for file in filenames:
# split filename to get parameter name
name = file.split('/')[-1].split('_')
# print parameter name
print(name[0])
# get full file path
filepath = path.join(repo.working_dir, file)
# read file
data_file =
|
pd.read_csv(filepath, header=0, index_col=False)
|
pandas.read_csv
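# A minimal sketch (assumption, not the original continuation) of steps 1-2 from
# the module docstring: fit a kernel density estimate to the observed rate-of-change
# values and draw new points that follow that distribution.
rate_values = data_file['rate'].dropna().to_numpy()
kde = gaussian_kde(rate_values)
resampled_rates = kde.resample(samples)[0]   # `samples` synthetic rate values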
|
import pytest
import datetime as dt
import numpy as np
import pandas as pd
import cftime
import bokeh.palettes
import forest.state
from forest import db
from forest.db.control import time_array_equal
@pytest.mark.parametrize("left,right,expect", [
(db.State(), db.State(), True),
(db.State(valid_time="2019-01-01 00:00:00"),
db.State(valid_time=dt.datetime(2019, 1, 1)), True),
(db.State(initial_time="2019-01-01 00:00:00"),
db.State(initial_time=dt.datetime(2019, 1, 1)), True),
(db.State(initial_times=np.array([
"2019-01-01 00:00:00"], dtype='datetime64[s]')),
db.State(initial_times=["2019-01-01 00:00:00"]), True),
(db.State(initial_times=[]),
db.State(initial_times=["2019-01-01 00:00:00"]), False),
(db.State(valid_times=np.array([
"2019-01-01 00:00:00"], dtype='datetime64[s]')),
db.State(valid_times=["2019-01-01 00:00:00"]), True),
(db.State(pressure=1000.001), db.State(pressure=1000.0001), True),
(db.State(pressure=1000.001), db.State(pressure=900), False),
(db.State(pressures=np.array([1000.001, 900])),
db.State(pressures=[1000.0001, 900]), True),
(db.State(pressures=[1000.001, 900]),
db.State(pressures=[900, 900]), False),
(db.State(variables=[]), db.State(), False),
(db.State(variables=["a", "b"]), db.State(), False),
(db.State(), db.State(variables=["a", "b"]), False),
(db.State(variables=["a", "b"]), db.State(variables=["a", "b"]), True),
(db.State(variables=np.array(["a", "b"])),
db.State(variables=["a", "b"]), True)
])
def test_equality_and_not_equality(left, right, expect):
assert (left == right) == expect
assert (left != right) == (not expect)
def test_state_equality_valueerror_lengths_must_match():
"""should return False if lengths do not match"""
valid_times = (
pd.date_range("2020-01-01", periods=2),
pd.date_range("2020-01-01", periods=3),
)
left = db.State(valid_times=valid_times[0])
right = db.State(valid_times=valid_times[1])
assert (left == right) == False
def test_time_array_equal():
left = pd.date_range("2020-01-01", periods=2)
right = pd.date_range("2020-01-01", periods=3)
assert time_array_equal(left, right) == False
def test_valueerror_lengths_must_match():
a = ["2020-01-01T00:00:00Z"]
b = ["2020-02-01T00:00:00Z", "2020-02-02T00:00:00Z", "2020-02-03T00:00:00Z"]
with pytest.raises(ValueError):
pd.to_datetime(a) == pd.to_datetime(b)
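# A sketch (assumption, not the forest implementation) of the length-safe comparison
# that time_array_equal is expected to provide: unequal lengths short-circuit to
# False instead of raising like the raw pandas comparison above.
def _times_equal_sketch(a, b):
    a, b = pd.to_datetime(a), pd.to_datetime(b)
    if len(a) != len(b):
        return False
    return bool((a == b).all())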
@pytest.mark.parametrize("left,right,expect", [
pytest.param([cftime.DatetimeGregorian(2020, 1, 1),
cftime.DatetimeGregorian(2020, 1, 2),
cftime.DatetimeGregorian(2020, 1, 3)],
pd.date_range("2020-01-01", periods=3),
True,
id="gregorian/pandas same values"),
pytest.param([cftime.DatetimeGregorian(2020, 2, 1),
cftime.DatetimeGregorian(2020, 2, 2),
cftime.DatetimeGregorian(2020, 2, 3)],
|
pd.date_range("2020-01-01", periods=3)
|
pandas.date_range
|
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnat_arr(chunksize+5,0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
# read_csv disambiguates duplicate columns by
# labeling them dupe.1, dupe.2, etc.; monkey-patch the columns back
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # datetime
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names is passed as the columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return DataFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, result)
# column is mi
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(df, result)
# dup column names?
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
tm.assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert com.all_none(*result.columns.names)
result.columns.names = df.columns.names
tm.assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path)
for i in [6, 7]:
msg = f"len of {i}, but only 5 lines in file"
with pytest.raises(ParserError, match=msg):
read_csv(path, header=list(range(i)), index_col=0)
# write with cols
msg = "cannot specify cols with a MultiIndex"
with pytest.raises(TypeError, match=msg):
df.to_csv(path, columns=["foo", "bar"])
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# empty
tsframe[:0].to_csv(path)
recons = self.read_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_interval_index(self):
# GH 28210
df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3))
with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
df.to_csv(path)
result = self.read_csv(path, index_col=0)
# can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
expected = df.copy()
expected.index = expected.index.astype(str)
tm.assert_frame_equal(result, expected)
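# Why the string comparison above is needed (a sketch, not part of the test):
# an IntervalIndex written by to_csv is read back by read_csv as plain strings,
# so the expected index is cast to str before comparing.
print([str(iv) for iv in pd.interval_range(0, 3)])   # ['(0, 1]', '(1, 2]', '(2, 3]']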
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with tm.ensure_clean("__tmp_to_csv_float32_nanrep__.csv") as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(",")[2] == "999"
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({"A": [1, 2, 3], "B": ["5,6", "7,8", "9,0"]})
with tm.ensure_clean("__tmp_to_csv_withcommas__.csv") as path:
df.to_csv(path)
df2 = self.read_csv(path)
tm.assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return [f"{name}{i:03d}" for i in range(5)]
df_float = DataFrame(
np.random.randn(100, 5), dtype="float64", columns=create_cols("float")
)
df_int = DataFrame(
np.random.randn(100, 5).astype("int64"),
dtype="int64",
columns=create_cols("int"),
)
df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool"))
df_object = DataFrame(
"foo", index=df_float.index, columns=create_cols("object")
)
df_dt = DataFrame(
Timestamp("20010101"), index=df_float.index, columns=create_cols("date")
)
# add in some nans
df_float.iloc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = {}
for n, dtype in [
("float", np.float64),
("int", np.int64),
("bool", np.bool_),
("object", object),
]:
for c in create_cols(n):
dtypes[c] = dtype
with tm.ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(
filename, index_col=0, dtype=dtypes, parse_dates=create_cols("date")
)
tm.assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(
np.random.randn(1000, 30),
columns=list(range(15)) + list(range(15)),
dtype="float64",
)
with tm.ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
result.columns = df.columns
tm.assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype="float64")
df_int = DataFrame(np.random.randn(1000, 3)).astype("int64")
df_bool = DataFrame(True, index=df_float.index, columns=range(3))
df_object = DataFrame("foo", index=df_float.index, columns=range(3))
df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3))
df = pd.concat(
[df_float, df_int, df_bool, df_object, df_dt], axis=1, ignore_index=True
)
df.columns = [0, 1, 2] * 5
with tm.ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
# date cols
for i in ["0.4", "1.4", "2.4"]:
result[i] = to_datetime(result[i])
result.columns = df.columns
tm.assert_frame_equal(result, df)
# GH3457
N = 10
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
with tm.ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the dups columns
result = read_csv(filename, index_col=0)
result = result.rename(columns={"a.1": "a"})
tm.assert_frame_equal(result, df)
def test_to_csv_chunking(self):
aa = DataFrame({"A": range(100000)})
aa["B"] = aa.A + 1.0
aa["C"] = aa.A + 2.0
aa["D"] = aa.A + 3.0
for chunksize in [10000, 50000, 100000]:
with tm.ensure_clean() as filename:
aa.to_csv(filename, chunksize=chunksize)
rs = read_csv(filename, index_col=0)
tm.assert_frame_equal(rs, aa)
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with tm.ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
tm.assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO("a,1.0\nb,2.0")
df = self.read_csv(f1, header=None)
newdf = DataFrame({"t": df[df.columns[0]]})
with tm.ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
# don't check_names as t != 1
tm.assert_frame_equal(recons, newdf, check_names=False)
def test_to_csv_unicode(self):
df = DataFrame({"c/\u03c3": [1, 2, 3]})
with tm.ensure_clean() as path:
df.to_csv(path, encoding="UTF-8")
df2 = read_csv(path, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
df.to_csv(path, encoding="UTF-8", index=False)
df2 = read_csv(path, index_col=None, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO("")
df = DataFrame(
[["\u05d0", "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=["\u05d0", "\u05d1", "\u05d2", "\u05d3"],
index=["\u05d0", "\u05d1"],
)
df.to_csv(buf, encoding="UTF-8")
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_stringio(self, float_frame):
buf = StringIO()
float_frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
tm.assert_frame_equal(recons, float_frame)
def test_to_csv_float_format(self):
df = DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as filename:
df.to_csv(filename, float_format="%.2f")
rs = read_csv(filename, index_col=0)
xp = DataFrame(
[[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
tm.assert_frame_equal(rs, xp)
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({"A": [1, 2, 3], "B": ["foo", "bar", "baz"]})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC, encoding="utf-8")
result = buf.getvalue()
expected_rows = ['"A","B"', '1,"foo"', '2,"bar"', '3,"baz"']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_quote_none(self):
# GH4328
df = DataFrame({"A": ["hello", '{"hello"}']})
for encoding in (None, "utf-8"):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE, encoding=encoding, index=False)
result = buf.getvalue()
expected_rows = ["A", "hello", '{"hello"}']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected_rows = ["A,B", "one,1,4", "two,2,5", "three,3,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert buf.getvalue() == expected
def test_to_csv_line_terminators(self):
# see gh-20353
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
with tm.ensure_clean() as path:
# case 1: CRLF as line terminator
df.to_csv(path, line_terminator="\r\n")
expected = b",A,B\r\none,1,4\r\ntwo,2,5\r\nthree,3,6\r\n"
with open(path, mode="rb") as f:
assert f.read() == expected
with tm.ensure_clean() as path:
# case 2: LF as line terminator
df.to_csv(path, line_terminator="\n")
expected = b",A,B\none,1,4\ntwo,2,5\nthree,3,6\n"
with open(path, mode="rb") as f:
assert f.read() == expected
with tm.ensure_clean() as path:
# case 3: The default line terminator(=os.linesep)(gh-21406)
df.to_csv(path)
os_linesep = os.linesep.encode("utf-8")
expected = (
b",A,B"
+ os_linesep
+ b"one,1,4"
+ os_linesep
+ b"two,2,5"
+ os_linesep
+ b"three,3,6"
+ os_linesep
)
with open(path, mode="rb") as f:
assert f.read() == expected
def test_to_csv_from_csv_categorical(self):
# CSV with categoricals should result in the same output
# as when one would add a "normal" Series/DataFrame.
s = Series(pd.Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
s2 = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
res = StringIO()
s.to_csv(res, header=False)
exp = StringIO()
s2.to_csv(exp, header=False)
assert res.getvalue() == exp.getvalue()
df = DataFrame({"s": s})
df2 = DataFrame({"s": s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
def test_to_csv_path_is_none(self, float_frame):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = float_frame.to_csv(path_or_buf=None)
assert isinstance(csv_str, str)
recons = read_csv(StringIO(csv_str), index_col=0)
tm.assert_frame_equal(float_frame, recons)
@pytest.mark.parametrize(
"df,encoding",
[
(
DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
),
None,
),
# GH 21241, 21118
(DataFrame([["abc", "def", "ghi"]], columns=["X", "Y", "Z"]), "ascii"),
(DataFrame(5 * [[123, "你好", "世界"]], columns=["X", "Y", "Z"]), "gb2312"),
(
DataFrame(5 * [[123, "Γειά σου", "Κόσμε"]], columns=["X", "Y", "Z"]),
"cp737",
),
],
)
def test_to_csv_compression(self, df, encoding, compression):
with tm.ensure_clean() as filename:
df.to_csv(filename, compression=compression, encoding=encoding)
# test the round trip - to_csv -> read_csv
result = read_csv(
filename, compression=compression, index_col=0, encoding=encoding
)
tm.assert_frame_equal(df, result)
# test the round trip using file handle - to_csv -> read_csv
with get_handle(
filename, "w", compression=compression, encoding=encoding
) as handles:
df.to_csv(handles.handle, encoding=encoding)
assert not handles.handle.closed
result = read_csv(
filename,
compression=compression,
encoding=encoding,
index_col=0,
).squeeze("columns")
tm.assert_frame_equal(df, result)
# explicitly make sure file is compressed
with tm.decompress_file(filename, compression) as fh:
text = fh.read().decode(encoding or "utf8")
for col in df.columns:
assert col in text
with tm.decompress_file(filename, compression) as fh:
tm.assert_frame_equal(df, read_csv(fh, index_col=0, encoding=encoding))
def test_to_csv_date_format(self, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_date_format__") as path:
dt_index = datetime_frame.index
datetime_frame = DataFrame(
{"A": dt_index, "B": dt_index.shift(1)}, index=dt_index
)
datetime_frame.to_csv(path, date_format="%Y%m%d")
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_int = datetime_frame.applymap(
lambda x: int(x.strftime("%Y%m%d"))
)
datetime_frame_int.index = datetime_frame_int.index.map(
lambda x: int(x.strftime("%Y%m%d"))
)
tm.assert_frame_equal(test, datetime_frame_int)
datetime_frame.to_csv(path, date_format="%Y-%m-%d")
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(
lambda x: x.strftime("%Y-%m-%d")
)
datetime_frame_str.index = datetime_frame_str.index.map(
lambda x: x.strftime("%Y-%m-%d")
)
tm.assert_frame_equal(test, datetime_frame_str)
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
datetime_frame_columns.to_csv(path, date_format="%Y%m%d")
test = read_csv(path, index_col=0)
datetime_frame_columns = datetime_frame_columns.applymap(
lambda x: int(x.strftime("%Y%m%d"))
)
# Columns don't get converted to ints by read_csv
datetime_frame_columns.columns = datetime_frame_columns.columns.map(
lambda x: x.strftime("%Y%m%d")
)
tm.assert_frame_equal(test, datetime_frame_columns)
# test NaTs
nat_index = to_datetime(
["NaT"] * 10 + ["2000-01-01", "1/1/2000", "1-1-2000"]
)
nat_frame = DataFrame({"A": nat_index}, index=nat_index)
nat_frame.to_csv(path, date_format="%Y-%m-%d")
test = read_csv(path, parse_dates=[0, 1], index_col=0)
tm.assert_frame_equal(test, nat_frame)
def test_to_csv_with_dst_transitions(self):
with tm.ensure_clean("csv_date_format_with_dst") as path:
# make sure we are not failing on transitions
times = date_range(
"2013-10-26 23:00",
"2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous="infer",
)
for i in [times, times + pd.Timedelta("10s")]:
i = i._with_freq(None) # freq is not preserved by read_csv
time_range = np.array(range(len(i)), dtype="int64")
df = DataFrame({"A": time_range}, index=i)
df.to_csv(path, index=True)
# we have to reconvert the index as we
# don't parse the tz's
result = read_csv(path, index_col=0)
result.index =
|
to_datetime(result.index, utc=True)
|
pandas.to_datetime
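# A sketch (assumption; the original test continues beyond the completion above):
# once parsed back as UTC, the index can be converted to the original timezone so
# that it lines up with df's tz-aware index for comparison.
restored_index = to_datetime(result.index, utc=True).tz_convert("Europe/London")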
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from datetime import timedelta, datetime, date
from dateutil.relativedelta import relativedelta
import pandas as pd
from pytz import utc
from odoo import models, fields, api, _
from odoo.http import request
from odoo.tools import float_utils
ROUNDING_FACTOR = 16
class HrLeave(models.Model):
_inherit = 'hr.leave'
duration_display = fields.Char('Requested (Days/Hours)', compute='_compute_duration_display', store=True,
help="Field allowing to see the leave request duration"
" in days or hours depending on the leave_type_request_unit")
class Employee(models.Model):
_inherit = 'hr.employee'
birthday = fields.Date('Date of Birth', groups="base.group_user", help="Birthday")
@api.model
def check_user_group(self):
uid = request.session.uid
user = self.env['res.users'].sudo().search([('id', '=', uid)], limit=1)
if user.has_group('hr.group_hr_manager'):
return True
else:
return False
@api.model
def get_user_employee_details(self):
uid = request.session.uid
employee = self.env['hr.employee'].sudo().search_read([('user_id', '=', uid)], limit=1)
leaves_to_approve = self.env['hr.leave'].sudo().search_count([('state', 'in', ['confirm', 'validate1'])])
today = datetime.strftime(datetime.today(), '%Y-%m-%d')
query = """
select count(id)
from hr_leave
WHERE (hr_leave.date_from::DATE,hr_leave.date_to::DATE) OVERLAPS ('%s', '%s') and
state='validate'""" % (today, today)
cr = self._cr
cr.execute(query)
leaves_today = cr.fetchall()
first_day = date.today().replace(day=1)
last_day = (date.today() + relativedelta(months=1, day=1)) - timedelta(1)
query = """
select count(id)
from hr_leave
WHERE (hr_leave.date_from::DATE,hr_leave.date_to::DATE) OVERLAPS ('%s', '%s')
and state='validate'""" % (first_day, last_day)
cr = self._cr
cr.execute(query)
leaves_this_month = cr.fetchall()
leaves_alloc_req = self.env['hr.leave.allocation'].sudo().search_count(
[('state', 'in', ['confirm', 'validate1'])])
timesheet_count = self.env['account.analytic.line'].sudo().search_count(
[('project_id', '!=', False), ('user_id', '=', uid)])
timesheet_view_id = self.env.ref('hr_timesheet.hr_timesheet_line_search')
job_applications = self.env['hr.applicant'].sudo().search_count([])
if employee:
sql = """select broad_factor from hr_employee_broad_factor where id =%s"""
self.env.cr.execute(sql, (employee[0]['id'],))
result = self.env.cr.dictfetchall()
broad_factor = result[0]['broad_factor']
if employee[0]['birthday']:
diff = relativedelta(datetime.today(), employee[0]['birthday'])
age = diff.years
else:
age = False
if employee[0]['joining_date']:
diff = relativedelta(datetime.today(), employee[0]['joining_date'])
years = diff.years
months = diff.months
days = diff.days
experience = '{} years {} months {} days'.format(years, months, days)
else:
experience = False
if employee:
data = {
'broad_factor': broad_factor if broad_factor else 0,
'leaves_to_approve': leaves_to_approve,
'leaves_today': leaves_today,
'leaves_this_month': leaves_this_month,
'leaves_alloc_req': leaves_alloc_req,
'emp_timesheets': timesheet_count,
'job_applications': job_applications,
'timesheet_view_id': timesheet_view_id,
'experience': experience,
'age': age
}
employee[0].update(data)
return employee
else:
return False
@api.model
def get_upcoming(self):
cr = self._cr
uid = request.session.uid
employee = self.env['hr.employee'].search([('user_id', '=', uid)], limit=1)
cr.execute("""select *,
(to_char(dob,'ddd')::int-to_char(now(),'ddd')::int+total_days)%total_days as dif
from (select he.id, he.name, to_char(he.birthday, 'Month dd') as birthday,
hj.name as job_id , he.birthday as dob,
(to_char((to_char(now(),'yyyy')||'-12-31')::date,'ddd')::int) as total_days
FROM hr_employee he
join hr_job hj
on hj.id = he.job_id
) birth
where (to_char(dob,'ddd')::int-to_char(now(),'DDD')::int+total_days)%total_days between 0 and 15
order by dif;""")
birthday = cr.fetchall()
# e.is_online # was there below
# where e.state ='confirm' on line 118/9 #change
cr.execute("""select e.name, e.date_begin, e.date_end, rc.name as location
from event_event e
left join res_partner rp
on e.address_id = rp.id
left join res_country rc
on rc.id = rp.country_id
and (e.date_begin >= now()
and e.date_begin <= now() + interval '15 day')
or (e.date_end >= now()
and e.date_end <= now() + interval '15 day')
order by e.date_begin """)
event = cr.fetchall()
announcement = []
if employee:
department = employee.department_id
job_id = employee.job_id
sql = """select ha.name, ha.announcement_reason
from hr_announcement ha
left join hr_employee_announcements hea
on hea.announcement = ha.id
left join hr_department_announcements hda
on hda.announcement = ha.id
left join hr_job_position_announcements hpa
on hpa.announcement = ha.id
where ha.state = 'approved' and
ha.date_start <= now()::date and
ha.date_end >= now()::date and
(ha.is_announcement = True or
(ha.is_announcement = False
and ha.announcement_type = 'employee'
and hea.employee = %s)""" % employee.id
if department:
sql += """ or
(ha.is_announcement = False and
ha.announcement_type = 'department'
and hda.department = %s)""" % department.id
if job_id:
sql += """ or
(ha.is_announcement = False and
ha.announcement_type = 'job_position'
and hpa.job_position = %s)""" % job_id.id
sql += ')'
cr.execute(sql)
announcement = cr.fetchall()
return {
'birthday': birthday,
'event': event,
'announcement': announcement
}
@api.model
def get_dept_employee(self):
cr = self._cr
cr.execute("""select department_id, hr_department.name,count(*)
from hr_employee join hr_department on hr_department.id=hr_employee.department_id
group by hr_employee.department_id,hr_department.name""")
dat = cr.fetchall()
data = []
for i in range(0, len(dat)):
data.append({'label': dat[i][1], 'value': dat[i][2]})
return data
@api.model
def get_department_leave(self):
month_list = []
graph_result = []
for i in range(5, -1, -1):
last_month = datetime.now() - relativedelta(months=i)
text = format(last_month, '%B %Y')
month_list.append(text)
self.env.cr.execute("""select id, name from hr_department where active=True """)
departments = self.env.cr.dictfetchall()
department_list = [x['name'] for x in departments]
for month in month_list:
leave = {}
for dept in departments:
leave[dept['name']] = 0
vals = {
'l_month': month,
'leave': leave
}
graph_result.append(vals)
sql = """
SELECT h.id, h.employee_id,h.department_id
, extract('month' FROM y)::int AS leave_month
, to_char(y, 'Month YYYY') as month_year
, GREATEST(y , h.date_from) AS date_from
, LEAST (y + interval '1 month', h.date_to) AS date_to
FROM (select * from hr_leave where state = 'validate') h
, generate_series(date_trunc('month', date_from::timestamp)
, date_trunc('month', date_to::timestamp)
, interval '1 month') y
where date_trunc('month', GREATEST(y , h.date_from)) >= date_trunc('month', now()) - interval '6 month' and
date_trunc('month', GREATEST(y , h.date_from)) <= date_trunc('month', now())
and h.department_id is not null
"""
self.env.cr.execute(sql)
results = self.env.cr.dictfetchall()
leave_lines = []
for line in results:
employee = self.browse(line['employee_id'])
from_dt = fields.Datetime.from_string(line['date_from'])
to_dt = fields.Datetime.from_string(line['date_to'])
days = employee.get_work_days_dashboard(from_dt, to_dt)
line['days'] = days
vals = {
'department': line['department_id'],
'l_month': line['month_year'],
'days': days
}
leave_lines.append(vals)
if leave_lines:
df =
|
pd.DataFrame(leave_lines)
|
pandas.DataFrame
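# A minimal sketch (assumption, not the original continuation) of how the
# leave_lines records could be aggregated once in the DataFrame df: total leave
# days per month and department, matching the month/department layout that
# graph_result is initialised with above.
summary = df.groupby(['l_month', 'department'])['days'].sum().unstack(fill_value=0)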
|
import datetime as dt
import numpy as np
import pandas as pd
import plotly.express as px
import statsmodels.api as sm
###################################
# Time Series Data Quality Checks
###################################
class DataQualityCheck:
"""
A class used to capture summary stats and data quality checks prior to uploading time series data to DataRobot
Attributes:
-----------
df : DataFrame
time series data, including a date column and target variable at a minimum
settings : dict
definitions of date_col, target_col, series_id and time series parameters
stats : dict
summary statistics generated from `calc_summary_stats`
duplicate_dates : int
duplicate dates in the time series date_col
series_time_steps : series
steps between time units for each series_id
series_max_gap : series
maximum time gap per series
series_length : series
length of each series_id
series_pct : series
percent of series with complete time steps
irregular : boolean
True if df contains irregular time series data
series_negative_target_pct : float
Percent of target values that are negative
Methods:
--------
calc_summary_stats(settings, df)
generates a dictionary of summary statistics
calc_time_steps(settings, df)
calculate time steps per series_id
hierarchical_check(settings, df)
check if time series data passes the hierarchical check
zero_inflated_check(settings, df)
check if target value contains zeros
negative_values_check(settings, df)
check if target value contains negative values
time_steps_gap_check(settings, df)
check if any series has missing time steps
irregular_check(settings, df)
check if time series data is irregular
"""
def __init__(self, df, ts_settings):
self.df = df
self.settings = ts_settings
self.stats = None
self.duplicate_dates = None
self.series_time_steps = None
self.series_length = None
self.series_pct = None
self.irregular = None
self.series_negative_target_pct = None
self.project_time_unit = None
self.project_time_step = None
self.calc_summary_stats()
self.calc_time_steps()
self.run_all_checks()
def calc_summary_stats(self):
"""
Analyze time series data to perform checks and gather summary statistics prior to modeling.
"""
date_col = self.settings['date_col']
series_id = self.settings['series_id']
target = self.settings['target']
df = self.df
df[date_col] = pd.to_datetime(df[date_col])
df.sort_values(by=[date_col, series_id], ascending=True, inplace=True)
# Create dictionary of helpful statistics
stats = dict()
stats['rows'] = df.shape[0]
stats['columns'] = df.shape[1]
stats['min_' + str(target)] = df[target].min()
stats['max_' + str(target)] = df[target].max()
stats['series'] = len(df[series_id].unique())
stats['start_date'] = df[date_col].min()
stats['end_date'] = df[date_col].max()
stats['timespan'] = stats['end_date'] - stats['start_date']
stats['median_timestep'] = df.groupby([series_id])[date_col].diff().median()
stats['min_timestep'] = df.groupby([series_id])[date_col].diff().min()
stats['max_timestep'] = df.groupby([series_id])[date_col].diff().max()
# create data for histogram of series lengths
stats['series_length'] = (
df.groupby([series_id])[date_col].apply(lambda x: x.max() - x.min())
/ stats['median_timestep']
)
# calculate max gap per series
stats['series_max_gap'] = (
df.groupby([series_id])[date_col].apply(lambda x: x.diff().max())
/ stats['median_timestep']
)
self.stats = stats
def calc_percent_missing(self, missing_value=np.nan):
"""
Calculate percentage of rows where target is np.nan
"""
target = self.settings['target']
df = self.df
if np.isnan(missing_value):
percent_missing = sum(np.isnan(df[target])) / len(df)
else:
percent_missing = sum(df[target] == missing_value) / len(df)
self.stats['percent_missing'] = percent_missing
print('{:0.2f}% of the rows are missing a target value'.format(percent_missing * 100))
def get_zero_inflated_series(self, cutoff=0.99):
"""
Identify series where the target is 0.0 in more than x% of the rows
Returns:
--------
List of series
"""
assert 0 < cutoff <= 1.0, 'cutoff must be between 0 and 1'
series_id = self.settings['series_id']
target = self.settings['target']
df = self.df
df = df.groupby([series_id])[target].apply(lambda x: (x.dropna() == 0).mean())
series = df[df >= cutoff].index.values
pct = len(series) / self.stats['series']
print(
'{:0.2f}% series have zeros in more than {:0.2f}% or more of the rows'.format(
pct * 100, cutoff * 100
)
)
def calc_time_steps(self):
"""
Calculate timesteps per series
"""
date_col = self.settings['date_col']
series_id = self.settings['series_id']
df = self.df
if self.stats is None:
print('calc_summary_stats must be run first!')
# create data for histogram of timestep
series_timesteps = df.groupby([series_id])[date_col].diff() / self.stats['median_timestep']
self.series_time_steps = series_timesteps
def hierarchical_check(self):
"""
Calculate percentage of series that appear on each timestep
"""
date_col = self.settings['date_col']
series_id = self.settings['series_id']
df = self.df
if self.stats is None:
print('calc_summary_stats must be run first!')
# Test if series passes the hierarchical check
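# Rule used below: for every timestamp, compute the fraction of series present;
# the data passes when more than 95% of timestamps contain more than 95% of the series.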
series_pct = df.groupby([date_col])[series_id].apply(
lambda x: x.count() / self.stats['series']
)
if np.where(series_pct > 0.95, 1, 0).mean() > 0.95:
self.stats['passes_hierarchical_check'] = True
print(
'Data passes hierarchical check! DataRobot hierarchical blueprints will run if you enable cross series features.'
)
else:
print('Data fails hierarchical check! No hierarchical blueprints will run.')
self.stats['passes_hierarchical_check'] = False
self.series_pct = series_pct
def zero_inflated_check(self):
"""
Check if minimum target value is 0.0
"""
target = self.settings['target']
df = self.df
if min(df[target]) == 0:
self.stats['passes_zero_inflated_check'] = False
print('The minimum target value is zero. Zero-Inflated blueprints will run.')
else:
self.stats['passes_zero_inflated_check'] = True
            print('Minimum target value is not 0. Zero-inflated blueprints will not run.')
def negative_values_check(self):
"""
Check if any series contain negative values. If yes, identify and call out which series by id.
"""
series_id = self.settings['series_id']
target = self.settings['target']
df = self.df
df['target_sign'] = np.sign(df[target])
try:
# Get percent of series that have at least one negative value
any_series_negative = (
df.groupby([series_id])['target_sign'].value_counts().unstack()[-1]
)
series_negative_target_pct = np.sign(any_series_negative).sum() / len(
df[series_id].unique()
)
df.drop('target_sign', axis=1, inplace=True)
self.stats['passes_negative_values_check'] = False
print(
'{0:.2f}% of series have at least one negative {1} value.'.format(
                    round(series_negative_target_pct * 100, 2), target
)
)
# Identify which series have negative values
# print('{} contain negative values. Consider creating a seperate project for these series.'.format(any_series_negative[any_series_negative == 1].index.values))
except:
series_negative_target_pct = 0
self.stats['passes_negative_values_check'] = True
print('No negative values are contained in {}.'.format(target))
self.series_negative_target_pct = series_negative_target_pct
def new_series_check(self):
"""
        Check if any series start after the minimum datetime
"""
min_dates = self.df.groupby(self.settings['series_id'])[self.settings['date_col']].min()
new_series = min_dates > self.stats['start_date'] + dt.timedelta(days=30)
if new_series.sum() == 0:
self.stats['series_introduced_over_time'] = False
print('No new series were introduced after the start of the training data')
else:
self.stats['series_introduced_over_time'] = True
print(
'Warning: You may encounter new series at prediction time. \n {0:.2f}% of the series appeared after the start of the training data'.format(
round(new_series.mean() * 100, 0)
)
)
def old_series_check(self):
"""
Check if any series end before the maximum datetime
"""
max_dates = self.df.groupby(self.settings['series_id'])[self.settings['date_col']].max()
old_series = max_dates < self.stats['end_date'] - dt.timedelta(days=30)
if old_series.sum() == 0:
self.stats['series_removed_over_time'] = False
print('No series were removed before the end of the training data')
else:
self.stats['series_removed_over_time'] = True
print(
'Warning: You may encounter fewer series at prediction time. \n {0:.2f}% of the series were removed before the end of the training data'.format(
round(old_series.mean() * 100, 0)
)
)
def leading_or_trailing_zeros_check(self, threshold=5, drop=True):
"""
        Check for consecutive zeros at the beginning or end of each series
"""
date_col = self.settings['date_col']
series_id = self.settings['series_id']
target = self.settings['target']
df = self.df
new_df = remove_leading_and_trailing_zeros(
df,
series_id,
date_col,
target,
leading_threshold=threshold,
trailing_threshold=threshold,
drop=drop,
)
if new_df.shape[0] < df.shape[0]:
print(f'Warning: Leading and trailing zeros detected within series')
else:
print(f'No leading or trailing zeros detected within series')
def duplicate_dates_check(self):
"""
Check for duplicate datetimes within each series
"""
duplicate_dates = self.df.groupby([self.settings['series_id'], self.settings['date_col']])[
self.settings['date_col']
].count()
duplicate_dates = duplicate_dates[duplicate_dates > 1]
if len(duplicate_dates) == 0:
print(f'No duplicate timestamps detected within any series')
self.stats['passes_duplicate_timestamp_check'] = True
else:
print('Warning: Data contains duplicate timestamps within series!')
self.stats['passes_duplicate_timestamp_check'] = False
def time_steps_gap_check(self):
"""
Check for missing timesteps within each series
"""
date_col = self.settings['date_col']
series_id = self.settings['series_id']
df = self.df
        if self.stats is None:
            print('calc_summary_stats must be run first!')
        gap_size = self.stats['median_timestep']
        # check if each series has any missing time steps
self.stats['pct_series_w_gaps'] = (
df.groupby([series_id])[date_col].apply(lambda x: x.diff().max()) > gap_size
).mean()
print(
'{0:.2f}% of series have at least one missing time step.'.format(
                round(self.stats['pct_series_w_gaps'] * 100, 2)
)
)
def _get_spacing(self, df, project_time_unit):
"""
Helper function for self.irregular_check()
Returns:
--------
List of series
"""
project_time_unit = self.project_time_unit
ts_settings = self.settings
date_col = ts_settings['date_col']
series_id = ts_settings['series_id']
df['indicator'] = 1
df = fill_missing_dates(df=df, ts_settings=ts_settings)
if project_time_unit == 'minute':
df['minute'] = df[date_col].dt.minute
elif project_time_unit == 'hour':
df['hour'] = df[date_col].dt.hour
elif project_time_unit == 'day':
df['day'] = df[date_col].dt.dayofweek
elif project_time_unit == 'week':
df['week'] = df[date_col].dt.week
elif project_time_unit == 'month':
df['month'] = df[date_col].dt.month
sums = df.groupby([series_id, project_time_unit])['indicator'].sum()
counts = df.groupby([series_id, project_time_unit])['indicator'].agg(
lambda x: x.fillna(0).count()
)
pcts = sums / counts
irregular = pcts.reset_index(drop=True) < 0.8
irregular = irregular[irregular]
return irregular
def irregular_check(self, plot=False):
"""
Check for irregular spacing within each series
"""
date_col = self.settings['date_col']
df = self.df.copy()
# first cast date column to a pandas datetime type
df[date_col] = pd.to_datetime(df[date_col])
project_time_unit, project_time_step = get_timestep(self.df, self.settings)
self.project_time_unit = project_time_unit
self.project_time_step = project_time_step
print('Project Timestep: ', project_time_step, ' ', project_time_unit)
if project_time_unit == 'minute':
df['minute'] = df[date_col].dt.minute
elif project_time_unit == 'hour':
df['hour'] = df[date_col].dt.hour
elif project_time_unit == 'day':
df['day'] = df[date_col].dt.dayofweek
elif project_time_unit == 'week':
df['week'] = df[date_col].dt.week
elif project_time_unit == 'month':
df['month'] = df[date_col].dt.month
# Plot histogram of timesteps
time_unit_counts = df[project_time_unit].value_counts()
if plot:
time_unit_percent = time_unit_counts / sum(time_unit_counts.values)
fig = px.bar(
time_unit_percent,
x=time_unit_percent.index,
y=time_unit_percent.values,
title=f'Percentage of records per {project_time_unit}',
)
fig.update_xaxes(title=project_time_unit)
fig.update_yaxes(title='Percentage')
fig.show()
# Detect uncommon time steps
# If time bin has less than 30% of most common bin then it is an uncommon time bin
uncommon_time_bins = list(
time_unit_counts[(time_unit_counts / time_unit_counts.max()) < 0.3].index
)
common_time_bins = list(
time_unit_counts[(time_unit_counts / time_unit_counts.max()) >= 0.3].index
)
if len(uncommon_time_bins) > 0:
print(f'Uncommon {project_time_unit}s:', uncommon_time_bins)
else:
print('There are no uncommon time steps')
# Detect irregular series
df = df.loc[df[project_time_unit].isin(common_time_bins), :]
irregular_series = self._get_spacing(df, project_time_unit)
if len(irregular_series) > 0:
print(
'Series are irregularly spaced. Projects will only be able to run in row-based mode!'
)
self.stats['passes_irregular_check'] = False
else:
self.stats['passes_irregular_check'] = True
print(
'Timesteps are regularly spaced. You will be able to run projects in either time-based or row-based mode'
)
def detect_periodicity(self, alpha=0.05):
"""
Calculate project-level periodicity
"""
timestep = self.project_time_unit
df = self.df
target = self.settings['target']
date_col = self.settings['date_col']
metric = self.settings['metric']
metrics = {
'LogLoss': sm.families.Binomial(),
'RMSE': sm.families.Gaussian(),
'Poisson Deviance': sm.families.Poisson(),
'Gamma Deviance': sm.families.Gamma(),
}
periodicity = {
'moh': 'hourly',
'hod': 'daily',
'dow': 'weekly',
'dom': 'monthly',
'month': 'yearly',
'year': 'multi-year'
}
try:
loss = metrics[metric]
except KeyError:
loss = metrics['RMSE']
# Instantiate a glm with the default link function.
df[date_col] = pd.to_datetime(df[date_col])
df = df.loc[np.isfinite(df[target]), :].copy()
df['moh'] = df[date_col].dt.minute
df['hod'] = df[date_col].dt.hour
df['dow'] = df[date_col].dt.dayofweek
df['dom'] = df[date_col].dt.day
df['month'] = df[date_col].dt.month
df['year'] = df[date_col].dt.year
if timestep == 'minute':
inputs = ['moh', 'hod', 'dow', 'dom', 'month']
elif timestep == 'hour':
inputs = ['hod', 'dow', 'dom', 'month']
elif timestep == 'day':
inputs = ['dow', 'dom', 'month']
elif timestep == 'week':
inputs = ['month']
elif timestep == 'month':
inputs = ['month','year']
else:
raise ValueError('timestep has to be either minute, hour, day, week, or month')
output = []
for i in inputs:
x = pd.DataFrame(df[i])
y = df[target]
x = pd.get_dummies(x.astype('str'), drop_first=True)
x['const'] = 1
clf = sm.GLM(endog=y, exog=x, family=loss)
model = clf.fit()
if any(model.pvalues[:-1] <= alpha):
output.append(periodicity[i])
# print(f'Detected periodicity: {periodicity[i]}')
# return periodicity[i]
if len(output) > 0:
print(f'Detected periodicity: {output}')
else:
print('No periodicity detected')
def run_all_checks(self):
"""
Runner function to run all data checks in one call
"""
print('Running all data quality checks...\n')
series = self.stats['series']
start_date = self.stats['start_date']
end_date = self.stats['end_date']
rows = self.stats['rows']
cols = self.stats['columns']
print(f'There are {rows} rows and {cols} columns')
print(f'There are {series} series')
print(f'The data spans from {start_date} to {end_date}')
self.hierarchical_check()
self.zero_inflated_check()
self.new_series_check()
self.old_series_check()
self.duplicate_dates_check()
self.leading_or_trailing_zeros_check()
self.time_steps_gap_check()
self.calc_percent_missing()
self.get_zero_inflated_series()
self.irregular_check()
self.detect_periodicity()
def get_timestep(df, ts_settings):
"""
Calculate the project-level timestep
Returns:
--------
project_time_unit: minute, hour, day, week, or month
project_time_step: int
Examples:
--------
'1 days'
'4 days'
'1 week'
'2 months'
"""
date_col = ts_settings['date_col']
series_id = ts_settings['series_id']
df = df.copy()
# Cast date column to a pandas datetime type and sort df
df[date_col] = pd.to_datetime(df[date_col])
df.sort_values(by=[date_col, series_id], ascending=True, inplace=True)
# Calculate median timestep
deltas = df.groupby([series_id])[date_col].diff().reset_index(drop=True)
median_timestep = deltas.apply(lambda x: x.total_seconds()).median()
# Logic to detect project time step and time unit
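    # The thresholds below are in seconds: 60 = 1 minute, 3600 = 1 hour, 86400 = 1 day,
    # 604800 = 1 week, and 2.628e6 is approximately one average month (about 30.4 days).
    # The modulus checks require the median timestep to be a whole multiple of the unit.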
if (60 <= median_timestep < 3600) & (median_timestep % 60 == 0):
project_time_unit = 'minute'
project_time_step = int(median_timestep / 60)
df['minute'] = df[date_col].dt.minute
elif (3600 <= median_timestep < 86400) & (median_timestep % 3600 == 0):
project_time_unit = 'hour'
project_time_step = int(median_timestep / 3600)
df['hour'] = df[date_col].dt.hour
elif (86400 <= median_timestep < 604800) & (median_timestep % 86400 == 0):
project_time_unit = 'day'
project_time_step = int(median_timestep / 86400)
df['day'] = df[date_col].dt.strftime('%A')
elif (604800 <= median_timestep < 2.628e6) & (median_timestep % 604800 == 0):
project_time_unit = 'week'
project_time_step = int(median_timestep / 604800)
df['week'] = df[date_col].dt.week
elif (median_timestep >= 2.628e6) & (median_timestep / 2.628e6 <= 1.02):
# elif (median_timestep >= 2.628e6) & (median_timestep % 2.628e6 == 0): # original
project_time_unit = 'month'
project_time_step = int(median_timestep / 2.628e6)
df['month'] = df[date_col].dt.month
else:
raise ValueError(f'{median_timestep} seconds is not a supported timestep')
# print('Project Timestep: 1', project_time_unit)
return project_time_unit, project_time_step
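# Example (hypothetical data): a panel observed once per day yields ('day', 1);
# one observation every other week yields ('week', 2).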
def _reindex_dates(group, freq):
"""
Helper function for fill_missing_dates()
"""
date_range = pd.date_range(group.index.min(), group.index.max(), freq=freq)
group = group.reindex(date_range)
return group
def fill_missing_dates(df, ts_settings, freq=None):
"""
Insert rows with np.nan targets for series with missing timesteps between the series start and end dates
df: pandas df
ts_settings: dictionary of parameters for time series project
freq: project time unit and timestep
Returns:
--------
pandas df with inserted rows
"""
date_col = ts_settings['date_col']
series_id = ts_settings['series_id']
df = df.copy()
df[date_col] = pd.to_datetime(df[date_col])
df.sort_values(by=[series_id, date_col], ascending=True, inplace=True)
if freq is None:
mapper = {'minute': 'min', 'hour': 'H', 'day': 'D', 'week': 'W', 'month': 'M'}
project_time_unit, project_time_step = get_timestep(df, ts_settings)
freq = str(project_time_step) + mapper[project_time_unit]
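        # e.g. a daily project gives freq='1D' and a 2-hour project gives freq='2H',
        # which pd.date_range() interprets as a pandas offset alias.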
df = (
df.set_index(date_col)
.groupby(series_id)
.apply(_reindex_dates, freq)
.rename_axis((series_id, date_col))
.drop(series_id, axis=1)
.reset_index()
)
return df.reset_index(drop=True)
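# Example (hypothetical): in a daily project, a series with rows on 2020-01-01 and
# 2020-01-03 gains an inserted 2020-01-02 row whose non-key columns are NaN.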
def _remove_leading_zeros(df, date_col, target, threshold=5, drop=False):
"""
Remove excess zeros at the beginning of series
df: pandas df
date_col: str
Column name for datetime column in df
target: str
Column name for target column in df
threshold: minimum number of consecutive zeros at the beginning of a series before rows are dropped
drop: specifies whether to drop the zeros or set them to np.nan
Returns:
--------
pandas df
"""
df[date_col] = pd.to_datetime(df[date_col])
df_non_zero = df[(df[target] != 0) & (~pd.isnull(df[target]))]
min_date = df_non_zero[date_col].min()
df_begin = df[df[date_col] < min_date]
if df_begin[target].dropna().shape[0] >= threshold or pd.isnull(min_date):
if drop:
if pd.isnull(min_date):
return pd.DataFrame(columns=df.columns, dtype=float)
return df[df[date_col] >= min_date]
else:
df[target] = df.apply(
lambda row: np.nan
if pd.isnull(min_date) or row[date_col] < min_date
else row[target],
axis=1,
)
return df
else:
return df
def _remove_trailing_zeros(df, date_col, target, threshold=5, drop=False):
"""
Remove excess zeros at the end of series
df: pandas df
date_col: str
Column name for datetime column in df
target: str
Column name for target column in df
threshold: minimum number of consecutive zeros at the beginning of a series before rows are dropped
drop: specifies whether to drop the zeros or set them to np.nan
Returns:
--------
pandas df
"""
    df[date_col] = pd.to_datetime(df[date_col])
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale
from scipy.stats import pearsonr, spearmanr
from ripser import Rips
from dtw import dtw, accelerated_dtw
from datetime import timedelta, date
import pickle
import os
class dyads():
def __init__(self, dfi, dfp):
self.dfi = dfi
self.dfp = dfp
self.start_date = pd.to_datetime('1955-1-1', format='%Y-%m-%d')
self.end_date = pd.to_datetime('1978-12-31', format='%Y-%m-%d')
self.country_codes_i = {
'USA': ['USA'],
'USSR': ['USR'],
'China': ['CHN'],
'East-Germany': ['GME'],
'West-Germany': ['GMW'],
'Canada': ['CAD']
}
self.country_codes_p = {
'USA': ['USA'],
'USSR': ['SUN', 'RUS'],
'China': ['CHN'],
'East-Germany': ['DDR'],
'West-Germany': ['DEU'],
'Canada': ['CAN']
}
self.complete_dyads = None # track data with complete data in cor
pass
def filter_dates(self, dfi=None, dfp=None):
'''
filter by selected date range
'''
if dfi is None:
dfi_filt = self.dfi
dfp_filt = self.dfp
# convert to datetime
dfi_filt['date'] = pd.to_datetime(
dfi_filt['date'], format='%Y-%m-%d')
dfp_filt['date'] = pd.to_datetime(
dfp_filt['story_date'], format='%m/%d/%Y')
start = self.start_date
end = self.end_date
pass
else:
dfi_filt = dfi
dfp_filt = dfp
start = max(min(dfi_filt.date), min(dfp_filt.date))
end = min(max(dfi_filt.date), max(dfp_filt.date))
pass
# filter by start and end dates
dfi_filt = dfi_filt[(dfi_filt.date >= start) & (dfi_filt.date <= end)]
dfp_filt = dfp_filt[(dfp_filt.date >= start) & (dfp_filt.date <= end)]
return (dfi_filt, dfp_filt)
def initial_manupulations(self):
'''
rename and select columns
'''
self.dfp = self.dfp.rename(
columns={
'source_root': 'actor',
'target': 'something_else',
'target_root': 'target'
})
self.dfp = self.dfp[['date', 'actor', 'target', 'goldstein']]
self.dfi = self.dfi[['date', 'actor', 'target', 'scale']]
# implement azar weights
azar_weighting = [
92, 47, 31, 27, 14, 10, 6, 0, -6, -16, -29, -44, -50, -65, -102
]
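        # These 15 values appear to be Azar's COPDAB intensity weights, indexed by the
        # 'scale' category (1 = most cooperative, 15 = most conflictual).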
self.dfp['score'] = self.dfp['goldstein']
self.dfi['score'] = [
azar_weighting[ind - 1] for ind in self.dfi['scale'].to_list()
]
# create time frame designations
self.dfp['year'] = pd.DatetimeIndex(self.dfp.date).to_period('Y')
self.dfi['year'] = pd.DatetimeIndex(self.dfi.date).to_period('Y')
        self.dfp['month'] = pd.DatetimeIndex(self.dfp.date)
#coding:utf-8
from PyQt5 import QtWidgets, QtWidgets
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import sys
import cv2
import os
import cv2
import numpy as np
import time
import os
from PIL import Image, ImageDraw, ImageFont
import scipy.misc
import pickle
import datetime
import tensorflow as tf
import glog as log
from glob import glob
import pandas as pd
# Image annotation widget
class Mark(QtWidgets.QWidget):
def __init__(self, imgPath, lablePath, imageOffsetX, imageOffsetY, excelCon):
super(Mark,self).__init__()
self.imgPath=imgPath
self.lablePath=lablePath
self.imageOffsetX=imageOffsetX
self.imageOffsetY=imageOffsetY
self.excelCon=excelCon
self.mouseOnImgX=0.0
self.mouseOnImgY=0.0
self.xPos=0.0
self.yPos=0.0
self.curWidth=0.0
self.curHeight=0.0
        # Window top-left at (100, 100), size 1000x900; values are adjustable, no layout manager is used
self.setGeometry(100,100,1000,900)
self.setWindowTitle(u"坐标标注") #窗口标题
self.initUI()
def initUI(self):
        # self.labelR = QtWidgets.QLabel(u'缩放比例:', self)  # label
        # self.labelR.move(200, 20)  # label position
        # self.editR = QtWidgets.QLineEdit(self)  # line edit holding the image scale ratio
        # self.editR.move(250,20)  # edit box position
        self.buttonSave = QtWidgets.QPushButton(u"保存坐标到EXCEL", self)  # save button
        self.buttonSave.move(400,20)  # save button position
        self.buttonSave.clicked.connect(self.saveButtonClick)  # slot connected to the save button
        self.allFiles = QtWidgets.QListWidget(self)  # list widget showing all image files
        self.allFiles.move(10,40)  # list widget position
        self.allFiles.resize(180,700)  # list widget size
        allImgs = os.listdir(self.imgPath)  # list the image folder and add every file to the list widget
        allImgs.sort(key= lambda x:int(x[:-4]))  # sort numerically by file name
for imgTmp in allImgs:
self.allFiles.addItem(imgTmp)
imgNum=self.allFiles.count()
        self.labelShowNum = QtWidgets.QLabel(u'图片数量:'+str(imgNum), self)  # label showing the image count
        self.labelShowNum.move(20, 20)  # label position
        self.allFiles.itemClicked.connect(self.itemClick)  # list-widget click handler; the signal/slot decorator style did not work here
        self.allFiles.itemSelectionChanged.connect(self.itemSeleChange)
        self.labelImg = QtWidgets.QLabel("选中显示图片", self)  # label used to display the selected image
        self.labelImg.move(self.imageOffsetX, self.imageOffsetY)  # image label position
# def closeEvent(self, event):
# self.file.close()
# print('file close')
        # # event.ignore() # ignore the close event
        # # self.hide() # hide the window
    # convert a cv2 (OpenCV) image to a QPixmap via QImage
def img2pixmap(self, image):
Y, X = image.shape[:2]
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order='C')
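        # Pack the OpenCV BGR channels into a 4-byte-per-pixel buffer whose memory
        # layout matches what QImage.Format_RGB32 expects on little-endian systems.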
self._bgra[..., 0] = image[..., 0]
self._bgra[..., 1] = image[..., 1]
self._bgra[..., 2] = image[..., 2]
qimage = QImage(self._bgra.data, X, Y, QImage.Format_RGB32)
pixmap = QPixmap.fromImage(qimage)
return pixmap
    # get the image and its file name for the currently selected list item
def selectItemGetImg(self):
imgName=self.allFiles.currentItem().text()
        imgDirName = self.imgPath + self.allFiles.currentItem().text()  # absolute path of the image
        imgOri = cv2.imread(str(imgDirName),1)  # read the image
        self.curHeight = imgOri.shape[0]  # image height
        self.curWidth = imgOri.shape[1]  # image width, used for scaling
return imgOri, imgName
    # draw the marker at (x, y) and display the image
def pointorShow(self, img, x, y):
cv2.circle(img,(x, y),3,(0,0,255),2)
cv2.circle(img,(x, y),5,(0,255,0),2)
        self.labelImg.resize(self.curWidth,self.curHeight)  # resize the display label; the image is scaled to this width/height
self.labelImg.setPixmap(self.img2pixmap(img))
    # mouse press event
def mousePressEvent(self, QMouseEvent):
        pointT = QMouseEvent.pos()  # position of the mouse click
self.mouseOnImgX=pointT.x()-200
self.mouseOnImgY=pointT.y()-70
imgOri, _=self.selectItemGetImg()
self.pointorShow(imgOri, self.mouseOnImgX, self.mouseOnImgY)
        # save the label
self.saveLabelBySelectItem()
    # when the list selection changes, show the stored coordinates on the image
def itemSelectShowImg(self):
imgOri, imgName=self.selectItemGetImg()
        # read the normalized x, y coordinates from the Excel sheet
xScal, yScal = self.excelCon.getXYPoint('imageName', imgName)
        # convert the normalized x, y back to pixel coordinates
self.mouseOnImgX=int(xScal*self.curWidth)
self.mouseOnImgY=int(yScal*self.curHeight)
self.pointorShow(imgOri, self.mouseOnImgX, self.mouseOnImgY)
    def itemClick(self):  # list item clicked
self.itemSelectShowImg()
    def itemSeleChange(self):  # list selection changed
self.itemSelectShowImg()
def saveLabelBySelectItem(self):
curItem=self.allFiles.currentItem()
if(curItem==None):
            print('please select an item')
return
name=str(curItem.text())
        # normalize the coordinates
self.xPos=self.mouseOnImgX/self.curWidth
self.yPos=self.mouseOnImgY/self.curHeight
        # update or append the record
self.excelCon.updateAppendRowBycolName('imageName', name, self.xPos, self.yPos)
    def saveButtonClick(self):  # save button handler
self.saveLabelBySelectItem()
class imgTools():
def __init__(self):
self.name = "ray"
def png2jpg(self, path):
# path:=>'images/*.png'
pngs = glob(path)
for j in pngs:
img = cv2.imread(j)
cv2.imwrite(j[:-3] + 'jpg', img)
def txt2Excel(self, txtPathName, excelCon):
with open(txtPathName, 'r') as f:
lines = f.readlines()
imagesNum=len(lines)
imgNameList=[]
xList=[]
yList=[]
for i in range (imagesNum):
line=lines[i].strip().split()
imageName=line[0]
            # strip the directory prefix
imageName=imageName[44:]
print(imageName)
imgNameList.append(imageName)
landmark = np.asarray(line[1:197], dtype=np.float32)
nosice=landmark[54*2:54*2+2]
xList.append(nosice[0])
yList.append(nosice[1])
        # append all rows at once
colNames=['imageName', 'x', 'y']
datas=[]
datas.append(imgNameList)
datas.append(xList)
datas.append(yList)
excelCon.appendRowsAnyway(colNames, datas)
def CenterLabelHeatMap(self, img_width, img_height, posX, posY, sigma):
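        # Vectorized Gaussian heatmap centred at (posX, posY):
        # exp(-((x - posX)^2 + (y - posY)^2) / (2 * sigma^2)) evaluated over the full image grid.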
X1 = np.linspace(1, img_width, img_width)
Y1 = np.linspace(1, img_height, img_height)
[X, Y] = np.meshgrid(X1, Y1)
X = X - posX
Y = Y - posY
D2 = X * X + Y * Y
E2 = 2.0 * sigma * sigma
Exponent = D2 / E2
heatmap = np.exp(-Exponent)
return heatmap
# Compute gaussian kernel
def CenterGaussianHeatMap(self, img_height, img_width, posX, posY, variance):
gaussian_map = np.zeros((img_height, img_width))
for x_p in range(img_width):
for y_p in range(img_height):
dist_sq = (x_p - posX) * (x_p - posX) + \
(y_p - posY) * (y_p - posY)
exponent = dist_sq / 2.0 / variance / variance
gaussian_map[y_p, x_p] = np.exp(-exponent)
return gaussian_map
class excelTools():
def __init__(self, lablePath, excelName, sheetName=None):
self.lablePath = lablePath
self.excelName=excelName
self.sheetName=sheetName
def mkEmptyExecl(self, titleFormat):
writer = pd.ExcelWriter(self.lablePath+self.excelName, engine='xlsxwriter')
df=pd.DataFrame(titleFormat)
# df=pd.DataFrame()
if(self.sheetName==None):
df.to_excel(writer, index=False)
else:
df.to_excel(writer, sheet_name=self.sheetName, index=False)
writer.save()
def updateAppendRowBycolName(self, colName, keyWord, x, y):
dirName=self.lablePath+self.excelName
if(self.sheetName==None):
df = pd.read_excel(dirName)
else:
            df = pd.read_excel(dirName, sheet_name=self.sheetName)
import baostock as bs
import pandas as pd
from wdc.util.logger import logger
class BaoStockData:
def __init__(self):
pass
@staticmethod
def query_trade_dates(start_date=None, end_date=None):
"""
交易日查询
方法说明:通过API接口获取股票交易日信息,可以通过参数设置获取起止年份数据,提供上交所1990-今年数据。 返回类型:pandas的DataFrame类型。
:param start_date:开始日期,为空时默认为2015-01-01。
:param end_date:结束日期,为空时默认为当前日期。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs = bs.query_trade_dates(start_date=start_date, end_date=end_date)
if rs.error_code != '0':
logger.error('query_trade_dates respond error_msg:' + rs.error_msg)
data_list = []
while (rs.error_code == '0') & rs.next():
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
bs.logout()
return result
@staticmethod
def query_all_stock(day=None):
"""
证券代码查询
方法说明:获取指定交易日期所有股票列表。通过API接口获取证券代码及股票交易状态信息,与日K线数据同时更新。可以通过参数‘某交易日’获取数据(包括:A股、指数),提供2006-今数据。
返回类型:pandas的DataFrame类型。
更新时间:与日K线同时更新。
:param day:需要查询的交易日期,为空时默认当前日期。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs = bs.query_all_stock(day=day)
if rs.error_code != '0':
logger.error('query_all_stock respond error_msg:' + rs.error_msg)
data_list = []
while (rs.error_code == '0') & rs.next():
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
bs.logout()
return result
@staticmethod
def query_stock_basic(code=None, code_name=None):
"""
证券基本资料
方法说明:获取证券基本资料,可以通过参数设置获取对应证券代码、证券名称的数据。
返回类型:pandas的DataFrame类型。
:param code:A股股票代码,sh或sz.+6位数字代码,或者指数代码,如:sh.601398。sh:上海;sz:深圳。可以为空;
:param code_name:股票名称,支持模糊查询,可以为空。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs = bs.query_stock_basic(code=code, code_name=code_name)
if rs.error_code != '0':
logger.error('query_stock_basic respond error_msg:' + rs.error_msg)
data_list = []
while (rs.error_code == '0') & rs.next():
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
bs.logout()
return result
@staticmethod
def query_history_k_data_plus(symbol, timeframe='1d', fields=None, adj=None, start_date=None, end_date=None):
"""
获取k线数据
注意:
股票停牌时,对于日线,开、高、低、收价都相同,且都为前一交易日的收盘价,成交量、成交额为0,换手率为空。
:param symbol:股票代码,6位数字代码,或者指数代码,如:sh601398。sh:上海;sz:深圳。此参数不可为空;
:param fields:获取的字段
:param timeframe:k线周期,"5m"为5分钟,"15m"为15分钟,"30m"为30分钟,"1h"为1小时,"1d"为日,"1w"为一周,"1M"为一月。指数没有分钟线数据;周线每周最后一个交易日才可以获取,月线每月最后一个交易日才可以获取。
:param adj:复权类型,默认是"3"不复权;前复权:"2";后复权:"1"。已支持分钟线、日线、周线、月线前后复权。 BaoStock提供的是涨跌幅复权算法复权因子,具体介绍见:复权因子简介或者BaoStock复权因子简介。
:param start_date:开始日期(包含),格式“YYYY-MM-DD”,为空时取2015-01-01;
:param end_date:结束日期(包含),格式“YYYY-MM-DD”,为空时取最近一个交易日;
:return:返回一个列表
"""
frequency = ''
if timeframe == "5m":
frequency = "5"
elif timeframe == "15m":
frequency = "15"
elif timeframe == "30m":
frequency = "30"
elif timeframe == "1h":
frequency = "60"
elif timeframe == "1d":
frequency = "d"
elif timeframe == "1w":
frequency = 'w'
elif timeframe == "1M":
frequency = "m"
else:
logger.error("timeframe error !")
if fields is None:
if 'm' in timeframe or 'h' in timeframe:
fields = "date,time,code,open,high,low,close,volume,amount,adjustflag"
elif "d" in timeframe:
fields = "date,code,open,high,low,close,preclose,volume,amount,adjustflag,turn,tradestatus,pctChg,isST"
elif 'w' in timeframe or 'M' in timeframe:
fields = "date,code,open,high,low,close,volume,amount,adjustflag,turn,pctChg"
else:
logger.error("timeframe error !")
if symbol.startswith('6'):
stock_name = 'sh.' + symbol
else:
stock_name = 'sz.' + symbol
#stock_name = 'sh.' + str(symbol).split('sh')[1] if str(symbol).startswith("sh") else 'sz.' + str(symbol).split('sz')[1]
adjust_flag = "3" if not adj else adj
rs = bs.query_history_k_data_plus(
code=stock_name,
fields=fields,
start_date=start_date,
end_date=end_date,
frequency=frequency,
adjustflag=adjust_flag
)
if rs.error_code != "0":
logger.error('query_history_k_data_plus respond error: {}'.format(rs.error_msg))
data_list = []
while (rs.error_code == '0') & rs.next():
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
result['code'] = symbol
#result.set_index(['date', 'code'],drop=False)
#bs.logout()
"""
if 'm' in timeframe or 'h' in timeframe:
result = result.drop("date", axis=1)
#result = result.drop('code', axis=1)
result = result.values.tolist()
"""
return result
@staticmethod
def query_dividend_data(code, year, yearType):
"""
查询除权除息信息
:param code:股票代码,sh或sz.+6位数字代码,或者指数代码,如:sh.601398。sh:上海;sz:深圳。此参数不可为空;
:param year:年份,如:2017。此参数不可为空;
:param yearType:年份类别。"report":预案公告年份,"operate":除权除息年份。此参数不可为空。
"""
lg = bs.login()
        if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs_list = []
rs_dividend = bs.query_dividend_data(code=code, year=year, yearType=yearType)
while (rs_dividend.error_code == '0') & rs_dividend.next():
rs_list.append(rs_dividend.get_row_data())
result_dividend = pd.DataFrame(rs_list, columns=rs_dividend.fields)
bs.logout()
return result_dividend
@staticmethod
def query_adjust_factor(code, start_date=None, end_date=None):
"""
查询复权因子信息
BaoStock提供的是涨跌幅复权算法复权因子
:param code:股票代码,sh或sz.+6位数字代码,或者指数代码,如:sh.601398。sh:上海;sz:深圳。此参数不可为空;
:param start_date:开始日期,为空时默认为2015-01-01,包含此日期;
:param end_date:结束日期,为空时默认当前日期,包含此日期。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs_list = []
rs_factor = bs.query_adjust_factor(code=code, start_date=start_date, end_date=end_date)
while (rs_factor.error_code == '0') & rs_factor.next():
rs_list.append(rs_factor.get_row_data())
result_factor = pd.DataFrame(rs_list, columns=rs_factor.fields)
bs.logout()
return result_factor
@staticmethod
def query_profit_data(code, year=None, quarter=None):
"""
季频盈利能力
方法说明:通过API接口获取季频盈利能力信息,可以通过参数设置获取对应年份、季度数据,提供2007年至今数据。
返回类型:pandas的DataFrame类型。
参数含义:
code:股票代码,sh或sz.+6位数字代码,或者指数代码,如:sh.601398。sh:上海;sz:深圳。此参数不可为空;
year:统计年份,为空时默认当前年;
quarter:统计季度,可为空,默认当前季度。不为空时只有4个取值:1,2,3,4。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
profit_list = []
rs_profit = bs.query_profit_data(code=code, year=year, quarter=quarter)
while (rs_profit.error_code == '0') & rs_profit.next():
profit_list.append(rs_profit.get_row_data())
result_profit = pd.DataFrame(profit_list, columns=rs_profit.fields)
bs.logout()
return result_profit
@staticmethod
def query_operation_data(code, year=None, quarter=None):
"""
季频营运能力
方法说明:通过API接口获取季频营运能力信息,可以通过参数设置获取对应年份、季度数据,提供2007年至今数据。
返回类型:pandas的DataFrame类型。
参数含义:
code:股票代码,sh或sz.+6位数字代码,或者指数代码,如:sh.601398。sh:上海;sz:深圳。此参数不可为空;
year:统计年份,为空时默认当前年;
quarter:统计季度,为空时默认当前季度。不为空时只有4个取值:1,2,3,4。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
operation_list = []
rs_operation = bs.query_operation_data(code=code, year=year, quarter=quarter)
while (rs_operation.error_code == '0') & rs_operation.next():
operation_list.append(rs_operation.get_row_data())
result_operation = pd.DataFrame(operation_list, columns=rs_operation.fields)
bs.logout()
return result_operation
@staticmethod
def query_growth_data(code, year=None, quarter=None):
"""
季频成长能力
方法说明:通过API接口获取季频成长能力信息,可以通过参数设置获取对应年份、季度数据,提供2007年至今数据。
返回类型:pandas的DataFrame类型。
参数含义:
code:股票代码,sh或sz.+6位数字代码,或者指数代码,如:sh.601398。sh:上海;sz:深圳。此参数不可为空;
year:统计年份,为空时默认当前年;
quarter:统计季度,为空时默认当前季度。不为空时只有4个取值:1,2,3,4。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
growth_list = []
rs_growth = bs.query_growth_data(code=code, year=year, quarter=quarter)
while (rs_growth.error_code == '0') & rs_growth.next():
growth_list.append(rs_growth.get_row_data())
result_growth = pd.DataFrame(growth_list, columns=rs_growth.fields)
bs.logout()
return result_growth
@staticmethod
def query_balance_data(code, year=None, quarter=None):
"""
季频偿债能力
通过API接口获取季频偿债能力信息,可以通过参数设置获取对应年份、季度数据,提供2007年至今数据。
返回类型:pandas的DataFrame类型。
参数含义:
code:股票代码,sh或sz.+6位数字代码,或者指数代码,如:sh.601398。sh:上海;sz:深圳。此参数不可为空;
year:统计年份,为空时默认当前年;
quarter:统计季度,为空时默认当前季度。不为空时只有4个取值:1,2,3,4。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
balance_list = []
rs_balance = bs.query_balance_data(code=code, year=year, quarter=quarter)
while (rs_balance.error_code == '0') & rs_balance.next():
balance_list.append(rs_balance.get_row_data())
result_balance = pd.DataFrame(balance_list, columns=rs_balance.fields)
bs.logout()
return result_balance
@staticmethod
def query_cash_flow_data(code, year=None, quarter=None):
"""
季频现金流量
方法说明:通过API接口获取季频现金流量信息,可以通过参数设置获取对应年份、季度数据,提供2007年至今数据。
返回类型:pandas的DataFrame类型.
参数含义:
code:股票代码,sh或sz.+6位数字代码,或者指数代码,如:sh.601398。sh:上海;sz:深圳。此参数不可为空;
year:统计年份,为空时默认当前年;
quarter:统计季度,为空时默认当前季度。不为空时只有4个取值:1,2,3,4。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
cash_flow_list = []
rs_cash_flow = bs.query_cash_flow_data(code=code, year=year, quarter=quarter)
while (rs_cash_flow.error_code == '0') & rs_cash_flow.next():
cash_flow_list.append(rs_cash_flow.get_row_data())
result_cash_flow = pd.DataFrame(cash_flow_list, columns=rs_cash_flow.fields)
bs.logout()
return result_cash_flow
@staticmethod
def query_dupont_data(code, year=None, quarter=None):
"""
季频杜邦指数
方法说明:通过API接口获取季频杜邦指数信息,可以通过参数设置获取对应年份、季度数据,提供2007年至今数据。
返回类型:pandas的DataFrame类型。
参数含义:
code:股票代码,sh或sz.+6位数字代码,或者指数代码,如:sh.601398。sh:上海;sz:深圳。此参数不可为空;
year:统计年份,为空时默认当前年;
quarter:统计季度,为空时默认当前季度。不为空时只有4个取值:1,2,3,4。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
dupont_list = []
rs_dupont = bs.query_dupont_data(code=code, year=year, quarter=quarter)
while (rs_dupont.error_code == '0') & rs_dupont.next():
dupont_list.append(rs_dupont.get_row_data())
result_profit = pd.DataFrame(dupont_list, columns=rs_dupont.fields)
bs.logout()
return result_profit
@staticmethod
def query_performance_express_report(code, start_date, end_date):
"""
季频公司业绩快报
方法说明:通过API接口获取季频公司业绩快报信息,可以通过参数设置获取起止年份数据,提供2006年至今数据。
返回类型:pandas的DataFrame类型。
参数含义:
code:股票代码,sh或sz.+6位数字代码,或者指数代码,如:sh.601398。sh:上海;sz:深圳。此参数不可为空;
start_date:开始日期,发布日期或更新日期在这个范围内;
end_date:结束日期,发布日期或更新日期在这个范围内。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs = bs.query_performance_express_report(code, start_date=start_date, end_date=end_date)
if rs.error_code != '0':
logger.error('query_performance_express_report respond error_msg:' + rs.error_msg)
result_list = []
while (rs.error_code == '0') & rs.next():
result_list.append(rs.get_row_data())
result = pd.DataFrame(result_list, columns=rs.fields)
bs.logout()
return result
@staticmethod
def query_forcast_report(code, start_date, end_date):
"""
季频公司业绩预告
方法说明:通过API接口获取季频公司业绩预告信息,可以通过参数设置获取起止年份数据,提供2003年至今数据。
返回类型:pandas的DataFrame类型。
参数含义:
code:股票代码,sh或sz.+6位数字代码,或者指数代码,如:sh.601398。sh:上海;sz:深圳。此参数不可为空;
start_date:开始日期,发布日期或更新日期在这个范围内;
end_date:结束日期,发布日期或更新日期在这个范围内。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs_forecast = bs.query_forecast_report(code, start_date=start_date, end_date=end_date)
if rs_forecast.error_code != '0':
            logger.error('query_forecast_report respond error_msg:' + rs_forecast.error_msg)
rs_forecast_list = []
while (rs_forecast.error_code == '0') & rs_forecast.next():
rs_forecast_list.append(rs_forecast.get_row_data())
result_forecast = pd.DataFrame(rs_forecast_list, columns=rs_forecast.fields)
bs.logout()
return result_forecast
@staticmethod
def query_deposit_rate_data(start_date=None, end_date=None):
"""
存款利率
方法说明:通过API接口获取存款利率,可以通过参数设置获取对应起止日期的数据。
返回类型:pandas的DataFrame类型。
参数含义:
start_date:开始日期,格式XXXX-XX-XX,发布日期在这个范围内,可以为空;
end_date:结束日期,格式XXXX-XX-XX,发布日期在这个范围内,可以为空。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs = bs.query_deposit_rate_data(start_date=start_date, end_date=end_date)
if rs.error_code != '0':
logger.error('query_deposit_rate_data respond error_msg:' + rs.error_msg)
data_list = []
while (rs.error_code == '0') & rs.next():
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
bs.logout()
return result
@staticmethod
def query_loan_rate_data(start_date=None, end_date=None):
"""
贷款利率
方法说明:通过API接口获取贷款利率,可以通过参数设置获取对应起止日期的数据。
返回类型:pandas的DataFrame类型。
参数含义:
start_date:开始日期,格式XXXX-XX-XX,发布日期在这个范围内,可以为空;
end_date:结束日期,格式XXXX-XX-XX,发布日期在这个范围内,可以为空。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs = bs.query_loan_rate_data(start_date=start_date, end_date=end_date)
if rs.error_code != '0':
logger.error('query_loan_rate_data respond error_msg:' + rs.error_msg)
data_list = []
while (rs.error_code == '0') & rs.next():
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
bs.logout()
return result
@staticmethod
def query_required_reserve_ratio_data(start_date=None, end_date=None, yearType=None):
"""
存款准备金率
方法说明:通过API接口获取存款准备金率,可以通过参数设置获取对应起止日期的数据。
返回类型:pandas的DataFrame类型。
参数含义:
start_date:开始日期,格式XXXX-XX-XX,发布日期在这个范围内,可以为空;
end_date:结束日期,格式XXXX-XX-XX,发布日期在这个范围内,可以为空;
yearType:年份类别,默认为0,查询公告日期;1查询生效日期。
"""
yearType = yearType or '0'
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs = bs.query_required_reserve_ratio_data(start_date=start_date, end_date=end_date, yearType=yearType)
if rs.error_code != '0':
logger.error('query_required_reserve_ratio_data respond error_msg:' + rs.error_msg)
data_list = []
while (rs.error_code == '0') & rs.next():
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
bs.logout()
return result
@staticmethod
def query_money_supply_data_month(start_date=None, end_date=None):
"""
货币供应量
方法说明:通过API接口获取货币供应量,可以通过参数设置获取对应起止日期的数据。
返回类型:pandas的DataFrame类型。
参数含义:
start_date:开始日期,格式XXXX-XX,发布日期在这个范围内,可以为空;
end_date:结束日期,格式XXXX-XX,发布日期在这个范围内,可以为空。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs = bs.query_money_supply_data_month(start_date=start_date, end_date=end_date)
if rs.error_code != '0':
logger.error('query_money_supply_data_month respond error_msg:' + rs.error_msg)
data_list = []
while (rs.error_code == '0') & rs.next():
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
bs.logout()
return result
@staticmethod
def query_money_supply_data_year(start_date=None, end_date=None):
"""
货币供应量(年底余额)
方法说明:通过API接口获取货币供应量(年底余额),可以通过参数设置获取对应起止日期的数据。
返回类型:pandas的DataFrame类型。
参数含义:
start_date:开始日期,格式XXXX,发布日期在这个范围内,可以为空;
end_date:结束日期,格式XXXX,发布日期在这个范围内,可以为空。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs = bs.query_money_supply_data_year(start_date=start_date, end_date=end_date)
if rs.error_code != '0':
logger.error('query_money_supply_data_year respond error_msg:' + rs.error_msg)
data_list = []
while (rs.error_code == '0') & rs.next():
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
bs.logout()
return result
@staticmethod
def query_shibor_data(start_date=None, end_date=None):
"""
银行间同业拆放利率
方法说明:通过API接口获取银行间同业拆放利率,可以通过参数设置获取对应起止日期的数据。
返回类型:pandas的DataFrame类型。
参数含义:
start_date:开始日期,格式XXXX,发布日期在这个范围内,可以为空;
end_date:结束日期,格式XXXX,发布日期在这个范围内,可以为空。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs = bs.query_shibor_data(start_date=start_date, end_date=end_date)
if rs.error_code != '0':
logger.error('query_shibor_data respond error_msg:' + rs.error_msg)
data_list = []
while (rs.error_code == '0') & rs.next():
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
bs.logout()
return result
@staticmethod
def query_stock_industry(code=None, date=None):
"""
行业分类
方法说明:通过API接口获取行业分类信息,更新频率:每周一更新。
返回类型:pandas的DataFrame类型。
参数含义:
code:A股股票代码,sh或sz.+6位数字代码,或者指数代码,如:sh.601398。sh:上海;sz:深圳。可以为空;
date:查询日期,格式XXXX-XX-XX,为空时默认最新日期。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs = bs.query_stock_industry(code, date)
if rs.error_code != '0':
logger.error('query_stock_industry respond error_msg:' + rs.error_msg)
industry_list = []
while (rs.error_code == '0') & rs.next():
industry_list.append(rs.get_row_data())
result = pd.DataFrame(industry_list, columns=rs.fields)
bs.logout()
return result
@staticmethod
def query_sz50_stocks(date=None):
"""
上证50成分股
方法说明:通过API接口获取上证50成分股信息,更新频率:每周一更新。
返回类型:pandas的DataFrame类型。
参数含义:
date:查询日期,格式XXXX-XX-XX,为空时默认最新日期。
"""
lg = bs.login()
if lg.error_code != '0':
logger.error('login respond error_msg:' + lg.error_msg)
rs = bs.query_sz50_stocks(date)
if rs.error_code != '0':
logger.error('query_sz50_stocks respond error_msg:' + rs.error_msg)
sz50_stocks = []
while (rs.error_code == '0') & rs.next():
sz50_stocks.append(rs.get_row_data())
        result = pd.DataFrame(sz50_stocks, columns=rs.fields)
from keepa_request import get_varies
import pandas as pd
import datetime
import pymysql
def stock_handle(file):
stock_list = []
# data = pd.read_excel(file)
# asin_list = data['asin'].tolist()
asin_list = ['B07XFCX2Z5']
for asin in asin_list:
stock_list.extend(get_varies(asin))
print(stock_list)
aft = "./data/stock_" + datetime.datetime.now().strftime("%m%d%H%M")
    data_pd = pd.DataFrame(stock_list, columns=['parent_asin', 'asin', 'style', 'stock', 'model'])
import argparse
import pandas as pd
import swifter
import re
from typing import List, Optional, Dict
from pythainlp.tokenize import word_tokenize, sent_tokenize
def break_long_sentence(text:str, sent_tokenizer=sent_tokenize,
word_toknizer=word_tokenize,
max_sent_len=300) -> List[str]:
sents = sent_tokenizer(text)
sents_n_toks = [ len(word_toknizer(sent)) for sent in sents ]
groupped_sents = []
seq_len_counter = 0
temp_groupped_sent = ''
for i, sent in enumerate(sents):
if seq_len_counter + sents_n_toks[i] >= max_sent_len:
groupped_sents.append(temp_groupped_sent)
seq_len_counter = 0
temp_groupped_sent = sent
else:
temp_groupped_sent += sent
seq_len_counter += sents_n_toks[i]
if i == len(sents) - 1:
groupped_sents.append(temp_groupped_sent)
return groupped_sents
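# A rough sketch of the intended behaviour (hypothetical input): a text whose newmm
# token count exceeds max_sent_len is returned as several strings, each holding
# consecutive sentences whose combined token count stays below max_sent_len, e.g.
# break_long_sentence(long_thai_text, max_sent_len=300) -> ['chunk 1 ...', 'chunk 2 ...']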
def drop_na(df):
return df.dropna(subset=['text'])
def drop_no_thai_char(df):
return df[df['text'].str.contains(r'[ก-๙]')]
def drop_by_min_max_newmm_tokens(df, min_tokens:int, max_tokens:int):
return df[(df['nb_tokens'] >= min_tokens) & (df['nb_tokens'] <= max_tokens)]
def strip_text(text: str):
if type(text) != str:
return text
return text.strip()
def replace_nbspace(text: str):
if type(text) != str:
return text
nbspace = '\xa0'
cleaned_text = re.sub(fr'{nbspace}', ' ', text)
return cleaned_text
def remove_thwiki_section(text:str):
if type(text) != str:
return text
search_obj = re.search(r'Section::::', text)
cleaned_text = text
if search_obj:
cleaned_text = re.sub(r'^Section::::', '', text)
cleaned_text = re.sub(r'Section::::', '', text)
cleaned_text = re.sub(r'\.$', '', cleaned_text)
return cleaned_text
def remove_soft_hyphen(text: str):
if type(text) != str:
return text
soft_hyphen = '\u00ad' # discretionary hyphen
cleaned_text = re.sub(fr'{soft_hyphen}', '', text)
return cleaned_text
def remove_zero_width_nbspace(text: str):
if type(text) != str:
return text
zero_width_nbspace = '\ufeff'
cleaned_text = re.sub(fr'{zero_width_nbspace}', '', text)
return cleaned_text
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_path', type=str)
parser.add_argument('output_path', type=str)
parser.add_argument('--drop_na', action='store_true', default=True)
parser.add_argument('--remove_thwiki_section', action='store_true', default=True)
parser.add_argument('--break_long_sentence', action='store_true', default=True)
parser.add_argument('--max_sentence_length', type=int, default=300)
parser.add_argument('--drop_no_thai_char', action='store_true', default=True)
parser.add_argument('--min_newmm_token_len', type=int, default=4)
parser.add_argument('--max_newmm_token_len', type=int, default=500)
parser.add_argument('--space_token', type=str, default='<th_roberta_space_token>')
args = parser.parse_args()
print(f'INFO: Load csv file from {args.input_path}')
df = pd.read_csv(args.input_path)
TEXT_FILTERING_RULES = [drop_na, drop_no_thai_char]
for fn in TEXT_FILTERING_RULES:
print(f'INFO: Perform filtering rule: {fn.__name__}')
print(f'INFO: df.shape (before): {df.shape}')
df = fn(df)
print(f'INFO: df.shape (after): {df.shape}')
print(f'INFO: Done.')
print('\nDone all text filtering rules. \n')
TEXT_CLEANING_RULES = [replace_nbspace, remove_soft_hyphen, remove_zero_width_nbspace, strip_text ]
if args.remove_thwiki_section:
TEXT_CLEANING_RULES.append(remove_thwiki_section)
for fn in TEXT_CLEANING_RULES:
print(f'INFO: Start cleaning rule: {fn.__name__}')
print(f'INFO: df.shape (before): {df.shape}')
df = fn(df)
print(f'INFO: df.shape (after): {df.shape}')
print(f'INFO: Done.')
    print(f'INFO: Write cleaned dataset as csv file to {args.output_path}')
print(f'INFO: df.columns : {df.columns}')
print('\nINFO: Done all text cleaning rules. \n')
    print('INFO: Perform sentence breakdown. ')
print(f' max_sentence_length: {args.max_sentence_length}')
print(f'INFO: df.shape (before): {df.shape}')
# split short and long sentences:
df_short = df[df['nb_tokens'] <= 450]
long_segments = df[df['nb_tokens'] > 450]['text'].tolist()
breaked_segments = []
for s in long_segments:
breaked_segments += break_long_sentence(s, max_sent_len=args.max_sentence_length)
print(f'\n\tNumber of long segments: {len(long_segments)}\n\tNumber of new segments: {len(breaked_segments)}')
nb_tokens = [ len(word_tokenize(s)) for s in breaked_segments ]
breaked_segments_df = pd.DataFrame({'text': breaked_segments, 'nb_tokens': nb_tokens})
breaked_segments_df = breaked_segments_df[breaked_segments_df['nb_tokens'] > 0]
    df = pd.concat([df, breaked_segments_df])
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so the loop body never runs and the
        # preset values remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
            'dave@google.com',
            'tdhock5@gmail.com',
            'maudelaperriere@gmail.com',
            'rob@gmail.com some text steve@gmail.com',
            'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
        # thus index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: ３ Em 3 (fullwidth digit three)
        values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
        unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
        values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
|
tm.assert_numpy_array_equal(result.values, expected)
|
pandas.util.testing.assert_numpy_array_equal
|
import s3fs
import numpy as np
import pandas as pd
import xarray as xr
from glob import glob
from os.path import join, exists
from sklearn.preprocessing import StandardScaler, RobustScaler, MaxAbsScaler, MinMaxScaler
from operator import lt, le, eq, ne, ge, gt
scalers = {"MinMaxScaler": MinMaxScaler,
"MaxAbsScaler": MaxAbsScaler,
"StandardScaler": StandardScaler,
"RobustScaler": RobustScaler}
ops = {"<": lt, "<=": le, "==": eq, "!=": ne, ">=": ge, ">": gt}
def log10_transform(x, eps=1e-18):
return np.log10(np.maximum(x, eps))
def neg_log10_transform(x, eps=1e-18):
return np.log10(np.maximum(-x, eps))
def zero_transform(x, eps=None):
return np.zeros(x.shape, dtype=np.float32)
def inverse_log10_transform(x):
return 10.0 ** x
def inverse_neg_log10_transform(x):
return -10.0 ** x
transforms = {"log10_transform": log10_transform,
"neg_log10_transform": neg_log10_transform,
"zero_transform": zero_transform}
inverse_transforms = {"log10_transform": inverse_log10_transform,
"neg_log10_transform": inverse_neg_log10_transform,
"zero_transform": zero_transform}
def load_cam_output(path, file_start="TAU_run1.cam.h1", file_end="nc"):
"""
Load set of model output from CAM/CESM into xarray Dataset object.
Args:
path: Path to directory containing model output
file_start: Shared beginning of model files
file_end: Filetype shared by all files.
Returns:
xarray Dataset object containing the model output
"""
if not exists(path):
raise FileNotFoundError("Specified path " + path + " does not exist")
data_files = sorted(glob(join(path, file_start + "*" + file_end)))
if len(data_files) > 0:
cam_dataset = xr.open_mfdataset(data_files, decode_times=False)
else:
raise FileNotFoundError("No matching CAM output files found in " + path)
return cam_dataset
def get_cam_output_times(path, time_var="time", file_start="TAU_run1.cam.h1", file_end="nc"):
if not exists(path):
raise FileNotFoundError("Specified path " + path + " does not exist")
data_files = sorted(glob(join(path, file_start + "*" + file_end)))
file_time_list = []
for data_file in data_files:
ds = xr.open_dataset(data_file, decode_times=False, decode_cf=False)
time_minutes = (ds[time_var].values * 24 * 60).astype(int)
file_time_list.append(pd.DataFrame({"time": time_minutes,
"filename": [data_file] * len(time_minutes)}))
ds.close()
del ds
return pd.concat(file_time_list, ignore_index=True)
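# Unit note (not part of the original module), assuming the usual CAM convention that the
# raw "time" coordinate is in days since a reference date: get_cam_output_times converts
# it to whole minutes, e.g. 0.5 days -> 720 minutes.
def _example_time_to_minutes():
    time_days = np.array([0.0, 0.5, 1.25])
    return (time_days * 24 * 60).astype(int)    # array([0, 720, 1800])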
def unstagger_vertical(dataset, variable, vertical_dim="lev"):
"""
Interpolate a 4D variable on a staggered vertical grid to an unstaggered vertical grid. Will not execute
until compute() is called on the result of the function.
Args:
dataset: xarray Dataset object containing the variable to be interpolated
variable: Name of the variable being interpolated
vertical_dim: Name of the vertical coordinate dimension.
Returns:
xarray DataArray containing the vertically interpolated data
"""
var_data = dataset[variable]
unstaggered_var_data = xr.DataArray(0.5 * (var_data[:, :-1].values + var_data[:, 1:].values),
coords=[var_data.time, dataset[vertical_dim], var_data.lat, var_data.lon],
dims=("time", vertical_dim, "lat", "lon"),
name=variable + "_" + vertical_dim)
return unstaggered_var_data
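# A minimal numpy sketch (not part of the original module) of the averaging performed by
# unstagger_vertical: each unstaggered level is the mean of the two staggered interface
# values that bracket it along the vertical axis.
def _example_unstagger_averaging():
    staggered = np.array([[1.0, 3.0, 5.0, 7.0]])                # shape (time=1, ilev=4)
    unstaggered = 0.5 * (staggered[:, :-1] + staggered[:, 1:])  # shape (time=1, lev=3)
    assert unstaggered.tolist() == [[2.0, 4.0, 6.0]]
    return unstaggered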
def split_staggered_variable(dataset, variable, vertical_dim="lev"):
"""
Split vertically staggered variable into top and bottom subsets with the unstaggered
vertical coordinate
Args:
dataset: xarray Dataset object
variable: Name of staggered variable
vertical_dim: Unstaggered vertical dimension
Returns:
top_var_data, bottom_var_data: xarray DataArrays containing the unstaggered vertical data
"""
var_data = dataset[variable]
top_var_data = xr.DataArray(var_data[:, :-1], coords=[var_data.time,
dataset[vertical_dim],
var_data["lat"],
var_data["lon"]],
dims=("time", vertical_dim, "lat", "lon"),
name=variable + "_top")
bottom_var_data = xr.DataArray(var_data[:, 1:], coords=[var_data.time,
dataset[vertical_dim],
var_data["lat"],
var_data["lon"]],
dims=("time", vertical_dim, "lat", "lon"),
name=variable + "_bottom")
return xr.Dataset({variable + "_top": top_var_data, variable + "_bottom": bottom_var_data})
def add_index_coords(dataset, row_coord="lat", col_coord="lon", depth_coord="lev"):
"""
Calculate the index values of the row, column, and depth coordinates in a Dataset.
Indices range from 0 to length of coordinate - 1.
Args:
dataset: xarray Dataset
row_coord: name of the row coordinate variable. Default lat.
col_coord: name of the column coordinate variable. Default lon.
depth_coord: name of the depth coordinate variable. Default lev.
Returns:
row, col, depth: DataArrays with the row, col, and depth indices
"""
row = xr.DataArray(np.arange(dataset[row_coord].shape[0]), dims=(row_coord,), name="row")
col = xr.DataArray(np.arange(dataset[col_coord].shape[0]), dims=(col_coord,), name="col")
depth = xr.DataArray(np.arange(dataset[depth_coord].shape[0]), dims=(depth_coord,), name="depth")
return xr.Dataset({"row": row, "col": col, "depth": depth})
def calc_pressure_field(dataset, pressure_var_name="pressure"):
"""
Calculate pressure at each location based on the surface pressure and vertical coordinate
information.
Args:
        dataset: xarray Dataset containing the hybrid coefficients hyam and hybm,
            the reference pressure P0, and the surface pressure PS.
        pressure_var_name: name given to the derived pressure DataArray.
    Returns:
        xarray DataArray of pressure values in Pa with dimensions (time, lev, lat, lon).
    """
pressure = xr.DataArray((dataset["hyam"] * dataset["P0"] + dataset["hybm"] * dataset["PS"]).transpose("time", "lev", "lat", "lon"))
pressure.name = pressure_var_name
pressure.attrs["units"] = "Pa"
pressure.attrs["long_name"] = "atmospheric pressure"
return pressure
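# Illustrative sketch (not part of the original module) of the hybrid sigma-pressure
# relationship used above, p = hyam * P0 + hybm * PS, evaluated with made-up
# single-level coefficients.
def _example_hybrid_pressure():
    hyam, hybm = 0.1, 0.8            # hypothetical mid-level hybrid coefficients
    p0, ps = 1.0e5, 1.013e5          # reference and surface pressure in Pa
    return hyam * p0 + hybm * ps     # 10000 + 81040 = 91040 Pa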
def calc_temperature(dataset, density_variable="RHO_CLUBB_lev", pressure_variable="pressure"):
"""
    Calculate temperature from pressure and density using the ideal gas law,
    T = p / (rho * R_d) with R_d = 287 J kg-1 K-1. The derived temperature
    DataArray is returned; the input dataset is not modified in place.
    Args:
        dataset: xarray Dataset object containing the pressure and density variables
        density_variable: name of the density variable
        pressure_variable: name of the pressure variable
    Returns:
        xarray DataArray of temperature in K
    """
temperature = dataset[pressure_variable] / dataset[density_variable] / 287.0
temperature.attrs["units"] = "K"
temperature.attrs["long_name"] = "temperature derived from pressure and density"
temperature.name = "temperature"
return temperature
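# Quick sanity check (not part of the original module) of the ideal gas relationship used
# above, T = p / (rho * R_d) with R_d = 287 J kg-1 K-1.
def _example_ideal_gas_temperature():
    pressure, density = 101325.0, 1.2    # Pa and kg m-3, roughly sea-level air
    return pressure / density / 287.0    # ~294 K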
def convert_to_dataframe(dataset, variables, times, time_var,
subset_variable, subset_threshold):
"""
Convert 4D Dataset to flat dataframe for machine learning.
Args:
dataset: xarray Dataset containing all relevant variables and times.
variables: List of variables in dataset to be included in DataFrame. All variables should have the same
dimensions and coordinates.
times: Iterable of times to select from dataset.
time_var: Variable used as the time coordinate.
        subset_variable: Variable (or list of variables) used to select a subset of grid points from the file.
        subset_threshold: Threshold (or list of thresholds) that must be met or exceeded for examples to be kept.
    Returns:
        `pandas.DataFrame` with one row for each selected grid point across all requested times.
    """
data_frames = []
for t, time in enumerate(times):
print(t, time)
time_df = dataset[variables].sel(**{time_var: time}).to_dataframe()
if type(subset_variable) == list:
valid = np.zeros(time_df.shape[0], dtype=bool)
for s, sv in enumerate(subset_variable):
                valid[time_df[sv] >= subset_threshold[s]] = True
else:
valid = time_df[subset_variable] >= subset_threshold
data_frames.append(time_df.loc[valid].reset_index())
print(data_frames[-1])
del time_df
return pd.concat(data_frames)
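# A minimal sketch (not part of the original module) of convert_to_dataframe on a tiny
# synthetic Dataset; the variable names, threshold, and values here are made up.
def _example_convert_to_dataframe():
    ds = xr.Dataset(
        {"QC": (("time", "lev"), np.array([[0.0, 2.0e-6], [3.0e-6, 0.0]])),
         "T": (("time", "lev"), np.array([[290.0, 280.0], [291.0, 281.0]]))},
        coords={"time": [0.0, 1.0], "lev": [900.0, 500.0]})
    # keep only grid points where QC meets the 1e-6 threshold
    return convert_to_dataframe(ds, ["QC", "T"], [0.0, 1.0], "time", "QC", 1.0e-6)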
def load_csv_data(csv_path, index_col="Index"):
"""
Read pre-processed csv files into memory.
Args:
csv_path: Path to csv files
index_col: Column label used as the index
Returns:
`pandas.DataFrame` containing data from all csv files in the csv_path directory.
"""
csv_files = sorted(glob(join(csv_path, "*.csv")))
all_data = []
for csv_file in csv_files:
all_data.append(
|
pd.read_csv(csv_file, index_col=index_col)
|
pandas.read_csv
|
"""Evaluate multiple models in multiple experiments, or evaluate baseline on multiple datasets
TODO: use hydra or another configuration framework to manage the experiments
"""
import os
import sys
import json
import argparse
import logging
from glob import glob
import time
import string
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', level=logging.INFO)
import numpy as np
import pandas as pd
import h5py
import scipy
import scipy.interpolate
import scipy.stats
import torch
import dill
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import matplotlib.colors as colors
import matplotlib.patheffects as pe
import matplotlib.animation as animation
from tqdm import tqdm
from tabulate import tabulate
import utility as util
from helper import load_model, prediction_output_to_trajectories
pd.set_option('io.hdf.default_format','table')
############################
# Bag-of-N (BoN) FDE metrics
############################
def compute_min_FDE(predict, future):
return np.min(np.linalg.norm(predict[...,-1,:] - future[-1], axis=-1))
def compute_min_ADE(predict, future):
mean_ades = np.mean(np.linalg.norm(predict - future, axis=-1), axis=-1)
return np.min(mean_ades)
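# Worked example (not part of the original script): two sampled trajectories scored against
# one ground-truth future. min FDE keeps the best final-step error over samples, min ADE
# the best per-sample mean error.
def _example_bon_metrics():
    predict = np.array([[[0., 0.], [1., 0.], [2., 0.]],
                        [[0., 0.], [1., 1.], [2., 2.]]])    # (samples=2, timesteps=3, xy)
    future = np.array([[0., 0.], [1., 0.], [2., 1.]])       # (timesteps=3, xy)
    return compute_min_FDE(predict, future), compute_min_ADE(predict, future)  # (1.0, ~0.33)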
def evaluate_scene_BoN(scene, ph, eval_stg, hyp, n_predictions=20, min_fde=True, min_ade=True):
predictconfig = util.AttrDict(ph=ph, num_samples=n_predictions, z_mode=False, gmm_mode=False,
full_dist=False, all_z_sep=False)
max_hl = hyp['maximum_history_length']
with torch.no_grad():
predictions = eval_stg.predict(scene,
np.arange(scene.timesteps), predictconfig.ph,
num_samples=predictconfig.num_samples,
min_future_timesteps=predictconfig.ph,
z_mode=predictconfig.z_mode,
gmm_mode=predictconfig.gmm_mode,
full_dist=predictconfig.full_dist,
all_z_sep=predictconfig.all_z_sep)
prediction_dict, histories_dict, futures_dict = \
prediction_output_to_trajectories(
predictions, dt=scene.dt, max_h=max_hl, ph=predictconfig.ph, map=None)
batch_metrics = {'min_ade': list(), 'min_fde': list()}
for t in prediction_dict.keys():
for node in prediction_dict[t].keys():
if min_ade:
batch_metrics['min_ade'].append(compute_min_ADE(prediction_dict[t][node], futures_dict[t][node]))
if min_fde:
batch_metrics['min_fde'].append(compute_min_FDE(prediction_dict[t][node], futures_dict[t][node]))
return batch_metrics
def evaluate_BoN(env, ph, eval_stg, hyp, n_predictions=20, min_fde=True, min_ade=True):
batch_metrics = {'min_ade': list(), 'min_fde': list()}
prefix = f"Evaluate Bo{n_predictions} (ph = {ph}): "
for scene in tqdm(env.scenes, desc=prefix, dynamic_ncols=True, leave=True):
_batch_metrics = evaluate_scene_BoN(scene, ph, eval_stg, hyp,
n_predictions=n_predictions, min_fde=min_fde, min_ade=min_ade)
batch_metrics['min_ade'].extend(_batch_metrics['min_ade'])
batch_metrics['min_fde'].extend(_batch_metrics['min_fde'])
return batch_metrics
###############
# Other metrics
###############
def make_interpolate_map(scene):
map = scene.map['VEHICLE']
obs_map = 1 - np.max(map.data[..., :, :, :], axis=-3) / 255
interp_obs_map = scipy.interpolate.RectBivariateSpline(
range(obs_map.shape[0]),
range(obs_map.shape[1]),
obs_map, kx=1, ky=1)
return interp_obs_map
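# Illustrative sketch (not part of the original script) of the bilinear obstacle-map
# interpolation built above, on a hypothetical 4x4 grid whose last column is off-road.
def _example_obstacle_interpolation():
    obs_map = np.zeros((4, 4))
    obs_map[:, 3] = 1.0
    interp = scipy.interpolate.RectBivariateSpline(
        range(4), range(4), obs_map, kx=1, ky=1)
    on_road = interp(1.0, 1.0, grid=False)     # ~0.0, point well inside the road
    off_road = interp(1.0, 3.0, grid=False)    # ~1.0, point in the off-road column
    return on_road, off_road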
def compute_num_offroad_viols(interp_map, scene_map, predicted_trajs):
"""Count the number of predicted trajectories that go off the road.
Note this does not count trajectories that go over road/lane dividers.
Parameters
==========
interp_map : scipy.interpolate.RectBivariateSpline
Interpolation to get road obstacle indicator value from predicted points.
scene_map : trajectron.environment.GeometricMap
        Map used to transform the predicted points into map coordinates.
    predicted_trajs : ndarray
        Predicted trajectories of shape (1, number of predictions, number of timesteps, 2).
    Returns
    =======
    float
        Count of off-road sample trajectories, between 0 and the number of predictions.
"""
old_shape = predicted_trajs.shape
pred_trajs_map = scene_map.to_map_points(predicted_trajs.reshape((-1, 2)))
traj_values = interp_map(pred_trajs_map[:, 0], pred_trajs_map[:, 1], grid=False)
    # reshape the flattened interpolation values back to (1, num_samples, ph)
    traj_values = traj_values.reshape((old_shape[0], old_shape[1], old_shape[2]))
# num_viol_trajs is an integer in [0, num_samples].
return np.sum(traj_values.max(axis=2) > 0, dtype=float)
def compute_kde_nll(predicted_trajs, gt_traj):
kde_ll = 0.
log_pdf_lower_bound = -20
num_timesteps = gt_traj.shape[0]
num_batches = predicted_trajs.shape[0]
for batch_num in range(num_batches):
for timestep in range(num_timesteps):
try:
kde = scipy.stats.gaussian_kde(predicted_trajs[batch_num, :, timestep].T)
                pdf = np.clip(kde.logpdf(gt_traj[timestep].T), a_min=log_pdf_lower_bound, a_max=None)[0]
kde_ll += pdf / (num_timesteps * num_batches)
except np.linalg.LinAlgError:
kde_ll = np.nan
return -kde_ll
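# Hedged sanity sketch (illustrative only; the _demo_* names are hypothetical).
# Shapes are inferred from the indexing above: predicted_trajs is
# (num_batches, num_samples, num_timesteps, 2) and gt_traj is (num_timesteps, 2).
_demo_rng = np.random.default_rng(0)
_demo_gt = np.zeros((4, 2))
_demo_samples = _demo_rng.normal(size=(1, 100, 4, 2))
_demo_nll = compute_kde_nll(_demo_samples, _demo_gt)  # modest positive NLL for unit-normal samples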
def compute_ade(predicted_trajs, gt_traj):
    """Average displacement error of every sampled trajectory, flattened to 1-D."""
    error = np.linalg.norm(predicted_trajs - gt_traj, axis=-1)
    ade = np.mean(error, axis=-1)
    return ade.flatten()
def compute_fde(predicted_trajs, gt_traj):
    """Final displacement error of every sampled trajectory, flattened to 1-D."""
    final_error = np.linalg.norm(predicted_trajs[:, :, -1] - gt_traj[-1], axis=-1)
    return final_error.flatten()
########################
# Most Likely Evaluation
########################
def evaluate_scene_most_likely(scene, ph, eval_stg, hyp,
ade=True, fde=True):
predictconfig = util.AttrDict(ph=ph, num_samples=1,
z_mode=True, gmm_mode=True, full_dist=False, all_z_sep=False)
max_hl = hyp['maximum_history_length']
with torch.no_grad():
predictions = eval_stg.predict(scene,
np.arange(scene.timesteps), predictconfig.ph,
num_samples=predictconfig.num_samples,
min_future_timesteps=predictconfig.ph,
z_mode=predictconfig.z_mode,
gmm_mode=predictconfig.gmm_mode,
full_dist=predictconfig.full_dist,
all_z_sep=predictconfig.all_z_sep)
prediction_dict, histories_dict, futures_dict = \
prediction_output_to_trajectories(
predictions, dt=scene.dt, max_h=max_hl, ph=predictconfig.ph, map=None)
batch_metrics = {'ade': list(), 'fde': list()}
for t in prediction_dict.keys():
for node in prediction_dict[t].keys():
if ade:
batch_metrics['ade'].extend(
compute_ade(prediction_dict[t][node], futures_dict[t][node]) )
if fde:
batch_metrics['fde'].extend(
compute_fde(prediction_dict[t][node], futures_dict[t][node]) )
return batch_metrics
def evaluate_most_likely(env, ph, eval_stg, hyp,
ade=True, fde=True):
batch_metrics = {'ade': list(), 'fde': list()}
prefix = f"Evaluate Most Likely (ph = {ph}): "
for scene in tqdm(env.scenes, desc=prefix, dynamic_ncols=True, leave=True):
_batch_metrics = evaluate_scene_most_likely(scene, ph, eval_stg, hyp,
ade=ade, fde=fde)
batch_metrics['ade'].extend(_batch_metrics['ade'])
batch_metrics['fde'].extend(_batch_metrics['fde'])
return batch_metrics
#################
# Full Evaluation
#################
def evaluate_scene_full(scene, ph, eval_stg, hyp,
ade=True, fde=True, kde=True, offroad_viols=True):
num_samples = 2000
predictconfig = util.AttrDict(ph=ph, num_samples=num_samples,
z_mode=False, gmm_mode=False, full_dist=False, all_z_sep=False)
max_hl = hyp['maximum_history_length']
with torch.no_grad():
predictions = eval_stg.predict(scene,
np.arange(scene.timesteps), predictconfig.ph,
num_samples=predictconfig.num_samples,
min_future_timesteps=predictconfig.ph,
z_mode=predictconfig.z_mode,
gmm_mode=predictconfig.gmm_mode,
full_dist=predictconfig.full_dist,
all_z_sep=predictconfig.all_z_sep)
prediction_dict, histories_dict, futures_dict = \
prediction_output_to_trajectories(
predictions, dt=scene.dt, max_h=max_hl, ph=predictconfig.ph, map=None)
interp_map = make_interpolate_map(scene)
    scene_map = scene.map['VEHICLE']
batch_metrics = {'ade': list(), 'fde': list(), 'kde': list(), 'offroad_viols': list()}
for t in prediction_dict.keys():
for node in prediction_dict[t].keys():
if ade:
batch_metrics['ade'].extend(
compute_ade(prediction_dict[t][node], futures_dict[t][node]) )
if fde:
batch_metrics['fde'].extend(
compute_fde(prediction_dict[t][node], futures_dict[t][node]) )
if offroad_viols:
batch_metrics['offroad_viols'].extend(
                    [ compute_num_offroad_viols(interp_map, scene_map, prediction_dict[t][node]) / float(num_samples) ])
if kde:
batch_metrics['kde'].extend(
[ compute_kde_nll(prediction_dict[t][node], futures_dict[t][node]) ])
return batch_metrics
def evaluate_full(env, ph, eval_stg, hyp,
ade=True, fde=True, kde=True, offroad_viols=True):
batch_metrics = {'ade': list(), 'fde': list(), 'kde': list(), 'offroad_viols': list()}
prefix = f"Evaluate Full (ph = {ph}): "
for scene in tqdm(env.scenes, desc=prefix, dynamic_ncols=True, leave=True):
_batch_metrics = evaluate_scene_full(scene, ph, eval_stg, hyp,
ade=ade, fde=fde, kde=kde, offroad_viols=offroad_viols)
batch_metrics['ade'].extend(_batch_metrics['ade'])
batch_metrics['fde'].extend(_batch_metrics['fde'])
batch_metrics['kde'].extend(_batch_metrics['kde'])
batch_metrics['offroad_viols'].extend(_batch_metrics['offroad_viols'])
return batch_metrics
##########
# Datasets
##########
dataset_dir = "../../.."
dataset_1 = util.AttrDict(
test_set_path=f"{ dataset_dir }/carla_v3-1_dataset/v3-1_split1_test.pkl",
name='v3-1_split1_test',
desc="CARLA synthesized dataset with heading fix, occlusion fix, and 32 timesteps.")
dataset_2 = util.AttrDict(
test_set_path=f"{ dataset_dir }/carla_v3-1-1_dataset/v3-1-1_split1_test.pkl",
name='v3-1-1_split1_test',
desc="CARLA synthesized dataset with heading fix, occlusion fix, and 32 timesteps.")
dataset_3 = util.AttrDict(
test_set_path=f"{ dataset_dir }/carla_v3-1-2_dataset/v3-1-2_split1_test.pkl",
name='v3-1-2_split1_test',
desc="CARLA synthesized dataset with heading fix, occlusion fix, and 32 timesteps.")
DATASETS = [dataset_1, dataset_2, dataset_3]
def load_dataset(dataset):
logging.info(f"Loading dataset: {dataset.name}: {dataset.desc}")
with open(dataset.test_set_path, 'rb') as f:
eval_env = dill.load(f, encoding='latin1')
return eval_env
#############
# Experiments
#############
"""
The experiments to evaluate are:
- 20210621 one model trained on NuScenes to use as baseline for other evaluation
- 20210801 have models trained from v3-1-1 (train set has 200 scenes). Compare MapV2, MapV3.
- 20210802 have models trained from v3-1-1. MapV5 squeezes map encoding to size 32 using FC.
- 20210803 have models trained from v3-1-1. Compare map, mapV4. MapV4 with multi K values. MapV4 does not apply FC. May have size 100 or 150.
- 20210804 have models trained from v3-1 (train set has 300 scenes). Compare map with mapV4.
- 20210805 have models trained from v3-1 (train set has 300 scenes). MapV4 with multi K values.
- 20210812 have models trained from v3-1-1 rebalanced. Models are trained 20 epochs.
- 20210815 have models trained from v3-1-1 rebalanced. Models are trained 40 epochs.
- 20210816 have models trained from v3-1-2 (train set has 600 scenes) rebalanced.
"""
model_dir = "models"
baseline_model = util.AttrDict(
path=f"{ model_dir }/20210621/models_19_Mar_2021_22_14_19_int_ee_me_ph8",
desc="Base model +Dynamics Integration, Maps with K=25 latent values "
"(on NuScenes dataset)")
experiment_1 = util.AttrDict(
models_dir=f"{ model_dir }/20210801",
dataset=dataset_2,
desc="20210801 have models trained from v3-1-1 (train set has 200 scenes). Compare MapV2, MapV3.")
experiment_2 = util.AttrDict(
models_dir=f"{ model_dir }/20210802",
dataset=dataset_2,
desc="20210802 have models trained from v3-1-1. MapV5 squeezes map encoding to size 32 using FC.")
experiment_3 = util.AttrDict(
models_dir=f"{ model_dir }/20210803",
dataset=dataset_2,
desc="20210803 have models trained from v3-1-1. Compare map, mapV4. MapV4 with multi K values. "
"MapV4 does not apply FC. May have size 100 or 150.")
experiment_4 = util.AttrDict(
models_dir=f"{ model_dir }/20210804",
dataset=dataset_1,
desc="20210804 have models trained from v3-1 (train set has 300 scenes). Compare map with mapV4.")
experiment_5 = util.AttrDict(
models_dir=f"{ model_dir }/20210805",
dataset=dataset_1,
desc="20210805 have models trained from v3-1 (train set has 300 scenes). MapV4 with multi K values.")
experiment_6 = util.AttrDict(
models_dir=f"{ model_dir }/20210812",
dataset=dataset_2,
desc="20210812 have models trained from v3-1-1 rebalanced. Models are trained 20 epochs.")
experiment_7 = util.AttrDict(
models_dir=f"{ model_dir }/20210815",
dataset=dataset_2, ts=40,
desc="20210815 have models trained from v3-1-1 rebalanced. Models are trained 40 epochs.")
experiment_8 = util.AttrDict(
models_dir=f"{ model_dir }/20210816",
dataset=dataset_3,
desc="20210816 have models trained from v3-1-2 (train set has 600 scenes) rebalanced.")
EXPERIMENTS = [experiment_1, experiment_2, experiment_3, experiment_4, experiment_5, experiment_6, experiment_7, experiment_8]
def _load_model(model_path, eval_env, ts=20):
eval_stg, hyp = load_model(model_path, eval_env, ts=ts)#, device='cuda')
return eval_stg, hyp
PREDICTION_HORIZONS = [2,4,6,8]
def run_evaluate_experiments(config):
if config.experiment_index is not None and config.experiment_index >= 1:
experiments = [EXPERIMENTS[config.experiment_index - 1]]
else:
experiments = EXPERIMENTS
######################
# Evaluate experiments
######################
# results_filename = f"results_{time.strftime('%d_%b_%Y_%H_%M_%S', time.localtime())}.h5"
logging.info("Evaluating each experiment")
for experiment in experiments:
results_key = experiment.models_dir.split('/')[-1]
results_filename = f"results_{results_key}.h5"
logging.info(f"Evaluating models in experiment: {experiment.desc}")
logging.info(f"Writing to: {results_filename}")
eval_env = load_dataset(experiment.dataset)
        # Scene graphs need the model hyperparameters, so a model must be loaded first.
has_computed_scene_graph = False
for model_path in glob(f"{experiment.models_dir}/*"):
model_key = '/'.join(model_path.split('/')[-2:])
ts = getattr(experiment, 'ts', 20)
eval_stg, hyp = _load_model(model_path, eval_env, ts=ts)
if not has_computed_scene_graph:
prefix = f"Preparing Node Graph: "
for scene in tqdm(eval_env.scenes, desc=prefix, dynamic_ncols=True, leave=True):
scene.calculate_scene_graph(eval_env.attention_radius,
hyp['edge_addition_filter'], hyp['edge_removal_filter'])
has_computed_scene_graph = True
logging.info(f"Evaluating: {model_key}")
BoN_results_key = '/'.join([experiment.dataset.name] + model_path.split('/')[-2:] + ['BoN'])
with
|
pd.HDFStore(results_filename, 'a')
|
pandas.HDFStore
|
# **********************************************************************
# Copyright (C) 2020 Johns Hopkins University Applied Physics Laboratory
#
# All Rights Reserved.
# For any other permission, please contact the Legal Office at JHU/APL.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **********************************************************************
import glob
import json
import os
import re
import pandas as pd
import pydash
from bson import ObjectId
from flask import Blueprint, send_from_directory
from shared.config import config
from shared.log import logger
from system import FlaskExtensions
from system.controllers import classification_job, simulation_job, user_job
from system.models.metrics import SimulationMetrics, ClassificationMetrics
from system.utils.biology import TaxonomicHierarchy
from system.utils.zip import send_to_zip
results_bp = Blueprint("results", __name__, url_prefix=config.SERVER_API_CHROOT)
mongodb = FlaskExtensions.mongodb
@results_bp.route("/results/orig_abundance_profile/<string:user_job_id>")
def get_original_abundance_profile(user_job_id):
# Taxid Abundance Organization files: data/jobs/<user_job_id>/*.tsv
job = user_job.find_by_id(user_job_id=ObjectId(user_job_id))
if job is None:
return "{} does not exist!".format(user_job_id), 501
tsv_name = pydash.get(job, "abundance_tsv", None)
if tsv_name is not None:
path = os.path.join(config.JOBS_DIR, user_job_id, tsv_name)
abundance_df = get_result_dataframe(path, ["taxid", "abundance", "val"])
if abundance_df is None:
return "No abundance profile tsv file for {}!".format(user_job_id), 501
parsed_abundance_json = abundance_df.to_dict("records")
return json.dumps(parsed_abundance_json), 200
else:
logger.error("No abundance TSV found for job {}".format(user_job_id))
return None, 501
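# Hedged illustration (hypothetical values, not part of the original module): the
# endpoint above returns a JSON list of records keyed by the requested columns,
# e.g. [{"taxid": 562, "abundance": 0.25, "val": 1}, ...].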
@results_bp.route(
"/results/computation/simulation/<string:metric>/<string:user_job_id>/<string:read_type>",
methods=["GET"])
def get_cpu_time_simulation(metric, user_job_id, read_type):
try:
SimulationMetrics(metric)
except ValueError:
return None, 501
data = simulation_job.find_specific_job(user_job_id=ObjectId(user_job_id), read_type=read_type)
res = {metric: pydash.get(data, metric, None)}
return json.dumps(res), 200
@results_bp.route(
"/results/computation/classification/<string:metric>/<string:user_job_id>/<string:read_type>/<string:classifier>",
methods=["GET"])
def get_computational_performance_simulated(metric, user_job_id, read_type, classifier):
try:
ClassificationMetrics(metric)
except ValueError:
return None, 501
data = classification_job.find_specific_job(user_job_id=ObjectId(user_job_id), read_type=read_type,
classifier=classifier)
res = {metric: pydash.get(data, metric, None)}
return json.dumps(res), 200
@results_bp.route(
"/results/computation/classification/<string:metric>/<string:user_job_id>/<string:classifier>",
methods=["GET"])
def get_computational_performance_real(metric, user_job_id, classifier):
try:
ClassificationMetrics(metric)
except ValueError:
return None, 501
data = classification_job.find_specific_job(user_job_id=ObjectId(user_job_id), classifier=classifier)
res = {metric: pydash.get(data, metric, None)}
return json.dumps(res), 200
@results_bp.route("/results/<string:user_job_id>/<string:read_type>/compare", methods=["GET"])
def get_results_for_user_job_and_read_type(user_job_id, read_type):
# Eval tsv: data/jobs/<user_job_id>/<read_type>/compare
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "eval.tsv")
eval_df = get_result_dataframe(path)
if eval_df is None:
return "No evaluation TSV file found!", 501
eval_json = eval_df.to_dict("records")
return json.dumps(eval_json), 200
@results_bp.route("/results/<string:user_job_id>/inclusion", methods=["GET"])
def get_classifier_rank_abu_taxid_org_inclusion_real(user_job_id):
# classifier_rank_abu_taxid_org_inclusion tsv:
# /data/jobs/<user_job_id>/eval/classifier_rank_abu_taxid_org_inclusion.tsv
path = os.path.join(config.JOBS_DIR, user_job_id, "eval", "classifier_rank_abu_taxid_org_inclusion.tsv")
eval_df = get_result_dataframe(path, ['classifier', 'rank', 'abundance', 'taxid', 'name', 'classifier_inclusion'])
    if eval_df is None:
        return "No evaluation TSV file found!", 501
    eval_df['classifier_inclusion'] = eval_df['classifier_inclusion'].str.split(',')
    eval_df['classifier_count'] = eval_df['classifier_inclusion'].str.len()
eval_json = eval_df.to_dict("records")
return json.dumps(eval_json), 200
@results_bp.route("/results/<string:user_job_id>/<string:read_type>/inclusion", methods=["GET"])
def get_classifier_rank_abu_taxid_org_inclusion_simulated(user_job_id, read_type):
# classifier_rank_abu_taxid_org_inclusion tsv:
# /data/jobs/<user_job_id>/<read_type>/eval/classifier_rank_abu_taxid_org_inclusion.tsv
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "classifier_rank_abu_taxid_org_inclusion.tsv")
eval_df = get_result_dataframe(path, ['classifier', 'rank', 'abundance', 'taxid', 'name', 'classifier_inclusion'])
    if eval_df is None:
        return "No evaluation TSV file found!", 501
    eval_df['classifier_inclusion'] = eval_df['classifier_inclusion'].str.split(',')
    eval_df['classifier_count'] = eval_df['classifier_inclusion'].str.len()
eval_json = eval_df.to_dict("records")
return json.dumps(eval_json), 200
@results_bp.route("/results/<string:user_job_id>/<string:read_type>/<string:classifier>", methods=["GET"])
def get_results_for_user_job_and_read_type_and_classifier(user_job_id, read_type, classifier):
# Report files: data/jobs/<user_job_id>/<read_type>/results/*.parsed_<classifier>
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "results", "*.parsed_{}".format(classifier))
parsed_report_df = get_result_dataframe(path, ["taxid", "abundance"])
if parsed_report_df is None:
return "No report file for {} {}!".format(classifier, user_job_id), 501
parsed_report_json = parsed_report_df.to_dict("records")
return json.dumps(parsed_report_json), 200
@results_bp.route("/results/taxid_abu_org/<string:user_job_id>/<string:read_type>/<string:classifier>/<string:rank>",
methods=["GET"])
def get_results_for_taxid_abu_org_by_rank(user_job_id, read_type, classifier, rank):
# Taxid Abundance Organization files: data/jobs/<user_job_id>/<read_type>/eval/tmp/parsed_<classifier>/taxid_abu_org-<rank>.tsv
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "tmp", "parsed_{}_dir".format(classifier),
"taxid_abu_org-{}.tsv".format(rank))
taxid_abu_org_df = get_result_dataframe(path, ["abundance", "taxid", "name"])
if taxid_abu_org_df is None:
return "No Tax ID Abundance Organization file for {} {} {}!".format(rank, read_type, user_job_id), 501
taxid_abu_org_json = taxid_abu_org_df.to_dict("records")
return json.dumps(taxid_abu_org_json), 200
@results_bp.route("/results/taxid_abu_org/<string:user_job_id>/<string:classifier>",
methods=["GET"])
def get_hierarchical_taxid_real(user_job_id, classifier):
# -------------------------------- Get result taxid abundance hierarchy --------------------------------
path = os.path.join(config.JOBS_DIR, user_job_id, "eval", "tmp", "parsed_{}_dir".format(classifier),
"taxid.abu.ts.padded")
if not os.path.exists(path):
logger.warning("taxid.abu.ts.padded not found! Using taxid.abu.ts")
path = os.path.join(config.JOBS_DIR, user_job_id, "eval", "tmp", "parsed_{}_dir".format(classifier),
"taxid.abu.ts")
taxid_abu_ts_df = get_result_dataframe(path, ["taxid", "abundance", "hierarchy"])
if taxid_abu_ts_df is None:
return "No taxid.abu.ts file for {} {} {}!".format(user_job_id, classifier), 501
# -------------------------------- Build hierarchy --------------------------------
hierarchy_col = taxid_abu_ts_df["hierarchy"].tolist()
abundance_col = taxid_abu_ts_df["abundance"].tolist()
tree = dict()
if len(hierarchy_col) > 0:
logger.info("BUILDING HIERARCHY FOR {} TAXONOMIC IDs".format(len(hierarchy_col)))
tree = build_hierarchy(hierarchy_list=hierarchy_col, abundance_list=abundance_col)
else:
logger.warning("taxid.abu.ts IS EMPTY!")
return json.dumps(tree), 200
@results_bp.route("/results/taxid_abu_org/<string:user_job_id>/<string:read_type>/<string:classifier>",
methods=["GET"])
def get_hierarchical_taxid_simulated(user_job_id, read_type, classifier):
# -------------------------------- Get result taxid abundance hierarchy --------------------------------
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "tmp", "parsed_{}_dir".format(classifier),
"taxid.abu.ts.padded")
if not os.path.exists(path):
logger.warning("taxid.abu.ts.padded not found! Using taxid.abu.ts")
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "tmp", "parsed_{}_dir".format(classifier),
"taxid.abu.ts")
taxid_abu_ts_df = get_result_dataframe(path, ["taxid", "abundance", "hierarchy"])
if taxid_abu_ts_df is None:
return "No taxid.abu.ts file for {} {} {}!".format(user_job_id, read_type, classifier), 501
# -------------------------------- Get baseline taxid abundance hierarchy --------------------------------
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "tmp", "BASELINE1.tsv_dir",
"taxid.abu.ts.padded")
if not os.path.exists(path):
logger.warning("taxid.abu.ts.padded not found for Baseline! Using taxid.abu.ts")
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "tmp", "BASELINE1.tsv_dir", 'taxid.abu.ts')
taxid_abu_baseline_ts_df = get_result_dataframe(path, ["taxid", "abundance", "hierarchy"])
taxid_abu_baseline_ts_df["abundance"] = 0
# ---------------------------- Merge the baseline and classifier abundance ts files ----------------------------
taxid_abu_ts_df =
|
pd.concat([taxid_abu_ts_df, taxid_abu_baseline_ts_df])
|
pandas.concat
|
from __future__ import absolute_import
import collections
import gzip
import logging
import os
import sys
import multiprocessing
import threading
import numpy as np
import pandas as pd
from itertools import cycle, islice
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
from data_utils import get_file
logger = logging.getLogger(__name__)
SEED = 2017
np.set_printoptions(threshold=sys.maxsize)
np.random.seed(SEED)
def get_p1_file(link):
fname = os.path.basename(link)
return get_file(fname, origin=link, cache_subdir='Pilot1')
def scale(df, scaling=None):
"""Scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to scale
    scaling : 'maxabs', 'minmax', 'std', or None, optional (default None)
type of scaling to apply
"""
if scaling is None or scaling.lower() == 'none':
return df
df = df.dropna(axis=1, how='any')
# Scaling data
if scaling == 'maxabs':
# Normalizing -1 to 1
scaler = MaxAbsScaler()
elif scaling == 'minmax':
# Scaling to [0,1]
scaler = MinMaxScaler()
else:
# Standard normalization
scaler = StandardScaler()
    mat = df.values
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
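# Hedged usage sketch (not part of the original module; _demo is a hypothetical
# name). Column 'c' contains a NaN and is dropped by scale() before scaling.
_demo = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 0.0, 5.0], 'c': [1.0, np.nan, 2.0]})
_demo_scaled = scale(_demo, scaling='minmax')  # columns 'a' and 'b' rescaled into [0, 1]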
def impute_and_scale(df, scaling='std'):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = df.dropna(axis=1, how='all')
    imputer = SimpleImputer(strategy='mean')
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return
|
pd.DataFrame(mat, columns=df.columns)
|
pandas.DataFrame
|
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti =
|
date_range("20130101", periods=3, tz=tz)
|
pandas.date_range
|
'''
Created on 24.01.2018
@author: gregor
'''
import pandas as pd
from ipet.Key import ProblemStatusCodes, SolverStatusCodes, ObjectiveSenseCode
from ipet import Key
from ipet.misc import getInfinity as infty
from ipet.misc import isInfinite as isInf
import numpy as np
import logging
import sqlite3
logger = logging.getLogger(__name__)
DEFAULT_RELTOL = 1e-4
DEFAULT_FEASTOL = 1e-6
class SolufileMarkers:
OPT = "=opt="
INF = "=inf="
BEST = "=best="
UNKN = "=unkn="
BESTDUAL = "=bestdual="
FEAS = "=feas="
class DataBaseMarkers:
OPT = "opt"
INF = "inf"
BEST = "best"
class Validation:
'''
Validation of experiments by using external solution information
'''
__primalidx__ = 0
__dualidx__ = 1
__feas__ = 1e99
__infeas__ = 1e100
def __init__(self, solufilename : str = None, tol : float = DEFAULT_RELTOL, feastol : float = DEFAULT_FEASTOL):
'''
Validation constructor
Parameters
----------
solufilename : str
string with absolute or relative path to a solu file with reference information
tol : float
relative objective tolerance
feastol : float
relative feasibility tolerance
'''
if solufilename:
if solufilename.endswith(".solu"):
self.referencedict = self.readSoluFile(solufilename)
self.objsensedict = {}
else:
self.referencedict, self.objsensedict = self.connectToDataBase(solufilename)
logger.debug("Data base connection finished, {} items".format(len(self.referencedict.items())))
else:
self.referencedict, self.objsensedict = {}, {}
self.tol = tol
self.inconsistentset = set()
self.feastol = feastol
def set_tol(self, tol : float):
"""sets this validation's tol attribute
Parameters
----------
tol : float
new value for the tol for this validation
"""
self.tol = tol
def set_feastol(self, feastol : float):
"""sets this validation's feastol attribute
Parameters
----------
feastol : float
new value for the feastol for this validation
"""
self.feastol = feastol
def connectToDataBase(self, databasefilename):
"""connects this validation to a data base
"""
soludict = {}
objsensedict = {}
with sqlite3.connect(databasefilename) as conn:
c = conn.cursor()
c.execute('SELECT DISTINCT name, objsense,primbound,dualbound,status FROM instances')
for name, objsense, primbound, dualbound, status in c:
if name in soludict:
logger.warning("Warning: Duplicate name {} with different data in data base".format(name))
infotuple = [None, None]
if status == DataBaseMarkers.OPT:
infotuple[self.__primalidx__] = infotuple[self.__dualidx__] = primbound
elif status == DataBaseMarkers.BEST:
if primbound is not None:
infotuple[self.__primalidx__] = primbound
if dualbound is not None:
infotuple[self.__dualidx__] = dualbound
elif status == DataBaseMarkers.INF:
infotuple[self.__primalidx__] = self.__infeas__
objsensedict[name] = ObjectiveSenseCode.MAXIMIZE if objsense == "max" else ObjectiveSenseCode.MINIMIZE
soludict[name] = tuple(infotuple)
return soludict, objsensedict
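    # Hedged illustration (hypothetical row, not part of the original file): an
    # 'instances' row ('ex1', 'min', 42.0, 41.9, 'opt') yields
    # soludict['ex1'] == (42.0, 42.0) and objsensedict['ex1'] == ObjectiveSenseCode.MINIMIZE.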
def readSoluFile(self, solufilename : str) -> dict:
"""parse entire solu file into a dictionary with problem names as keys
Parameters:
-----------
solufilename : str
name of .solu file containing optimal or best known bounds for instances
Return
------
dict
dictionary with problem names as keys and best known primal and dual bounds for validation as entries.
"""
soludict = dict()
with open(solufilename, "r") as solufile:
for line in solufile:
if line.strip() == "":
continue
spline = line.split()
marker = spline[0]
problemname = spline[1]
infotuple = list(soludict.get(problemname, (None, None)))
if marker == SolufileMarkers.OPT:
infotuple[self.__primalidx__] = infotuple[self.__dualidx__] = float(spline[2])
elif marker == SolufileMarkers.BEST:
infotuple[self.__primalidx__] = float(spline[2])
elif marker == SolufileMarkers.BESTDUAL:
infotuple[self.__dualidx__] = float(spline[2])
elif marker == SolufileMarkers.FEAS:
infotuple[self.__primalidx__] = self.__feas__
elif marker == SolufileMarkers.INF:
infotuple[self.__primalidx__] = self.__infeas__
soludict[problemname] = tuple(infotuple)
return soludict
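    # Hedged illustration (hypothetical file, not part of the original): a .solu
    # file pairs a marker, a problem name and, where applicable, a bound, e.g.
    #   =opt=  ex1  42.0
    #   =best= ex2  17.5
    #   =inf=  ex3
    # which readSoluFile maps to {'ex1': (42.0, 42.0), 'ex2': (17.5, None),
    # 'ex3': (1e100, None)}, with 1e100 acting as the internal infeasibility marker.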
def getPbValue(self, pb : float, objsense : int) -> float:
"""returns a floating point value computed from a given primal bound
"""
if pd.isnull(pb):
pb = infty() if objsense == ObjectiveSenseCode.MINIMIZE else -infty()
return pb
def getDbValue(self, db : float, objsense : int) -> float :
"""returns a floating point value computed from a given primal bound
"""
if pd.isnull(db):
db = -infty() if objsense == ObjectiveSenseCode.MINIMIZE else infty()
return db
def isInconsistent(self, problemname : str) -> bool:
"""are there inconsistent results for this problem
Parameters
----------
problemname : str
name of a problem
Returns
-------
bool
True if inconsistent results were detected for this instance, False otherwise
"""
return problemname in self.inconsistentset
def isSolFeasible(self, x : pd.Series):
"""check if the solution is feasible within tolerances
"""
#
# respect solution checker output, if it exists
#
if x.get(Key.SolCheckerRead) is not None:
#
# if this column is not None, the solution checker output exists for at least some of the problems
# such that it is reasonable to assume that it should exist for all parsed problems
#
# recall that we explicitly assume that there has been a solution reported when this function is called
# if the solution checker failed to read in the solution, or the solution checker crashed and did
# not report the result of the check command, the solution was most likely infeasible.
#
if not pd.isnull(x.get(Key.SolCheckerRead)) and x.get(Key.SolCheckerRead):
if not pd.isnull(x.get(Key.SolCheckerFeas)) and x.get(Key.SolCheckerFeas):
return True
else:
return False
else:
return False
# compute the maximum violation of constraints, LP rows, bounds, and integrality
maxviol = max((x.get(key, 0.0) for key in
[Key.ViolationBds, Key.ViolationCons, Key.ViolationInt, Key.ViolationLP]))
return maxviol <= self.feastol
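    # Hedged illustration (hypothetical numbers): with the default feastol of 1e-6,
    # a reported maximum violation of 2e-7 passes the check above, while 2e-5 fails it.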
def isSolInfeasible(self, x : pd.Series):
"""check if the solution is infeasible within tolerances
Parameters
----------
x : Series or dict
series or dictionary representing single instance information
"""
#
# respect solution checker output, if it exists
#
if x.get(Key.SolCheckerRead) is not None:
if not pd.isnull(x.get(Key.SolCheckerRead)) and x.get(Key.SolCheckerRead):
if not pd.isnull(x.get(Key.SolCheckerFeas)) and x.get(Key.SolCheckerFeas):
return False
else:
return True
# compute the maximum violation of constraints, LP rows, bounds, and integrality
maxviol = max((x.get(key, 0.0) for key in [Key.ViolationBds, Key.ViolationCons, Key.ViolationInt, Key.ViolationLP]))
# if no violations have been recorded, no solution was found, and the solution is not infeasible.
if pd.isnull(maxviol):
return False
return maxviol > self.feastol
def getReferencePb(self, problemname : str) -> float:
"""get the reference primal bound for this instance
Parameters
----------
problemname : str
base name of a problem to access the reference data
Returns
-------
float or None
either a finite floating point value, or None
"""
reference = self.referencedict.get(problemname, (None, None))
if self.isUnkn(reference) or self.isInf(reference) or self.isFeas(reference):
return None
else:
return reference[self.__primalidx__]
def getReferenceDb(self, problemname : str) -> float:
"""get the reference primal bound for this instance
Parameters
----------
problemname : str
base name of a problem to access the reference data
Returns
-------
float or None
either a finite floating point value, or None
"""
reference = self.referencedict.get(problemname, (None, None))
if self.isUnkn(reference) or self.isInf(reference) or self.isFeas(reference):
return None
else:
return reference[self.__dualidx__]
def getObjSense(self, problemname : str, x : pd.Series):
"""get the objective sense of a problem
"""
if problemname in self.objsensedict:
return self.objsensedict[problemname]
elif not pd.isnull(x.get(Key.ObjectiveSense, None)):
return x.get(Key.ObjectiveSense)
else:
logger.warning("No objective sense for {}, assuming minimization".format(problemname))
return ObjectiveSenseCode.MINIMIZE
def validateSeries(self, x : pd.Series) -> str:
"""
validate the results of a problem
Parameters:
----------
x : Series
Data series that represents problem information parsed by a solver
"""
# print("{x.ProblemName} {x.PrimalBound} {x.DualBound} {x.SolverStatus}".format(x=x))
problemname = x.get(Key.ProblemName)
sstatus = x.get(Key.SolverStatus)
if not problemname:
return ProblemStatusCodes.Unknown
if pd.isnull(sstatus):
return ProblemStatusCodes.FailAbort
else:
#
# check feasibility
#
pb = x.get(Key.PrimalBound)
if self.isSolInfeasible(x) or not (pd.isnull(pb) or isInf(pb) or self.isLE(x.get(Key.ObjectiveLimit, -1e20), pb) or self.isSolFeasible(x)):
return ProblemStatusCodes.FailSolInfeasible
#
# check reference consistency
#
psc = self.isReferenceConsistent(x)
if psc != ProblemStatusCodes.Ok:
return psc
#
# report inconsistency among solvers.
#
elif self.isInconsistent(problemname):
return ProblemStatusCodes.FailInconsistent
return Key.solverToProblemStatusCode(sstatus)
def isInf(self, referencetuple : tuple) -> bool:
"""is this an infeasible reference?
Parameters:
-----------
referencetuple : tuple
tuple containing a primal and dual reference bound
Return:
-------
bool
True if reference bound is infeasible, False otherwise
"""
return referencetuple[self.__primalidx__] == self.__infeas__
def isFeas(self, referencetuple):
"""is this a feasible reference?
Parameters:
-----------
referencetuple : tuple
tuple containing a primal and dual reference bound
Return:
-------
bool
True if reference bound is feasible, False otherwise
"""
return referencetuple[self.__primalidx__] == self.__feas__
def isUnkn(self, referencetuple):
"""is this a reference tuple of an unknown instance?
"""
return referencetuple[self.__primalidx__] is None and referencetuple[self.__dualidx__] is None
def collectInconsistencies(self, df : pd.DataFrame):
"""collect individual results for primal and dual bounds and collect inconsistencies.
Parameters
----------
df : DataFrame
joined data of an experiment with several test runs
"""
# problems with inconsistent primal and dual bounds
self.inconsistentset = set()
self.bestpb = dict()
self.bestdb = dict()
df.apply(self.updateInconsistency, axis = 1)
def isPbReferenceConsistent(self, pb : float, referencedb : float, objsense : int) -> bool:
"""compare primal bound consistency against reference bound
Returns
-------
bool
True if the primal bound value is consistent with the reference dual bound
"""
if objsense == ObjectiveSenseCode.MINIMIZE:
if not self.isLE(referencedb, pb):
return False
else:
if not self.isGE(referencedb, pb):
return False
return True
def isDbReferenceConsistent(self, db : float, referencepb : float, objsense : int) -> bool:
"""compare dual bound consistency against reference bound
Returns
-------
bool
True if the dual bound value is consistent with the reference primal bound
"""
if objsense == ObjectiveSenseCode.MINIMIZE:
if not self.isGE(referencepb, db):
return False
else:
if not self.isLE(referencepb, db):
return False
return True
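    # Hedged illustration (hypothetical numbers): for a minimization problem with
    # reference dual bound 10.0, a reported primal bound of 9.0 is flagged as
    # inconsistent (better than provably possible, up to the relative tolerance),
    # while 10.5 passes.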
def isReferenceConsistent(self, x : pd.Series) -> str :
"""Check consistency with solution information
"""
problemname = x.get(Key.ProblemName)
pb = x.get(Key.PrimalBound)
db = x.get(Key.DualBound)
obs = self.getObjSense(problemname, x)
sstatus = x.get(Key.SolverStatus)
reference = self.referencedict.get(problemname, (None, None))
logger.debug("Checking against reference {} for problem {}".format(reference, problemname))
referencepb = self.getPbValue(reference[self.__primalidx__], obs)
referencedb = self.getDbValue(reference[self.__dualidx__], obs)
if self.isUnkn(reference):
return ProblemStatusCodes.Ok
elif self.isInf(reference):
if sstatus != SolverStatusCodes.Infeasible and not
|
pd.isnull(pb)
|
pandas.isnull
|