prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
place = place / place.sum(axis=1)
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
Note: only works with datetime input (no integer time steps). This
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
place = self.default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
###################
## VOC functions ##
###################
class make_VOC_function():
"""
Class that returns a time-dependant parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
Current implementation includes the alpha - delta strains.
If the class is initialized without arguments, a logistic model fitted to prelevance data of the alpha-gamma variant is used. The class can also be initialized with the alpha-gamma prelavence data provided by Prof. <NAME>.
A logistic model fitted to prelevance data of the delta variant is always used.
Input
-----
*df_abc: pd.dataFrame (optional)
Alpha, Beta, Gamma prelevance dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first dose data by sciensano are used. In the future, a hypothetical scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.dataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
raise Exception(f"Space is {G}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.Intervalindex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
NIS code of consired spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_first_dose" first doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses, individuals are transferred to the vaccination circuit after some time delay after the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
daily_first_dose : int
Number of doses administered per day. Default is 30000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : float
Index of age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups, an index of 8 corresponds to not vaccinating the age group corresponding with vacc_order[idx].
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for non-spatial multi-vaccindation dose model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
Nc_all : dictionnary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and endtimes of dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependant social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
t25 = pd.Timestamp('2021-12-26') # Start of Christmass break
t26 = pd.Timestamp('2022-01-06') # End of Christmass break
t27 = pd.Timestamp('2022-02-28') # Start of Spring Break
t28 = pd.Timestamp('2022-03-06') # End of Spring Break
t29 = pd.Timestamp('2022-04-04') # Start of Easter Break
t30 = pd.Timestamp('2022-04-17') # End of Easter Break
t31 = pd.Timestamp('2022-07-01') # Start of summer holidays
t32 = pd.Timestamp('2022-09-01') # End of summer holidays
t33 = pd.Timestamp('2022-09-21') # Opening of universities
t34 = pd.Timestamp('2022-10-31') # Start of autumn break
t35 = pd.Timestamp('2022-11-06') # End of autumn break
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
def policies_all_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
Function that returns the time-dependant social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = | pd.Timestamp('2021-03-26') | pandas.Timestamp |
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from pandas.plotting import autocorrelation_plot
from analise_de_vendas import plot_comparacao
vendas_por_dia = pd.read_csv('arquivos/vendas_por_dia.csv')
vendas_por_dia.head()
vendas_por_dia.dtypes
vendas_por_dia['dia'] = | pd.to_datetime(vendas_por_dia['dia']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy import skew, kurtosis # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludeds_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': | to_datetime(['2000-1-2']) | pandas.to_datetime |
#!/usr/bin/env python
######################################################################
# Copyright (c) 2016 <NAME>.
# All rights reserved.
######################################################################
# version 1.1 -- August 2017
# added checks for consistency between input files
# and upper limit on nb of cluster to look at
from __future__ import print_function
import sys
import os
import logging
import fileinput
import pandas as pd
from argparse import ArgumentParser
from jinja2 import Environment, FileSystemLoader
from shutil import copyfile
from collections import defaultdict
def check_pops(mfi_file, stat1):
df = pd.read_table(mfi_file)
df1 = pd.read_table(stat1)
nb_pop = len(set(df.Population))
nb_pop1 = len(df1.columns) - 2
if (nb_pop > 40):
sys.stderr.write("There are " + str(nb_pop) + " in the input file.")
sys.exit(1)
if (nb_pop != nb_pop1):
sys.exit(2)
def panel_to_json_string(panel):
# from http://stackoverflow.com/questions/28078118/merge-many-json-strings-with-python-pandas-inputs
def __merge_stream(key, stream):
return '"' + key + '"' + ': ' + stream + ', '
try:
stream = '{'
for item in panel.items:
stream += __merge_stream(item, panel.loc[item, :, :].to_json())
# take out extra last comma
stream = stream[:-2]
# add the final paren
stream += '}'
except:
logging.exception('Panel Encoding did not work')
return stream
def get_outliers(group, upper, lower):
cat = group.name
out = {}
for marker in group:
out[marker] = group[(group[marker] > upper.loc[cat][marker]) | (group[marker] < lower.loc[cat][marker])][marker]
return out
def get_boxplot_stats(all_data, mfi_file, output_json):
# modified code from http://bokeh.pydata.org/en/latest/docs/gallery/boxplot.html
# Get initial MFI values
mfi = pd.read_table(mfi_file)
mfi = mfi.set_index('Population')
df = pd.read_table(all_data)
# check if ever some pops not in cs_files
missing_pop = [x for x in mfi.index if x not in set(df.Population)]
if (missing_pop):
zeros = {}
for m in df.columns:
zeros[m] = [0 for x in missing_pop]
tmpdf = pd.DataFrame(zeros)
tmpdf.Population = missing_pop
df = df.append(tmpdf)
pops = df.groupby('Population')
q1 = pops.quantile(q=0.25)
q2 = pops.quantile(q=0.5)
q3 = pops.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
resampled = False
# get outliers
out = pops.apply(get_outliers, upper, lower).dropna()
outliers = defaultdict(dict)
for population in set(df.Population):
for marker in df.columns:
if marker != 'Population':
tmp_outliers = list(out[population][marker])
if (len(list(out[population][marker])) > 100):
tmp_outliers = list(out[population][marker].sample(n=100))
resampled = True
outliers[population][marker] = tmp_outliers
outdf = | pd.DataFrame(outliers) | pandas.DataFrame |
"""
Author: <NAME>
Created: 14/08/2020 11:04 AM
"""
import os
import numpy as np
import pandas as pd
from basgra_python import run_basgra_nz, _trans_manual_harv, get_month_day_to_nonleap_doy
from input_output_keys import matrix_weather_keys_pet
from check_basgra_python.support_for_tests import establish_org_input, get_org_correct_values, get_lincoln_broadfield, \
test_dir, establish_peyman_input, _clean_harvest, base_auto_harvest_data, base_manual_harvest_data
from supporting_functions.plotting import plot_multiple_results # used in test development and debugging
verbose = False
drop_keys = [ # newly added keys that must be dropped initially to manage tests, datasets are subsequently re-created
'WAFC',
'IRR_TARG',
'IRR_TRIG',
'IRRIG_DEM',
'RYE_YIELD',
'WEED_YIELD',
'DM_RYE_RM',
'DM_WEED_RM',
'DMH_RYE',
'DMH_WEED',
'DMH',
'WAWP',
'MXPAW',
'PAW',
'RESEEDED',
]
view_keys = [
'WAL',
'WCL',
'DM',
'YIELD',
'BASAL',
'ROOTD',
'IRRIG_DEM',
'HARVFR',
'RYE_YIELD',
'WEED_YIELD',
'DM_RYE_RM',
'DM_WEED_RM',
'DMH_RYE',
'DMH_WEED',
'DMH',
'WAWP', # # mm # Water in non-frozen root zone at wilting point
'MXPAW', # mm # maximum Profile available water
'PAW', # mm Profile available water at the time step
]
def test_trans_manual_harv(update_data=False):
test_nm = 'test_trans_manual_harv'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = _clean_harvest(days_harvest, matrix_weather)
np.random.seed(1)
days_harvest.loc[:, 'harv_trig'] = np.random.rand(len(days_harvest))
np.random.seed(2)
days_harvest.loc[:, 'harv_targ'] = np.random.rand(len(days_harvest))
np.random.seed(3)
days_harvest.loc[:, 'weed_dm_frac'] = np.random.rand(len(days_harvest))
out = _trans_manual_harv(days_harvest, matrix_weather)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out, dropable=False)
def _output_checks(out, correct_out, dropable=True):
"""
base checker
:param out: basgra data from current test
:param correct_out: expected basgra data
:param dropable: boolean, if True, can drop output keys, allows _output_checks to be used for not basgra data and
for new outputs to be dropped when comparing results.
:return:
"""
if dropable:
# should normally be empty, but is here to allow easy checking of old tests against versions with a new output
drop_keys_int = [
]
out2 = out.drop(columns=drop_keys_int)
else:
out2 = out.copy(True)
# check shapes
assert out2.shape == correct_out.shape, 'something is wrong with the output shapes'
# check datatypes
assert issubclass(out.values.dtype.type, np.float), 'outputs of the model should all be floats'
out2 = out2.values
correct_out2 = correct_out.values
out2[np.isnan(out2)] = -9999.99999
correct_out2[np.isnan(correct_out2)] = -9999.99999
# check values match for sample run
isclose = np.isclose(out2, correct_out2)
asmess = '{} values do not match between the output and correct output with rtol=1e-05, atol=1e-08'.format(
(~isclose).sum())
assert isclose.all(), asmess
print(' model passed test\n')
def test_org_basgra_nz(update_data=False):
print('testing original basgra_nz')
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
# test against my saved version (simply to have all columns
data_path = os.path.join(test_dir, 'test_org_basgra.csv')
if update_data:
out.to_csv(data_path)
print(' testing against full dataset')
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
# test to the original data provided by <NAME>ward
out.drop(columns=drop_keys, inplace=True) # remove all of the newly added keys
print(' testing against Simon Woodwards original data')
correct_out2 = get_org_correct_values()
_output_checks(out, correct_out2)
def test_irrigation_trigger(update_data=False):
print('testing irrigation trigger')
# note this is linked to test_leap, so any inputs changes there should be mapped here
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 15
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1 # irrigation to 100% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_irrigation_trigger_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_irrigation_fraction(update_data=False):
print('testing irrigation fraction')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 1
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .60 # irrigation of 60% of what is needed to get to field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_irrigation_fraction_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_water_short(update_data=False):
print('testing water shortage')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 5
matrix_weather.loc[matrix_weather.index > '2015-08-01', 'max_irr'] = 15
matrix_weather.loc[:, 'irr_trig'] = 0.8
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .90 # irrigation to 90% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_water_short_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_short_season(update_data=False):
print('testing short season')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 1
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .90 # irrigation to 90% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 61))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_short_season_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_variable_irr_trig_targ(update_data=False):
print('testing time variable irrigation triggers and targets')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[matrix_weather.index > '2013-08-01', 'irr_trig'] = 0.7
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather.loc[(matrix_weather.index < '2012-08-01'), 'irr_targ'] = 0.8
matrix_weather.loc[(matrix_weather.index > '2015-08-01'), 'irr_targ'] = 0.8
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1
doy_irr = list(range(305, 367)) + list(range(1, 61))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_variable_irr_trig_targ.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_irr_paw(update_data=False):
test_nm = 'test_irr_paw'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 5
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[:, 'irr_targ'] = 0.9
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1 # irrigation to 100% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
params['irr_frm_paw'] = 1
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_pet_calculation(update_data=False):
# note this test was not as throughrougly investigated as it was not needed for my work stream
print('testing pet calculation')
params, matrix_weather, days_harvest, doy_irr = establish_peyman_input()
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, dll_path='default',
supply_pet=False)
data_path = os.path.join(test_dir, 'test_pet_calculation.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
# Manual Harvest tests
def test_fixed_harvest_man(update_data=False):
test_nm = 'test_fixed_harvest_man'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['fixed_removal'] = 1
params['opt_harvfrin'] = 1
days_harvest = base_manual_harvest_data()
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 1000
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1000
days_harvest.loc[idx, 'harv_targ'] = 10
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2017-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 2000
days_harvest.loc[idx, 'harv_targ'] = 100
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_harv_trig_man(update_data=False):
# test manaual harvesting dates with a set trigger, weed fraction set to zero
test_nm = 'test_harv_trig_man'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['fixed_removal'] = 0
params['opt_harvfrin'] = 1
days_harvest = base_manual_harvest_data()
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.5
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 2200
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2017-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 1000
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_weed_fraction_man(update_data=False):
# test manual harvesting trig set to zero +- target with weed fraction above 0
test_nm = 'test_weed_fraction_man'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['fixed_removal'] = 0
params['opt_harvfrin'] = 1
days_harvest = base_manual_harvest_data()
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.5
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 2200
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0.5
idx = days_harvest.date >= '2017-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 1000
days_harvest.loc[idx, 'weed_dm_frac'] = 1
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
# automatic harvesting tests
def test_auto_harv_trig(update_data=False):
test_nm = 'test_auto_harv_trig'
print('testing: ' + test_nm)
# test auto harvesting dates with a set trigger, weed fraction set to zero
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['opt_harvfrin'] = 1
days_harvest = base_auto_harvest_data(matrix_weather)
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 3000
days_harvest.loc[idx, 'harv_targ'] = 2000
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.75
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 1500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, auto_harvest=True)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_auto_harv_fixed(update_data=False):
test_nm = 'test_auto_harv_fixed'
print('testing: ' + test_nm)
# test auto harvesting dates with a set trigger, weed fraction set to zero
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = base_auto_harvest_data(matrix_weather)
params['fixed_removal'] = 1
params['opt_harvfrin'] = 1
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 3000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.75
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, auto_harvest=True)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_weed_fraction_auto(update_data=False):
# test auto harvesting trig set +- target with weed fraction above 0
test_nm = 'test_weed_fraction_auto'
print('testing: ' + test_nm)
# test auto harvesting dates with a set trigger, weed fraction set to zero
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['opt_harvfrin'] = 1
days_harvest = base_auto_harvest_data(matrix_weather)
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 3000
days_harvest.loc[idx, 'harv_targ'] = 2000
days_harvest.loc[idx, 'weed_dm_frac'] = 1.25
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.75
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 1500
days_harvest.loc[idx, 'weed_dm_frac'] = 0.75
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, auto_harvest=True)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_weed_fixed_harv_auto(update_data=False):
# test auto fixed harvesting trig set +- target with weed fraction above 0
test_nm = 'test_weed_fixed_harv_auto'
print('testing: ' + test_nm)
# test auto harvesting dates with a set trigger, weed fraction set to zero
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = base_auto_harvest_data(matrix_weather)
params['fixed_removal'] = 1
params['opt_harvfrin'] = 1
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 3000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0.5
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.75
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 1
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, auto_harvest=True)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_reseed(update_data=False):
print('testing reseeding')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 1
matrix_weather.loc[matrix_weather.index > '2015-08-01', 'max_irr'] = 15
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .90 # irrigation to 90% of field capacity
# these values are set to make observable changes in the results and are not reasonable values.
params['reseed_harv_delay'] = 120
params['reseed_LAI'] = 3
params['reseed_TILG2'] = 10
params['reseed_TILG1'] = 40
params['reseed_TILV'] = 5000
params['reseed_CLV'] = 100
params['reseed_CRES'] = 25
params['reseed_CST'] = 10
params['reseed_CSTUB'] = 0.5
doy_irr = list(range(305, 367)) + list(range(1, 91))
temp = pd.DataFrame(columns=days_harvest.keys())
for i, y in enumerate(days_harvest.year.unique()):
if y == 2011:
continue
temp.loc[i, 'year'] = y
temp.loc[i, 'doy'] = 152
temp.loc[i, 'frac_harv'] = 0
temp.loc[i, 'harv_trig'] = -1
temp.loc[i, 'harv_targ'] = 0
temp.loc[i, 'weed_dm_frac'] = 0
temp.loc[i, 'reseed_trig'] = 0.75
temp.loc[i, 'reseed_basal'] = 0.88
days_harvest = | pd.concat((days_harvest, temp)) | pandas.concat |
# pylint: disable=E1101
from pandas.util.py3compat import StringIO, BytesIO, PY3
from datetime import datetime
from os.path import split as psplit
import csv
import os
import sys
import re
import unittest
import nose
from numpy import nan
import numpy as np
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
ExcelFile, TextFileReader, TextParser)
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
network,
ensure_clean)
import pandas.util.testing as tm
import pandas as pd
import pandas.lib as lib
from pandas.util import py3compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
from pandas._parser import OverflowError
from pandas.io.parsers import (ExcelFile, ExcelWriter, read_csv)
def _skip_if_no_xlrd():
try:
import xlrd
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9):
raise nose.SkipTest('xlrd not installed, skipping')
except ImportError:
raise nose.SkipTest('xlrd not installed, skipping')
def _skip_if_no_xlwt():
try:
import xlwt
except ImportError:
raise nose.SkipTest('xlwt not installed, skipping')
def _skip_if_no_openpyxl():
try:
import openpyxl
except ImportError:
raise nose.SkipTest('openpyxl not installed, skipping')
def _skip_if_no_excelsuite():
_skip_if_no_xlrd()
_skip_if_no_xlwt()
_skip_if_no_openpyxl()
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
_tsframe = tm.makeTimeDataFrame()[:5]
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
class ExcelTests(unittest.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
def test_parse_cols_int(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['', 'x']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.xls%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols=3)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['A', 'B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True, parse_cols=3)
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls file)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_parse_cols_list(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['', 'x']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.xls%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols=[0, 2, 3])
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols=[0, 2, 3])
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls file
tm.assert_frame_equal(df3, df2, check_names=False)
def test_parse_cols_str(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['', 'x']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.xls%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A:D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['A', 'B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True, parse_cols='A:D')
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df3, df2, check_names=False)
del df, df2, df3
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A,C,D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols='A,C,D')
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls file
tm.assert_frame_equal(df3, df2, check_names=False)
del df, df2, df3
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A,C:D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols='A,C:D')
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_excel_stop_iterator(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xls'))
parsed = excel_data.parse('Sheet1')
expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test3.xls'))
parsed = excel_data.parse('Sheet1')
expected = DataFrame([[np.nan]], columns=['Test'])
tm.assert_frame_equal(parsed, expected)
def test_excel_table(self):
_skip_if_no_xlrd()
pth = os.path.join(self.dirpath, 'test.xls')
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True)
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
df4 = xls.parse('Sheet1', index_col=0, parse_dates=True,
skipfooter=1)
df5 = xls.parse('Sheet1', index_col=0, parse_dates=True,
skip_footer=1)
tm.assert_frame_equal(df4, df.ix[:-1])
tm.assert_frame_equal(df4, df5)
def test_excel_read_buffer(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
pth = os.path.join(self.dirpath, 'test.xls')
f = open(pth, 'rb')
xls = ExcelFile(f)
# it works
xls.parse('Sheet1', index_col=0, parse_dates=True)
pth = os.path.join(self.dirpath, 'test.xlsx')
f = open(pth, 'rb')
xl = ExcelFile(f)
df = xl.parse('Sheet1', index_col=0, parse_dates=True)
def test_xlsx_table(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
pth = os.path.join(self.dirpath, 'test.xlsx')
xlsx = ExcelFile(pth)
df = xlsx.parse('Sheet1', index_col=0, parse_dates=True)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df3 = xlsx.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True)
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xlsx file
tm.assert_frame_equal(df3, df2, check_names=False)
df4 = xlsx.parse('Sheet1', index_col=0, parse_dates=True,
skipfooter=1)
df5 = xlsx.parse('Sheet1', index_col=0, parse_dates=True,
skip_footer=1)
| tm.assert_frame_equal(df4, df.ix[:-1]) | pandas.util.testing.assert_frame_equal |
#coding:utf-8
import pandas as pd
import numpy as np
import time
#拼接数据
def merge_table(df):
df['field_results'] = df['field_results'].astype(str)
if df.shape[0] > 1:
merge_df = "".join(list(df['field_results']))
else:
merge_df = df['field_results'].value[0]
return merge_df
if __name__ == '__main__':
begin_time = time.time()
data_part1 = pd.read_csv('f_train_20180204.csv',encoding='GBK')
data_part2 = pd.read_csv('f_train_20180204.csv',encoding="GBK")
#统计item的行数
print(len(set(data_part1.id)))
print(len(set(data_part2.id)))
#打印前五行
print(data_part1.head(5))
#打印后五行
print(data_part1.tail(5))
#打印所有列标签
print(data_part1.columns)
#改变列标签
df2 = data_part1.rename(columns={'SNP1':'SNP99'})
data_part1.rename(columns={'SNP1': 'SNP99'},inplace=True)
print(data_part1.head(5))
#选择列或者行
print("******************************")
print(data_part1[['id','SNP3']])
print(data_part1[(data_part1['SNP3'] > 2) & (data_part1['SNP11'] == 3)])
#处理 丢失项目
# dropna 若有丢失项目,就把改行该列丢掉
data_part1.dropna()
# fillna 填充丢失项
#data_part2.fillna(value = 0) 可以填充平均值
mean = data_part2['SNP11'].mean()
data_part2['SNP11'].fillna(mean)
#创建新列
data_part1['newSNP1'] = data_part1['SNP11']
data_part1['newSNP2'] = data_part1['SNP11'] + 10
data_part1['newSNP3'] = data_part1['SNP11'] + data_part1['SNP12']
print("创建新列。。。。。")
print(data_part1.head(5))
#groupby,聚合,分组级运算
print("groupby...............")
print(data_part1.groupby('SNP11').sum())
print(data_part1.groupby(['SNP11','SNP12']).count())
#透视表操作
print("pivot......table....")
df1 = | pd.pivot_table(data_part1, values='newSNP1',index=['newSNP2','newSNP3'],columns=['SNP4']) | pandas.pivot_table |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexinst in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Squence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
| OrderedDict() | pandas.compat.OrderedDict |
""" MCH API ver 0.1
Author: <NAME>
License: CC-BY-SA 4.0
2020 Mexico
"""
import os
from flask import Flask, jsonify, json, Response
from flask_restful import Api, Resource, reqparse, abort
from flask_mysqldb import MySQL
import pandas as pd
import numpy as np
import json
from os.path import abspath, dirname, join
app = Flask(__name__)
# Mysql connection
app.config['MYSQL_HOST'] = os.getenv('MCH_DB_HOST')
app.config['MYSQL_USER'] = os.getenv('MCH_DB_USER')
app.config['MYSQL_PASSWORD'] = os.getenv('MCH_DB_PASSWORD')
app.config['MYSQL_DB'] = os.getenv('MCH_DB_NAME')
app.config['MYSQL_PORT'] = int(os.getenv('MCH_DB_PORT'))
app.config['SECRET_KEY'] = os.getenv("APP_SECRET")
mysql = MySQL(app)
api = Api(app)
# dataframe for stations table
stnmdata = pd.DataFrame()
# read MCH languaje definition from mch.dbn
filemch = open('mch.dbn', 'r')
filemch.readline() # odbc connector
filemch.readline() # mysql5
filemch.readline() # interface languaje
mchlang = filemch.readline() # database languaje
# read fields and tables names definition file
deftbfl = pd.read_csv('MCHtablasycampos.def', sep = "\t", names = ['sec','type', 'id_sec', 'esp', 'eng', 'fra', '4', 'comment'], encoding='utf_8')
# new dataframe for especific languaje
ltbfl = pd.DataFrame()
# looking for especific fields and tables for the languaje
if int(mchlang) == 1:
ltbfl = deftbfl[['id_sec','esp']]
ltbfl.set_index('id_sec')
if int(mchlang) == 2:
ltbfl = deftbfl[['id_sec','eng']]
ltbfl.set_index('id_sec')
if int(mchlang) == 3:
ltbfl = deftbfl[['id_sec','fra']]
ltbfl.set_index('id_sec')
def deg_to_dms(deg):
d = int(deg)
md = abs(deg - d) * 60
m = int(md)
sd = (md - m) * 60
return [d, m, sd]
class stations(Resource):
def get(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
strqry='select * from ' +stntable.iloc[0,1] +' order by ' +stnfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Station','StationName','StationName2','TimeZone','Longitude','Latitude','Altitude','Longitude2','Latitude2','DMSlongitude','DMSLatitude','Statee','RegManagmt','Catchment','Subcatchment',
'OperatnlRegion','HydroReg','RH(2)','Municipality','CodeB','CodeG','CodeCB','CodePB','CodeE','CodeCL','CodeHG','CodePG','CodeNw','Code1','Code2','Code3','MaxOrdStrgLvl','MaxOrdStrgVol',
'MaxExtStrgLvl','MaxExtStrgVol','SpillwayLevel','SpillwayStorage','FreeSpillwayLevel','FreeSpillwayStorage','DeadStrgLevel','DeadStrgCapac','UsableStorageCapLev','UsableStorage','HoldingStorage',
'Key1fil','Key2fil','Key3fil','CritLevelSta','MinLevelSta','MaxLevelSta','CritFlow','MinDischarge','MaxDischarge','Stream','Distance','Infrastructure','Type','Usee'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(stations, "/API/stations")
qry_station_req_arg = reqparse.RequestParser()
pars = qry_station_req_arg.add_argument("stn_id",type=str,help="Station ID",required=True)
class qry_station(Resource):
def get(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
args = parser.parse_args()
stn_id = args.get('stn_id')
strqry='select * from ' +stntable.iloc[0,1] +' where ' +stnfield.iloc[0,1] +'="'+ stn_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
qrystation = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=qrystation,columns=['Station','StationName','StationName2','TimeZone','Longitude','Latitude','Altitude','Longitude2','Latitude2','DMSlongitude','DMSLatitude','Statee','RegManagmt','Catchment','Subcatchment',
'OperatnlRegion','HydroReg','RH','Municipality','CodeB','CodeG','CodeCB','CodePB','CodeE','CodeCL','CodeHG','CodePG','CodeNw','Code1','Code2','Code3','MaxOrdStrgLvl','MaxOrdStrgVol',
'MaxExtStrgLvl','MaxExtStrgVol','SpillwayLevel','SpillwayStorage','FreeSpillwayLevel','FreeSpillwayStorage','DeadStrgLevel','DeadStrgCapac','UsableStorageCapLev','UsableStorage','HoldingStorage',
'Key1fil','Key2fil','Key3fil','CritLevelSta','MinLevelSta','MaxLevelSta','CritFlow','MinDischarge','MaxDischarge','Stream','Distance','Infrastructure','Type','Usee'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Station not found...")
#abort_if_stn_not_exist("stn_id")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('stn_id')
parser.add_argument('stn_name')
parser.add_argument('stn_name2')
parser.add_argument('t_zone')
parser.add_argument('long')
parser.add_argument('lat')
parser.add_argument('alt')
parser.add_argument('state_id')
parser.add_argument('reg_m')
parser.add_argument('catchm')
parser.add_argument('s_cat')
parser.add_argument('o_reg')
parser.add_argument('hydro_r')
parser.add_argument('rh')
parser.add_argument('mun_id')
parser.add_argument('mosl')
parser.add_argument('mosv')
parser.add_argument('mesl')
parser.add_argument('mesv')
parser.add_argument('s_level')
parser.add_argument('s_stor')
parser.add_argument('fs_level')
parser.add_argument('fs_stor')
parser.add_argument('ds_level')
parser.add_argument('ds_cap')
parser.add_argument('us_capl')
parser.add_argument('ustor')
parser.add_argument('hstor')
parser.add_argument('crl_s')
parser.add_argument('mnl_s')
parser.add_argument('mxl_s')
parser.add_argument('cr_f')
parser.add_argument('mn_dis')
parser.add_argument('mx_dis')
parser.add_argument('stream')
parser.add_argument('dist')
parser.add_argument('infr')
parser.add_argument('type')
parser.add_argument('use')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
stn_id = args.get('stn_id')
stn_name = args.get('stn_name')
stn_name2 = args.get('stn_name2')
t_zone = args.get('t_zone')
long2 = args.get('long')
lat2 = args.get('lat')
alt = args.get('alt')
state_id = args.get('state_id')
reg_m = args.get('reg_m')
catchm = args.get('catchm')
s_cat = args.get('s_cat')
o_reg = args.get('o_reg')
hydro_r = args.get('hydro_r')
rh = args.get('rh')
mun_id = args.get('mun_id')
mosl = args.get('mosl')
mosv = args.get('mosv')
mesl = args.get('mesl')
mesv = args.get('mesv')
s_level = args.get('s_level')
s_stor = args.get('s_stor')
fs_level = args.get('fs_level')
fs_stor = args.get('fs_stor')
ds_level = args.get('ds_level')
ds_cap = args.get('ds_cap')
us_capl = args.get('us_capl')
ustor = args.get('ustor')
hstor = args.get('hstor')
crl_s = args.get('crl_s')
mnl_s = args.get('mnl_s')
mxl_s = args.get('mxl_s')
cr_f = args.get('cr_f')
mn_dis = args.get('mn_dis')
mx_dis = args.get('mx_dis')
stream = args.get('stream')
dist = args.get('dist')
infr = args.get('infr')
typee = args.get('type')
usee = args.get('use')
# check if input is at file
if jfile in (None, ''):
Latitude=deg_to_dms(float(lat2))
Longitude=deg_to_dms(float(long2))
slong2=str(Longitude[0])+'°'+str(Longitude[1]) +'´' +str(Longitude[2])
slat2=str(Latitude[0])+'°'+str(Latitude[1]) +'´' +str(Latitude[2])
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(stn_id) +'","' +str(stn_name) +'","' +str(stn_name2) +'","' +str(t_zone) +'","' + str(long2)
+ '","' +str(lat2) +'","' +str(alt) +'","' +str(long2) +'","' +str(lat2) +'","' +slong2 +'","' +slat2 +'","' +str(state_id) +'","' +str(reg_m)
+ '","' +str(catchm) +'","' +str(s_cat) +'","' +str(o_reg) +'","' +str(hydro_r) +'","' +str(rh) +'","' +str(mun_id) +'","","","","","","","","","","","","","' + str(mosl)
+ '","' +str(mosv) +'","' +str(mesl) +'","' +str(mesv) +'","' +str(s_level) +'","' +str(s_stor) +'","' +str(fs_level) +'","' + str(fs_stor)
+ '","' +str(ds_level) +'","' +str(ds_cap) +'","' +str(us_capl) +'","' +str(ustor) +'","' +str(hstor) +'","","","","' +str(crl_s) +'","' + str(mnl_s)
+ '","' +str(mxl_s) +'","' +str(cr_f) +'","' +str(mn_dis) +'","' +str(mx_dis) +'","' +str(stream) +'","' +str(dist) +'","' +str(infr) +'","' + str(typee)
+ '","' +str(usee) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'","' +data.iloc[int(n),3] +'","' + data.iloc[int(n),4]
+ '","' +data.iloc[int(n),5] +'","' +str(data.iloc[int(n),6]) +'","' +str(data.iloc[int(n),7]) +'","' +str(data.iloc[int(n),8]) +'","' +data.iloc[int(n),9] +'","' +data.iloc[int(n),10] +'","' +data.iloc[int(n),11]
+ '","' +data.iloc[int(n),12] + '","' +data.iloc[int(n),13] +'","' +data.iloc[int(n),14] +'","' +data.iloc[int(n),15] +'","' +data.iloc[int(n),16] +'","' +data.iloc[int(n),17] +'","' +data.iloc[int(n),18]
+ '","' +data.iloc[int(n),19] +'","' +data.iloc[int(n),20] +'","' +data.iloc[int(n),21] +'","' +data.iloc[int(n),22] +'","' +data.iloc[int(n),23] +'","' +data.iloc[int(n),24] +'","' +data.iloc[int(n),25]
+ '","' +data.iloc[int(n),26] + '","' +data.iloc[int(n),27] +'","' +data.iloc[int(n),28] +'","' +data.iloc[int(n),29] +'","' +data.iloc[int(n),30] +'","' +data.iloc[int(n),31]
+ '","' +data.iloc[int(n),32] +'","' +data.iloc[int(n),33] +'","' +data.iloc[int(n),34] +'","' +data.iloc[int(n),35] +'","' +data.iloc[int(n),36] +'","' +data.iloc[int(n),37] +'","' + data.iloc[int(n),38]
+ '","' +data.iloc[int(n),39] +'","' +data.iloc[int(n),40] +'","' +data.iloc[int(n),41] +'","' +data.iloc[int(n),42] +'","' +data.iloc[int(n),43] +'","' +data.iloc[int(n),44] +'","' +data.iloc[int(n),45]
+ '","' +data.iloc[int(n),46] +'","' +data.iloc[int(n),47] +'","' + data.iloc[int(n),48] +'","' +data.iloc[int(n),49] +'","' +data.iloc[int(n),50] +'","' +data.iloc[int(n),51] +'","' +data.iloc[int(n),52]
+ '","' +data.iloc[int(n),53] +'","' +data.iloc[int(n),54] +'","' +data.iloc[int(n),55] +'","' +data.iloc[int(n),56] +'","' +data.iloc[int(n),57] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'","' +data.iloc[0,3] +'","' + data.iloc[0,4]
+ '","' +data.iloc[0,5] +'","' +str(data.iloc[0,6]) +'","' +str(data.iloc[0,7]) +'","' +str(data.iloc[0,8]) +'","' +data.iloc[0,9] +'","' +data.iloc[0,10] +'","' +data.iloc[0,11]
+ '","' +data.iloc[0,12] + '","' +data.iloc[0,13] +'","' +data.iloc[0,14] +'","' +data.iloc[0,15] +'","' +data.iloc[0,16] +'","' +data.iloc[0,17] +'","' +data.iloc[0,18]
+ '","' +data.iloc[0,19] +'","' +data.iloc[0,20] +'","' +data.iloc[0,21] +'","' +data.iloc[0,22] +'","' +data.iloc[0,23] +'","' +data.iloc[0,24] +'","' +data.iloc[0,25]
+ '","' +data.iloc[0,26] + '","' +data.iloc[0,27] +'","' +data.iloc[0,28] +'","' +data.iloc[0,29] +'","' +data.iloc[0,30] +'","' +data.iloc[0,31]
+ '","' +data.iloc[0,32] +'","' +data.iloc[0,33] +'","' +data.iloc[0,34] +'","' +data.iloc[0,35] +'","' +data.iloc[0,36] +'","' +data.iloc[0,37] +'","' + data.iloc[0,38]
+ '","' +data.iloc[0,39] +'","' +data.iloc[0,40] +'","' +data.iloc[0,41] +'","' +data.iloc[0,42] +'","' +data.iloc[0,43] +'","' +data.iloc[0,44] +'","' +data.iloc[0,45]
+ '","' +data.iloc[0,46] +'","' +data.iloc[0,47] +'","' + data.iloc[0,48] +'","' +data.iloc[0,49] +'","' +data.iloc[0,50] +'","' +data.iloc[0,51] +'","' +data.iloc[0,52]
+ '","' +data.iloc[0,53] +'","' +data.iloc[0,54] +'","' +data.iloc[0,55] +'","' +data.iloc[0,56] +'","' +data.iloc[0,57] +'")')
qry.execute(strqry)
return 'Station stored',201
def delete(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
args = parser.parse_args()
stn_id = args.get('stn_id')
strqry='delete from ' +stntable.iloc[0,1] +' where ' +stnfield.iloc[0,1] +'="'+ stn_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Station deleted',204
api.add_resource(qry_station, "/API/stations/qry_station")
class stngroups(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
strqry='select distinct(' +nfield.iloc[0,1] +') from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Stngroup'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(stngroups, "/API/stngroups")
class qry_stngroup(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
parser = reqparse.RequestParser()
parser.add_argument('stngp_id')
args = parser.parse_args()
stngp_id = args.get('stngp_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ stngp_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Stngroup','Secuen','Station'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Stationgroup not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
parser = reqparse.RequestParser()
parser.add_argument('file')
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
tdata=len(data.index)
rows=list(range(0,tdata))
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
return 'Stationgroup stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
parser = reqparse.RequestParser()
parser.add_argument('stngp_id')
args = parser.parse_args()
stngp_id = args.get('stngp_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ stngp_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Stationgroup deleted',204
api.add_resource(qry_stngroup, "/API/stngroups/qry_stngroup")
class variables(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
nfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
strqry='select distinct(' +nfield.iloc[0,1] +') from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Variable'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(variables, "/API/variables")
class qry_variable(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
nfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
parser = reqparse.RequestParser()
parser.add_argument('var_id')
args = parser.parse_args()
var_id = args.get('var_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ var_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Variable','VariabAbbrev','VariabDescrn','TableName','Unit','TypeDDorDE','CumulType','NbrDecimal','CalcbyGrp','CalcDTaD'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Variable not found...")
return parsed
api.add_resource(qry_variable, "/API/variables/qry_variable")
class states(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
nfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Statee','State2','Statename'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(states, "/API/states")
class qry_state(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
nfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
parser = reqparse.RequestParser()
parser.add_argument('state_id')
args = parser.parse_args()
state_id = args.get('state_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ state_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Statee','State2','Statename'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="State not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('state_id')
parser.add_argument('state_2')
parser.add_argument('state_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
state_id = args.get('state_id')
state_2 = args.get('state_2')
state_name = args.get('state_name')
# check if input is at file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(state_id) +'","' +str(state_2) +'","' +str(state_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'State stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
nfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
parser = reqparse.RequestParser()
parser.add_argument('state_id')
args = parser.parse_args()
stngp_id = args.get('state_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ state_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'State deleted',204
api.add_resource(qry_state, "/API/states/qry_state")
class municipalities(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntMunicipios']
nfield = ltbfl[ltbfl['id_sec'] == 'ncMunicipio']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = | pd.DataFrame(data=dataqry,columns=['Municipality','Municipality2','MunicipalityName']) | pandas.DataFrame |
"""
Test cases for the wiutils.get_lowest_taxon function.
"""
import numpy as np
import pandas as pd
import pytest
from wiutils.extraction import get_lowest_taxon
@pytest.fixture(scope="function")
def images():
return pd.DataFrame(
{
"class": [
"Mammalia",
np.nan,
"Aves",
"No CV Result",
"Mammalia",
"Mammalia",
"Mammalia",
],
"order": [
np.nan,
np.nan,
"Psittaciformes",
"No CV Result",
"Carnivora",
"Rodentia",
"Perissodactyla",
],
"family": [
np.nan,
np.nan,
"Psittacidae",
"No CV Result",
"Felidae",
"No CV Result",
"Tapiridae",
],
"genus": [
np.nan,
np.nan,
"Ara",
"No CV Result",
np.nan,
"No CV Result",
"Tapirus",
],
"species": [
np.nan,
np.nan,
"macao",
"No CV Result",
np.nan,
"No CV Result",
np.nan,
],
}
)
def test_taxa(images):
result = get_lowest_taxon(images, return_rank=False)
expected = pd.Series(
["Mammalia", np.nan, "Ara macao", np.nan, "Felidae", "Rodentia", "Tapirus"]
)
pd.testing.assert_series_equal(result, expected)
def test_ranks(images):
_, result = get_lowest_taxon(images, return_rank=True)
expected = pd.Series(
["class", np.nan, "species", np.nan, "family", "order", "genus"]
)
| pd.testing.assert_series_equal(result, expected) | pandas.testing.assert_series_equal |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import warnings
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import numpy as np # type: ignore
import pandas as pd # type: ignore
from elasticsearch import Elasticsearch
# Default number of rows displayed (different to pandas where ALL could be displayed)
DEFAULT_NUM_ROWS_DISPLAYED = 60
DEFAULT_CHUNK_SIZE = 10000
DEFAULT_CSV_BATCH_OUTPUT_SIZE = 10000
DEFAULT_PROGRESS_REPORTING_NUM_ROWS = 10000
DEFAULT_ES_MAX_RESULT_WINDOW = 10000 # index.max_result_window
DEFAULT_PAGINATION_SIZE = 5000 # for composite aggregations
PANDAS_VERSION: Tuple[int, ...] = tuple(
int(part) for part in pd.__version__.split(".") if part.isdigit()
)[:2]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
EMPTY_SERIES_DTYPE = pd.Series().dtype
def build_pd_series(
data: Dict[str, Any], dtype: Optional[np.dtype] = None, **kwargs: Any
) -> pd.Series:
"""Builds a pd.Series while squelching the warning
for unspecified dtype on empty series
"""
dtype = dtype or (EMPTY_SERIES_DTYPE if not data else dtype)
if dtype is not None:
kwargs["dtype"] = dtype
return pd.Series(data, **kwargs)
def docstring_parameter(*sub: Any) -> Callable[[Any], Any]:
def dec(obj: Any) -> Any:
obj.__doc__ = obj.__doc__.format(*sub)
return obj
return dec
class SortOrder(Enum):
ASC = 0
DESC = 1
@staticmethod
def reverse(order: "SortOrder") -> "SortOrder":
if order == SortOrder.ASC:
return SortOrder.DESC
return SortOrder.ASC
@staticmethod
def to_string(order: "SortOrder") -> str:
if order == SortOrder.ASC:
return "asc"
return "desc"
@staticmethod
def from_string(order: str) -> "SortOrder":
if order == "asc":
return SortOrder.ASC
return SortOrder.DESC
def elasticsearch_date_to_pandas_date(
value: Union[int, str], date_format: Optional[str]
) -> pd.Timestamp:
"""
Given a specific Elasticsearch format for a date datatype, returns the
'partial' `to_datetime` function to parse a given value in that format
**Date Formats: https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html#built-in-date-formats
Parameters
----------
value: Union[int, str]
The date value.
date_format: str
The Elasticsearch date format (ex. 'epoch_millis', 'epoch_second', etc.)
Returns
-------
datetime: pd.Timestamp
From https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html
Date formats can be customised, but if no format is specified then it uses the default:
"strict_date_optional_time||epoch_millis"
Therefore if no format is specified we assume either strict_date_optional_time
or epoch_millis.
"""
if date_format is None or isinstance(value, (int, float)):
try:
return pd.to_datetime(
value, unit="s" if date_format == "epoch_second" else "ms"
)
except ValueError:
return pd.to_datetime(value)
elif date_format == "epoch_millis":
return pd.to_datetime(value, unit="ms")
elif date_format == "epoch_second":
return pd.to_datetime(value, unit="s")
elif date_format == "strict_date_optional_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "basic_date":
return pd.to_datetime(value, format="%Y%m%d")
elif date_format == "basic_date_time":
return pd.to_datetime(value, format="%Y%m%dT%H%M%S.%f", exact=False)
elif date_format == "basic_date_time_no_millis":
return pd.to_datetime(value, format="%Y%m%dT%H%M%S%z")
elif date_format == "basic_ordinal_date":
return | pd.to_datetime(value, format="%Y%j") | pandas.to_datetime |
import json
import os
import pandas
import pyperclip
import selenium
import sys
import time
from bs4 import *
# 공통 로그 함수
def line_logging(*messages):
import datetime
log_time = datetime.datetime.today().strftime('[%Y/%m/%d %H:%M:%S]')
log = list()
for message in messages:
log.append(str(message))
print(log_time + ':[' + ' '.join(log) + ']', flush=True)
# crawling function
def get_post(p_url, p_param, p_sleep_time=2, p_flag_view_url=True):
import time
import urllib
import requests
url_full_path = p_url + '?' + urllib.parse.urlencode(p_param)
if p_flag_view_url:
line_logging(url_full_path)
headers = {
'content-type': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/5.0 AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15',
'referer': 'http://finance.daum.net/domestic/exchange/COMMODITY-%2FCLc1'
}
try:
results = requests.get(url_full_path, headers=headers)
time.sleep(p_sleep_time)
return results
except:
time.sleep(p_sleep_time * 2)
results = requests.get(url_full_path, headers=headers)
time.sleep(p_sleep_time)
return results
# ====================================================================================================================================================================================================
# HTML Parsing
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# 국내 지수 (코스피/코스닥) 수집
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def collect_korea(p_market, page_no=1, p_sleep_time=2):
url = "https://finance.naver.com/sise/sise_index_day.nhn"
list_price = list()
param = {
'code': p_market,
'page': page_no
}
results = get_post(url, param, p_sleep_time=p_sleep_time)
price_table = BeautifulSoup(results.content, "html.parser").find_all('table')[0]
price_trs = BeautifulSoup(str(price_table), "html.parser").find_all('tr')
for price_tr in price_trs:
row = BeautifulSoup(str(price_tr), "html.parser").find_all('td')
if len(row) > 3:
list_price.append({
'eod_date': int(row[0].text.strip().replace('.', '')),
'item_code': p_market,
'price_close': float(row[1].text.strip().replace(',', '')),
'trade_amount': int(row[4].text.strip().replace(',', '')),
'diff': float(row[2].text.strip().replace(',', '')),
'rate': float(row[3].text.strip().replace(',', '').replace('%', '').replace('+', '')),
})
return pandas.DataFrame(list_price)
# 국내 지수 (코스피/코스닥) 수집 결과 저장
def save_kospi_and_kosdaq(p_market, page_to=10):
page_from = 1
df_index = | pandas.DataFrame() | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: pandas.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if isinstance(analysisFrame[columnName].dtypes, str):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces a infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomoly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if meanValue == numpy.nan:
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if float(sigmaValue) is float(numpy.nan):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if float(sigmaRangeValue) is float(numpy.nan):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomoly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisCleaned[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": pandas.StringDtype(),
"dpnandReads": pandas.StringDtype(),
"qpnandReads": pandas.StringDtype(),
"verifynandReads": pandas.StringDtype(),
"softnandReads": pandas.StringDtype(),
"spnandWrites": pandas.StringDtype(),
"dpnandWrites": pandas.StringDtype(),
"qpnandWrites": pandas.StringDtype(),
"opnandWrites": pandas.StringDtype(),
"xpnandWrites": pandas.StringDtype(),
"unalignedHostWriteCmd": pandas.StringDtype(),
"randomReadCmd": pandas.StringDtype(),
"randomWriteCmd": pandas.StringDtype(),
"secVenCmdCount": pandas.StringDtype(),
"secVenCmdCountFails": pandas.StringDtype(),
"mrrFailOnSlcOtfPages": pandas.StringDtype(),
"mrrFailOnSlcOtfPageMarkedAsMBPD": pandas.StringDtype(),
"lcorParitySeedErrors": pandas.StringDtype(),
"fwDownloadFails": pandas.StringDtype(),
"fwAuthenticationFails": pandas.StringDtype(),
"fwSecurityRev": pandas.StringDtype(),
"isCapacitorHealthly": pandas.StringDtype(),
"fwWRCounter": pandas.StringDtype(),
"sysAreaEraseFailCount": pandas.StringDtype(),
"iusDefragRelocated4DataRetention": pandas.StringDtype(),
"I2CTemp": pandas.StringDtype(),
"lbaMismatchOnNandReads": pandas.StringDtype(),
"currentWriteStreamsCount": pandas.StringDtype(),
"nandWritesPerStream1": pandas.StringDtype(),
"nandWritesPerStream2": pandas.StringDtype(),
"nandWritesPerStream3": pandas.StringDtype(),
"nandWritesPerStream4": pandas.StringDtype(),
"nandWritesPerStream5": pandas.StringDtype(),
"nandWritesPerStream6": pandas.StringDtype(),
"nandWritesPerStream7": pandas.StringDtype(),
"nandWritesPerStream8": pandas.StringDtype(),
"nandWritesPerStream9": pandas.StringDtype(),
"nandWritesPerStream10": pandas.StringDtype(),
"nandWritesPerStream11": pandas.StringDtype(),
"nandWritesPerStream12": pandas.StringDtype(),
"nandWritesPerStream13": pandas.StringDtype(),
"nandWritesPerStream14": pandas.StringDtype(),
"nandWritesPerStream15": pandas.StringDtype(),
"nandWritesPerStream16": pandas.StringDtype(),
"nandWritesPerStream17": pandas.StringDtype(),
"nandWritesPerStream18": pandas.StringDtype(),
"nandWritesPerStream19": pandas.StringDtype(),
"nandWritesPerStream20": pandas.StringDtype(),
"nandWritesPerStream21": pandas.StringDtype(),
"nandWritesPerStream22": pandas.StringDtype(),
"nandWritesPerStream23": pandas.StringDtype(),
"nandWritesPerStream24": pandas.StringDtype(),
"nandWritesPerStream25": pandas.StringDtype(),
"nandWritesPerStream26": pandas.StringDtype(),
"nandWritesPerStream27": pandas.StringDtype(),
"nandWritesPerStream28": pandas.StringDtype(),
"nandWritesPerStream29": pandas.StringDtype(),
"nandWritesPerStream30": pandas.StringDtype(),
"nandWritesPerStream31": pandas.StringDtype(),
"nandWritesPerStream32": pandas.StringDtype(),
"hostSoftReadSuccess": pandas.StringDtype(),
"xorInvokedCount": pandas.StringDtype(),
"comresets": pandas.StringDtype(),
"syncEscapes": pandas.StringDtype(),
"rErrHost": pandas.StringDtype(),
"rErrDevice": pandas.StringDtype(),
"iCrcs": pandas.StringDtype(),
"linkSpeedDrops": pandas.StringDtype(),
"mrrXtrapageEvents": pandas.StringDtype(),
"mrrToppageEvents": pandas.StringDtype(),
"hostXorSuccessCount": pandas.StringDtype(),
"hostXorFailCount": pandas.StringDtype(),
"nandWritesWithPreReadPerStream1": pandas.StringDtype(),
"nandWritesWithPreReadPerStream2": pandas.StringDtype(),
"nandWritesWithPreReadPerStream3": pandas.StringDtype(),
"nandWritesWithPreReadPerStream4": pandas.StringDtype(),
"nandWritesWithPreReadPerStream5": pandas.StringDtype(),
"nandWritesWithPreReadPerStream6": pandas.StringDtype(),
"nandWritesWithPreReadPerStream7": pandas.StringDtype(),
"nandWritesWithPreReadPerStream8": pandas.StringDtype(),
"nandWritesWithPreReadPerStream9": pandas.StringDtype(),
"nandWritesWithPreReadPerStream10": pandas.StringDtype(),
"nandWritesWithPreReadPerStream11": pandas.StringDtype(),
"nandWritesWithPreReadPerStream12": pandas.StringDtype(),
"nandWritesWithPreReadPerStream13": pandas.StringDtype(),
"nandWritesWithPreReadPerStream14": pandas.StringDtype(),
"nandWritesWithPreReadPerStream15": pandas.StringDtype(),
"nandWritesWithPreReadPerStream16": pandas.StringDtype(),
"nandWritesWithPreReadPerStream17": pandas.StringDtype(),
"nandWritesWithPreReadPerStream18": pandas.StringDtype(),
"nandWritesWithPreReadPerStream19": pandas.StringDtype(),
"nandWritesWithPreReadPerStream20": pandas.StringDtype(),
"nandWritesWithPreReadPerStream21": pandas.StringDtype(),
"nandWritesWithPreReadPerStream22": pandas.StringDtype(),
"nandWritesWithPreReadPerStream23": pandas.StringDtype(),
"nandWritesWithPreReadPerStream24": pandas.StringDtype(),
"nandWritesWithPreReadPerStream25": pandas.StringDtype(),
"nandWritesWithPreReadPerStream26": pandas.StringDtype(),
"nandWritesWithPreReadPerStream27": pandas.StringDtype(),
"nandWritesWithPreReadPerStream28": pandas.StringDtype(),
"nandWritesWithPreReadPerStream29": pandas.StringDtype(),
"nandWritesWithPreReadPerStream30": pandas.StringDtype(),
"nandWritesWithPreReadPerStream31": pandas.StringDtype(),
"nandWritesWithPreReadPerStream32": pandas.StringDtype(),
"dramCorrectables8to1": pandas.StringDtype(),
"driveRecoveryCount": pandas.StringDtype(),
"mprLiteReads": pandas.StringDtype(),
"eccErrOnMprLiteReads": pandas.StringDtype(),
"readForwardingXpPreReadCount": pandas.StringDtype(),
"readForwardingUpPreReadCount": pandas.StringDtype(),
"readForwardingLpPreReadCount": pandas.StringDtype(),
"pweDefectCompensationCredit": pandas.StringDtype(),
"planarXorRebuildFailure": pandas.StringDtype(),
"itgXorRebuildFailure": pandas.StringDtype(),
"planarXorRebuildSuccess": pandas.StringDtype(),
"itgXorRebuildSuccess": pandas.StringDtype(),
"xorLoggingSkippedSIcBand": pandas.StringDtype(),
"xorLoggingSkippedDieOffline": pandas.StringDtype(),
"xorLoggingSkippedDieAbsent": pandas.StringDtype(),
"xorLoggingSkippedBandErased": pandas.StringDtype(),
"xorLoggingSkippedNoEntry": pandas.StringDtype(),
"xorAuditSuccess": pandas.StringDtype(),
"maxSuspendCount": pandas.StringDtype(),
"suspendLimitPerPrgm": pandas.StringDtype(),
"psrCountStats": pandas.StringDtype(),
"readNandBuffCount": pandas.StringDtype(),
"readNandBufferRspErrorCount": pandas.StringDtype(),
"ddpNandWrites": pandas.StringDtype(),
"totalDeallocatedSectorsInCore": pandas.StringDtype(),
"prefetchHostReads": pandas.StringDtype(),
"hostReadtoDSMDCount": pandas.StringDtype(),
"hostWritetoDSMDCount": pandas.StringDtype(),
"snapReads4k": pandas.StringDtype(),
"snapReads8k": pandas.StringDtype(),
"snapReads16k": pandas.StringDtype(),
"xorLoggingTriggered": pandas.StringDtype(),
"xorLoggingAborted": pandas.StringDtype(),
"xorLoggingSkippedHistory": pandas.StringDtype(),
"deckDisturbRelocationUD": pandas.StringDtype(),
"deckDisturbRelocationMD": pandas.StringDtype(),
"deckDisturbRelocationLD": pandas.StringDtype(),
"bbdProactiveReadRetry": pandas.StringDtype(),
"statsRestoreRequired": pandas.StringDtype(),
"statsAESCount": pandas.StringDtype(),
"statsHESCount": pandas.StringDtype(),
"psrCountStats1": pandas.StringDtype(),
"psrCountStats2": pandas.StringDtype(),
"psrCountStats3": pandas.StringDtype(),
"psrCountStats4": pandas.StringDtype(),
"psrCountStats5": pandas.StringDtype(),
"psrCountStats6": pandas.StringDtype(),
"psrCountStats7": pandas.StringDtype(),
"psrCountStats8": pandas.StringDtype(),
"psrCountStats9": pandas.StringDtype(),
"psrCountStats10": pandas.StringDtype(),
"psrCountStats11": pandas.StringDtype(),
"psrCountStats12": pandas.StringDtype(),
"psrCountStats13": pandas.StringDtype(),
"psrCountStats14": pandas.StringDtype(),
"psrCountStats15": pandas.StringDtype(),
"psrCountStats16": pandas.StringDtype(),
"psrCountStats17": pandas.StringDtype(),
"psrCountStats18": pandas.StringDtype(),
"psrCountStats19": pandas.StringDtype(),
"psrCountStats20": pandas.StringDtype(),
"psrCountStats21": pandas.StringDtype(),
"psrCountStats22": pandas.StringDtype(),
"psrCountStats23": pandas.StringDtype(),
"psrCountStats24": pandas.StringDtype(),
"psrCountStats25": pandas.StringDtype(),
"psrCountStats26": pandas.StringDtype(),
"psrCountStats27": pandas.StringDtype(),
"psrCountStats28": pandas.StringDtype(),
"psrCountStats29": pandas.StringDtype(),
"psrCountStats30": pandas.StringDtype(),
"psrCountStats31": pandas.StringDtype(),
"psrCountStats32": pandas.StringDtype(),
"psrCountStats33": pandas.StringDtype(),
"psrCountStats34": pandas.StringDtype(),
"psrCountStats35": pandas.StringDtype(),
"psrCountStats36": pandas.StringDtype(),
"psrCountStats37": pandas.StringDtype(),
"psrCountStats38": pandas.StringDtype(),
"psrCountStats39": pandas.StringDtype(),
"psrCountStats40": pandas.StringDtype(),
"psrCountStats41": pandas.StringDtype(),
"psrCountStats42": pandas.StringDtype(),
"psrCountStats43": pandas.StringDtype(),
"psrCountStats44": pandas.StringDtype(),
"psrCountStats45": pandas.StringDtype(),
"psrCountStats46": pandas.StringDtype(),
"psrCountStatsHigh1": pandas.StringDtype(),
"psrCountStatsHigh2": pandas.StringDtype(),
"psrCountStatsHigh3": pandas.StringDtype(),
"psrCountStatsHigh4": pandas.StringDtype(),
"psrCountStatsHigh5": pandas.StringDtype(),
"psrCountStatsHigh6": pandas.StringDtype(),
"psrCountStatsHigh7": pandas.StringDtype(),
"psrCountStatsHigh8": pandas.StringDtype(),
"psrCountStatsHigh9": pandas.StringDtype(),
"psrCountStatsHigh10": pandas.StringDtype(),
"psrCountStatsHigh11": pandas.StringDtype(),
"psrCountStatsHigh12": pandas.StringDtype(),
"psrCountStatsHigh13": pandas.StringDtype(),
"psrCountStatsHigh14": pandas.StringDtype(),
"vssSwitchCount": pandas.StringDtype(),
"openBandReadCount": pandas.StringDtype(),
"closedBandReadCount": pandas.StringDtype(),
"minEraseSLC": pandas.StringDtype(),
"maxEraseSLC": | pandas.StringDtype() | pandas.StringDtype |
import numpy as np
import pandas as pd; pd.options.mode.chained_assignment = None
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from scipy.optimize import minimize
from scipy.optimize import least_squares
import os
def is_const(x):
if np.linalg.norm(x - np.mean(x)) < 1e-13 * np.abs(np.mean(x)):
return True
elif np.all( x==x[0]):
return True
else:
return False
def calc_eval_metrics(y, y_hat, y_hat_map=None, d=None, ci=None):
'''
Calculate RMSE, Pearson's correlation.
'''
r = {
'r_p': np.nan,
'r_p_map': np.nan,
'rmse': np.nan,
'rmse_map': np.nan,
'rmse_star_map': np.nan,
}
if is_const(y_hat):
r['r_p'] = np.nan
else:
r['r_p'] = pearsonr(y, y_hat)[0]
r['rmse'] = calc_rmse(y, y_hat)
if y_hat_map is not None:
r['rmse_map'] = calc_rmse(y, y_hat_map, d=d)
r['r_p_map'] = pearsonr(y, y_hat_map)[0]
if ci is not None:
r['rmse_star_map'] = calc_rmse_star(y, y_hat_map, ci, d)[0]
return r
def calc_rmse(y_true, y_pred, d=0):
if d== 0:
rmse = np.sqrt(np.mean(np.square(y_true - y_pred)))
else:
N = y_true.shape[0]
if (N - d) < 1:
rmse = np.nan
else:
rmse = np.sqrt(1 / (N - d) * np.sum(np.square(y_true - y_pred))) # Eq (7-29) P.1401
return rmse
def calc_rmse_star(mos_sub, mos_obj, ci, d):
N = mos_sub.shape[0]
error = mos_sub - mos_obj
if ci[0] == -1:
p_error = np.nan
rmse_star = np.nan
else:
p_error = (abs(error) - ci).clip(min=0) # Eq (7-27) P.1401
if (N - d) < 1:
rmse_star = np.nan
else:
rmse_star = np.sqrt(1 / (N - d) * sum(p_error) ** 2) # Eq (7-29) P.1401
return rmse_star, p_error, error
def calc_mapped(x, b):
N = x.shape[0]
order = b.shape[0] - 1
A = np.zeros([N, order + 1])
for i in range(order + 1):
A[:, i] = x ** (i)
return A @ b
def fit_first_order(y_con, y_con_hat):
A = np.vstack([np.ones(len(y_con_hat)), y_con_hat]).T
b = np.linalg.lstsq(A, y_con, rcond=None)[0]
return b
def fit_second_order(y_con, y_con_hat):
A = np.vstack([np.ones(len(y_con_hat)), y_con_hat, y_con_hat ** 2]).T
b = np.linalg.lstsq(A, y_con, rcond=None)[0]
return b
def fit_third_order(y_con, y_con_hat):
A = np.vstack([np.ones(len(y_con_hat)), y_con_hat, y_con_hat ** 2, y_con_hat ** 3]).T
b = np.linalg.lstsq(A, y_con, rcond=None)[0]
p = np.poly1d(np.flipud(b))
p2 = np.polyder(p)
rr = np.roots(p2)
r = rr[np.imag(rr) == 0]
monotonic = all(np.logical_or(r > max(y_con_hat), r < min(y_con_hat)))
if monotonic == False:
print('Not monotonic!!!')
return b
def fit_monotonic_third_order(
dfile_db,
dcon_db=None,
pred=None,
target_mos=None,
target_ci=None,
mapping=None):
y = dfile_db[target_mos].to_numpy()
y_hat = dfile_db[pred].to_numpy()
if dcon_db is None:
if target_ci in dfile_db:
ci = dfile_db[target_ci].to_numpy()
else:
ci = 0
else:
y_con = dcon_db[target_mos].to_numpy()
if target_ci in dcon_db:
ci = dcon_db[target_ci].to_numpy()
else:
ci = 0
x = y_hat
y_hat_min = min(y_hat) - 0.01
y_hat_max = max(y_hat) + 0.01
def polynomial(p, x):
return p[0] + p[1] * x + p[2] * x ** 2 + p[3] * x ** 3
def constraint_2nd_der(p):
return 2 * p[2] + 6 * p[3] * x
def constraint_1st_der(p):
x = np.arange(y_hat_min, y_hat_max, 0.1)
return p[1] + 2 * p[2] * x + 3 * p[3] * x ** 2
def objective_con(p):
x_map = polynomial(p, x)
dfile_db['x_map'] = x_map
x_map_con = dfile_db.groupby('con').mean().x_map.to_numpy()
err = x_map_con - y_con
if mapping == 'pError':
p_err = (abs(err) - ci).clip(min=0)
return (p_err ** 2).sum()
elif mapping == 'error':
return (err ** 2).sum()
else:
raise NotImplementedError
def objective_file(p):
x_map = polynomial(p, x)
err = x_map - y
if mapping == 'pError':
p_err = (abs(err) - ci).clip(min=0)
return (p_err ** 2).sum()
elif mapping == 'error':
return (err ** 2).sum()
else:
raise NotImplementedError
cons = dict(type='ineq', fun=constraint_1st_der)
if dcon_db is None:
res = minimize(
objective_file,
x0=np.array([0., 1., 0., 0.]),
method='SLSQP',
constraints=cons,
)
else:
res = minimize(
objective_con,
x0=np.array([0., 1., 0., 0.]),
method='SLSQP',
constraints=cons,
)
b = res.x
return b
def calc_mapping(
dfile_db,
mapping=None,
dcon_db=None,
target_mos=None,
target_ci=None,
pred=None,
):
if dcon_db is not None:
y = dcon_db[target_mos].to_numpy()
y_hat = dfile_db.groupby('con').mean().get(pred).to_numpy()
else:
y = dfile_db[target_mos].to_numpy()
y_hat = dfile_db[pred].to_numpy()
if mapping == None:
b = np.array([0, 1, 0, 0])
d_map = 0
elif mapping == 'first_order':
b = fit_first_order(y, y_hat)
d_map = 1
elif mapping == 'second_order':
b = fit_second_order(y, y_hat)
d_map = 3
elif mapping == 'third_order':
b = fit_third_order(y, y_hat)
d_map = 4
elif mapping == 'monotonic_third_order':
b = fit_monotonic_third_order(
dfile_db,
dcon_db=dcon_db,
pred=pred,
target_mos=target_mos,
target_ci=target_ci,
mapping='error',
)
d_map = 4
else:
raise NotImplementedError
return b, d_map
def eval_results(
df,
target_mos='mos',
target_ci='mos_ci',
pred='mos_pred',
mapping=None,
do_print=False
):
'''
Evaluates a trained model on given dataset.
'''
# Loop through databases
db_results_df = []
df['y_hat_map'] = np.nan
# s = df.db.astype("category").cat.categories
for db_name in df.db.astype("category").cat.categories:
df_db = df.loc[df.db == db_name]
# per file -----------------------------------------------------------
y = df_db[target_mos].to_numpy()
if np.isnan(y).any():
r = {'r_p': np.nan, 'r_s': np.nan, 'rmse': np.nan, 'r_p_map': np.nan,
'r_s_map': np.nan, 'rmse_map': np.nan}
else:
y_hat = df_db[pred].to_numpy()
b, d = calc_mapping(
df_db,
mapping=mapping,
target_mos=target_mos,
target_ci=target_ci,
pred=pred
)
y_hat_map = calc_mapped(y_hat, b)
r = calc_eval_metrics(y, y_hat, y_hat_map=y_hat_map, d=d)
r.pop('rmse_star_map')
r = {f'{k}_file': v for k, v in r.items()}
if do_print and (not np.isnan(y).any()):
print('%-30s r_p_file: %0.2f, rmse_file: %0.2f, rmse_map_file: %0.2f'
% (db_name + ':', r['r_p_file'],r['rmse_file'], r['rmse_map_file']))
db_results_df.append({'db': db_name, **r})
# Save individual database results in DataFrame
db_results_df = pd.DataFrame(db_results_df)
r_average = {}
r_average['r_p_mean_file'] = db_results_df.r_p_file.mean()
r_average['rmse_mean_file'] = db_results_df.rmse_file.mean()
r_average['rmse_map_mean_file'] = db_results_df.rmse_map_file.mean()
y = df[target_mos].to_numpy()
y_hat = df[pred].to_numpy()
r_total_file = calc_eval_metrics(y, y_hat)
r_total_file = {'r_p_all': r_total_file['r_p'], 'rmse_all': r_total_file['rmse']}
overall_results = {
**r_total_file,
**r_average
}
return db_results_df, overall_results
def evaluate_mos(csv_mos,csv_mos_pre):
df = | pd.read_csv(csv_mos) | pandas.read_csv |
import pandas as pd
import numpy as np
from tensorflow import keras
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
# instance of the neural network to predit future prices
class Neural_Network:
def neural_network(self, n_df):
df = n_df.copy()
df = df.replace('^\s*$', np.nan, regex=True)
#df['itemId'] = df['itemId'].astype(int)
df['listingType'] = pd.get_dummies(df['listingType'])
df['endPrice'] = df['endPrice'].astype(np.float)
df['shippingServiceCost'] = df['shippingServiceCost'].astype(np.float)
#df['shippingServiceCost'] = df['shippingServiceCost'].interpolate()
df['shippingServiceCost'] = df['shippingServiceCost'].fillna(df['shippingServiceCost'].mean())
df['bidCount'] = df['bidCount'].astype(np.float)
#df['bidCount'] = df['bidCount'].interpolate()
df['bidCount'] = df['bidCount'].fillna(df['bidCount'].mean())
df['watchCount'] = df['watchCount'].astype(np.float)
#df['watchCount'] = df['watchCount'].interpolate()
df['watchCount'] = df['watchCount'].fillna(df['watchCount'].mean())
df['returnsAccepted'] = | pd.get_dummies(df['returnsAccepted']) | pandas.get_dummies |
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = | tm.box_expected(expected, box) | pandas.util.testing.box_expected |
from datetime import datetime, timedelta
import pandas as pd
import pytest
from sklearn.datasets import load_breast_cancer
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import StratifiedKFold, TimeSeriesSplit
from TinyAutoML.builders import buildColumnTransformer, buildMetaPipeline
from TinyAutoML.support.MyTools import (
checkClassBalance,
getAdaptedCrossVal,
isIndexedByTime,
)
iris = load_breast_cancer()
X = pd.DataFrame(data=iris.data, columns=iris.feature_names)
y = iris.target
today = datetime.now()
time_df = pd.DataFrame(
{
"col1": [1, 2, 3],
"col2": [1, 2, 3],
"date": [today, today + timedelta(days=365), today + timedelta(days=700)],
}
).set_index("date")
df = | pd.DataFrame({"col1": [1, 2, 3], "col2": [1, 2, 3]}) | pandas.DataFrame |
# data processing
import numpy as np
import pandas as pd
# machine learning
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import TensorBoard
from keras.wrappers.scikit_learn import KerasClassifier
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
# utils
import time
from datetime import timedelta
verbose=0
file_train='./datasets/titanic_train.csv'
file_test='./datasets/titanic_test.csv'
#define random seed for reproducibility
seed=69
np.random.seed(seed)
#read training data
train_df=pd.read_csv(file_train,index_col='PassengerId')
#show the columns
print(train_df.shape)
print(train_df.head())
#show that there is NaN data ,that needs to be handling during data cleansing
print(train_df.isnull().sum())
def prep_data(df):
#drop unwanted features
df=df.drop(['Name','Ticket','Cabin'],axis=1)
#fill missing data age and fare with the mean, embarked with most frequent value
df[['Age']]=df[['Age']].fillna(value=df[['Age']].mean())
df[['Fare']]=df[['Fare']].fillna(value=df[['Fare']].mean())
df[['Embarked']]=df[['Embarked']].fillna(value=df['Embarked'].value_counts().idxmax())
#convert categorical features into numeric
df['Sex']=df['Sex'].map({'female':1,'male':0}).astype(int)
#convert embarked one-hot
embarked_one_hot=pd.get_dummies(df['Embarked'],prefix='Embarked')
df=df.drop('Embarked',axis=1)
df=df.join(embarked_one_hot)
return df
train_df=prep_data(train_df)
print(train_df.isnull().sum())
#x contains all columns except 'Survived'
X=train_df.drop(['Survived'],axis=1).values.astype(float)
#it is almost always a good idea to perform some scaling of input values using neural network models
scale=StandardScaler()
X=scale.fit_transform(X)
#Y is just the 'Survived' column
Y=train_df['Survived'].values
n_cols=X.shape[1]
def create_model(optimizer='adam',init='uniform'):
if verbose:print("creating model with optimizer: %s; init:%s"%(optimizer,init))
model=Sequential()
model.add(Dense(16,input_shape=(n_cols,),kernel_initializer=init,activation='relu'))
model.add(Dense(8,kernel_initializer=init,activation='relu'))
model.add(Dense(4,kernel_initializer=init,activation='relu'))
model.add(Dense(1,kernel_initializer=init,activation='sigmoid'))
#compile model
model.compile(loss='mean_squared_error',optimizer=optimizer,metrics=['accuracy'])
return model
best_epochs=20
best_batch_size=1
best_init='glorot_uniform'
best_optimizer='rmsprop'
tensorBoard=TensorBoard(log_dir='./titanic/logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
model_pred=create_model()
#model_pred=KerasClassifier(build_fn=create_model,optimizer=best_optimizer,init=best_init,epochs=best_epochs,batch_size=best_batch_size,verbose=verbose)
model_pred.fit(X,Y,epochs=best_epochs,batch_size=best_batch_size,verbose=1,callbacks=[tensorBoard])
test_df= | pd.read_csv(file_test,index_col='PassengerId') | pandas.read_csv |
"""
This module implements methods to collect financial data from Wharton Research Services via the wrds package
"""
import datetime
import json
import re
import sys
import time
import warnings
from typing import Tuple
import numpy as np
import pandas as pd
import wrds
from colorama import Fore, Back, Style
from sklearn.preprocessing import StandardScaler
from config import *
# Configurations for displaying DataFrames
from core.utils import get_index_name, check_directory_for_file, Timer, lookup_multiple
pd.set_option('mode.chained_assignment', None)
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
def retrieve_index_history(index_id: str = None, from_file=False, last_n: int = None,
folder_path: str = '', generate_dict=False) -> pd.DataFrame:
"""
Download complete daily index history and return as Data Frame (no date index)
:return: DataFrame containing full index constituent data over full index history
:rtype: pd.DataFrame
"""
if not from_file:
# Load GVKEYX lookup dict
with open(os.path.join(ROOT_DIR, 'data', 'gvkeyx_name_dict.json'), 'r') as fp:
gvkeyx_lookup = json.load(fp)
# Establish database connection
print('Opening DB connection ...')
db = wrds.Connection(wrds_username='afecker')
print('Done')
# Retrieve list of all stocks (gvkeys) for specified index including full date range of historic index data
gvkey_list, relevant_date_range = get_all_constituents(
constituency_matrix=pd.read_csv(os.path.join(ROOT_DIR, folder_path, 'constituency_matrix.csv'), index_col=0,
header=[0, 1],
parse_dates=True))
# Set start and end date
if last_n:
start_date = str(relevant_date_range[-last_n].date())
else:
start_date = str(relevant_date_range[0].date())
end_date = str(relevant_date_range[-1].date())
# Specify list of companies and start and end date of query
parameters = {'company_codes': tuple(gvkey_list), 'start_date': start_date, 'end_date': end_date}
print('Querying full index history for index %s \n'
'between %s and %s ...' % (gvkeyx_lookup.get(index_id), start_date, end_date))
start_time = time.time()
data = get_data_table(db, sql_query=True,
query_string="select datadate, gvkey, iid, trfd, ajexdi, cshtrd, prccd, divd, conm, curcdd, sedol, exchg, gsubind "
"from comp.g_secd "
"where gvkey in %(company_codes)s and datadate between %(start_date)s and %(end_date)s "
"order by datadate asc",
index_col=['datadate', 'gvkey', 'iid'], table_info=1, params=parameters)
end_time = time.time()
print('Query duration: %g seconds' % (round(end_time - start_time, 2)))
print('Number of observations: %s' % data.shape[0])
print('Number of individual dates: %d' % data.index.get_level_values('datadate').drop_duplicates().size)
# JOB: Add return_index and daily_return columns
data = calculate_daily_return(data, save_to_file=False)
# Reset index
data.reset_index(inplace=True)
# Save to file
data.to_csv(os.path.join(ROOT_DIR, folder_path, 'index_data_constituents.csv'))
else:
data = pd.read_csv(os.path.join(ROOT_DIR, folder_path, 'index_data_constituents.csv'),
dtype={'gvkey': str, 'gsubind': str, 'datadate': str, 'gics_sector': str, 'gsector': str},
parse_dates=False, index_col=False)
data.loc[:, 'datadate'] = | pd.to_datetime(data.loc[:, 'datadate'], infer_datetime_format=True) | pandas.to_datetime |
'''
Group enabled ANPNetwork class and supporting classes.
'''
from pyanp.pairwise import Pairwise
from pyanp.prioritizer import Prioritizer, PriorityType
from pyanp.general import islist, unwrap_list, get_matrix, matrix_as_df
from typing import Union
import pandas as pd
from copy import deepcopy
from pyanp.limitmatrix import normalize, calculus, priority_from_limit
import numpy as np
import re
from pyanp.rating import Rating
class ANPNode:
'''
A node inside a cluster, inside a netowrk. The basic building block of
an ANP netowrk.
:param network: An ANPNetwork object that this node lives inside.
:param cluster: An ANPCluster object that this node lives inside.
:param name: The name of this node.
'''
def __init__(self, network, cluster, name:str):
self.name = name
self.cluster = cluster
self.network = network
self.node_prioritizers = {}
self.subnetwork = None
self.invert = False
def is_node_cluster_connection(self, dest_cluster:str)->bool:
'''
Is this node connected to a cluster.
:param dest_cluster: The name of the cluster
:return: True/False
'''
if dest_cluster in self.node_prioritizers:
return True
else:
return False
def node_connect(self, dest_node)->None:
''''
Make a node connection from this node to dest_node
:param dest_node: The destination node as a str, int, or ANPNode. It
can be a list of nodes, and then we will coonect each node from
this node. The dest_node should be in any format accepted by
ANPNetwork._get_node()
'''
if islist(dest_node):
for dn in dest_node:
self.node_connect(dn)
else:
prioritizer = self.get_node_prioritizer(dest_node, create=True)
prioritizer.add_alt(dest_node, ignore_existing=True)
#Make sure parent clusters are connected
src_cluster = self.cluster
dest_cluster = self.network._get_node_cluster(dest_node)
src_cluster.cluster_connect(dest_cluster)
def get_node_prioritizer(self, dest_node, create=False,
create_class=Pairwise, dest_is_cluster=False)->Prioritizer:
'''
Gets the node prioritizer for the other_node
:param dest_node: The node as a int, str, or ANPNode object.
:return: The prioritizer if it exists, or None
'''
if dest_is_cluster:
dest_cluster = self.network.cluster_obj(dest_node)
dest_name = dest_cluster.name
else:
dest_cluster = self.network._get_node_cluster(dest_node)
dest_name = dest_cluster.name
if dest_name not in self.node_prioritizers:
if create:
prioritizer = create_class()
self.node_prioritizers[dest_name] = prioritizer
return prioritizer
else:
return None
else:
return self.node_prioritizers[dest_name]
def is_node_node_connection(self, dest_node)->bool:
'''
Checks if there is a node connection from this node to dest_node
:param dest_node: The node as a int, str, or ANPNode object.
:return:
'''
pri = self.get_node_prioritizer(dest_node)
if pri is None:
return False
elif not pri.is_alt(dest_node):
return False
else:
return True
def get_unscaled_column(self, username=None)->pd.Series:
'''
Returns the column in the unscaled supermatrix for this node.
:param username: The user/users to do this for. Typical Prioritizer
calculation usage, i.e. None means do for all group average.
:return: A pandas series indexed by the node names.
'''
nnodes = self.network.nnodes()
rval = pd.Series(data=[0.0]*nnodes, index=self.network.node_names())
prioritizer:Prioritizer
for prioritizer in self.node_prioritizers.values():
vals = prioritizer.priority(username, PriorityType.NORMALIZE)
for alt, val in vals.iteritems():
rval[alt] = val
return rval
def data_names(self, append_to=None):
'''
Used when exporting an Excel header for a network, for its data.
:param append_to: If not None, append header strings to this list.
Otherwise we create a new list to append to.
:return: List of strings of comparison name headers. If append_to is not
None, we return append_to with the new string headers appended.
'''
if append_to is None:
append_to = []
pri:Prioritizer
for pri in self.node_prioritizers.values():
pri.data_names(append_to, post_pend="wrt "+self.name)
return append_to
def set_node_prioritizer_type(self, destNode, prioritizer_class):
'''
Sets the node prioritizer type
:param destNode: An ANPNode object, string, or integer location
:param prioritizer_class: The new type
:return: None
'''
pri = self.get_node_prioritizer(destNode, create_class=prioritizer_class)
if not isinstance(pri, prioritizer_class):
#Wrong type, get alts from this one, and create correct one
rval = prioritizer_class()
rval.add_alt(pri.alt_names())
dest_cluster = self.network._get_node_cluster(destNode)
dest_name = dest_cluster.name
self.node_prioritizers[dest_name] = rval
else:
pass
class ANPCluster:
'''
A cluster in an ANP object
:param network: The ANPNetowrk object this cluster is in.
:param name: The name of the cluster to create.
'''
def __init__(self, network, name:str):
self.prioritizer = Pairwise()
self.name = name
self.network = network
# The list of ANP nodes in this cluster
self.nodes = {}
def add_node(self, *nodes)->None:
"""
Adds one or more nodes
:param nodes: A vararg list of node names to add to this cluster.
The names should all be strings.
:return: Nonthing
"""
nodes = unwrap_list(nodes)
if islist(nodes):
for node in nodes:
if isinstance(node, str):
self.add_node(node)
else:
self.nodes[nodes] = ANPNode(self.network, self, nodes)
def nnodes(self)->int:
"""
:return: The number of nodes in this cluster.
"""
return len(self.nodes)
def is_node(self, node_name:str)->bool:
'''
Does a node by that name exist in this cluster
:param node_name: The name of the node to look for
:return: True/False
'''
return node_name in self.nodes
def node_obj(self, node_name):
"""
Get a node in this cluster.
:param node_name: The node as either a string name, integer position, or
simply the ANPObject, in which case there is nothing to do except
return it.
:return: ANPNode object. If it wasn't found, None is returned.
"""
if isinstance(node_name, ANPNode):
return node_name
else:
return get_item(self.nodes, node_name)
def node_names(self)->list:
'''
:return: List of the string names of the nodes in this cluster
'''
return list(self.nodes.keys())
def node_objs(self)->list:
'''
:return: List of the ANPNode objects in this cluster.
'''
return self.nodes.values()
def cluster_connect(self, dest_cluster)->None:
"""
Make a cluster->cluster connection from this node to the destination.
:param dest_cluster: Either the ANPCluster object to connect to, or
the name of the destination cluster.
:return:
"""
if isinstance(dest_cluster, ANPCluster):
dest_cluster_name = dest_cluster.name
else:
dest_cluster_name = dest_cluster
self.prioritizer.add_alt(dest_cluster_name, ignore_existing=True)
def set_prioritizer_type(self, prioritizer_class)->None:
'''
Sets the cluster prioritizer type
:param prioritizer_class: The new type
:return: None
'''
pri = self.prioritizer
if not isinstance(pri, prioritizer_class):
#Wrong type, get alts from this one, and create correct one
rval = prioritizer_class()
rval.add_alt(pri.alt_names())
self.prioritizer = rval
else:
pass
def data_names(self, append_to=None):
'''
Used when exporting an Excel header for a network, for its data.
:param append_to: If not None, append header strings to this list.
Otherwise we create a new list to append to.
:return: List of strings of comparison name headers. If append_to is not
None, we return append_to with the new string headers appended.
'''
if append_to is None:
append_to = []
if self.prioritizer is not None:
self.prioritizer.data_names(append_to, post_pend="wrt "+self.name)
return append_to
def get_item(tbl:dict, key):
"""
Looks up an item in a dictionary by key first, assuming the key is in the
dictionary. Otherwise, it checks if the key is an integer, and returns
the item in that position.
:param tbl: The dictionary to look in
:param key: The key, or integer position to get the item of
:return: The item, or it not found, None
"""
if key in tbl:
return tbl[key]
elif not isinstance(key, int):
return None
# We have an integer key by this point
if key < 0:
return None
elif key >= len(tbl):
return None
else:
count = 0
for rval in tbl.values():
if count == key:
return rval
count+=1
#Should never make it here
raise ValueError("Shouldn't happen in anp.get_item")
__CLEAN_SPACES_RE = re.compile('\\s+')
def clean_name(name:str)->str:
"""
Cleans up a string for usage by:
1. stripping off begging and ending spaces
2. All spaces convert to one space
3. \t and \n are treated like a space
:param name: The string name to be cleaned
:return: The cleaned name.
"""
rval = name.strip()
return __CLEAN_SPACES_RE.sub(string=rval, repl=' ')
def sum_subnetwork_formula(priorities:pd.Series, dict_of_series:dict):
"""
A function that takes the weighted sum of values. Used for synthesis.
:param priorities: Series whose index are the nodes with subnetworks and
values are their weights.
:param dict_of_series: A dictionary whose keys are the same as the keys of
priorities, i.e. the nodes with subnetworks. The values are Series
whose keys are alternative names and values are the synthesized
alternative scores under that subnetwork.
:return:
"""
subpriorities = priorities[dict_of_series.keys()]
if sum(subpriorities) != 0:
subpriorities /= sum(subpriorities)
rval = pd.Series()
counts = pd.Series(dtype=int)
for subnet_name, vals in dict_of_series.items():
priority = subpriorities[subnet_name]
for alt_name, val in vals.iteritems():
if alt_name in rval:
rval[alt_name] += val * priority
counts[alt_name] += priority
else:
rval[alt_name] = val
counts[alt_name] = priority
# Now let's calculate the averages
for alt_name, val in rval.iteritems():
if counts[alt_name] > 0:
rval[alt_name] /= counts[alt_name]
return rval
class ANPNetwork(Prioritizer):
'''
Represents an ANP prioritizer. Has clusters/nodes, comparisons, etc.
:param create_alts_cluster: If True (which is the default) we start with a
cluster that is the alternatives cluster. Otherwise the model starts
empty.
'''
def __init__(self, create_alts_cluster=True):
self.clusters = {}
if create_alts_cluster:
cl = self.add_cluster("Alternatives")
self.alts_cluster = cl
self.users=[]
self.limitcalc = calculus
self.subnet_formula = sum_subnetwork_formula
self.default_priority_type = None
def add_cluster(self, *args)->ANPCluster:
'''
Adds one or more clusters to a network
:param args: Can be either a single string, or a list of strings
:return: ANPCluster object or list of ANPCluster objects
'''
clusters = unwrap_list(args)
if islist(clusters):
rval = []
for cl in clusters:
rval.append(self.add_cluster(cl))
return rval
else:
#Adding a single cluster
cl = ANPCluster(self, clusters)
self.clusters[clusters] = cl
return cl
def cluster_names(self)->list:
'''
:return: List of string names of the clusters
'''
return list(self.clusters.keys())
def nclusters(self)->int:
'''
:return: The number of clusters in the network.
'''
return len(self.clusters)
def cluster_obj(self, cluster_info:Union[ANPCluster, str])->ANPCluster:
'''
Returns the cluster with given information
:param cluster_info: Either the name of the cluster object to get
or the cluster object, or its int position
:return: The ANPCluster object
'''
if isinstance(cluster_info, ANPCluster):
return cluster_info
else:
return get_item(self.clusters, cluster_info)
def add_node(self, cl, *nodes):
'''
Adds nodes to a cluster
:param cl: The cluster name or object
:param nodes: The name or names of the nodes
:return: Nothing
'''
cluster = self.cluster_obj(cl)
cluster.add_node(nodes)
def nnodes(self, cluster=None)->int:
"""
Returns the number of nodes in the network, or a cluster.
:param cluster: If None, we return the number of nodes in the network.
Otherwise this is the integer position, string name, or ANPCluster
object of the cluster to get the node count within.
:return: The count.
"""
if cluster is None:
rval = pd.Series()
for cname, cluster in self.clusters.items():
rval[cname] = cluster.nnodes()
return sum(rval)
else:
clobj = self.cluster_obj(cluster)
return clobj.nnodes()
def add_alt(self, alt_name:str):
"""
Adds an alternative to the model:
1. Adds the altenrative to alts_cluster if not None
2. For each node with a subnetwork, we add the alternative to that subnetwork.
:param alt_name: The name of the alternative to add
:return: Nothing
"""
if self.alts_cluster is not None:
self.add_node(self.alts_cluster, alt_name)
# We should add this alternative to each subnetwork
for node in self.node_objs_with_subnet():
node.subnetwork.add_alt(alt_name)
def is_user(self, uname)->bool:
'''
Checks if a user exists
:param uname: The name of the user to check for
:return: bool
'''
return uname in self.users
def is_alt(self, altname)->bool:
'''
Checks if an alternative exists
:param altname: The alterantive name to look for
:return: bool
'''
return self.alts_cluster.is_node(altname)
def add_user(self, uname, ignore_dupe=False):
'''
Adds a user to the system
:param uname: The name of the new user
:return: Nothing
:raise ValueError If the user already existed
'''
if islist(uname):
for un in uname:
self.add_user(un, ignore_dupe=ignore_dupe)
return
if self.is_user(uname):
if not ignore_dupe:
raise ValueError("User by the name "+uname+" already existed")
else:
return
self.users.append(uname)
def nusers(self)->int:
'''
:return: The number of users
'''
return len(self.users)
def user_names(self)->list:
'''
:return: List of names of the users
'''
return deepcopy(self.users)
def node_obj(self, node_name)->ANPNode:
'''
Gets the ANPNode object of the node with the given name
:param node_name: The name of the node to get, or it's overall integer
position, or the ANPNode object itself
:return: The ANPNode if it exists, or None
'''
if isinstance(node_name, ANPNode):
return node_name
elif isinstance(node_name, int):
#Reference by integer
node_pos = node_name
node_count = 0
for cluster in self.clusters.values():
rel_pos = node_pos - node_count
if rel_pos < cluster.nnodes():
return cluster.node_obj(rel_pos)
#If we make it here, we were out of bounds
return None
#Okay handle string node name
cluster: ANPCluster
for cname, cluster in self.clusters.items():
rval = cluster.node_obj(node_name)
if rval is not None:
return rval
#Made it here, the node didn't exist
return None
def _get_node_cluster(self, node)->ANPCluster:
'''
Gets the ANPCluster object a node lives in
:param node: The name/integer positions, or ANPNode object itself. See
node_obj() method for more details.
:return: The ANPCluster object this node lives in, or None if it doesn't
exist.
'''
n = self.node_obj(node)
if n is None:
# Could not find the node
return None
return n.cluster
def node_connect(self, src_node, dest_node):
'''
connects 2 nodes
:param src_node: Source node as prescribed by node_object() function
:param dest_node: Destination node as prescribed by node_object() function
:return: Nothing
'''
src = self.node_obj(src_node)
src.node_connect(dest_node)
def node_names(self, cluster=None)->list:
'''
Returns a list of nodes in this network, organized by cluster
:param cluster: If None, we get all nodes in network, else we get nodes
in that cluster, otherwise format as specified by cluster_obj() function.
:return: List of strs of node names
'''
if cluster is not None:
cl = self.cluster_obj(cluster)
return cl.node_names()
rval = []
cl:ANPCluster
for cl in self.clusters.values():
cnodes = cl.node_names()
for name in cnodes:
rval.append(name)
return rval
def node_objs(self)->list:
'''
Returns a list of ANPNodes in this network, organized by cluster
:return: List of strs of node names
'''
rval = []
cl:ANPCluster
for cl in self.clusters.values():
cnodes = cl.node_objs()
for name in cnodes:
rval.append(name)
return rval
def cluster_objs(self)->list:
"""
:return: List of ANPCluster objects in the network
"""
return list(self.clusters.values())
def node_connections(self)->np.ndarray:
"""
Returns the node conneciton matrix for this network.
:return: A numpy array of shape [nnode, nnodes] where item [row, col]
1 means there is a node connection from col -> row, and 0 means
no connection.
"""
nnodes = self.nnodes()
nnames = self.node_names()
rval = np.zeros([nnodes, nnodes])
src_node:ANPNode
for src in range(nnodes):
srcname = nnames[src]
src_node = self.node_obj(srcname)
for dest in range(nnodes):
dest_name = nnames[dest]
if src_node.is_node_node_connection(dest_name):
rval[dest,src]=1
return rval
def unscaled_supermatrix(self, username=None, as_df=False)->np.array:
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param as_df: If True, returns as a dataframe with index and column
names as the names of the nodes in the network. Otherwise just
returns the array.
:return: The unscaled supermatrix as a numpy.array of shape [nnode, nnodes]
'''
nnodes = self.nnodes()
rval = np.zeros([nnodes, nnodes])
nodes = self.node_objs()
col = 0
node:ANPNode
for node in nodes:
rval[:,col] = node.get_unscaled_column(username)
col += 1
if not as_df:
return rval
else:
return matrix_as_df(rval, self.node_names())
def scaled_supermatrix(self, username=None, as_df=False)->np.ndarray:
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param as_df: If True, returns as a dataframe with index and column
names as the names of the nodes in the network. Otherwise just
returns the array.
:return: The scaled supermatrix
'''
rval = self.unscaled_supermatrix(username)
# Now I need to normalized by cluster weights
clusters = self.cluster_objs()
nclusters = len(clusters)
col = 0
for col_cp in range(nclusters):
col_cluster:ANPCluster = clusters[col_cp]
row_nnodes = col_cluster.nnodes()
cluster_pris = col_cluster.prioritizer.priority(username, PriorityType.NORMALIZE)
row_offset = 0
for col_node in col_cluster.node_objs():
row=0
for row_cp in range(nclusters):
row_cluster:ANPCluster = clusters[row_cp]
row_cluster_name = row_cluster.name
if row_cluster_name in cluster_pris:
priority = cluster_pris[row_cluster_name]
else:
priority = 0
for row_node in row_cluster.node_objs():
rval[row, col] *= priority
row += 1
col += 1
normalize(rval, inplace=True)
if not as_df:
return rval
else:
return matrix_as_df(rval, self.node_names())
def global_priority(self, username=None)->pd.Series:
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:return: The global priorities Series, index by node name
'''
lm = self.limit_matrix(username)
rval = priority_from_limit(lm)
node_names = self.node_names()
return pd.Series(data=rval, index=node_names)
def global_priority_df(self, user_infos=None)->pd.DataFrame:
'''
:param user_infos: A list of users to do this for, if None is a part
of this list, it means group average. If None, it defaults to
None plus all users.
:return: The global priorities dataframe. Rows are the nodes and
columns are the users. The first user/column is the Group Average
'''
if user_infos is None:
user_infos = list(self.user_names())
user_infos.insert(0, None)
rval = pd.DataFrame()
for user in user_infos:
if user is None:
uname = "Group Average"
else:
uname = user
rval[uname] = self.global_priority(user)
return rval
def limit_matrix(self, username=None, as_df=False):
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param as_df: If True, returns as a dataframe with index and column
names as the names of the nodes in the network. Otherwise just
returns the array.
:return: The limit supermatrix
'''
sm = self.scaled_supermatrix(username)
rval = self.limitcalc(sm)
if not as_df:
return rval
else:
return matrix_as_df(rval, self.node_names())
def alt_names(self)->list:
'''
:return: List of alt names in this ANP model
'''
if self.has_subnet():
# We have some v1 subnetworks, we get alternative names by looking
# there.
rval = []
node: ANPNode
for node in self.node_objs_with_subnet():
alts = node.subnetwork.alt_names()
for alt in alts:
if alt not in rval:
rval.append(alt)
return rval
else:
return self.alts_cluster.node_names()
def priority(self, username=None, ptype:PriorityType=None)->pd.Series:
'''
Synthesize and return the alternative scores
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param ptype: The priority type to use
:return: A pandas.Series indexed on alt names, values are the score
'''
if ptype is None:
# Use the default priority type for this network
ptype = self.default_priority_type
if self.has_subnet():
# Need to synthesize using subnetworks
return self.subnet_synthesize(username=username, ptype=ptype)
else:
gp = self.global_priority(username)
alt_names = self.alt_names()
rval = gp[alt_names]
if sum(rval) != 0:
rval /= sum(rval)
if ptype is not None:
rval = ptype.apply(rval)
return rval
def data_names(self):
'''
Returns the column headers needed to fill in the data for this model
:return: A list of strings that would be usable in excel for parsing
headers
'''
node:ANPNode
rval = []
cluster: ANPCluster
for cluster in self.cluster_objs():
cluster.data_names(rval)
for node in self.node_objs():
node.data_names(rval)
return rval
def node_connection_matrix(self, new_mat:np.ndarray=None):
'''
Returns the current node conneciton matrix if new_mat is None.
Otherwise, for each item [row, col] in the matrix with a value of 1
we connect from node[row] to node[col].
:param new_mat: The new node connection matrix. If None, we return
the current one.
:return: Current connection matrix.
'''
src_node:ANPNode
nnodes = self.nnodes()
nodes = self.node_objs()
node_names = self.node_names()
if new_mat is not None:
for src_node_pos in range(nnodes):
src_node = nodes[src_node_pos]
for dest_node_pos in range(nnodes):
if new_mat[dest_node_pos, src_node_pos] != 0:
src_node.node_connect(node_names[dest_node_pos])
rval = np.zeros([nnodes, nnodes])
for src_node_pos in range(nnodes):
src_node = nodes[src_node_pos]
for dest_node_pos in range(nnodes):
if src_node.is_node_node_connection(node_names[dest_node_pos]):
rval[dest_node_pos, src_node_pos] = 1
return rval
def import_pw_series(self, series:pd.Series)->None:
'''
Takes in a well titled series of data, and pushes it into the right
node's prioritizer (or cluster).
The name should be A vs B wrt C, where A, B, C are node or cluster names.
:param series: The series of data for each user. Index is usernames.
Values are the votes.
:return: Nothing
'''
name = series.name
name = clean_name(name)
info = name.split(' wrt ')
if len(info) < 2:
# We cannot do anything with this, we need a wrt
raise ValueError("No wrt in "+name)
wrt = info[1].strip()
wrtNode:ANPNode
wrtNode = self.node_obj(wrt)
info = info[0].split( ' vs ')
if len(info) < 2:
raise ValueError(" vs was not present in "+name)
row, col = info
rowNode = self.node_obj(row)
colNode = self.node_obj(col)
npri: Pairwise
if (wrtNode is not None) and (rowNode is not None) and (colNode is not None):
# Node pairwise
npri = wrtNode.get_node_prioritizer(rowNode, create=True)
#print("Node comparison "+name)
if not isinstance(npri, Pairwise):
raise ValueError("Node prioritizer was not pairwise")
npri.vote_series(series, row, col, createUnknownUser=True)
self.add_user(series.index, ignore_dupe=True)
else:
# Try cluster pairwise
wrtcluster = self.cluster_obj(wrt)
rowcluster = self.cluster_obj(row)
colcluster = self.cluster_obj(col)
if wrtcluster is None:
raise ValueError("wrt="+wrt+" was not a cluster, and the group was not a node comparison")
if rowcluster is None:
raise ValueError("row="+row+" was not a cluster, and the group was not a node comparison")
if colcluster is None:
raise ValueError("col="+col+" was not a cluster, and the group was not a node comparison")
npri = self.cluster_prioritizer(wrtcluster)
npri.vote_series(series, row, col, createUnknownUser=True)
self.add_user(series.index, ignore_dupe=True)
#print("Cluster comparison "+name)
def set_alts_cluster(self, new_cluster):
'''
Sets the new alternatives cluster
:param new_cluster: Cluster specified as cluster_obj() expects.
:return: Nothing
'''
cl = self.cluster_obj(new_cluster)
self.alts_cluster = cl
def import_rating_series(self, series:pd.Series):
'''
Takes in a well titled series of data, and pushes it into the right
node's prioritizer as ratings (or cluster).
Title should be A wrt B, where A and B are either both node names or
both column names.
:param series: The series of data for each user. Index is usernames.
Values are the votes.
:return: Nothing
'''
name = series.name
name = clean_name(name)
info = name.split(' wrt ')
if len(info) < 2:
# We cannot do anything with this, we need a wrt
raise ValueError("No wrt in "+name)
wrt = info[1].strip()
dest = info[0].strip()
wrtNode:ANPNode
destNode:ANPNode
wrtNode = self.node_obj(wrt)
destNode = self.node_obj(dest)
npri:Rating
if (wrtNode is not None) and (destNode is not None):
# Node ratings
npri = wrtNode.get_node_prioritizer(destNode, create=True, create_class=Rating)
if not isinstance(npri, Rating):
wrtNode.set_node_prioritizer_type(destNode, Rating)
npri = wrtNode.get_node_prioritizer(destNode, create=True)
npri.vote_column(votes=series, alt_name=dest, createUnknownUsers=True)
else:
# Trying cluster ratings
wrtcluster = self.cluster_obj(wrt)
destcluster = self.cluster_obj(dest)
if wrtcluster is None:
raise ValueError("Ratings: wrt is not a cluster wrt="+wrt+" and wasn't a node either")
if destcluster is None:
raise ValueError("Ratings: dest is not a cluster dest="+dest+" and wasn't a node either")
npri = wrtcluster.prioritizer
if not isinstance(npri, Rating):
wrtcluster.set_prioritizer_type(Rating)
npri = wrtcluster.prioritizer
npri.vote_column(votes=series, alt_name=dest, createUnknownUsers=True)
def node_prioritizer(self, wrtnode=None, cluster=None):
'''
Gets the prioritizer for node->cluster connection
:param wrtnode: The node as understood by node_obj() function.
:param cluster: Cluster as understood by cluster_obj() function.
:return: If both wrtnode and cluster are specified, a single node prioritizer
is returned for that comparison (or None if there was nothing there).
Otherwise it returns a dictionary indexed by [wrtnode, cluster] and
whose values are the prioritizers for that (only the non-None ones).
'''
if wrtnode is not None and cluster is not None:
node = self.node_obj(wrtnode)
cl_obj = self.cluster_obj(cluster)
cluster_name = cl_obj.name
return node.get_node_prioritizer(dest_node=cluster_name, dest_is_cluster=True)
elif wrtnode is not None:
# Have wrtnode, do not have cluster
rval = {}
for cluster in self.cluster_names():
pri = self.node_prioritizer(wrtnode, cluster)
if pri is not None:
rval[(wrtnode, cluster)] = pri
return rval
elif cluster is not None:
# Have cluster, but not wrtnode
rval = {}
for wrtnode in self.node_names():
pri = self.node_prioritizer(wrtnode, cluster)
if pri is not None:
rval[(wrtnode, cluster)] = pri
return rval
else:
# Both wrtnode and cluster are none, want all
rval = {}
for wrtnode in self.node_names():
for cluster in self.cluster_names():
pri = self.node_prioritizer(wrtnode, cluster)
if pri is not None:
rval[(wrtnode, cluster)] = pri
return rval
def subnet(self, wrtnode):
'''
Makes wrtnode have a subnetwork if it did not already.
:param wrtnode: The node to give a subnetwork to, or get the subnetwork
of. Node specified as node_obj() function expects.
:return: The ANPNetwork that is the subnet of this node
'''
node = self.node_obj(wrtnode)
if node.subnetwork is not None:
return node.subnetwork
else:
rval = ANPNetwork(create_alts_cluster=False)
node.subnetwork = rval
rval.default_priority_type = PriorityType.IDEALIZE
return rval
def node_invert(self, node, value=None):
'''
Either sets, or tells if a node is inverted
:param node: The node to do this on, as expected by node_obj() function
:param value: If None, we return the boolean about if this node is
inverted. Otherwise specifies the new value.
:return: T/F if value=None, telling if the node is inverted. Otherwise
returns nothing.
'''
node = self.node_obj(node)
if value is None:
return node.invert
else:
node.invert = value
def has_subnet(self)->bool:
'''
:return: True/False telling if some node had a subentwork
'''
for node in self.node_objs():
if node.subnetwork is not None:
return True
return False
def subnet_synthesize(self, username=None, ptype:PriorityType=None):
'''
Does the standard V1 subnetowrk synthesis.
:param username: The user/users to synthesize for. If None, we group
synthesize across all. If a single user, we sythesize for that user
across all. If it is a list, we synthesize for the group that is that
list of users.
:return: Nothing
'''
# First we need our global priorities
pris = self.global_priority(username)
# Next we need the alternative priorities from each subnetwork
subnets = {}
node:ANPNode
for node in self.node_objs_with_subnet():
p = node.subnetwork.priority(username, ptype)
if node.invert:
p = self.invert_priority(p)
subnets[node.name]=p
rval = self.synthesize_combine(pris, subnets)
if ptype is not None:
rval = ptype.apply(rval)
return rval
def node_objs_with_subnet(self):
"""
:return: List of ANPNode objects in this network that have v1 subnets
"""
return [node for node in self.node_objs() if node.subnetwork is not None]
def invert_priority(self, p):
"""
Makes a copy of the list like element p, and inverts. The current
standard inversion is 1-p. There could be others implemented later.
:param p: The list like to invert
:return: New list-like of same type as p, with inverted priorities
"""
rval = deepcopy(p)
for i in range(len(p)):
rval[i] = 1 - rval[i]
return rval
def synthesize_combine(self, priorities:pd.Series, alt_scores:dict):
"""
Performs the actual sythesis step from anp v1 synthesis.
:param priorities: Priorities of the subnetworks
:param alt_scores: Alt scores as dictionary, keys are subnetwork names
values are Series whose keys are alt names.
:return: Series whose keys are alt names, and whose values are the
synthesized scores.
"""
return self.subnet_formula(priorities, alt_scores)
def cluster_prioritizer(self, wrtcluster=None):
"""
Gets the prioritizer for the clusters wrt a given cluster.
:param wrtcluster: WRT cluster identifier as expected by cluster_obj() function.
If None, then we return a dictionary indexed by cluster names and values
are the prioritizers
:return: THe prioritizer for that cluster, or a dictionary of all cluster
prioritizers
"""
if wrtcluster is not None:
cluster = self.cluster_obj(wrtcluster)
return cluster.prioritizer
else:
rval = {}
for cluster in self.cluster_objs():
rval[cluster.name] = cluster.prioritizer
return rval
def to_excel(self, fname):
struct = pd.DataFrame()
cluster:ANPCluster
writer = pd.ExcelWriter(fname, engine='openpyxl')
for cluster in self.cluster_objs():
cluster_name = cluster.name
if cluster == self.alts_cluster:
cluster_name = "*"+str(cluster_name)
struct[cluster_name] = cluster.node_names()
struct.to_excel(writer, sheet_name="struct", index=False)
# Now the node connections
mat = self.node_connection_matrix()
pd.DataFrame(mat).to_excel(writer, sheet_name="connection", index=False, header=False)
# Lastly let's write just the comparison structure
cmp = self.data_names()
pd.DataFrame({"":cmp}).to_excel(writer, sheet_name="votes", index=False, header=True)
writer.save()
writer.close()
def cluster_incon_std_df(self, user_infos=None) -> pd.DataFrame:
"""
:param user_infos: A list of users to do this for, if None is a part
of this list, it means group average. If None, it defaults to
None plus all users.
:return: DataFrame whose columns are clusters, rows
are users (as controlled by user_infos params) and the value is
the inconsistency for the given user on the given comparison.
"""
if user_infos is None:
user_infos = list(self.user_names())
user_infos.insert(0, None)
rval = pd.DataFrame()
# We need the name for the group (i.e. None) to be something useful)
for cluster, pw in self.cluster_prioritizer().items():
if isinstance(pw, Pairwise):
incon = [pw.incon_std(user) for user in user_infos]
rval[cluster] = pd.Series(incon, index=user_infos)
if None in rval.index:
rval = rval.rename(
lambda x: x if x is not None else "Group Average")
return rval
def node_incon_std_df(self, user_infos=None)->pd.DataFrame:
"""
:param user_infos: A list of users to do this for, if None is a part
of this list, it means group average. If None, it defaults to
None plus all users.
:return: DataFrame whose columns are (node,cluster) pairs, rows
are users (as controlled by user_infos params) and the value is
the inconsistency for the given user on the given comparison.
"""
if user_infos is None:
user_infos = list(self.user_names())
user_infos.insert(0, None)
rval = pd.DataFrame()
# We need the name for the group (i.e. None) to be something useful)
for info, pw in self.node_prioritizer().items():
if isinstance(pw, Pairwise):
incon = [pw.incon_std(user) for user in user_infos]
rval[info] = pd.Series(incon, index=user_infos)
if None in rval.index:
rval = rval.rename(lambda x: x if x is not None else "Group Average")
return rval
def set_pairwise_from_supermatrix(self, mat, username="Imported"):
"""
Sets up all pairwise comparisons from supermatrix
:param mat: As numpy array
:return: Nothing
"""
node_names = self.node_names()
nnodes = len(node_names)
## Handle node pairwise comparisons first
for wrtnode_pos in range(nnodes):
wrtnode = node_names[wrtnode_pos]
offset=0
cluster_offsets = []
for cluster in self.cluster_names():
cluster_nodes = self.node_names(cluster)
npri:Pairwise
npri = self.node_prioritizer(wrtnode, cluster)
if npri is not None and isinstance(npri, Pairwise):
nclusternodes=len(cluster_nodes)
for node_row_pos in range(nclusternodes):
for node_col_pos in range(node_row_pos+1, nclusternodes):
rownode = cluster_nodes[node_row_pos]
colnode = cluster_nodes[node_col_pos]
vr = mat[offset+node_row_pos, wrtnode_pos]
vc = mat[offset+node_col_pos, wrtnode_pos]
#print("wrt="+wrtnode+" "+str(vr)+", "+str(vc)+": "+rownode+", "+colnode)
if vr!=0 and vc!= 0:
val = vr/vc
npri.vote(username, rownode, colnode, val, createUnknownUser=True)
cluster_offsets.append(range(offset, offset+len(cluster_nodes)))
offset+=len(cluster_nodes)
## Handle cluster pairwise comparisons now
cluster_names = self.cluster_names()
nclusters = len(cluster_names)
for wrt_cluster_pos in range(nclusters):
node_range = cluster_offsets[wrt_cluster_pos]
matrix_cols:np.ndarray
matrix_cols = mat[:,node_range]
avg_cols = matrix_cols.mean(axis=1)
cluster_pris = np.array([0.0]*nclusters)
for other_cluster_pos in range(nclusters):
cluster_pris[other_cluster_pos]=0
for node_pos in cluster_offsets[other_cluster_pos]:
cluster_pris[other_cluster_pos]+=avg_cols[node_pos]
#Now we have cluster priorities, now we can compare
cpri:Pairwise
cpri = self.cluster_obj(wrt_cluster_pos).prioritizer
for row_cluster_pos in range(nclusters):
for col_cluster_pos in range(row_cluster_pos+1, nclusters):
rowcluster = cluster_names[row_cluster_pos]
colcluster = cluster_names[col_cluster_pos]
vr = cluster_pris[row_cluster_pos]
vc = cluster_pris[col_cluster_pos]
if vr!=0 and vc!=0:
val = vr/vc
cpri.vote(username, rowcluster, colcluster, val, createUnknownUser=True)
def unscaled_structurematrix(self, username=None, as_df=False, add_self_connections=False):
rval = self.unscaled_supermatrix(username=username)
for row in rval:
for i in range(len(row)):
if row[i] != 0:
row[i] = 1
if add_self_connections:
for i in range(len(rval)):
row = rval[i]
if len(row) > i:
row[i] = 1
return rval
def scaled_structurematrix(self, username=None, as_df=False):
rval = self.unscaled_structurematrix(username=username, as_df=False)
normalize(rval, inplace=True)
return self._node_matrix_as_df(rval, as_df)
def limit_structurematrix(self, username=None, as_df=False):
rval = self.scaled_structurematrix(username=username, as_df=as_df)
rval = self.limitcalc(rval)
return self._node_matrix_as_df(rval, as_df)
def structure_global_priority(self, username=None):
lm = self.limit_structurematrix(username)
rval = priority_from_limit(lm)
node_names = self.node_names()
return pd.Series(data=rval, index=node_names)
def _node_matrix_as_df(self, matrix, as_df=False):
if not as_df:
return matrix
else:
return matrix_as_df(matrix, self.node_names())
def structure_priority(self, username=None, ptype:PriorityType=None, alt_names=None)->pd.Series:
'''
'''
if ptype is None:
# Use the default priority type for this network
ptype = self.default_priority_type
gp = self.structure_global_priority(username)
if alt_names is None:
alt_names = self.alt_names()
rval = gp[alt_names]
if sum(rval) != 0:
rval /= sum(rval)
if ptype is not None:
rval = ptype.apply(rval)
return rval
def structure_cluster_priority(self, username=None, ptype:PriorityType=None, mean=False)->pd.Series:
gp = self.structure_global_priority(username)
cluster_names = self.cluster_names()
nclusters = self.nclusters()
rval = pd.Series(data=[0.0]*nclusters, index=cluster_names)
for cluster in cluster_names:
count=0
for node in self.node_names(cluster):
rval[cluster]+=gp[node]
count+=1
if mean and count > 0:
rval[cluster]/=count
return rval
__PW_COL_REGEX = re.compile('\\s+vs\\s+.+\\s+wrt\\s+')
def is_pw_col_name(col:str)->bool:
"""
Checks to see if the name matches the naming convention for a pairwise
comparison, i.e. A vs B wrt C
:param col: The title of the column to check
:return: T/F
"""
if col is None:
return False
elif isinstance(col, (float, int)) and np.isnan(col):
return False
else:
return __PW_COL_REGEX.search(col) is not None
__RATING_COL_REGEX = re.compile('\\s+wrt\\s+')
def is_rating_col_name(col:str)->bool:
"""
Checks to see if the name matches the naming convention for a rating
column of data, i.e. A wrt B
:param col: The name of the column
:return: T/F
"""
if col is None:
return False
elif isinstance(col, (float, int)) and np.isnan(col):
return False
elif is_pw_col_name(col):
return False
else:
return __RATING_COL_REGEX.search(col) is not None
def anp_manual_scales_from_excel(anp:ANPNetwork, excel_fname):
"""
Parses manual rating scales from an Excel file
:param anp: The model to put the scale values in.
:param excel_fname: The string file name of the excel file with the data
:return: Nothing
"""
xl = | pd.ExcelFile(excel_fname) | pandas.ExcelFile |
import os
import tempfile
import numpy as np
import pandas as pd
from os import path
from os.path import dirname
from autosubsync import find_transform
from autosubsync import quality_of_fit
from autosubsync import features, model
from autosubsync.preprocessing import import_sound
from sklearn.model_selection import train_test_split
def load_features():
basepath = dirname(path.dirname(__file__))
labels = ['1', '0']
all_x = []
all_y = []
for labelVal in labels:
training_audio__dir_path = os.path.join(basepath, 'audioFiles', labelVal)
for filename in os.listdir(training_audio__dir_path):
if filename.endswith(".flac") or filename.endswith(".wav"):
input_audio_file = os.path.join(training_audio__dir_path, filename)
sound_data = import_sound(input_audio_file)
training_x = features.compute_train_features(sound_data)
training_y = np.full(shape=(len(training_x), 1), fill_value=labelVal, dtype=np.int)
training_y = np.hstack(training_y)
all_x.append(training_x)
all_y.append(training_y)
all_x = np.vstack(all_x)
all_y = np.hstack(all_y)
# np.save('spleeter_features_file_X_0519.npy', all_x)
# np.save('spleeter_features_file_Y_0519.npy', all_y)
return all_x, all_y
def cv_split_by_file(data_meta, data_x):
files = np.unique(data_meta.file_number)
np.random.shuffle(files)
n_train = int(round(len(files)*0.5))
train_files = files[:n_train]
print(train_files)
train_cols = data_meta.file_number.isin(train_files)
test_cols = ~train_cols
return data_meta[train_cols], data_x[train_cols,:], data_meta[test_cols], data_x[test_cols,:]
def validate_speech_detection(result_meta):
print('---- speech detection accuracy ----')
r = result_meta.groupby('file_number').agg('mean')
print(r)
from sklearn.metrics import roc_auc_score
print('AUC-ROC:', roc_auc_score(result_meta.label, result_meta.predicted_score))
return r
def test_correct_sync(result_meta, bias=0):
print('---- synchronization accuracy ----')
results = []
for unique_label in np.unique(result_meta.label):
part = result_meta[result_meta.label == unique_label]
skew, shift, quality = find_transform.find_transform_parameters(part.label, part.predicted_score, bias=bias)
skew_error = skew != 1.0
results.append([skew_error, shift, quality])
sync_results = pd.DataFrame(np.array(results), columns=['skew_error', 'shift_error', 'quality'])
print(sync_results)
print('skew errors:', sync_results.skew_error.sum())
print('shift RMSE:', np.sqrt(np.mean(sync_results.shift_error**2)))
return sync_results
if __name__ == '__main__':
# data_x, data_y = load_features()
data_x = np.load('spleeter_features_file_X_0519.npy', allow_pickle=True)
data_y = np.load('spleeter_features_file_Y_0519.npy', allow_pickle=True)
print('loaded training features of size', data_x.shape)
n_folds = 4
np.random.seed(1)
sync_results = []
for i in range(n_folds):
print('### Cross-validation fold %d/%d' % (i+1, n_folds))
X_train, X_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.20, random_state=42)
print('Training...', X_train.shape)
trained_model = model.train_with_spleeter_output(X_train, y_train)
# save some memory
del X_train
del y_train
# test serialization
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, 'model.bin')
print('testing serialization in temp file', tmp_file)
model.save(trained_model, tmp_file)
trained_model = model.load(tmp_file)
print('Validating...')
predicted_score = model.predict(trained_model, X_test)
predicted_label = np.round(predicted_score)
correct = predicted_label == y_test
result_meta = pd.DataFrame(np.array([y_test, predicted_score, predicted_label, correct]).T,
columns=['label', 'predicted_score', 'predicted_label', 'correct'])
result_meta['label'] = result_meta['label'].astype(float)
from sklearn.metrics import roc_auc_score
print('AUC-ROC:', roc_auc_score(y_test, predicted_label))
bias = trained_model[1]
r = result_meta.groupby('label').agg('mean')
sync_r = test_correct_sync(result_meta, bias)
sync_results.append(sync_r.assign(speech_detection_accuracy=list(r.correct)))
sync_results = | pd.concat(sync_results) | pandas.concat |
# pylint: disable=E1101
from datetime import datetime, timedelta
from pandas.compat import range, lrange, zip, product
import numpy as np
from pandas import Series, TimeSeries, DataFrame, Panel, isnull, notnull, Timestamp
from pandas.tseries.index import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
import pandas.tseries.offsets as offsets
import pandas as pd
import unittest
import nose
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
bday = BDay()
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest
class TestResample(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
self.assertEquals(g.ngroups, 2593)
self.assert_(notnull(g.mean()).all())
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
# GH2763 - return in put dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
self.assertEquals(len(r.columns), 10)
self.assertEquals(len(r.index), 2593)
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range('1/1/2000', periods=4, freq='5min'))
assert_series_equal(result, expected)
self.assert_(result.index.name == 'index')
result = s.resample('5min', how='mean', closed='left', label='right')
expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range('1/1/2000 00:05', periods=3,
freq='5min'))
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min', how='last')
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun', how='last')
self.assertEquals(len(result), 3)
self.assert_((result.index.dayofweek == [6, 6, 6]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/9/2005'])
self.assertEquals(result.irow(2), s.irow(-1))
result = s.resample('W-MON', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [0, 0]).all())
self.assertEquals(result.irow(0), s['1/3/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-TUE', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [1, 1]).all())
self.assertEquals(result.irow(0), s['1/4/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-WED', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [2, 2]).all())
self.assertEquals(result.irow(0), s['1/5/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-THU', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [3, 3]).all())
self.assertEquals(result.irow(0), s['1/6/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-FRI', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [4, 4]).all())
self.assertEquals(result.irow(0), s['1/7/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
# to biz day
result = s.resample('B', how='last')
self.assertEquals(len(result), 7)
self.assert_((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/3/2005'])
self.assertEquals(result.irow(5), s['1/9/2005'])
self.assert_(result.index.name == 'index')
def test_resample_frame_basic(self):
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A')
assert_series_equal(result['A'], df['A'].resample('A'))
result = df.resample('M')
assert_series_equal(result['A'], df['A'].resample('M'))
df.resample('M', kind='period')
df.resample('W-WED', kind='period')
def test_resample_loffset(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right',
loffset=timedelta(minutes=1))
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset='1min')
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset=Minute(1))
assert_series_equal(result, expected)
self.assert_(result.index.freq == Minute(5))
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun', how='last')
expected = ser.resample('w-sun', how='last', loffset=-bday)
self.assertEqual(result.index[0] - bday, expected.index[0])
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min', fill_method='pad')
self.assertEquals(len(result), 12961)
self.assertEquals(result[0], s[0])
self.assertEquals(result[-1], s[-1])
self.assert_(result.index.name == 'index')
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t', fill_method='ffill', limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(self):
s = self.series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min', how='ohlc')
self.assertEquals(len(result), len(expect))
self.assertEquals(len(result.columns), 4)
xs = result.irow(-2)
self.assertEquals(xs['open'], s[-6])
self.assertEquals(xs['high'], s[-6:-1].max())
self.assertEquals(xs['low'], s[-6:-1].min())
self.assertEquals(xs['close'], s[-2])
xs = result.irow(0)
self.assertEquals(xs['open'], s[0])
self.assertEquals(xs['high'], s[:5].max())
self.assertEquals(xs['low'], s[:5].min())
self.assertEquals(xs['close'], s[4])
def test_resample_ohlc_dataframe(self):
df = (pd.DataFrame({'PRICE': {Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex_axis(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H', how='ohlc')
exp = pd.concat([df['VOLUME'].resample('H', how='ohlc'),
df['PRICE'].resample('H', how='ohlc')],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H', how='ohlc')
exp.columns = pd.MultiIndex.from_tuples([('a', 'c', 'open'), ('a', 'c', 'high'),
('a', 'c', 'low'), ('a', 'c', 'close'), ('b', 'd', 'open'),
('b', 'd', 'high'), ('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4,12),index=[2000,2000,2000,2000],columns=[ Period(year=2000,month=i+1,freq='M') for i in range(12) ])
df.iloc[3,:] = np.nan
result = df.resample('Q',axis=1)
expected = df.groupby(lambda x: int((x.month-1)/3),axis=1).mean()
expected.columns = [ Period(year=2000,quarter=i+1,freq='Q') for i in range(4) ]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right')
result = bs.resample('8H')
self.assertEquals(len(result), 22)
tm.assert_isinstance(result.index.freq, offsets.DateOffset)
self.assert_(result.index.freq == offsets.Hour(8))
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period')
expected = ts.resample('A-DEC')
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period')
expected = ts.resample('A-JUN')
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
r"""Submodule frequentist_statistics.py includes the following functions: <br>
- **normal_check():** compare the distribution of numeric variables to a normal distribution using the
Kolmogrov-Smirnov test <br>
- **correlation_analysis():** Run correlations for numerical features and return output in different formats <br>
- **correlations_as_sample_increases():** Run correlations for subparts of the data to check robustness <br>
- **multiple_univariate_OLSs():** Tmp <br>
- **potential_for_change_index():** Calculate the potential for change index based on either variants of the r-squared
(from linear regression) or the r-value (pearson correlation) <br>
- **correct_pvalues():** function to correct for multiple testing <br>
- **partial_correlation():** function to calculate the partial correlations whilst correcting for other variables <br>
"""
from itertools import combinations
from itertools import product
from typing import Tuple
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
from matplotlib.lines import Line2D
from scipy import stats
from sklearn.linear_model import LinearRegression
from statsmodels.stats.multitest import multipletests
from .utils import apply_scaling
def normal_check(data: pd.DataFrame) -> pd.DataFrame:
r"""Compare the distribution of numeric variables to a normal distribution using the Kolmogrov-Smirnov test
Wrapper for `scipy.stats.kstest`: the empircal data is compared to a normally distributed variable with the
same mean and standard deviation. A significant result (p < 0.05) in the goodness of fit test means that the
data is not normally distributed.
Parameters
----------
data: pandas.DataFrame
Dataframe including the columns of interest
Returns
----------
df_normality_check: pd.DataFrame
Dataframe with column names, p-values and an indication of normality
Examples
----------
>>> tips = sns.load_dataset("tips")
>>> df_normality_check = normal_check(tips)
"""
# Select numeric columns only
num_features = data.select_dtypes(include="number").columns.tolist()
# Compare distribution of each feature to a normal distribution with given mean and std
df_normality_check = data[num_features].apply(
lambda x: stats.kstest(
x.dropna(), stats.norm.cdf, args=(np.nanmean(x), np.nanstd(x)), N=len(x)
)[1],
axis=0,
)
# create a label that indicates whether a feature has a normal distribution or not
df_normality_check = pd.DataFrame(df_normality_check).reset_index()
df_normality_check.columns = ["feature", "p-value"]
df_normality_check["normality"] = df_normality_check["p-value"] >= 0.05
return df_normality_check
def permute_test(a, test_type, test, **kwargs):
r"""Helper function to run tests for permutations
Parameters
----------
a : np.array
test_type: str {'correlation', 'independent_t_test'}
Type of the test to be used
test:
e.g. `scipy.stats.pearsonr` or `statsmodels.stats.weightstats.ttest_ind`
**kwargs:
Additional keywords to be added to `test`
- `a2` for the second feature if test_type = 'correlation'
Returns
----------
float:
p value for permutation
"""
if test_type == "correlation":
a2 = kwargs["a2"]
_, p = test(a, a2)
else:
raise ValueError("Unknown test_type provided")
def correlation_analysis(
data: pd.DataFrame,
col_list=None,
row_list=None,
check_norm=False,
method: str = "pearson",
dropna: str = "pairwise",
permutation_test: bool = False,
n_permutations: int = 1000,
random_state=None,
):
r"""Run correlations for numerical features and return output in different formats
Different methods to compute correlations and to handle missing values are implemented.
Inspired by `researchpy.corr_case` and `researchpy.corr_pair`.
Parameters
----------
data : pandas.DataFrame
Dataframe with variables in columns, cases in rows
row_list: list or None (default: None)
List with names of columns in `data` that should be in the rows of the correlogram.
If None, all columns are used but only every unique combination.
col_list: list or None (default: None)
List with names of columns in `data` that should be in the columns of the correlogram.
If None, all columns are used and only every unique combination.
check_norm: bool (default: False)
If True, normality will be checked for columns in `data` using `normal_check`. This influences the used method
for correlations, i.e. Pearson or Spearman. Note: normality check ignores missing values.
method: {'pearson', 'kendall', 'spearman'}, default 'pearson'
Type of correlation, either Pearson's r, Spearman's rho, or Kendall's tau, implemented via respectively
`scipy.stats.pearsonr`, `scipy.stats.spearmanr`, and `scipy.stats.kendalltau`
Will be ignored if check_norm=True. Instead, Person's r is used for every combination of normally distributed
columns and Spearman's rho is used for all other combinations.
dropna : {'listwise', 'pairwise'}, default 'pairwise'
Should rows with missing values be dropped over the complete `data` ('listwise') or for every correlation
separately ('pairwise')
permutation_test: bool (default: False)
If true, a permutation test will added
n_permutations: int (default: 1000)
Number of permutations in the permutation test
random_state: None or int (default: None)
Random state for permutation_test. If not None, random_state will be updated for every permutation
plot_permutation: bool (default: False)
Whether to plot the results of the permutation test
figsize: tuple (default: (11.7, 8.27))
Width and height of the figure in inches
Returns
----------
result_dict: dict
Dictionary containing with the following keys:
info : pandas.DataFrame
Description of correlation method, missing values handling and number of observations
r-values : pandas.DataFrame
Dataframe with correlation coefficients. Indices and columns are column names from `data`. Only lower
triangle is filled.
p-values : pandas.DataFrame
Dataframe with p-values. Indices and columns are column names from `data`. Only lower triangle is filled.
N : pandas.DataFrame
Dataframe with numbers of observations. Indices and columns are column names from `data`. Only lower
triangle is filled. If dropna ='listwise', every correlation will have the same number of observations.
summary : pandas.DataFrame
Dataframe with columns ['analysis', 'feature1', 'feature2', 'r-value', 'p-value', 'N', 'stat-sign']
which indicate the type of test used for the correlation, the pair of columns, the correlation coefficient,
the p-value, the number of observations for each combination of columns in `data` and whether the r-value is
statistically significant.
plotted_permuations: Figure
Examples
----------
>>> from jmspack.frequentist_statistics import correlation_analysis
>>> import seaborn as sns
>>> iris = sns.load_dataset('iris')
>>> dict_results = correlation_analysis(iris, method='pearson', dropna='listwise', permutation_test=True,
>>> n_permutations=100, check_norm=True)
>>> dict_results['summary']
References
----------
<NAME> (2018). researchpy's documentation [Revision 9ae5ed63]. Retrieved from
https://researchpy.readthedocs.io/en/latest/
"""
# Settings test
if method == "pearson":
test, test_name = stats.pearsonr, "Pearson"
elif method == "spearman":
test, test_name = stats.spearmanr, "Spearman Rank"
elif method == "kendall":
test, test_name = stats.kendalltau, "Kendall's Tau-b"
else:
raise ValueError("method not in {'pearson', 'kendall', 'spearman'}")
# Copy numerical data from the original data
data = data.copy().select_dtypes("number")
# Get correct lists
if col_list and not row_list:
row_list = data.select_dtypes("number").drop(col_list, axis=1).columns.tolist()
elif row_list and not col_list:
col_list = data.select_dtypes("number").drop(row_list, axis=1).columns.tolist()
# Initializing dataframes to store results
info = pd.DataFrame()
summary = pd.DataFrame()
if not col_list and not row_list:
r_vals = pd.DataFrame(columns=data.columns, index=data.columns)
p_vals = pd.DataFrame(columns=data.columns, index=data.columns)
n_vals = | pd.DataFrame(columns=data.columns, index=data.columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.dates as mdates
import math
### PORTFOLIO
# 24% EQUITY
# 18% FIXED INCOME
# 19% GOLD
# 18% COMDTY TREND/GLOBAL MACRO
# 21% LONG VOL
### EQUITY
# 80% GLOBAL, 19.2% of tot
# 20% EM, 4.8% of tot
### FIXED INCOME
# US TSY 50%, 9% of tot
# Corp bonds, 25% 4.5% of tot
# EM BONDS, 25%, 4.5% of tot
### GOLD
# GLD 90%, 17.1% of tot
# GDX 10%, 1.9 of tot
### COMDTY TREND+GLOBAL MACRO
# LYNX 75%, 13.5% of tot
# SEB ASSET SELECTION C LUX 25%, 4.5% of tot
# IPM SYSTEMATIC MACRO UCITS 0%
# NORDKINN 0%
### LONG VOL
# AMUNDI 100%, 21% of tot
# LATEST DATE (START MONTH) 2007-11, AMUNDI
### GLOBAL VARIABLES / DATA ###
start_date = '2007-11'
years = 13.167
global_data_raw = pd.read_csv('MSCI_World_SEK.csv')
global_data_raw = global_data_raw.set_index('Date')
global_data = pd.DataFrame(global_data_raw.loc[start_date:])
# us_data_raw = pd.read_csv('SPP_aktiefond_USA.csv')
# us_data_raw = us_data_raw.set_index('Date')
# us_data = pd.DataFrame(us_data_raw.loc[start_date:])
# avanza_zero_raw = pd.read_csv('Avanza_zero.csv')
# avanza_zero_raw = avanza_zero_raw.set_index('Date')
# avanza_zero = pd.DataFrame(avanza_zero_raw.loc[start_date:])
em_data = pd.read_csv('MSCI_EM_SEK.csv')
em_data = em_data.set_index('Date')
em_stock = pd.DataFrame(em_data.loc[start_date:])
tlt_raw = pd.read_csv('TLT_SEK.csv')
tlt_raw = tlt_raw.set_index('Date')
tlt = | pd.DataFrame(tlt_raw.loc[start_date:]) | pandas.DataFrame |
import time
import sys
import pandas as pd
import json
import requests
import pymysql
import datetime
import holidays
import boto3
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from decouple import config
from datetime import date, timedelta
def get_jsonparsed_data(url):
"""
Sends a GET request to API and returns the resulting data in a dictionary
"""
# sending get request and saving the response as response object
response = requests.get(url=url)
data = json.loads(response.text)
return data
def create_unique_id(df):
"""
Creates unique_id used in database as primary key
"""
# Create unique identifier and append to list
id_list = []
for idx, row in df.iterrows():
symbol = row["symbol"]
date = str(row["date"])
unique_id = date + '-' + symbol
id_list.append(unique_id)
# Insert IDs into dataframe as new column
df.insert(0, "id", id_list)
return df
def clean_earnings_data(df):
"""
Clean earnings data by:
- Filtering out ADRs and other exchanges
- Removing stocks that have any null values in epsEstimated, or time
- Dropping revenue and revenueEstimated columns
- Creating a unique ID
- Changing date format
"""
# If ticker is greater than a length of 5, drop it
df["length"] = df.symbol.str.len()
df = df[df.length < 5]
# Filter missing columns out
df = df.dropna(subset=['date', 'symbol', 'epsEstimated', 'time'])
# Drop unwanted columns
df = df.drop(['revenue', 'revenueEstimated', 'length'], axis=1)
df = create_unique_id(df)
df = df.rename({'date': 'earnings_date',
'epsEstimated': 'eps_estimated', 'time': 'earnings_time'}, axis=1)
df["earnings_date"] = pd.to_datetime(
df["earnings_date"]).dt.strftime('%m/%d/%y')
return df
def clean_pricing_data(df, today):
"""
Clean pricing data by:
- Adding one day to earnings_date
- Removing label column
- Creating a unique ID
- Changing date format
"""
df.loc[:,'date'] = today
df = df.drop(['label'], axis=1)
df = create_unique_id(df)
df = df.rename({'date': 'earnings_date', 'open': 'open_price', 'high': 'high_price', 'low': 'low_price',
'close': 'close_price', 'adjClose': 'adj_close', 'volume': 'daily_volume',
'unadjustedVolume': 'unadjusted_volume', 'change': 'change_dollars',
'changePercent': 'change_percent', 'changeOverTime': 'change_over_time'}, axis=1)
df["earnings_date"] = pd.to_datetime(
df["earnings_date"]).dt.strftime('%m/%d/%y')
return df
def clean_technical_data(df):
"""
Clean technical data by:
- Renaming columns
- Changing date format
"""
df = create_unique_id(df)
df = df.rename({'date': 'earnings_date', 0: 'sma_5', 1: 'sma_10', 2: 'sma_20', 3: 'ema_5',
4: 'ema_10', 5: 'ema_20', 6: 'rsi_14', 7: 'wma_5', 8: 'wma_10', 9: 'wma_20'}, axis=1)
df["earnings_date"] = pd.to_datetime(
df["earnings_date"]).dt.strftime('%m/%d/%y')
return df
# Check if dataframe is empty, exit if so
def check_dataframe_empty(df, today):
if df.empty:
sys.exit("{}: No earnings available".format(today))
# Verify the integrity of dates by checking if its a business day and not a holiday
def verify_dates(today, last_day):
us_holidays = holidays.US()
if today in us_holidays:
sys.exit("{}: U.S. holiday. Exiting program.".format(today))
elif last_day in us_holidays:
last_day = find_true_last_day(last_day, us_holidays)
print("{}: Changed last_day to {}.".format(today, last_day))
return today, last_day
# Recursive function that finds the true last business day, taking into account U.S. holidays
def find_true_last_day(last_day, us_holidays):
if last_day in us_holidays:
temp_last_day = str((pd.to_datetime(last_day) - pd.tseries.offsets.BusinessDay(n=1)).date())
return find_true_last_day(temp_last_day, us_holidays)
else:
return last_day
if __name__ == "__main__":
start = time.time()
# Connect to boto3 and pull parameters
client = boto3.client('ssm')
response = client.get_parameters(Names=[
"/rds-pipelines/dev/aws-db-name",
"/rds-pipelines/dev/aws-key",
"/rds-pipelines/dev/aws-port",
"/rds-pipelines/dev/aws-user",
"/rds-pipelines/dev/db-url",
"/rds-pipelines/dev/fmp-cloud-key",
"/rds-pipelines/dev/fmp-key"
])
aws_database = response['Parameters'][0]['Value']
aws_password = response['Parameters'][1]['Value']
aws_port = response['Parameters'][2]['Value']
aws_username = response['Parameters'][3]['Value']
aws_hostname = response['Parameters'][4]['Value']
# Pull API keys from .env file
FMP_CLOUD_API_KEY = response['Parameters'][5]['Value']
FMP_API_KEY = response['Parameters'][6]['Value']
# Find today's and the last business day's date
today = str(date.today())
last_day = str((date.today() - pd.tseries.offsets.BusinessDay(n=1)).date())
today, last_day = verify_dates(today, last_day)
# Find which day of the week it is
weekno = datetime.datetime.today().weekday()
# Exit program if it is the weekend (no eanings/pricing data)
if weekno >= 5:
sys.exit("{}: No data available on the weekend".format(today))
print("{}: Beginning data pull...".format(today))
# Setup SQL Alchemy for AWS database
sqlalch_conn = "mysql+pymysql://{}:{}@{}/{}?charset=utf8mb4".format(
aws_username, aws_password, aws_hostname, aws_database)
engine = create_engine(sqlalch_conn, echo=False)
# Connect to FMP API and pull earnings data
earnings_res = get_jsonparsed_data(
"https://financialmodelingprep.com/api/v3/earning_calendar?from={}&to={}&apikey={}".format(today, today, FMP_API_KEY))
earnings_df = pd.DataFrame(earnings_res)
check_dataframe_empty(earnings_df, today)
# Filter earnings data
earnings_filtered = clean_earnings_data(earnings_df)
check_dataframe_empty(earnings_filtered, today)
try:
earnings_filtered.to_sql(
"earnings", con=engine, index=False, if_exists='append')
except Exception as e:
print("Earnings data already exists in table")
# Pull list of symbols
symbols = earnings_filtered.symbol
print("Gathering data for {} earnings reports...".format(len(symbols)))
# For each symbol pull today's pricing
pricing_df = pd.DataFrame()
for symbol in symbols:
url = "https://financialmodelingprep.com/api/v3/historical-price-full/{}?from={}&to={}&apikey={}".format(
symbol, last_day, last_day, FMP_API_KEY)
res = get_jsonparsed_data(url)
try:
price_res_df = pd.DataFrame.from_records(res["historical"])
# Insert symbol
price_res_df.insert(1, "symbol", symbol)
# Concat with main dataframe
pricing_df = pd.concat([pricing_df, price_res_df])
except KeyError as ke:
print("Skipping symbol: {}. Error message: {}".format(symbol,ke))
# Filter pricing data
pricing_filtered = clean_pricing_data(pricing_df, today)
try:
pricing_filtered.to_sql(
"pricing", con=engine, index=False, if_exists='append')
except Exception as e:
print("Pricing data already exists in table")
indicators = ["sma_5", "sma_10", "sma_20", "ema_5", "ema_10",
"ema_20", "rsi_14", "wma_5", "wma_10", "wma_20"]
# Pull technical indicators for each stock in today's earnings list
technical_df = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__maintainer__ = __author__
import pytest
import numpy as np
import pandas as pd
from pylife.core.broadcaster import Broadcaster
foo_bar_series = pd.Series({'foo': 1.0, 'bar': 2.0})
foo_bar_series_twice_in_frame = pd.DataFrame([foo_bar_series, foo_bar_series])
series_named_index = foo_bar_series.copy()
series_named_index.index.name = 'idx1'
foo_bar_frame = pd.DataFrame({'foo': [1.0, 1.5], 'bar': [2.0, 1.5]})
def test_broadcast_series_to_array():
param, obj = Broadcaster(foo_bar_series).broadcast([1.0, 2.0])
pd.testing.assert_series_equal(param, pd.Series([1.0, 2.0]))
pd.testing.assert_frame_equal(foo_bar_series_twice_in_frame, obj)
def test_broadcast_frame_to_array_match():
param, obj = Broadcaster(foo_bar_frame).broadcast([1.0, 2.0])
np.testing.assert_array_equal(param, [1.0, 2.0])
pd.testing.assert_frame_equal(foo_bar_frame, obj)
def test_broadcast_frame_to_array_mismatch():
with pytest.raises(ValueError, match=r"Dimension mismatch. "
"Cannot map 3 value array-like to a 2 element DataFrame signal."):
Broadcaster(foo_bar_frame).broadcast([1.0, 2.0, 3.0])
def test_broadcast_series_to_scalar():
param, obj = Broadcaster(foo_bar_series).broadcast(1.0)
assert param == 1.0
pd.testing.assert_series_equal(foo_bar_series, obj)
def test_broadcast_frame_to_scalar():
param, obj = Broadcaster(foo_bar_frame).broadcast(1.0)
expected_param = pd.Series([1.0, 1.0], index=foo_bar_frame.index)
pd.testing.assert_series_equal(expected_param, param)
pd.testing.assert_frame_equal(foo_bar_frame, obj)
def test_broadcast_series_index_named_to_series_index_named():
series = pd.Series([5.0, 6.0], index=pd.Index(['x', 'y'], name='idx2'))
param, obj = Broadcaster(series_named_index).broadcast(series)
expected_param = pd.Series({
('foo', 'x'): 5.0,
('foo', 'y'): 6.0,
('bar', 'x'): 5.0,
('bar', 'y'): 6.0
})
expected_obj = pd.Series({
('foo', 'x'): 1.0,
('foo', 'y'): 1.0,
('bar', 'x'): 2.0,
('bar', 'y'): 2.0
})
expected_obj.index.names = ['idx1', 'idx2']
expected_param.index.names = ['idx1', 'idx2']
pd.testing.assert_series_equal(expected_param, param)
pd.testing.assert_series_equal(expected_obj, obj)
def test_broadcast_series_index_named_to_series_index_none():
series = pd.Series([5.0, 6.0], index=pd.Index([3, 4]))
param, obj = Broadcaster(series_named_index).broadcast(series)
expected_param = pd.Series({
('foo', 3): 5.0,
('foo', 4): 6.0,
('bar', 3): 5.0,
('bar', 4): 6.0
})
expected_obj = pd.Series({
('foo', 3): 1.0,
('foo', 4): 1.0,
('bar', 3): 2.0,
('bar', 4): 2.0
})
expected_obj.index.names = ['idx1', None]
expected_param.index.names = ['idx1', None]
pd.testing.assert_series_equal(expected_param, param)
pd.testing.assert_series_equal(expected_obj, obj)
def test_broadcast_series_index_none_to_series_index_none():
series = pd.Series([1.0, 2.0], index=pd.Index([3, 4]))
param, obj = Broadcaster(foo_bar_series).broadcast(series)
expected = pd.DataFrame([foo_bar_series, foo_bar_series], index=series.index)
pd.testing.assert_series_equal(series, param)
pd.testing.assert_frame_equal(expected, obj)
def test_broadcast_series_index_none_to_series_index_none_no_string_index():
series = pd.Series([1.0, 2.0], index=pd.Index([3, 4]))
obj = foo_bar_series.copy()
obj.index = pd.Index([1, 2])
param, obj = Broadcaster(obj).broadcast(series)
expected = pd.DataFrame([foo_bar_series, foo_bar_series],
index=series.index)
expected.columns = [1, 2]
pd.testing.assert_series_equal(series, param)
pd.testing.assert_frame_equal(expected, obj)
def test_broadcast_series_index_none_to_series_index_named():
series = pd.Series([1.0, 2.0], index=pd.Index([3, 4], name='idx2'))
foo_bar = foo_bar_series.copy()
foo_bar.index.name = None
param, obj = Broadcaster(foo_bar).broadcast(series)
expected = pd.DataFrame([foo_bar_series, foo_bar_series], index=series.index)
pd.testing.assert_series_equal(series, param)
pd.testing.assert_frame_equal(expected, obj)
def test_broadcast_series_to_frame_2_elements_index_none():
df = pd.DataFrame({
'a': [1, 3],
'b': [2, 4]
}, index=['x', 'y'])
param, obj = Broadcaster(foo_bar_series).broadcast(df)
expected_obj = pd.DataFrame({
'foo': [1.0, 1.0], 'bar': [2.0, 2.0]
}, index=['x', 'y'])
pd.testing.assert_frame_equal(param, df)
pd.testing.assert_frame_equal(obj, expected_obj)
def test_broadcast_series_to_frame_3_elements_index_none():
df = pd.DataFrame({
'a': [1, 3, 5],
'b': [2, 4, 6]
}, index=['x', 'y', 'z'])
param, obj = Broadcaster(foo_bar_series).broadcast(df)
expected_obj = pd.DataFrame({
'foo': [1.0, 1.0, 1.0], 'bar': [2.0, 2.0, 2.0],
}, index=['x', 'y', 'z'])
pd.testing.assert_frame_equal(param, df)
pd.testing.assert_frame_equal(obj, expected_obj)
def test_broadcast_series_to_seires_same_single_index():
series = pd.Series([1, 3], index=pd.Index(['x', 'y'], name='iname1'), name='src')
foo_bar = pd.Series([1, 2], index=pd.Index(['x', 'y'], name='iname1'), name='dst')
param, obj = Broadcaster(foo_bar).broadcast(series)
pd.testing.assert_series_equal(param, series)
pd.testing.assert_series_equal(obj, foo_bar)
def test_broadcast_series_to_series_different_single_index_name():
series = pd.Series([1, 3], index=pd.Index(['x', 'y'], name='iname1'), name='dest')
foo_bar = pd.Series([1, 2], index=pd.Index([1, 2], name='srcname'), name='src')
expected_index = pd.MultiIndex.from_tuples([(1, 'x'), (1, 'y'), (2, 'x'), (2, 'y')], names=['srcname', 'iname1'])
expected_obj = pd.Series([1, 1, 2, 2], name='src', index=expected_index)
expected_param = pd.Series([1, 3, 1, 3], name='dest', index=expected_index)
param, obj = Broadcaster(foo_bar).broadcast(series)
pd.testing.assert_series_equal(param, expected_param)
pd.testing.assert_series_equal(obj, expected_obj)
def test_broadcast_frame_to_frame_same_single_index():
df = pd.DataFrame({
'a': [1, 3],
'b': [2, 4]
}, index=pd.Index(['x', 'y'], name='iname1'))
foo_bar = pd.DataFrame({'foo': [1, 2], 'bar': [3, 4]}, index= | pd.Index(['x', 'y'], name='iname1') | pandas.Index |
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.sparse.api import SparseDtype
class TestSparseSeriesIndexing(object):
def setup_method(self, method):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
def test_getitem(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6],
fill_value=0, name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[2] == 0
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2],
orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2],
orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2],
orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:],
orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
assert sparse.loc[0] == 1
assert np.isnan(sparse.loc[1])
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.reindex([1, 3, 4, 5])
exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
assert np.isnan(result[-1])
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list('ABCDE'))
sparse = orig.to_sparse()
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc['C':],
orig.loc['C':].to_sparse(fill_value=0))
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:],
orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[2])
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with pytest.raises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[1])
assert sparse.iloc[4] == 0
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.iloc[2:],
orig.iloc[2:].to_sparse(fill_value=0))
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
assert sparse.at[0] == orig.at[0]
assert np.isnan(sparse.at[1])
assert np.isnan(sparse.at[2])
assert sparse.at[3] == orig.at[3]
assert np.isnan(sparse.at[4])
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('abcde'))
sparse = orig.to_sparse()
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert np.isnan(sparse.at['c'])
assert sparse.at['d'] == orig.at['d']
assert np.isnan(sparse.at['e'])
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('abcde'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert sparse.at['c'] == orig.at['c']
assert sparse.at['d'] == orig.at['d']
assert sparse.at['e'] == orig.at['e']
def test_iat(self):
orig = self.orig
sparse = self.sparse
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert np.isnan(sparse.iat[2])
assert sparse.iat[3] == orig.iat[3]
assert np.isnan(sparse.iat[4])
assert np.isnan(sparse.iat[-1])
assert sparse.iat[-5] == orig.iat[-5]
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert sparse.iat[2] == orig.iat[2]
assert sparse.iat[3] == orig.iat[3]
assert sparse.iat[4] == orig.iat[4]
assert sparse.iat[-1] == orig.iat[-1]
assert sparse.iat[-5] == orig.iat[-5]
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
assert s.get(0) == 1
assert np.isnan(s.get(1))
assert s.get(5) is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'))
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'),
fill_value=0)
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_series_equal(sparse.take([0, 1, 3]),
orig.take([0, 1, 3]).to_sparse())
tm.assert_sp_series_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse(fill_value=0))
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(['B', 'E', 'C'])
exp = orig.reindex(['B', 'E', 'C']).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_fill_value_reindex(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(['A', 'B', 'C'])
exp = orig.reindex(['A', 'B', 'C']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0., 0., 0., 0., 0.],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
def test_fill_value_reindex_coerces_float_int(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
floats = pd.Series([1., 2., 3.]).to_sparse()
result = floats.reindex([1, 2, 3], fill_value=0)
expected = pd.Series([2., 3., 0], index=[1, 2, 3]).to_sparse()
tm.assert_sp_series_equal(result, expected)
def test_reindex_nearest(self):
s = pd.Series(np.arange(10, dtype='float64')).to_sparse()
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = pd.Series(np.around(target), target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = pd.Series([0, 1, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest',
tolerance=[0.3, 0.01, 0.4, 3])
expected = pd.Series([0, np.nan, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
def tests_indexing_with_sparse(self):
# GH 13985
for kind in ['integer', 'block']:
for fill in [True, False, np.nan]:
arr = | pd.SparseArray([1, 2, 3], kind=kind) | pandas.SparseArray |
import datetime
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_index_equal
import pandas_market_calendars as mcal
from pandas_market_calendars.exchange_calendar_nyse import NYSEExchangeCalendar
from tests.test_market_calendar import FakeCalendar, FakeBreakCalendar
def test_get_calendar():
assert isinstance(mcal.get_calendar('NYSE'), NYSEExchangeCalendar)
cal = mcal.get_calendar('NYSE', datetime.time(10, 0), datetime.time(14, 30))
assert isinstance(cal, NYSEExchangeCalendar)
assert cal.open_time == datetime.time(10, 0)
assert cal.close_time == datetime.time(14, 30)
# confirm that import works properly
_ = mcal.get_calendar('CME_Equity')
def test_get_calendar_names():
assert 'ASX' in mcal.get_calendar_names()
def test_date_range_exceptions():
cal = FakeCalendar(open_time= datetime.time(9), close_time= datetime.time(11, 30))
schedule = cal.schedule("2021-01-05", "2021-01-05")
### invalid closed argument
with pytest.raises(ValueError) as e:
mcal.date_range(schedule, "15min", closed= "righ")
assert e.exconly() == "ValueError: closed must be 'left', 'right', 'both' or None."
### invalid force_close argument
with pytest.raises(ValueError) as e:
mcal.date_range(schedule, "15min", force_close= "True")
assert e.exconly() == "ValueError: force_close must be True, False or None."
### close_time is before open_time
schedule = pd.DataFrame([["2020-01-01 12:00:00+00:00", "2020-01-01 11:00:00+00:00"]],
index= ["2020-01-01"], columns= ["market_open", "market_close"])
with pytest.raises(ValueError) as e:
mcal.date_range(schedule, "15min", closed="right", force_close= True)
assert e.exconly() == "ValueError: Schedule contains rows where market_close < market_open,"\
" please correct the schedule"
### Overlap -
### the end of the last bar goes over the next start time
bcal = FakeBreakCalendar()
bschedule = bcal.schedule("2021-01-05", "2021-01-05")
with pytest.raises(ValueError) as e1:
# this frequency overlaps
mcal.date_range(bschedule, "2H", closed= "right", force_close= None)
# this doesn't
mcal.date_range(bschedule, "1H", closed="right", force_close=None)
with pytest.raises(ValueError) as e2:
mcal.date_range(bschedule, "2H", closed= "both", force_close= None)
mcal.date_range(bschedule, "1H", closed="right", force_close=None)
with pytest.raises(ValueError) as e3:
mcal.date_range(bschedule, "2H", closed= None, force_close= None)
mcal.date_range(bschedule, "1H", closed="right", force_close=None)
for e in (e1, e2, e3):
assert e.exconly() == "ValueError: The chosen frequency will lead to overlaps in the calculated index. "\
"Either choose a higher frequency or avoid setting force_close to None "\
"when setting closed to 'right', 'both' or None."
try:
# should all be fine, since force_close cuts the overlapping interval
mcal.date_range(bschedule, "2H", closed="right", force_close=True)
with pytest.warns(UserWarning): # should also warn about lost sessions
mcal.date_range(bschedule, "2H", closed="right", force_close=False)
mcal.date_range(bschedule, "2H", closed="both", force_close=True)
mcal.date_range(bschedule, "2H", closed="both", force_close=False)
# closed = "left" should never be a problem since it won't go outside market hours anyway
mcal.date_range(bschedule, "2H", closed="left", force_close=True)
mcal.date_range(bschedule, "2H", closed="left", force_close=False)
mcal.date_range(bschedule, "2H", closed="left", force_close=None)
except ValueError as e:
pytest.fail(f"Unexpected Error: \n{e}")
def test_date_range_permutations():
# open_time = 9, close_time = 11.30, freq = "1H"
cal = FakeCalendar(open_time= datetime.time(9), close_time= datetime.time(11, 30))
schedule = cal.schedule("2021-01-05", "2021-01-05")
# result matching values for: closed force_close
# 9 10 11 left False/ left None/ both False/ None False
expected = pd.DatetimeIndex(
["2021-01-05 01:00:00+00:00", "2021-01-05 02:00:00+00:00",
"2021-01-05 03:00:00+00:00"], tz= "UTC")
actual = mcal.date_range(schedule, "1H", closed= "left", force_close= False)
assert_index_equal(actual, expected)
actual = mcal.date_range(schedule, "1H", closed= "left", force_close= None)
| assert_index_equal(actual, expected) | pandas.testing.assert_index_equal |
import unittest
import platform
import pandas as pd
from pandas.api.types import CategoricalDtype
import numpy as np
import h5py
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs, dist_IR_contains, get_rank,
get_start_end)
from numba.config import IS_32BITS
kde_file = 'kde.parquet'
class TestIO(unittest.TestCase):
def setUp(self):
if get_rank() == 0:
# h5 filter test
n = 11
size = (n, 13, 21, 3)
A = np.random.randint(0, 120, size, np.uint8)
f = h5py.File('h5_test_filter.h5', "w")
f.create_dataset('test', data=A)
f.close()
# test_csv_cat1
data = ("2,B,SA\n"
"3,A,SBC\n"
"4,C,S123\n"
"5,B,BCD\n")
with open("csv_data_cat1.csv", "w") as f:
f.write(data)
# test_csv_single_dtype1
data = ("2,4.1\n"
"3,3.4\n"
"4,1.3\n"
"5,1.1\n")
with open("csv_data_dtype1.csv", "w") as f:
f.write(data)
# test_np_io1
n = 111
A = np.random.ranf(n)
A.tofile("np_file1.dat")
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_h5_read_seq(self):
def test_impl():
f = h5py.File("lr.hdf5", "r")
X = f['points'][:]
f.close()
return X
hpat_func = hpat.jit(test_impl)
np.testing.assert_allclose(hpat_func(), test_impl())
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_h5_read_const_infer_seq(self):
def test_impl():
p = 'lr'
f = h5py.File(p + ".hdf5", "r")
s = 'po'
X = f[s + 'ints'][:]
f.close()
return X
hpat_func = hpat.jit(test_impl)
np.testing.assert_allclose(hpat_func(), test_impl())
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_h5_read_parallel(self):
def test_impl():
f = h5py.File("lr.hdf5", "r")
X = f['points'][:]
Y = f['responses'][:]
f.close()
return X.sum() + Y.sum()
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl(), decimal=2)
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip("fix collective create dataset")
def test_h5_write_parallel(self):
def test_impl(N, D):
points = np.ones((N, D))
responses = np.arange(N) + 1.0
f = h5py.File("lr_w.hdf5", "w")
dset1 = f.create_dataset("points", (N, D), dtype='f8')
dset1[:] = points
dset2 = f.create_dataset("responses", (N,), dtype='f8')
dset2[:] = responses
f.close()
N = 101
D = 10
hpat_func = hpat.jit(test_impl)
hpat_func(N, D)
f = h5py.File("lr_w.hdf5", "r")
X = f['points'][:]
Y = f['responses'][:]
f.close()
np.testing.assert_almost_equal(X, np.ones((N, D)))
np.testing.assert_almost_equal(Y, np.arange(N) + 1.0)
@unittest.skip("fix collective create dataset and group")
def test_h5_write_group(self):
def test_impl(n, fname):
arr = np.arange(n)
n = len(arr)
f = h5py.File(fname, "w")
g1 = f.create_group("G")
dset1 = g1.create_dataset("data", (n,), dtype='i8')
dset1[:] = arr
f.close()
n = 101
arr = np.arange(n)
fname = "test_group.hdf5"
hpat_func = hpat.jit(test_impl)
hpat_func(n, fname)
f = h5py.File(fname, "r")
X = f['G']['data'][:]
f.close()
np.testing.assert_almost_equal(X, arr)
def test_h5_read_group(self):
def test_impl():
f = h5py.File("test_group_read.hdf5", "r")
g1 = f['G']
X = g1['data'][:]
f.close()
return X.sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_h5_file_keys(self):
def test_impl():
f = h5py.File("test_group_read.hdf5", "r")
s = 0
for gname in f.keys():
X = f[gname]['data'][:]
s += X.sum()
f.close()
return s
hpat_func = hpat.jit(test_impl, h5_types={'X': hpat.int64[:]})
self.assertEqual(hpat_func(), test_impl())
# test using locals for typing
hpat_func = hpat.jit(test_impl, locals={'X': hpat.int64[:]})
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_h5_group_keys(self):
def test_impl():
f = h5py.File("test_group_read.hdf5", "r")
g1 = f['G']
s = 0
for dname in g1.keys():
X = g1[dname][:]
s += X.sum()
f.close()
return s
hpat_func = hpat.jit(test_impl, h5_types={'X': hpat.int64[:]})
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_h5_filter(self):
def test_impl():
f = h5py.File("h5_test_filter.h5", "r")
b = np.arange(11) % 3 == 0
X = f['test'][b, :, :, :]
f.close()
return X
hpat_func = hpat.jit(locals={'X:return': 'distributed'})(test_impl)
n = 4 # len(test_impl())
start, end = get_start_end(n)
np.testing.assert_allclose(hpat_func(), test_impl()[start:end])
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_pq_read(self):
def test_impl():
t = pq.read_table('kde.parquet')
df = t.to_pandas()
X = df['points']
return X.sum()
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_pq_read_global_str1(self):
def test_impl():
df = pd.read_parquet(kde_file)
X = df['points']
return X.sum()
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_pq_read_freevar_str1(self):
kde_file2 = 'kde.parquet'
def test_impl():
df = pd.read_parquet(kde_file2)
X = df['points']
return X.sum()
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_pd_read_parquet(self):
def test_impl():
df = pd.read_parquet('kde.parquet')
X = df['points']
return X.sum()
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_pq_str(self):
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
A = df.two.values == 'foo'
return A.sum()
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_pq_str_with_nan_seq(self):
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
A = df.five.values == 'foo'
return A
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_pq_str_with_nan_par(self):
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
A = df.five.values == 'foo'
return A.sum()
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_pq_str_with_nan_par_multigroup(self):
def test_impl():
df = pq.read_table('example2.parquet').to_pandas()
A = df.five.values == 'foo'
return A.sum()
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_pq_bool(self):
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return df.three.sum()
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_pq_nan(self):
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return df.one.sum()
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_pq_float_no_nan(self):
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return df.four.sum()
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_pq_pandas_date(self):
def test_impl():
df = pd.read_parquet('pandas_dt.pq')
return pd.DataFrame({'DT64': df.DT64, 'col2': df.DATE})
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(), test_impl())
@unittest.skip('Error: Attribute "dtype" are different\n'
'[left]: datetime64[ns]\n'
'[right]: object')
def test_pq_spark_date(self):
def test_impl():
df = | pd.read_parquet('sdf_dt.pq') | pandas.read_parquet |
# <NAME>
# Child Mind Institute
import numpy as np
import pandas as pd
from sklearn import metrics
from keras.layers import LSTM
from statsmodels import robust
from keras.layers import Dense
from keras.models import Sequential
from scipy.stats import ttest_rel
from itertools import combinations
from scipy.spatial import distance
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
#######################################################################################################################
# Distance calculation without thermal data
df = | pd.read_csv('tingle_pilot_data_shareable.csv') | pandas.read_csv |
'''
Functions used to create graphs and generate statistical measures from created graphs.
'''
import numpy as np
import scona as scn
import os
import sys
import pandas as pd
from colorama import Fore
def mean_std(values):
'''
Function to calculate mean and standard deviation.
Parameters
------------------------------------------------
values: list, list of values to calculate mean and std
Returns
----------------------------------------------
results: dict, dictionary of mean and std devations
'''
val = np.array(values)
mean = val.mean()
std_dev = np.std(val)
lower_2std = mean - 2*std_dev
upper_2std = mean + 2*std_dev
lower_1std = mean - std_dev
upper_1std = mean + std_dev
results = {
'mean': mean,
'upper_1std':upper_1std,
'upper_2std':upper_2std,
'lower_1std':lower_1std,
'lower_2std':lower_2std
}
return results
def create_graphs(data, names, centroids, threshold=10):
'''
Function to create a correlation matrix, graph and thresholded graph.
Wrapper around multiple scona functions.
Parameters
-----------------------------------------------------
data: pandas dataframe object with the data for graph.
names: list object. Names of brain regions.
centroids. Numpy array. Co-ordinates for names (x,y,z)
threshold: int, optional. Level to threshold the graph at.
Returns
-------------------------------------------------------------------
results: dict object. Dictionary of corr_matrix (correlation matrix),
graph (graph unthresholded) and graph_threshold (thresholded graph)
'''
residuals_df = scn.create_residuals_df(data, names)
corr_matrix = scn.create_corrmat(residuals_df, method='pearson')
graph = scn.BrainNetwork(network=corr_matrix, parcellation=names, centroids=centroids)
graph_threshold = graph.threshold(threshold)
results = {
'corr_matrix': corr_matrix,
'graph': graph,
'graph_threshold': graph_threshold
}
return results
def directories(name, data_path):
'''
Function to check if a directory exists. If directory doesn't exist
then creates a new directory.
Parameters
---------------------------------------
name: str, name of directory
perms: int, number of permuations used.
data_path: str,
Returns
-----------------------------------------
boolean: Retruns True if directory exists.
Returns False if directory doesn't
exist and makes the directory.
'''
dirs = os.listdir(data_path)
if name not in dirs:
path = os.path.join(data_path, name)
os.mkdir(path)
return False
else:
return True
def permutations(thresholded_graph, data_path, name='graph', perms=1000, overwrite=False, save=True):
'''
Function to simulate random graphs for checking that actual graphs
differs from the random graphs. Will also create a directory with
csvs if one doesn't exist.
Parameters
--------------------------------------------------
thresholded_graph: scona thresholded graph object.
data_path: str, path to directory where to save results.
name: optional str, Name of graph object
perms: int, number of permutations
overwrite: optional Boolean,
Returns
--------------------------------------------------
results: dict, pandas dataframe of global measures,
rich club and small world properties.
'''
folder_name = f'{name}_{perms}'
directory_exist = directories(folder_name, data_path)
path = os.path.join(data_path, folder_name)
if directory_exist == False or overwrite == True:
brain_bundle = scn.GraphBundle([thresholded_graph], [f'{name}_thresholded'])
brain_bundle.create_random_graphs(f'{name}_thresholded', perms)
global_measures = brain_bundle.report_global_measures()
rich_club = brain_bundle.report_rich_club()
small_world = brain_bundle.report_small_world(f'{name}_thresholded')
small_word_df = pd.DataFrame.from_dict(small_world, orient='index', columns=['small_world_coefficient'])
if save == True:
try:
rich_club.to_csv(f'{path}/rich_club.csv')
global_measures.to_csv(f'{path}/global_measures.csv')
small_word_df.to_csv(f'{path}/small_world.csv')
except Exception:
print(Fore.RED + 'Unable to save CSV files.' + Fore.RESET)
else:
try:
print("Loading CSVs")
rich_club = pd.read_csv(f'{path}/rich_club.csv')
global_measures = pd.read_csv(f'{path}/global_measures.csv')
small_world_df = | pd.read_csv(f'{path}/small_world.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from datetime import datetime
import json
from bs4 import BeautifulSoup
import requests
from tqdm import tqdm
def timestamp2date(timestamp):
# function converts a Uniloc timestamp into Gregorian date
return datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d')
def date2timestamp(date):
# function coverts Gregorian date in a given format to timestamp
return datetime.strptime(date, '%Y-%m-%d').timestamp()
def getCryptoOHLC(fsym, tsym):
# function fetches a crypto price-series for fsym/tsym and stores
# it in pandas DataFrame
cols = ['date', 'timestamp', 'open', 'high', 'low', 'close']
lst = ['time', 'open', 'high', 'low', 'close']
timestamp_today = datetime.today().timestamp()
curr_timestamp = timestamp_today
for j in range(2):
df = pd.DataFrame(columns=cols)
# (limit-1) * 2 = days
# One year is around 184
limit = 184
url = ("https://min-api.cryptocompare.com/data/histoday?fsym=" +
fsym + "&tsym=" + tsym + "&toTs=" + str(int(curr_timestamp)) + "&limit=" + str(limit))
response = requests.get(url)
soup = BeautifulSoup(response.content, "html.parser")
dic = json.loads(soup.prettify())
for i in range(1, limit):
tmp = []
for e in enumerate(lst):
x = e[0]
y = dic['Data'][i][e[1]]
if(x == 0):
tmp.append(str(timestamp2date(y)))
tmp.append(y)
if(np.sum(tmp[-4::]) > 0):
df.loc[len(df)] = np.array(tmp)
df.index = pd.to_datetime(df.date)
df.drop('date', axis=1, inplace=True)
curr_timestamp = int(df.iloc[0][0])
if(j == 0):
df0 = df.copy()
else:
data = pd.concat([df, df0], axis=0)
# Fixing and error when the dataFrame contained strings instead of floats
data = data.astype(float)
return data
def normalize_data(df):
return df.divide(df.iloc[0])
def get_multiple_cryptos(symbols):
print('Obtaining data from cryptocompare.com ...')
# Intializing an empty DataFrame
data = pd.DataFrame()
# Adding columns with data for all requested cryptocurrencies
for symbol in tqdm(symbols):
fsym = symbol
tsym = "BTC"
data_symbol = getCryptoOHLC(fsym, tsym)
data = pd.concat([data, data_symbol['close']], axis = 1)
# Assinging correct names to the columns
data.columns = symbols
return data
def find_portfolio_statistics(allocs, df):
'''
Compute portfolio statistics:
1) Cumulative return
2) Daily return
3) Average daily return
4) Standard deviation of the daily returns
5) (Annual) Sharpe Ratio
6) Final value
7) Total returns
Parameters:
-----------
allocs: list of allocation fractions for each stock
The sum must be equal to 1!
example: allocs = [0.0, 0.5, 0.35, 0.15]
df: DataFrame with the data
'''
# Normalization
df = (df / df.iloc[0])
# Allocation of the resources
df = df * allocs
# Sum of the value of the resources
df = df.sum(axis = 1)
# Compute Portfolio Statistics
# Cumulative return
cumulative_return = (df.iloc[-1] / df.iloc[0]) - 1
# Daily returns
dailyreturns = (df.iloc[1:] / df.iloc[:-1].values) - 1
average_daily_return = dailyreturns.mean(axis = 0)
yearly_return = average_daily_return * 356 # 356 days of trading in a year
# Standard deviation of the daily returns
std_daily_return = dailyreturns.std(axis = 0)
# Sharpe Ratio
sharpe_ratio = (356 ** (0.5)) * ((average_daily_return - 0) / std_daily_return)
ending_value = df.iloc[-1]
total_returns = average_daily_return*(356 / 356)
'''
print('For allocation as follows:')
print(allocs)
print('Mean return:')
print(mean_return)
print('Standard deviation:')
print(std_return)
print('Annualized Sharpe ratio:')
print(sharpe_ratio)
'''
return yearly_return, std_daily_return, sharpe_ratio
def generate_random_portfolios(df, num_portfolios, stocks):
print('Generating random portfolios...')
# Number of stocks
num_stocks = len(stocks)
# Initialization the final result matrix with zeros
result_matrix = np.zeros([num_portfolios,3])
for i in tqdm(range(num_portfolios)):
random = np.random.random(num_stocks)
allocs = random/ np.sum(random)
mean_return, std_return, sharpe_ratio = find_portfolio_statistics(allocs, df)
result_matrix[i, 0] = mean_return
result_matrix[i, 1] = std_return
result_matrix[i, 2] = sharpe_ratio
return result_matrix
if __name__ == '__main__':
symbols = ['ETH', 'LTC', 'ETC', 'DOGE', 'DGB', 'SC']
data = get_multiple_cryptos(symbols)
# Normalizing the data
data = normalize_data(data)
result_matrix = generate_random_portfolios(data, 20000, symbols)
#convert results array to Pandas DataFrame
results_frame = | pd.DataFrame(result_matrix, columns=['ret','stdev','sharpe']) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Get derived data and merge data sets
# In[7]:
import pandas as pd
import geopandas as gpd
import numpy as np
from fuzzywuzzy import process, fuzz
import os
# In[ ]:
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
DS_dir = currentdir + '/Data_Source'
try:
os.mkdir(DS_dir, mode = 0o666)
except FileExistsError:
pass
# ## Get Crime Rate(%)
# In[2]:
def getCrimeRate(crime_police, ocean):
crime_police['dummy'] = True
ocean_block = ocean[['NAME','POP2010']].copy()
ocean_block[['dummy']] = True
pre_crime_police = pd.merge(crime_police, ocean_block, on = 'dummy')
pre_crime_police.drop('dummy', axis = 1, inplace = True)
pre_crime_police['Token_Set_Ratio'] = pre_crime_police[['P_name', 'NAME']].apply(lambda x:fuzz.token_set_ratio(x.P_name, x.NAME), axis = 1)
pre_crime_police['Rank_Token_Set_Ratio'] = pre_crime_police.groupby('P_name')['Token_Set_Ratio'].rank(ascending = False, method = 'dense')
pre_crime_police = pre_crime_police.loc[(pre_crime_police.Rank_Token_Set_Ratio == 1) & (pre_crime_police.Token_Set_Ratio > 60)]
pre_crime_police.insert(19, 'crime_rate(%)', pre_crime_police['case_number']/pre_crime_police['POP2010']*20)
pre_crime_police = pre_crime_police.iloc[:,:-2]
crime_police = pd.merge(ocean_county, pre_crime_police, how='right', left_on = ['NAME','POP2010'], right_on = ['NAME','POP2010'])
return crime_police
# In[5]:
MB_nj = gpd.read_file('./Maps/map_02/Municipal_Boundaries_of_NJ.shp')
ocean_county = MB_nj[MB_nj['COUNTY'] == 'OCEAN']
ocean_county = ocean_county.to_crs(epsg = 4326)
# In[8]:
crime_police = pd.read_csv('./Data_Source/crime_police.csv')
crime_rate = getCrimeRate(crime_police, ocean_county)
crime_rate
# In[11]:
# crime_rate.to_csv('./Data_Source/crime_rate.csv', index = False)
# In[9]:
crime_rate.columns
# In[10]:
cols = crime_rate.columns.tolist()
cols = ['P_ID', 'P_name', 'P_address', 'P_city', 'ori', 'agency_name', 'NAME'] + cols[-4:-3] + ['POPDEN2010'] + cols[-3:]
crime_police_ult = crime_rate[cols].copy()
crime_police_ult.rename(columns = {'NAME':'mun_name', 'POPDEN2010': 'POPDEN2010 (per sq. mi.)'}, inplace = True)
crime_police_ult
crime_police_ult.nlargest(35,'POPDEN2010 (per sq. mi.)')
# In[ ]:
#crime_police_ult.to_csv('./Data_Source/crime_police_ult.csv', index = False)
# ## Merge data sets to get Final.csv
# In[5]:
home = | pd.read_csv('./Data_Source/home_ult.csv') | pandas.read_csv |
# http://github.com/timestocome
# take a look at the differences in daily returns for recent bull and bear markets
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
# pandas display options
pd.options.display.max_rows = 1000
pd.options.display.max_columns = 25
pd.options.display.width = 1000
######################################################################
# data
########################################################################
# read in datafile created in LoadAndMatchDates.py
data = pd.read_csv('StockDataWithVolume.csv', index_col='Date', parse_dates=True)
features = [data.columns.values]
# create target --- let's try Nasdaq value 1 day change
data['returns'] = (data['NASDAQ'] - data['NASDAQ'].shift(1)) / data['NASDAQ']
# remove nan row from target creation
data = data.dropna()
'''
############################################################################
# plot returns on NASDAQ training data
#############################################################################
fig = plt.figure(figsize=(10,10))
plt.subplot(2,1,2)
plt.subplot(2,1,1)
plt.plot(data['returns'])
plt.title("Nasdaq daily returns")
# histogram of returns
plt.subplot(2,1,2)
plt.hist(data['returns'], bins=200)
plt.xlabel("Returns")
plt.ylabel("Probability")
plt.title("Histogram daily Nasdaq returns")
plt.grid(True)
# median
median_return = data['returns'].median()
l = plt.axvspan(median_return-0.0001, median_return+0.0001, color='red')
plt.show()
'''
#########################################################################
# split into bear and bull markets
##########################################################################
bull1_start = pd.to_datetime('01-01-1990') # beginning of this dataset
bull1_end = | pd.to_datetime('07-16-1990') | pandas.to_datetime |
#!/usr/bin/env python3
"""Script to get the classification performance."""
import argparse
from pathlib import Path
import random as rn
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
from joblib import load
from utils import COLUMNS_NAME, load_dataset
PROJECT_ROOT = Path.cwd()
def main(dataset_name, disease_label, evaluated_dataset):
"""Calculate the performance of the classifier in each iteration of the bootstrap method."""
# ----------------------------------------------------------------------------
n_bootstrap = 1000
participants_path = PROJECT_ROOT / 'data' / evaluated_dataset / 'participants.tsv'
freesurfer_path = PROJECT_ROOT / 'data' / evaluated_dataset / 'freesurferData.csv'
outputs_dir = PROJECT_ROOT / 'outputs'
ids_path = outputs_dir / (evaluated_dataset + '_homogeneous_ids.csv')
hc_label = 1
# ----------------------------------------------------------------------------
# Set random seed
random_seed = 42
np.random.seed(random_seed)
rn.seed(random_seed)
classifier_dir = PROJECT_ROOT / 'outputs' / 'classifier_analysis'
classifier_dataset_dir = classifier_dir / dataset_name
classifier_dataset_analysis_dir = classifier_dataset_dir / '{:02d}_vs_{:02d}'.format(hc_label, disease_label)
classifier_storage_dir = classifier_dataset_analysis_dir / 'models'
generalization_dir = classifier_dataset_analysis_dir / 'generalization'
generalization_dir.mkdir(exist_ok=True)
evaluated_dataset_df = load_dataset(participants_path, ids_path, freesurfer_path)
aucs_test = []
# ----------------------------------------------------------------------------
for i_bootstrap in tqdm(range(n_bootstrap)):
rvm = load(classifier_storage_dir / '{:03d}_rvr.joblib'.format(i_bootstrap))
scaler = load(classifier_storage_dir / '{:03d}_scaler.joblib'.format(i_bootstrap))
x_data = evaluated_dataset_df[COLUMNS_NAME].values
tiv = evaluated_dataset_df['EstimatedTotalIntraCranialVol'].values
tiv = tiv[:, np.newaxis]
x_data = (np.true_divide(x_data, tiv)).astype('float32')
x_data = np.concatenate((x_data[evaluated_dataset_df['Diagn'] == hc_label],
x_data[evaluated_dataset_df['Diagn'] == disease_label]), axis=0)
y_data = np.concatenate((np.zeros(sum(evaluated_dataset_df['Diagn'] == hc_label)),
np.ones(sum(evaluated_dataset_df['Diagn'] == disease_label))))
# Scaling using inter-quartile
x_data = scaler.transform(x_data)
pred = rvm.predict(x_data)
predictions_proba = rvm.predict_proba(x_data)
auc = roc_auc_score(y_data, predictions_proba[:, 1])
aucs_test.append(auc)
aucs_df = pd.DataFrame(columns=['AUCs'], data=aucs_test)
aucs_df.to_csv(generalization_dir / '{:}_aucs.csv'.format(evaluated_dataset), index=False)
results = | pd.DataFrame(columns=['Measure', 'Value']) | pandas.DataFrame |
# force python 3.* compability
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
zip, round, input, int, pow, object)
# regular imaports below:
from rikedom.security_loader import load_from_yahoo
import pandas as pd
import numpy as np
from datetime import datetime
from rikedom.simulator import TradingSimulator
import logging
logging.basicConfig(level=logging.DEBUG)
import statsmodels.api as sm
class HodrickPrescottAlgorithm(TradingSimulator):
def __init__(self):
super(HodrickPrescottAlgorithm, self).__init__()
self.cash = 10000 #sek
self.stock_assets = {}
self.buy_sell_hold_signal = None
self.data = None
def run_algorithm(self, interesting_stocks, start=datetime(2014, 2, 1), end=datetime.now() ):
logging.info('run_algorithm begin {}'.format(locals()))
self.interesting_stocks = interesting_stocks
# get data from yahoo
self.data = load_from_yahoo(stocks=self.interesting_stocks, indexes={}, start=start, end=end)
logging.debug('done loading from yahoo. {} {} {}'.format(self.interesting_stocks, start, end))
self.buy_sell_hold_signal = pd.DataFrame(index=self.data.index, columns=['buy_sell_hold_signal'])
self.recorder = pd.DataFrame(index=self.data.index, columns=self.interesting_stocks)
logging.debug('starting to run algo...')
self.perform_simulation()
logging.debug('done running algo')
def step(self, available_data, simulated_date):
try: self.i += 1
except: self.i = 1
logging.debug(self.i),
if self.i < 3:
#pass
return
if simulated_date == | pd.Timestamp('2014-10-15 00:00:00+00:00') | pandas.Timestamp |
__author__ = 'qchasserieau'
import json
import time
import warnings
from random import random
import numpy as np
import pandas as pd
import pyproj
import requests
import shapely
from tqdm import tqdm
try:
from geopy.distance import geodesic # works for geopy version >=2
except ImportError:
warnings.warn('Your geopy version is <2 while the latest available version is >=2', FutureWarning)
from geopy.distance import vincenty as geodesic # works for geopy version <2
from syspy.spatial import spatial
wgs84 = pyproj.Proj("EPSG:4326")
def dist_from_row(row, projection=wgs84):
"""
Uses vincenty formula to calculate the euclidean distances of an origin-destination pair.
:param row: a pd.Series containing the coordinates of the origin and the destination
:type row: pd.Series
:param projection: projection of the zoning
:type projection: pyproj.Proj
:return: euclidean_distance: euclidean distance of the origin-destination
:rtype: int
"""
coordinates_origin = (pyproj.transform(projection, wgs84, row['x_origin'], row['y_origin']))
coordinates_origin = (coordinates_origin[1], coordinates_origin[0])
coordinates_destination = (pyproj.transform(projection, wgs84, row['x_destination'], row['y_destination']))
coordinates_destination = (coordinates_destination[1], coordinates_destination[0])
return geodesic(coordinates_origin, coordinates_destination).m
def euclidean(zones, coordinates_unit='degree', projection=wgs84, epsg=False, method='numpy', origins=False, destinations=False,
latitude=False, longitude=False, intrazonal=False):
"""
Calculates the euclidean distances between each origin-destination pair of a zoning.
If the coordinates are in degree, the Vincenty formula is used.
:param zones: a shape dataframe containing the geometries (polygons) of the zoning
:type zones: pd.DataFrame
:param coordinates_unit: degree or meter
:type coordinates_unit: str
:param origins: a list of id of the zones from which the euclidean distance is needed
:type origins: list
:param destinations: a list of id of the zones to which the euclidean distance is needed
:type destination: list
:param method: 'numpy' or 'vincenty' numpy is faster but only handles wgs84 epsg 4326
:type method: str
:param projection: projection of the zoning
:type projection: pyproj.Proj
:param epsg: epsg code of the projection, if given, the projection arg is overwritten
:type projection: int or str
:param intrazonal: (bool), if True a non-zero intrazonal distance is computed.
In this case an intrazonal projection system must be provided
:return: euclidean_distance_dataframe: a pd.DataFrame with the coordinates of the centroids
and the euclidean distances between the zones
:rtype: pd.DataFrame
"""
projection = pyproj.Proj("+init=EPSG:" + str(epsg)) if epsg else projection
if 'geometry' in zones.columns:
z = zones[['geometry']].copy()
z['x'] = z['geometry'].apply(lambda g: g.centroid.coords[0][0])
z['y'] = z['geometry'].apply(lambda g: g.centroid.coords[0][1])
z.drop(['geometry'], axis=1, inplace=True)
elif bool(latitude) & bool(longitude):
z = zones[[latitude, longitude]].copy()
z['x'] = z[longitude]
z['y'] = z[latitude]
else:
print('If the DataFrame has no "geometry" field, longitude and latitude should be provided')
# zones_destination = zones_destination if zones_destination
iterables = [zones.index] * 2
od = pd.DataFrame(index=pd.MultiIndex.from_product(iterables, names=['origin', 'destination'])).reset_index()
od = pd.merge(od, z, left_on='origin', right_index=True)
od = pd.merge(od, z, left_on='destination', right_index=True, suffixes=['_origin', '_destination'])
if origins:
od = od[od['origin'].isin(origins)]
if destinations:
od = od[od['destination'].isin(destinations)]
# Compute distance
if coordinates_unit == 'degree':
if method == 'numpy':
columns = ['x_origin', 'y_origin', 'x_destination', 'y_destination']
od['euclidean_distance'] = get_distance_from_lon_lat_in_m(*[od[s] for s in columns])
else:
od['euclidean_distance'] = od.apply(dist_from_row, axis=1, args={projection})
elif coordinates_unit == 'meter':
od['euclidean_distance'] = np.sqrt(
(od['x_origin'] - od['x_destination'])**2
+ (od['y_origin'] - od['y_destination'])**2
)
else:
raise('Invalid coordinates_unit.')
if intrazonal:
for i in od.index:
if od['origin'][i] == od['destination'][i]:
od['euclidean_distance'][i] = np.sqrt(zones['area'][od['origin'][i]]) / 2
return od[['origin', 'destination', 'euclidean_distance', 'x_origin', 'y_origin', 'x_destination', 'y_destination']]
# google maps ################################################
def all_skim_matrix(zones=None, token=None, od_matrix=None, coordinates_unit='degree', **skim_matrix_kwargs):
if od_matrix is not None:
df = od_matrix.copy()
else:
df = euclidean(zones, coordinates_unit=coordinates_unit)
try:
assert token is not None
if isinstance(token, str):
token = [token, token] # il semble que l'on vide trop tôt la pile
t = token.pop()
print('Building driving skims matrix with Google API.')
skim_lists = []
rows = tqdm(list(df.iterrows()), 'skim matrix')
for index, row in rows:
computed = False
while computed is False and token:
try:
skim_lists.append(
driving_skims_from_row(
row,
t,
**skim_matrix_kwargs
)
)
computed = True
except TokenError as e:
print(e)
try:
t = token.pop()
print('Popped:', t)
except Exception:
print('Could not complete the skim matrix computation: not enough credentials.')
df[['distance', 'duration', 'duration_in_traffic']] = pd.DataFrame(skim_lists)
print('Done')
except IndexError as e:
print('Exception [%s] occured' % e)
print('WARNING: the build of the real skim matrix has failed.')
df[['distance', 'duration']] = df.apply(
pseudo_driving_skims_from_row, args=[token], axis=1)
print('A random one has been generated instead to allow testing of the next steps.')
return df
def skim_matrix(zones, token, n_clusters, coordinates_unit='degree', skim_matrix_kwargs={}):
clusters, cluster_series = spatial.zone_clusters(zones, n_clusters, 1e-9)
cluster_euclidean = all_skim_matrix(
clusters,
token,
coordinates_unit=coordinates_unit,
**skim_matrix_kwargs
)
df = euclidean(zones, coordinates_unit=coordinates_unit)
df = pd.merge(
df,
pd.DataFrame(cluster_series),
left_on='origin',
right_index=True)
df = pd.merge(
df,
pd.DataFrame(cluster_series),
left_on='destination',
right_index=True,
suffixes=['_origin', '_destination'])
df = pd.merge(
df,
cluster_euclidean.rename(
columns={
'origin': 'cluster_origin',
'destination': 'cluster_destination',
'distance': 'cluster_distance',
'duration': 'cluster_duration'
}
),
on=['cluster_origin', 'cluster_destination'],
suffixes=['', '_cluster']
)
df['distance_rate'] = (
df['euclidean_distance'] / df['euclidean_distance_cluster']
).fillna(0)
df['distance'] = df['cluster_distance'] * df['distance_rate']
df['duration'] = df['cluster_duration'] * df['distance_rate']
euclidean_to_path_length = 1 / (df['euclidean_distance_cluster'] / df['cluster_distance']).mean()
euclidean_speed = (df['euclidean_distance_cluster'] / df['duration']).mean()
df.loc[df['euclidean_distance_cluster'] == 0, 'duration'] = df['euclidean_distance'] / euclidean_speed
df.loc[df['euclidean_distance_cluster'] == 0, 'distance'] = df['euclidean_distance'] * euclidean_to_path_length
return df.fillna(0)
def in_url(coordinates):
"""
:param coordinates: list of coordinates [longitude, latitude]
:type coordinates: list
:return: in_url_coordninates
:rtype: str
"""
return str(coordinates[1]) + ',' + str(coordinates[0])
class TokenError(Exception):
def __init__(self, message='out of credentials'):
# Call the base class constructor with the parameters it needs
super(TokenError, self).__init__(message)
def driving_skims_from_coordinate_couple(
origin_coordinates,
destination_coordinates,
token,
timestamp=time.time(),
errors='ignore',
proxy=None
):
"""
Requests google maps distancematrix API with a couple of coordinates. Returns the road skims of the trip.
:param origin_coordinates: origin coordinates in wgs84 EPSG 4326
:type origin_coordinates: list
:param destination_coordinates: destination coordinates in wgs84 EPSG 4326
:type destination_coordinates: list
:param token: Google distancematrix API token (provided by Google when politely asked)
:type token: str
:param timestamp: timestamp of the very period to investigate
:type timestamp: timestamp
:return: skim_series: a pd.Series with the duration and the distance of the trip
:rtype: pd.Series
"""
api_url = "https://maps.googleapis.com/maps/api/distancematrix/json?"
proto_url = api_url + "origins={0}&destinations={1}"
proto_url += "&mode=driving&language=en-EN&sensor=false&departure_time={2}&trafic_model=pessimistic&key={3}"
url = proto_url.format(in_url(origin_coordinates), in_url(destination_coordinates), timestamp, token)
print(url)
try:
# Call to the proxy here
if proxy is not None:
data = {
'latitude_origin': origin_coordinates[1],
'longitude_origin': origin_coordinates[0],
'latitude_destination': destination_coordinates[1],
'longitude_destination': destination_coordinates[0],
'timestamp': int(timestamp),
'token': token
}
resp = proxy.get(**data) # get the json string
if proxy.get_status != 0: # Not found in the db
resp_json = json.loads(resp)
if resp_json["status"] == 'OK': # Itinerary computation done
proxy.populate(resp=resp, **data)
proxy.insert()
element = resp_json['rows'][0]['elements'][0]
else:
raise TokenError
else:
element = json.loads(resp)['rows'][0]['elements'][0]
else:
element = json.loads(requests.get(url).text)['rows'][0]['elements'][0]
try:
duration_in_traffic = element['duration_in_traffic']['value']
except KeyError:
duration_in_traffic = np.nan
return pd.Series(
{
'duration': element['duration']['value'],
'distance': element['distance']['value'],
'duration_in_traffic': duration_in_traffic,
}
)
except (KeyError): # Exception
# duration_in_traffic may not be provided
assert(errors == 'ignore'), 'Token probably out of credentials.'
return pd.Series({
'duration': np.nan,
'distance': np.nan,
'duration_in_traffic': np.nan
})
def driving_skims_from_row(
row,
token,
projection=wgs84,
timestamp=time.time(),
**skim_matrix_kwargs
):
time.sleep(0.1)
origin_coordinates = pyproj.transform(
projection,
wgs84,
row['x_origin'],
row['y_origin']
)
destination_coordinates = pyproj.transform(
projection,
wgs84,
row['x_destination'],
row['y_destination']
)
driving_skims = driving_skims_from_coordinate_couple(
origin_coordinates,
destination_coordinates,
token,
timestamp,
**skim_matrix_kwargs
)
return driving_skims
def pseudo_driving_skims_from_row(
row,
token,
projection=wgs84,
timestamp=time.time()
):
random_distance = 1000 * random()
random_distance_factor = 1.3 + random() / 5
random_duration_factor = 0.3 + random() / 20
distance = random_distance + get_distance_from_row_in_m(row) * random_distance_factor
duration = distance * random_duration_factor
return pd.Series({'distance': distance, 'duration': duration})
def get_distance_from_row_in_m(row):
return get_distance_from_lon_lat_in_m(*list(row[
['x_origin', 'y_origin', 'x_destination', 'y_destination']].values))
def get_distance_from_lon_lat_in_m(lon1, lat1, lon2, lat2):
r = 6371 # Radius of the earth in km
d_lat = deg_to_rad(lat2 - lat1) # deg2rad user defined
d_lon = deg_to_rad(lon2 - lon1)
a = np.sin(d_lat / 2) * np.sin(d_lat / 2) + \
np.cos(deg_to_rad(lat1)) * np.cos(deg_to_rad(lat2)) * \
np.sin(d_lon / 2) * np.sin(d_lon / 2)
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
d = r * c # Distance in km
return d * 1000
def deg_to_rad(deg):
return deg * (np.pi / 180)
def add_coordinates(dataframe):
df = dataframe.copy()
df['coords'] = df['geometry'].apply(lambda g: g.coords)
df['x_origin'] = df['coords'].apply(lambda c: c[0][0])
df['y_origin'] = df['coords'].apply(lambda c: c[0][1])
df['x_destination'] = df['coords'].apply(lambda c: c[-1][0])
df['y_destination'] = df['coords'].apply(lambda c: c[-1][1])
return df
def drop_coordinates(dataframe):
return dataframe.drop(
['coords', 'x_origin', 'x_destination', 'y_origin', 'y_destination'],
axis=1,
errors='ignore'
)
def distance_from_geometry(geometry_series, projection=wgs84, method='numpy'):
df = pd.DataFrame(geometry_series)
df.columns = ['geometry']
df = add_coordinates(df)
if method == 'numpy' and projection == wgs84:
cols = ['x_origin', 'y_origin', 'x_destination', 'y_destination']
df['distance'] = get_distance_from_lon_lat_in_m(*[df[s] for s in cols])
else:
df['distance'] = df.apply(dist_from_row, axis=1)
return df['distance']
def a_b_from_geometry(geometry):
boundary = geometry.envelope.boundary
a = shapely.geometry.linestring.LineString(list(boundary.coords)[0:2])
b = shapely.geometry.linestring.LineString(list(boundary.coords)[1:3])
return pd.Series([a, b])
def area_factor(geometry_series):
df = | pd.DataFrame(geometry_series) | pandas.DataFrame |
# --------------
#Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#path of the data file- path
data = pd.read_csv(path)
data = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
# Creating series from numpy array (1D)
arr_series = np.array([1, 2, 3, 4, 5, 6, 6, 7])
ex_series = | pd.Series(arr_series) | pandas.Series |
import pandas as pd
import numpy as np
import csv
from pathlib import Path
import questionary
df_2017 = pd.read_csv(
Path('2017_data.csv'),
)
final_2017_df = pd.DataFrame()
final_2017_df['BTC_Monthly_Close'] = df_2017['price_close']
final_2017_df['ETH_Monthly_Close'] = df_2017['price_close.1']
final_2017_df['ITC_Monthly_Close'] = df_2017['price_close.2']
final_2017_df['USDT_Monthly_Close'] = df_2017['price_close.3']
final_2017_df['XLM_Monthly_Close'] = df_2017['price_close.4']
final_2017_df['XRP_Monthly_Close'] = df_2017['price_close.5']
final_2017_df['ZEC_Monthly_Close'] = df_2017['price_close.6']
final_2017_df['DASH_Monthly_Close'] = df_2017['price_close.7']
monthly_2017_returns = pd.DataFrame()
monthly_2017_returns['BTC'] = final_2017_df['BTC_Monthly_Close'].pct_change().dropna()
monthly_2017_returns['ETH']= final_2017_df['ETH_Monthly_Close'].pct_change().dropna()
monthly_2017_returns['ITC'] = final_2017_df['ITC_Monthly_Close'].pct_change().dropna()
monthly_2017_returns['USDT'] = final_2017_df['USDT_Monthly_Close'].pct_change().dropna()
monthly_2017_returns['XLM'] = final_2017_df['XLM_Monthly_Close'].pct_change().dropna()
monthly_2017_returns['XRP'] = final_2017_df['XRP_Monthly_Close'].pct_change().dropna()
monthly_2017_returns['ZEC'] = final_2017_df['ZEC_Monthly_Close'].pct_change().dropna()
monthly_2017_returns['DASH'] = final_2017_df['DASH_Monthly_Close'].pct_change().dropna()
market_var_2017 = final_2017_df['BTC_Monthly_Close'].var()
market_cov_2017 = final_2017_df['BTC_Monthly_Close'].cov(final_2017_df['BTC_Monthly_Close'])
eth_cov_2017 = monthly_2017_returns['ETH'].cov(monthly_2017_returns['BTC'])
itc_cov_2017 = monthly_2017_returns['ITC'].cov(monthly_2017_returns['BTC'])
usdt_cov_2017 = monthly_2017_returns['USDT'].cov(monthly_2017_returns['BTC'])
xlm_cov_2017 = monthly_2017_returns['XLM'].cov(monthly_2017_returns['BTC'])
xrp_cov_2017 = monthly_2017_returns['XRP'].cov(monthly_2017_returns['BTC'])
zec_cov_2017 = monthly_2017_returns['ZEC'].cov(monthly_2017_returns['BTC'])
dash_cov_2017 = monthly_2017_returns['DASH'].cov(monthly_2017_returns['BTC'])
btc_beta_2017 = market_cov_2017 / market_var_2017
eth_beta_2017 = eth_cov_2017 / market_var_2017
itc_beta_2017 = itc_cov_2017 / market_var_2017
usdt_beta_2017 = usdt_cov_2017 / market_var_2017
xlm_beta_2017= xlm_cov_2017 / market_var_2017
xrp_beta_2017 = xrp_cov_2017 / market_var_2017
zec_beta_2017 = zec_cov_2017 / market_var_2017
dash_beta_2017 = dash_cov_2017 / market_var_2017
# ----------------------------------------------------------
df_2018 = pd.read_csv(
Path('2018_data.csv'),
)
final_2018_df = | pd.DataFrame() | pandas.DataFrame |
from .gamedata import getPlayers, getPointLog, getMatches, getUnplayed, getDisqualified
from .pwr import PWRsystems
from .regression import Regression
from .simulate import simulateBracket, simulateMatch, simulateGamelog
from .players import Player, Players
from .tiebreak import getPlayoffSeeding
from .util import playoff_series_ids
from joblib import Parallel, delayed
import pandas as pd
import numpy as np
class Simulate(object):
def __init__(self, n_sims, pwr_systems=None, rank_adj=0.5, st_dev=1.6, season=2):
self.n_sims = n_sims
self.rank_adj = rank_adj
self.st_dev = st_dev
self.season = season
if pwr_systems is None:
self.pwr_systems = PWRsystems()
else:
self.pwr_systems = pwr_systems
self.players = getPlayers(season)
self.points = getPointLog(season)
self.played = getMatches(season)
self.unplayed = getUnplayed(season)
self.dq = getDisqualified(season)
for system in self.pwr_systems.systems:
system.calculate(gamelog=self.points, season=season)
self.regress(system)
self.pwr = self.pwr_systems.combine()
self.regress(self.pwr)
def run(self, parallel=True, combine=True):
simulations = []
if parallel:
simulations = Parallel(n_jobs=-1)(delayed(self.simulate)() for i in range(self.n_sims))
else:
for i in range(self.n_sims):
simulations.append(self.simulate())
self.simulations = Simulations(simulations, combine)
return self
def playoffs(self, reindex=False):
if self.simulations.combined:
return self.copied(self.simulations.playoffs.copy(), reindex)
def regularseason(self, reindex=False):
if self.simulations.combined:
return self.copied(self.simulations.regularseason.copy(), reindex)
def standings(self, reindex=False):
if self.simulations.combined:
return self.copied(self.simulations.standings.copy(), reindex)
def copied(self, df, reindex):
if reindex:
return df.reset_index(level='Simulation')
else:
return df
def simulate(self):
return Simulation(self)
def regress(self, system):
if system.regress_to is not None:
if type(system.regress_to) is not Regression:
system.regress_to = Regression(to=system.regress_to)
system.regress(system.values)
class Simulation(object):
def __init__(self, sim):
self.rankings = sim.pwr.values.copy()
pwr_adjustments = np.random.normal(0, sim.rank_adj, self.rankings.shape[0])
self.rankings['PWR'] = self.rankings['PWR'].values - pwr_adjustments
if sim.unplayed.empty:
self.regularseason = sim.played
else:
simulated = simulateGamelog(sim.unplayed, self.rankings, sim.st_dev, sim.season)
self.regularseason = pd.concat([sim.played, simulated], ignore_index=True)
adjusted = pd.DataFrame([x + [1] for x in self.regularseason[['Winner','Loser','W Pts']].values.tolist()] +
[x + [0] for x in self.regularseason[['Loser','Winner','L Pts']].values.tolist()],
columns=['Player','Opponent','Pts','Wins'])
df = pd.merge(pd.merge(adjusted, sim.players, on='Player'),
sim.players.rename({'Player':'Opponent','Division':'OppDivision'}, axis=1), on='Opponent')
self.standings = pd.merge(df.groupby(['Player','Division']).agg({'Pts':'sum','Wins':'sum'}).reset_index(),
self.rankings, on='Player')
df['Wins'] = np.where(np.isin(df['Player'].values, sim.dq), 0, df['Wins'].values)
self.seeding = getPlayoffSeeding(df)
self.playoffs = self.simulatePlayoffs(sim)
self.standings = | pd.merge(self.standings, self.seeding, how='left', on='Player', suffixes=('', '_')) | pandas.merge |
'''
Set of common transformations
'''
import numpy as np
import pandas as pd
def nconst_to_float(series):
'''
Transform the nconst column to float.
The type float64 support NaN values in Pandas.
'''
return pd.to_numeric(series.str.replace('nm', ''), downcast='unsigned')
def tconst_to_float(series):
'''
Transform the tconst column to float.
The type float64 support NaN values in Pandas.
'''
return pd.to_numeric(series.str.replace('tt', ''), downcast='unsigned')
def expand_rows_using_repeat(df, target_column, separator):
'''
Expand the rows of a DataFrame splitting values of the target column using numpy repeat.
'''
target_df = df[target_column].str.split(separator)
lens = [len(item) for item in target_df]
other_df = df[df.columns.difference([target_column])]
other_array = np.repeat(other_df.values, lens, axis=0)
# Put each target element in a row
target_array = np.concatenate(target_df.values).reshape(-1, 1)
data = np.concatenate((other_array, target_array), axis=1)
columns = np.append(other_df.columns.values, target_column)
final_df = | pd.DataFrame(data=data, columns=columns) | pandas.DataFrame |
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from .conftest import (
assert_series_equal, assert_frame_equal, fail_on_pvlib_version)
from numpy.testing import assert_allclose
import unittest.mock as mock
from pvlib import inverter, pvsystem
from pvlib import atmosphere
from pvlib import iam as _iam
from pvlib import irradiance
from pvlib.location import Location
from pvlib import temperature
from pvlib._deprecation import pvlibDeprecationWarning
@pytest.mark.parametrize('iam_model,model_params', [
('ashrae', {'b': 0.05}),
('physical', {'K': 4, 'L': 0.002, 'n': 1.526}),
('martin_ruiz', {'a_r': 0.16}),
])
def test_PVSystem_get_iam(mocker, iam_model, model_params):
m = mocker.spy(_iam, iam_model)
system = pvsystem.PVSystem(module_parameters=model_params)
thetas = 1
iam = system.get_iam(thetas, iam_model=iam_model)
m.assert_called_with(thetas, **model_params)
assert iam < 1.
def test_PVSystem_multi_array_get_iam():
model_params = {'b': 0.05}
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=model_params),
pvsystem.Array(module_parameters=model_params)]
)
iam = system.get_iam((1, 5), iam_model='ashrae')
assert len(iam) == 2
assert iam[0] != iam[1]
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_iam((1,), iam_model='ashrae')
def test_PVSystem_get_iam_sapm(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(_iam, 'sapm')
aoi = 0
out = system.get_iam(aoi, 'sapm')
_iam.sapm.assert_called_once_with(aoi, sapm_module_params)
assert_allclose(out, 1.0, atol=0.01)
def test_PVSystem_get_iam_interp(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='interp')
def test__normalize_sam_product_names():
BAD_NAMES = [' -.()[]:+/",', 'Module[1]']
NORM_NAMES = ['____________', 'Module_1_']
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module(1)']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module[1]']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
def test_PVSystem_get_iam_invalid(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='not_a_model')
def test_retrieve_sam_raise_no_parameters():
"""
Raise an exception if no parameters are provided to `retrieve_sam()`.
"""
with pytest.raises(ValueError) as error:
pvsystem.retrieve_sam()
assert 'A name or path must be provided!' == str(error.value)
def test_retrieve_sam_cecmod():
"""
Test the expected data is retrieved from the CEC module database. In
particular, check for a known module in the database and check for the
expected keys for that module.
"""
data = pvsystem.retrieve_sam('cecmod')
keys = [
'BIPV',
'Date',
'T_NOCT',
'A_c',
'N_s',
'I_sc_ref',
'V_oc_ref',
'I_mp_ref',
'V_mp_ref',
'alpha_sc',
'beta_oc',
'a_ref',
'I_L_ref',
'I_o_ref',
'R_s',
'R_sh_ref',
'Adjust',
'gamma_r',
'Version',
'STC',
'PTC',
'Technology',
'Bifacial',
'Length',
'Width',
]
module = 'Itek_Energy_LLC_iT_300_HE'
assert module in data
assert set(data[module].keys()) == set(keys)
def test_retrieve_sam_cecinverter():
"""
Test the expected data is retrieved from the CEC inverter database. In
particular, check for a known inverter in the database and check for the
expected keys for that inverter.
"""
data = pvsystem.retrieve_sam('cecinverter')
keys = [
'Vac',
'Paco',
'Pdco',
'Vdco',
'Pso',
'C0',
'C1',
'C2',
'C3',
'Pnt',
'Vdcmax',
'Idcmax',
'Mppt_low',
'Mppt_high',
'CEC_Date',
'CEC_Type',
]
inverter = 'Yaskawa_Solectria_Solar__PVI_5300_208__208V_'
assert inverter in data
assert set(data[inverter].keys()) == set(keys)
def test_sapm(sapm_module_params):
times = pd.date_range(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1000, 500, 1100, np.nan, 1000],
index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)
expected = pd.DataFrame(np.array(
[[ -5.0608322 , -4.65037767, nan, nan,
nan, -4.91119927, -4.15367716],
[ 2.545575 , 2.28773882, 56.86182059, 47.21121608,
108.00693168, 2.48357383, 1.71782772],
[ 5.65584763, 5.01709903, 54.1943277 , 42.51861718,
213.32011294, 5.52987899, 3.48660728],
[ nan, nan, nan, nan,
nan, nan, nan],
[ nan, nan, nan, nan,
nan, nan, nan]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=times)
assert_frame_equal(out, expected, check_less_precise=4)
out = pvsystem.sapm(1000, 25, sapm_module_params)
expected = OrderedDict()
expected['i_sc'] = 5.09115
expected['i_mp'] = 4.5462909092579995
expected['v_oc'] = 59.260800000000003
expected['v_mp'] = 48.315600000000003
expected['p_mp'] = 219.65677305534581
expected['i_x'] = 4.9759899999999995
expected['i_xx'] = 3.1880204359100004
for k, v in expected.items():
assert_allclose(out[k], v, atol=1e-4)
# just make sure it works with Series input
pvsystem.sapm(effective_irradiance, temp_cell,
pd.Series(sapm_module_params))
def test_PVSystem_sapm(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
effective_irradiance = 500
temp_cell = 25
out = system.sapm(effective_irradiance, temp_cell)
pvsystem.sapm.assert_called_once_with(effective_irradiance, temp_cell,
sapm_module_params)
assert_allclose(out['p_mp'], 100, atol=100)
def test_PVSystem_multi_array_sapm(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
effective_irradiance = (100, 500)
temp_cell = (15, 25)
sapm_one, sapm_two = system.sapm(effective_irradiance, temp_cell)
assert sapm_one['p_mp'] != sapm_two['p_mp']
sapm_one_flip, sapm_two_flip = system.sapm(
(effective_irradiance[1], effective_irradiance[0]),
(temp_cell[1], temp_cell[0])
)
assert sapm_one_flip['p_mp'] == sapm_two['p_mp']
assert sapm_two_flip['p_mp'] == sapm_one['p_mp']
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(effective_irradiance, 10)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(500, temp_cell)
@pytest.mark.parametrize('airmass,expected', [
(1.5, 1.00028714375),
(np.array([[10, np.nan]]), np.array([[0.999535, 0]])),
(pd.Series([5]), pd.Series([1.0387675]))
])
def test_sapm_spectral_loss(sapm_module_params, airmass, expected):
out = pvsystem.sapm_spectral_loss(airmass, sapm_module_params)
if isinstance(airmass, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_PVSystem_sapm_spectral_loss(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm_spectral_loss')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
airmass = 2
out = system.sapm_spectral_loss(airmass)
pvsystem.sapm_spectral_loss.assert_called_once_with(airmass,
sapm_module_params)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_sapm_spectral_loss(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
loss_one, loss_two = system.sapm_spectral_loss(2)
assert loss_one == loss_two
# this test could be improved to cover all cell types.
# could remove the need for specifying spectral coefficients if we don't
# care about the return value at all
@pytest.mark.parametrize('module_parameters,module_type,coefficients', [
({'Technology': 'mc-Si'}, 'multisi', None),
({'Material': 'Multi-c-Si'}, 'multisi', None),
({'first_solar_spectral_coefficients': (
0.84, -0.03, -0.008, 0.14, 0.04, -0.002)},
None,
(0.84, -0.03, -0.008, 0.14, 0.04, -0.002))
])
def test_PVSystem_first_solar_spectral_loss(module_parameters, module_type,
coefficients, mocker):
mocker.spy(atmosphere, 'first_solar_spectral_correction')
system = pvsystem.PVSystem(module_parameters=module_parameters)
pw = 3
airmass_absolute = 3
out = system.first_solar_spectral_loss(pw, airmass_absolute)
atmosphere.first_solar_spectral_correction.assert_called_once_with(
pw, airmass_absolute, module_type, coefficients)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_first_solar_spectral_loss():
system = pvsystem.PVSystem(
arrays=[
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
),
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
)
]
)
loss_one, loss_two = system.first_solar_spectral_loss(1, 3)
assert loss_one == loss_two
@pytest.mark.parametrize('test_input,expected', [
([1000, 100, 5, 45], 1140.0510967821877),
([np.array([np.nan, 1000, 1000]),
np.array([100, np.nan, 100]),
np.array([1.1, 1.1, 1.1]),
np.array([10, 10, 10])],
np.array([np.nan, np.nan, 1081.1574])),
([pd.Series([1000]), pd.Series([100]), pd.Series([1.1]),
pd.Series([10])],
pd.Series([1081.1574]))
])
def test_sapm_effective_irradiance(sapm_module_params, test_input, expected):
test_input.append(sapm_module_params)
out = pvsystem.sapm_effective_irradiance(*test_input)
if isinstance(test_input, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-1)
def test_PVSystem_sapm_effective_irradiance(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(pvsystem, 'sapm_effective_irradiance')
poa_direct = 900
poa_diffuse = 100
airmass_absolute = 1.5
aoi = 0
p = (sapm_module_params['A4'], sapm_module_params['A3'],
sapm_module_params['A2'], sapm_module_params['A1'],
sapm_module_params['A0'])
f1 = np.polyval(p, airmass_absolute)
expected = f1 * (poa_direct + sapm_module_params['FD'] * poa_diffuse)
out = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi)
pvsystem.sapm_effective_irradiance.assert_called_once_with(
poa_direct, poa_diffuse, airmass_absolute, aoi, sapm_module_params)
assert_allclose(out, expected, atol=0.1)
def test_PVSystem_multi_array_sapm_effective_irradiance(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
poa_direct = (500, 900)
poa_diffuse = (50, 100)
aoi = (0, 10)
airmass_absolute = 1.5
irrad_one, irrad_two = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi
)
assert irrad_one != irrad_two
@pytest.fixture
def two_array_system(pvsyst_module_params, cec_module_params):
"""Two-array PVSystem.
Both arrays are identical.
"""
temperature_model = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass'
]
# Need u_v to be non-zero so wind-speed changes cell temperature
# under the pvsyst model.
temperature_model['u_v'] = 1.0
# parameter for fuentes temperature model
temperature_model['noct_installed'] = 45
# parameters for noct_sam temperature model
temperature_model['noct'] = 45.
temperature_model['module_efficiency'] = 0.2
module_params = {**pvsyst_module_params, **cec_module_params}
return pvsystem.PVSystem(
arrays=[
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
),
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
)
]
)
@pytest.mark.parametrize("poa_direct, poa_diffuse, aoi",
[(20, (10, 10), (20, 20)),
((20, 20), (10,), (20, 20)),
((20, 20), (10, 10), 20)])
def test_PVSystem_sapm_effective_irradiance_value_error(
poa_direct, poa_diffuse, aoi, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
two_array_system.sapm_effective_irradiance(
poa_direct, poa_diffuse, 10, aoi
)
def test_PVSystem_sapm_celltemp(mocker):
a, b, deltaT = (-3.47, -0.0594, 3) # open_rack_glass_glass
temp_model_params = {'a': a, 'b': b, 'deltaT': deltaT}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds, a, b,
deltaT)
assert_allclose(out, 57, atol=1)
def test_PVSystem_sapm_celltemp_kwargs(mocker):
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds,
temp_model_params['a'],
temp_model_params['b'],
temp_model_params['deltaT'])
assert_allclose(out, 57, atol=1)
def test_PVSystem_multi_array_sapm_celltemp_different_arrays():
temp_model_one = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
temp_model_two = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'close_mount_glass_glass']
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(temperature_model_parameters=temp_model_one),
pvsystem.Array(temperature_model_parameters=temp_model_two)]
)
temp_one, temp_two = system.sapm_celltemp(
(1000, 1000), 25, 1
)
assert temp_one != temp_two
def test_PVSystem_pvsyst_celltemp(mocker):
parameter_set = 'insulated'
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['pvsyst'][
parameter_set]
alpha_absorption = 0.85
module_efficiency = 0.17
module_parameters = {'alpha_absorption': alpha_absorption,
'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(module_parameters=module_parameters,
temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'pvsyst_cell')
irrad = 800
temp = 45
wind = 0.5
out = system.pvsyst_celltemp(irrad, temp, wind_speed=wind)
temperature.pvsyst_cell.assert_called_once_with(
irrad, temp, wind_speed=wind, u_c=temp_model_params['u_c'],
u_v=temp_model_params['u_v'], module_efficiency=module_efficiency,
alpha_absorption=alpha_absorption)
assert (out < 90) and (out > 70)
def test_PVSystem_faiman_celltemp(mocker):
u0, u1 = 25.0, 6.84 # default values
temp_model_params = {'u0': u0, 'u1': u1}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'faiman')
temps = 25
irrads = 1000
winds = 1
out = system.faiman_celltemp(irrads, temps, winds)
temperature.faiman.assert_called_once_with(irrads, temps, winds, u0, u1)
assert_allclose(out, 56.4, atol=1)
def test_PVSystem_noct_celltemp(mocker):
poa_global, temp_air, wind_speed, noct, module_efficiency = (
1000., 25., 1., 45., 0.2)
expected = 55.230790492
temp_model_params = {'noct': noct, 'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'noct_sam')
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
temperature.noct_sam.assert_called_once_with(
poa_global, temp_air, wind_speed, effective_irradiance=None, noct=noct,
module_efficiency=module_efficiency)
assert_allclose(out, expected)
# dufferent types
out = system.noct_sam_celltemp(np.array(poa_global), np.array(temp_air),
np.array(wind_speed))
assert_allclose(out, expected)
dr = pd.date_range(start='2020-01-01 12:00:00', end='2020-01-01 13:00:00',
freq='1H')
out = system.noct_sam_celltemp(pd.Series(index=dr, data=poa_global),
pd.Series(index=dr, data=temp_air),
pd.Series(index=dr, data=wind_speed))
assert_series_equal(out, pd.Series(index=dr, data=expected))
# now use optional arguments
temp_model_params.update({'transmittance_absorptance': 0.8,
'array_height': 2,
'mount_standoff': 2.0})
expected = 60.477703576
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed,
effective_irradiance=1100.)
assert_allclose(out, expected)
def test_PVSystem_noct_celltemp_error():
poa_global, temp_air, wind_speed, module_efficiency = (1000., 25., 1., 0.2)
temp_model_params = {'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
with pytest.raises(KeyError):
system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_functions(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad_one = pd.Series(1000, index=times)
irrad_two = pd.Series(500, index=times)
temp_air = pd.Series(25, index=times)
wind_speed = pd.Series(1, index=times)
temp_one, temp_two = celltemp(
two_array_system, (irrad_one, irrad_two), temp_air, wind_speed)
assert (temp_one != temp_two).all()
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_multi_temp(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad = pd.Series(1000, index=times)
temp_air_one = pd.Series(25, index=times)
temp_air_two = pd.Series(5, index=times)
wind_speed = pd.Series(1, index=times)
temp_one, temp_two = celltemp(
two_array_system,
(irrad, irrad),
(temp_air_one, temp_air_two),
wind_speed
)
assert (temp_one != temp_two).all()
temp_one_swtich, temp_two_switch = celltemp(
two_array_system,
(irrad, irrad),
(temp_air_two, temp_air_one),
wind_speed
)
assert_series_equal(temp_one, temp_two_switch)
assert_series_equal(temp_two, temp_one_swtich)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_multi_wind(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad = pd.Series(1000, index=times)
temp_air = pd.Series(25, index=times)
wind_speed_one = pd.Series(1, index=times)
wind_speed_two = pd.Series(5, index=times)
temp_one, temp_two = celltemp(
two_array_system,
(irrad, irrad),
temp_air,
(wind_speed_one, wind_speed_two)
)
assert (temp_one != temp_two).all()
temp_one_swtich, temp_two_switch = celltemp(
two_array_system,
(irrad, irrad),
temp_air,
(wind_speed_two, wind_speed_one)
)
assert_series_equal(temp_one, temp_two_switch)
assert_series_equal(temp_two, temp_one_swtich)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_temp_too_short(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), (1,), 1)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_temp_too_long(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), (1, 1, 1), 1)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_wind_too_short(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), 25, (1,))
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_wind_too_long(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), 25, (1, 1, 1))
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_poa_length_mismatch(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, 1000, 25, 1)
def test_PVSystem_fuentes_celltemp(mocker):
noct_installed = 45
temp_model_params = {'noct_installed': noct_installed}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
spy = mocker.spy(temperature, 'fuentes')
index = pd.date_range('2019-01-01 11:00', freq='h', periods=3)
temps = pd.Series(25, index)
irrads = pd.Series(1000, index)
winds = pd.Series(1, index)
out = system.fuentes_celltemp(irrads, temps, winds)
assert_series_equal(spy.call_args[0][0], irrads)
assert_series_equal(spy.call_args[0][1], temps)
assert_series_equal(spy.call_args[0][2], winds)
assert spy.call_args[1]['noct_installed'] == noct_installed
assert_series_equal(out, pd.Series([52.85, 55.85, 55.85], index,
name='tmod'))
def test_PVSystem_fuentes_celltemp_override(mocker):
# test that the surface_tilt value in the cell temp calculation can be
# overridden but defaults to the surface_tilt attribute of the PVSystem
spy = mocker.spy(temperature, 'fuentes')
noct_installed = 45
index = pd.date_range('2019-01-01 11:00', freq='h', periods=3)
temps = pd.Series(25, index)
irrads = pd.Series(1000, index)
winds = pd.Series(1, index)
# uses default value
temp_model_params = {'noct_installed': noct_installed}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params,
surface_tilt=20)
system.fuentes_celltemp(irrads, temps, winds)
assert spy.call_args[1]['surface_tilt'] == 20
# can be overridden
temp_model_params = {'noct_installed': noct_installed, 'surface_tilt': 30}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params,
surface_tilt=20)
system.fuentes_celltemp(irrads, temps, winds)
assert spy.call_args[1]['surface_tilt'] == 30
def test_Array__infer_temperature_model_params():
array = pvsystem.Array(module_parameters={},
racking_model='open_rack',
module_type='glass_polymer')
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'sapm']['open_rack_glass_polymer']
assert expected == array._infer_temperature_model_params()
array = pvsystem.Array(module_parameters={},
racking_model='freestanding',
module_type='glass_polymer')
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'pvsyst']['freestanding']
assert expected == array._infer_temperature_model_params()
array = pvsystem.Array(module_parameters={},
racking_model='insulated',
module_type=None)
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'pvsyst']['insulated']
assert expected == array._infer_temperature_model_params()
def test_Array__infer_cell_type():
array = pvsystem.Array(module_parameters={})
assert array._infer_cell_type() is None
def test_calcparams_desoto(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=3, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0, 800.0], index=times)
temp_cell = pd.Series([25, 25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=1.121,
dEgdT=-0.0002677)
assert_series_equal(IL, pd.Series([0.0, 6.036, 6.096], index=times),
check_less_precise=3)
assert_series_equal(I0, pd.Series([0.0, 1.94e-9, 7.419e-8], index=times),
check_less_precise=3)
assert_allclose(Rs, 0.094)
assert_series_equal(Rsh, pd.Series([np.inf, 19.65, 19.65], index=times),
check_less_precise=3)
assert_series_equal(nNsVth, pd.Series([0.473, 0.473, 0.5127], index=times),
check_less_precise=3)
def test_calcparams_cec(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=3, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0, 800.0], index=times)
temp_cell = pd.Series([25, 25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_cec(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
Adjust=cec_module_params['Adjust'],
EgRef=1.121,
dEgdT=-0.0002677)
assert_series_equal(IL, pd.Series([0.0, 6.036, 6.0896], index=times),
check_less_precise=3)
assert_series_equal(I0, pd.Series([0.0, 1.94e-9, 7.419e-8], index=times),
check_less_precise=3)
assert_allclose(Rs, 0.094)
assert_series_equal(Rsh, pd.Series([np.inf, 19.65, 19.65], index=times),
check_less_precise=3)
assert_series_equal(nNsVth, pd.Series([0.473, 0.473, 0.5127], index=times),
check_less_precise=3)
def test_calcparams_pvsyst(pvsyst_module_params):
times = pd.date_range(start='2015-01-01', periods=2, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0], index=times)
temp_cell = pd.Series([25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_pvsyst(
effective_irradiance,
temp_cell,
alpha_sc=pvsyst_module_params['alpha_sc'],
gamma_ref=pvsyst_module_params['gamma_ref'],
mu_gamma=pvsyst_module_params['mu_gamma'],
I_L_ref=pvsyst_module_params['I_L_ref'],
I_o_ref=pvsyst_module_params['I_o_ref'],
R_sh_ref=pvsyst_module_params['R_sh_ref'],
R_sh_0=pvsyst_module_params['R_sh_0'],
R_s=pvsyst_module_params['R_s'],
cells_in_series=pvsyst_module_params['cells_in_series'],
EgRef=pvsyst_module_params['EgRef'])
assert_series_equal(
IL.round(decimals=3), pd.Series([0.0, 4.8200], index=times))
assert_series_equal(
I0.round(decimals=3), pd.Series([0.0, 1.47e-7], index=times))
assert_allclose(Rs, 0.500)
assert_series_equal(
Rsh.round(decimals=3), pd.Series([1000.0, 305.757], index=times))
assert_series_equal(
nNsVth.round(decimals=4), pd.Series([1.6186, 1.7961], index=times))
def test_PVSystem_calcparams_desoto(cec_module_params, mocker):
mocker.spy(pvsystem, 'calcparams_desoto')
module_parameters = cec_module_params.copy()
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
system = pvsystem.PVSystem(module_parameters=module_parameters)
effective_irradiance = np.array([0, 800])
temp_cell = 25
IL, I0, Rs, Rsh, nNsVth = system.calcparams_desoto(effective_irradiance,
temp_cell)
pvsystem.calcparams_desoto.assert_called_once_with(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=module_parameters['EgRef'],
dEgdT=module_parameters['dEgdT'])
assert_allclose(IL, np.array([0.0, 6.036]), atol=1)
assert_allclose(I0, 2.0e-9, atol=1.0e-9)
assert_allclose(Rs, 0.1, atol=0.1)
assert_allclose(Rsh, np.array([np.inf, 20]), atol=1)
assert_allclose(nNsVth, 0.5, atol=0.1)
def test_PVSystem_calcparams_pvsyst(pvsyst_module_params, mocker):
mocker.spy(pvsystem, 'calcparams_pvsyst')
module_parameters = pvsyst_module_params.copy()
system = pvsystem.PVSystem(module_parameters=module_parameters)
effective_irradiance = np.array([0, 800])
temp_cell = np.array([25, 50])
IL, I0, Rs, Rsh, nNsVth = system.calcparams_pvsyst(effective_irradiance,
temp_cell)
pvsystem.calcparams_pvsyst.assert_called_once_with(
effective_irradiance,
temp_cell,
alpha_sc=pvsyst_module_params['alpha_sc'],
gamma_ref=pvsyst_module_params['gamma_ref'],
mu_gamma=pvsyst_module_params['mu_gamma'],
I_L_ref=pvsyst_module_params['I_L_ref'],
I_o_ref=pvsyst_module_params['I_o_ref'],
R_sh_ref=pvsyst_module_params['R_sh_ref'],
R_sh_0=pvsyst_module_params['R_sh_0'],
R_s=pvsyst_module_params['R_s'],
cells_in_series=pvsyst_module_params['cells_in_series'],
EgRef=pvsyst_module_params['EgRef'],
R_sh_exp=pvsyst_module_params['R_sh_exp'])
assert_allclose(IL, np.array([0.0, 4.8200]), atol=1)
assert_allclose(I0, np.array([0.0, 1.47e-7]), atol=1.0e-5)
assert_allclose(Rs, 0.5, atol=0.1)
assert_allclose(Rsh, np.array([1000, 305.757]), atol=50)
assert_allclose(nNsVth, np.array([1.6186, 1.7961]), atol=0.1)
@pytest.mark.parametrize('calcparams', [pvsystem.PVSystem.calcparams_pvsyst,
pvsystem.PVSystem.calcparams_desoto,
pvsystem.PVSystem.calcparams_cec])
def test_PVSystem_multi_array_calcparams(calcparams, two_array_system):
params_one, params_two = calcparams(
two_array_system, (1000, 500), (30, 20)
)
assert params_one != params_two
@pytest.mark.parametrize('calcparams, irrad, celltemp',
[ (f, irrad, celltemp)
for f in (pvsystem.PVSystem.calcparams_desoto,
pvsystem.PVSystem.calcparams_cec,
pvsystem.PVSystem.calcparams_pvsyst)
for irrad, celltemp in [(1, (1, 1)), ((1, 1), 1)]])
def test_PVSystem_multi_array_calcparams_value_error(
calcparams, irrad, celltemp, two_array_system):
with pytest.raises(ValueError,
match='Length mismatch for per-array parameter'):
calcparams(two_array_system, irrad, celltemp)
@pytest.fixture(params=[
{ # Can handle all python scalar inputs
'Rsh': 20.,
'Rs': 0.1,
'nNsVth': 0.5,
'I': 3.,
'I0': 6.e-7,
'IL': 7.,
'V_expected': 7.5049875193450521
},
{ # Can handle all rank-0 array inputs
'Rsh': np.array(20.),
'Rs': np.array(0.1),
'nNsVth': np.array(0.5),
'I': np.array(3.),
'I0': np.array(6.e-7),
'IL': np.array(7.),
'V_expected': np.array(7.5049875193450521)
},
{ # Can handle all rank-1 singleton array inputs
'Rsh': np.array([20.]),
'Rs': np.array([0.1]),
'nNsVth': np.array([0.5]),
'I': np.array([3.]),
'I0': np.array([6.e-7]),
'IL': np.array([7.]),
'V_expected': np.array([7.5049875193450521])
},
{ # Can handle all rank-1 non-singleton array inputs with infinite shunt
# resistance, Rsh=inf gives V=Voc=nNsVth*(np.log(IL + I0) - np.log(I0)
# at I=0
'Rsh': np.array([np.inf, 20.]),
'Rs': np.array([0.1, 0.1]),
'nNsVth': np.array([0.5, 0.5]),
'I': np.array([0., 3.]),
'I0': np.array([6.e-7, 6.e-7]),
'IL': np.array([7., 7.]),
'V_expected': np.array([0.5*(np.log(7. + 6.e-7) - np.log(6.e-7)),
7.5049875193450521])
},
{ # Can handle mixed inputs with a rank-2 array with infinite shunt
# resistance, Rsh=inf gives V=Voc=nNsVth*(np.log(IL + I0) - np.log(I0)
# at I=0
'Rsh': np.array([[np.inf, np.inf], [np.inf, np.inf]]),
'Rs': np.array([0.1]),
'nNsVth': np.array(0.5),
'I': 0.,
'I0': np.array([6.e-7]),
'IL': np.array([7.]),
'V_expected': 0.5*(np.log(7. + 6.e-7) - np.log(6.e-7))*np.ones((2, 2))
},
{ # Can handle ideal series and shunt, Rsh=inf and Rs=0 give
# V = nNsVth*(np.log(IL - I + I0) - np.log(I0))
'Rsh': np.inf,
'Rs': 0.,
'nNsVth': 0.5,
'I': np.array([7., 7./2., 0.]),
'I0': 6.e-7,
'IL': 7.,
'V_expected': np.array([0., 0.5*(np.log(7. - 7./2. + 6.e-7) -
np.log(6.e-7)), 0.5*(np.log(7. + 6.e-7) -
np.log(6.e-7))])
},
{ # Can handle only ideal series resistance, no closed form solution
'Rsh': 20.,
'Rs': 0.,
'nNsVth': 0.5,
'I': 3.,
'I0': 6.e-7,
'IL': 7.,
'V_expected': 7.804987519345062
},
{ # Can handle all python scalar inputs with big LambertW arg
'Rsh': 500.,
'Rs': 10.,
'nNsVth': 4.06,
'I': 0.,
'I0': 6.e-10,
'IL': 1.2,
'V_expected': 86.320000493521079
},
{ # Can handle all python scalar inputs with bigger LambertW arg
# 1000 W/m^2 on a Canadian Solar 220M with 20 C ambient temp
# github issue 225 (this appears to be from PR 226 not issue 225)
'Rsh': 190.,
'Rs': 1.065,
'nNsVth': 2.89,
'I': 0.,
'I0': 7.05196029e-08,
'IL': 10.491262,
'V_expected': 54.303958833791455
},
{ # Can handle all python scalar inputs with bigger LambertW arg
# 1000 W/m^2 on a Canadian Solar 220M with 20 C ambient temp
# github issue 225
'Rsh': 381.68,
'Rs': 1.065,
'nNsVth': 2.681527737715915,
'I': 0.,
'I0': 1.8739027472625636e-09,
'IL': 5.1366949999999996,
'V_expected': 58.19323124611128
},
{ # Verify mixed solution type indexing logic
'Rsh': np.array([np.inf, 190., 381.68]),
'Rs': 1.065,
'nNsVth': np.array([2.89, 2.89, 2.681527737715915]),
'I': 0.,
'I0': np.array([7.05196029e-08, 7.05196029e-08, 1.8739027472625636e-09]),
'IL': np.array([10.491262, 10.491262, 5.1366949999999996]),
'V_expected': np.array([2.89*np.log1p(10.491262/7.05196029e-08),
54.303958833791455, 58.19323124611128])
}])
def fixture_v_from_i(request):
return request.param
@pytest.mark.parametrize(
'method, atol', [('lambertw', 1e-11), ('brentq', 1e-11), ('newton', 1e-8)]
)
def test_v_from_i(fixture_v_from_i, method, atol):
# Solution set loaded from fixture
Rsh = fixture_v_from_i['Rsh']
Rs = fixture_v_from_i['Rs']
nNsVth = fixture_v_from_i['nNsVth']
I = fixture_v_from_i['I']
I0 = fixture_v_from_i['I0']
IL = fixture_v_from_i['IL']
V_expected = fixture_v_from_i['V_expected']
V = pvsystem.v_from_i(Rsh, Rs, nNsVth, I, I0, IL, method=method)
assert(isinstance(V, type(V_expected)))
if isinstance(V, type(np.ndarray)):
assert(isinstance(V.dtype, type(V_expected.dtype)))
assert(V.shape == V_expected.shape)
assert_allclose(V, V_expected, atol=atol)
def test_i_from_v_from_i(fixture_v_from_i):
# Solution set loaded from fixture
Rsh = fixture_v_from_i['Rsh']
Rs = fixture_v_from_i['Rs']
nNsVth = fixture_v_from_i['nNsVth']
I = fixture_v_from_i['I']
I0 = fixture_v_from_i['I0']
IL = fixture_v_from_i['IL']
V = fixture_v_from_i['V_expected']
# Convergence criteria
atol = 1.e-11
I_expected = pvsystem.i_from_v(Rsh, Rs, nNsVth, V, I0, IL,
method='lambertw')
assert_allclose(I, I_expected, atol=atol)
I = pvsystem.i_from_v(Rsh, Rs, nNsVth, V, I0, IL)
assert(isinstance(I, type(I_expected)))
if isinstance(I, type(np.ndarray)):
assert(isinstance(I.dtype, type(I_expected.dtype)))
assert(I.shape == I_expected.shape)
assert_allclose(I, I_expected, atol=atol)
@pytest.fixture(params=[
{ # Can handle all python scalar inputs
'Rsh': 20.,
'Rs': 0.1,
'nNsVth': 0.5,
'V': 7.5049875193450521,
'I0': 6.e-7,
'IL': 7.,
'I_expected': 3.
},
{ # Can handle all rank-0 array inputs
'Rsh': np.array(20.),
'Rs': np.array(0.1),
'nNsVth': np.array(0.5),
'V': np.array(7.5049875193450521),
'I0': np.array(6.e-7),
'IL': np.array(7.),
'I_expected': np.array(3.)
},
{ # Can handle all rank-1 singleton array inputs
'Rsh': np.array([20.]),
'Rs': np.array([0.1]),
'nNsVth': np.array([0.5]),
'V': np.array([7.5049875193450521]),
'I0': np.array([6.e-7]),
'IL': np.array([7.]),
'I_expected': np.array([3.])
},
{ # Can handle all rank-1 non-singleton array inputs with a zero
# series resistance, Rs=0 gives I=IL=Isc at V=0
'Rsh': np.array([20., 20.]),
'Rs': np.array([0., 0.1]),
'nNsVth': np.array([0.5, 0.5]),
'V': np.array([0., 7.5049875193450521]),
'I0': np.array([6.e-7, 6.e-7]),
'IL': np.array([7., 7.]),
'I_expected': np.array([7., 3.])
},
{ # Can handle mixed inputs with a rank-2 array with zero series
# resistance, Rs=0 gives I=IL=Isc at V=0
'Rsh': np.array([20.]),
'Rs': np.array([[0., 0.], [0., 0.]]),
'nNsVth': np.array(0.5),
'V': 0.,
'I0': np.array([6.e-7]),
'IL': np.array([7.]),
'I_expected': np.array([[7., 7.], [7., 7.]])
},
{ # Can handle ideal series and shunt, Rsh=inf and Rs=0 give
# V_oc = nNsVth*(np.log(IL + I0) - np.log(I0))
'Rsh': np.inf,
'Rs': 0.,
'nNsVth': 0.5,
'V': np.array([0., 0.5*(np.log(7. + 6.e-7) - np.log(6.e-7))/2.,
0.5*(np.log(7. + 6.e-7) - np.log(6.e-7))]),
'I0': 6.e-7,
'IL': 7.,
'I_expected': np.array([7., 7. - 6.e-7*np.expm1((np.log(7. + 6.e-7) -
np.log(6.e-7))/2.), 0.])
},
{ # Can handle only ideal shunt resistance, no closed form solution
'Rsh': np.inf,
'Rs': 0.1,
'nNsVth': 0.5,
'V': 7.5049875193450521,
'I0': 6.e-7,
'IL': 7.,
'I_expected': 3.2244873645510923
}])
def fixture_i_from_v(request):
return request.param
@pytest.mark.parametrize(
'method, atol', [('lambertw', 1e-11), ('brentq', 1e-11), ('newton', 1e-11)]
)
def test_i_from_v(fixture_i_from_v, method, atol):
# Solution set loaded from fixture
Rsh = fixture_i_from_v['Rsh']
Rs = fixture_i_from_v['Rs']
nNsVth = fixture_i_from_v['nNsVth']
V = fixture_i_from_v['V']
I0 = fixture_i_from_v['I0']
IL = fixture_i_from_v['IL']
I_expected = fixture_i_from_v['I_expected']
I = pvsystem.i_from_v(Rsh, Rs, nNsVth, V, I0, IL, method=method)
assert(isinstance(I, type(I_expected)))
if isinstance(I, type(np.ndarray)):
assert(isinstance(I.dtype, type(I_expected.dtype)))
assert(I.shape == I_expected.shape)
assert_allclose(I, I_expected, atol=atol)
def test_PVSystem_i_from_v(mocker):
system = pvsystem.PVSystem()
m = mocker.patch('pvlib.pvsystem.i_from_v', autospec=True)
args = (20, 0.1, 0.5, 7.5049875193450521, 6e-7, 7)
system.i_from_v(*args)
m.assert_called_once_with(*args)
def test_i_from_v_size():
with pytest.raises(ValueError):
pvsystem.i_from_v(20, [0.1] * 2, 0.5, [7.5] * 3, 6.0e-7, 7.0)
with pytest.raises(ValueError):
pvsystem.i_from_v(20, [0.1] * 2, 0.5, [7.5] * 3, 6.0e-7, 7.0,
method='brentq')
with pytest.raises(ValueError):
pvsystem.i_from_v(20, 0.1, 0.5, [7.5] * 3, 6.0e-7, np.array([7., 7.]),
method='newton')
def test_v_from_i_size():
with pytest.raises(ValueError):
pvsystem.v_from_i(20, [0.1] * 2, 0.5, [3.0] * 3, 6.0e-7, 7.0)
with pytest.raises(ValueError):
pvsystem.v_from_i(20, [0.1] * 2, 0.5, [3.0] * 3, 6.0e-7, 7.0,
method='brentq')
with pytest.raises(ValueError):
pvsystem.v_from_i(20, [0.1], 0.5, [3.0] * 3, 6.0e-7, np.array([7., 7.]),
method='newton')
def test_mpp_floats():
"""test max_power_point"""
IL, I0, Rs, Rsh, nNsVth = (7, 6e-7, .1, 20, .5)
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='brentq')
expected = {'i_mp': 6.1362673597376753, # 6.1390251797935704, lambertw
'v_mp': 6.2243393757884284, # 6.221535886625464, lambertw
'p_mp': 38.194210547580511} # 38.194165464983037} lambertw
assert isinstance(out, dict)
for k, v in out.items():
assert np.isclose(v, expected[k])
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='newton')
for k, v in out.items():
assert np.isclose(v, expected[k])
def test_mpp_array():
"""test max_power_point"""
IL, I0, Rs, Rsh, nNsVth = (np.array([7, 7]), 6e-7, .1, 20, .5)
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='brentq')
expected = {'i_mp': [6.1362673597376753] * 2,
'v_mp': [6.2243393757884284] * 2,
'p_mp': [38.194210547580511] * 2}
assert isinstance(out, dict)
for k, v in out.items():
assert np.allclose(v, expected[k])
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='newton')
for k, v in out.items():
assert np.allclose(v, expected[k])
def test_mpp_series():
"""test max_power_point"""
idx = ['2008-02-17T11:30:00-0800', '2008-02-17T12:30:00-0800']
IL, I0, Rs, Rsh, nNsVth = (np.array([7, 7]), 6e-7, .1, 20, .5)
IL = pd.Series(IL, index=idx)
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='brentq')
expected = pd.DataFrame({'i_mp': [6.1362673597376753] * 2,
'v_mp': [6.2243393757884284] * 2,
'p_mp': [38.194210547580511] * 2},
index=idx)
assert isinstance(out, pd.DataFrame)
for k, v in out.items():
assert np.allclose(v, expected[k])
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='newton')
for k, v in out.items():
assert np.allclose(v, expected[k])
def test_singlediode_series(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=2, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
effective_irradiance,
temp_cell=25,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=1.121,
dEgdT=-0.0002677
)
out = pvsystem.singlediode(IL, I0, Rs, Rsh, nNsVth)
assert isinstance(out, pd.DataFrame)
def test_singlediode_array():
# github issue 221
photocurrent = np.linspace(0, 10, 11)
resistance_shunt = 16
resistance_series = 0.094
nNsVth = 0.473
saturation_current = 1.943e-09
sd = pvsystem.singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
method='lambertw')
expected = np.array([
0. , 0.54538398, 1.43273966, 2.36328163, 3.29255606,
4.23101358, 5.16177031, 6.09368251, 7.02197553, 7.96846051,
8.88220557])
assert_allclose(sd['i_mp'], expected, atol=0.01)
sd = pvsystem.singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth)
expected = pvsystem.i_from_v(resistance_shunt, resistance_series, nNsVth,
sd['v_mp'], saturation_current, photocurrent,
method='lambertw')
assert_allclose(sd['i_mp'], expected, atol=0.01)
def test_singlediode_floats():
out = pvsystem.singlediode(7, 6e-7, .1, 20, .5, method='lambertw')
expected = {'i_xx': 4.2498,
'i_mp': 6.1275,
'v_oc': 8.1063,
'p_mp': 38.1937,
'i_x': 6.7558,
'i_sc': 6.9651,
'v_mp': 6.2331,
'i': None,
'v': None}
assert isinstance(out, dict)
for k, v in out.items():
if k in ['i', 'v']:
assert v is None
else:
assert_allclose(v, expected[k], atol=1e-3)
def test_singlediode_floats_ivcurve():
out = pvsystem.singlediode(7, 6e-7, .1, 20, .5, ivcurve_pnts=3, method='lambertw')
expected = {'i_xx': 4.2498,
'i_mp': 6.1275,
'v_oc': 8.1063,
'p_mp': 38.1937,
'i_x': 6.7558,
'i_sc': 6.9651,
'v_mp': 6.2331,
'i': np.array([6.965172e+00, 6.755882e+00, 2.575717e-14]),
'v': np.array([0., 4.05315, 8.1063])}
assert isinstance(out, dict)
for k, v in out.items():
assert_allclose(v, expected[k], atol=1e-3)
def test_singlediode_series_ivcurve(cec_module_params):
times = pd.date_range(start='2015-06-01', periods=3, freq='6H')
effective_irradiance = pd.Series([0.0, 400.0, 800.0], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
effective_irradiance,
temp_cell=25,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=1.121,
dEgdT=-0.0002677)
out = pvsystem.singlediode(IL, I0, Rs, Rsh, nNsVth, ivcurve_pnts=3,
method='lambertw')
expected = OrderedDict([('i_sc', array([0., 3.01054475, 6.00675648])),
('v_oc', array([0., 9.96886962, 10.29530483])),
('i_mp', array([0., 2.65191983, 5.28594672])),
('v_mp', array([0., 8.33392491, 8.4159707])),
('p_mp', array([0., 22.10090078, 44.48637274])),
('i_x', array([0., 2.88414114, 5.74622046])),
('i_xx', array([0., 2.04340914, 3.90007956])),
('v', array([[0., 0., 0.],
[0., 4.98443481, 9.96886962],
[0., 5.14765242, 10.29530483]])),
('i', array([[0., 0., 0.],
[3.01079860e+00, 2.88414114e+00,
3.10862447e-14],
[6.00726296e+00, 5.74622046e+00,
0.00000000e+00]]))])
for k, v in out.items():
assert_allclose(v, expected[k], atol=1e-2)
out = pvsystem.singlediode(IL, I0, Rs, Rsh, nNsVth, ivcurve_pnts=3)
expected['i_mp'] = pvsystem.i_from_v(Rsh, Rs, nNsVth, out['v_mp'], I0, IL,
method='lambertw')
expected['v_mp'] = pvsystem.v_from_i(Rsh, Rs, nNsVth, out['i_mp'], I0, IL,
method='lambertw')
expected['i'] = pvsystem.i_from_v(Rsh, Rs, nNsVth, out['v'].T, I0, IL,
method='lambertw').T
expected['v'] = pvsystem.v_from_i(Rsh, Rs, nNsVth, out['i'].T, I0, IL,
method='lambertw').T
for k, v in out.items():
assert_allclose(v, expected[k], atol=1e-2)
def test_scale_voltage_current_power():
data = pd.DataFrame(
np.array([[2, 1.5, 10, 8, 12, 0.5, 1.5]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=[0])
expected = pd.DataFrame(
np.array([[6, 4.5, 20, 16, 72, 1.5, 4.5]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=[0])
out = pvsystem.scale_voltage_current_power(data, voltage=2, current=3)
assert_frame_equal(out, expected, check_less_precise=5)
def test_PVSystem_scale_voltage_current_power(mocker):
data = None
system = pvsystem.PVSystem(modules_per_string=2, strings_per_inverter=3)
m = mocker.patch(
'pvlib.pvsystem.scale_voltage_current_power', autospec=True)
system.scale_voltage_current_power(data)
m.assert_called_once_with(data, voltage=2, current=3)
def test_PVSystem_multi_scale_voltage_current_power(mocker):
data = (1, 2)
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(modules_per_string=2, strings=3),
pvsystem.Array(modules_per_string=3, strings=5)]
)
m = mocker.patch(
'pvlib.pvsystem.scale_voltage_current_power', autospec=True
)
system.scale_voltage_current_power(data)
m.assert_has_calls(
[mock.call(1, voltage=2, current=3),
mock.call(2, voltage=3, current=5)],
any_order=True
)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.scale_voltage_current_power(None)
def test_PVSystem_get_ac_sandia(cec_inverter_parameters, mocker):
inv_fun = mocker.spy(inverter, 'sandia')
system = pvsystem.PVSystem(
inverter=cec_inverter_parameters['Name'],
inverter_parameters=cec_inverter_parameters,
)
vdcs = pd.Series(np.linspace(0, 50, 3))
idcs = pd.Series(np.linspace(0, 11, 3))
pdcs = idcs * vdcs
pacs = system.get_ac('sandia', pdcs, v_dc=vdcs)
inv_fun.assert_called_once()
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
@fail_on_pvlib_version('0.10')
def test_PVSystem_snlinverter(cec_inverter_parameters):
system = pvsystem.PVSystem(
inverter=cec_inverter_parameters['Name'],
inverter_parameters=cec_inverter_parameters,
)
vdcs = pd.Series(np.linspace(0,50,3))
idcs = pd.Series(np.linspace(0,11,3))
pdcs = idcs * vdcs
with pytest.warns(pvlibDeprecationWarning):
pacs = system.snlinverter(vdcs, pdcs)
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
def test_PVSystem_get_ac_sandia_multi(cec_inverter_parameters, mocker):
inv_fun = mocker.spy(inverter, 'sandia_multi')
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(), pvsystem.Array()],
inverter=cec_inverter_parameters['Name'],
inverter_parameters=cec_inverter_parameters,
)
vdcs = pd.Series(np.linspace(0, 50, 3))
idcs = pd.Series(np.linspace(0, 11, 3)) / 2
pdcs = idcs * vdcs
pacs = system.get_ac('sandia', (pdcs, pdcs), v_dc=(vdcs, vdcs))
inv_fun.assert_called_once()
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('sandia', vdcs, (pdcs, pdcs))
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('sandia', vdcs, (pdcs,))
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('sandia', (vdcs, vdcs), (pdcs, pdcs, pdcs))
def test_PVSystem_get_ac_pvwatts(pvwatts_system_defaults, mocker):
mocker.spy(inverter, 'pvwatts')
pdc = 50
out = pvwatts_system_defaults.get_ac('pvwatts', pdc)
inverter.pvwatts.assert_called_once_with(
pdc, **pvwatts_system_defaults.inverter_parameters)
assert out < pdc
def test_PVSystem_get_ac_pvwatts_kwargs(pvwatts_system_kwargs, mocker):
mocker.spy(inverter, 'pvwatts')
pdc = 50
out = pvwatts_system_kwargs.get_ac('pvwatts', pdc)
inverter.pvwatts.assert_called_once_with(
pdc, **pvwatts_system_kwargs.inverter_parameters)
assert out < pdc
def test_PVSystem_get_ac_pvwatts_multi(
pvwatts_system_defaults, pvwatts_system_kwargs, mocker):
mocker.spy(inverter, 'pvwatts_multi')
expected = [pd.Series([0.0, 48.123524, 86.400000]),
pd.Series([0.0, 45.893550, 85.500000])]
systems = [pvwatts_system_defaults, pvwatts_system_kwargs]
for base_sys, exp in zip(systems, expected):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(), pvsystem.Array()],
inverter_parameters=base_sys.inverter_parameters,
)
pdcs = pd.Series([0., 25., 50.])
pacs = system.get_ac('pvwatts', (pdcs, pdcs))
assert_series_equal(pacs, exp)
assert inverter.pvwatts_multi.call_count == 2
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('pvwatts', (pdcs,))
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('pvwatts', pdcs)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('pvwatts', (pdcs, pdcs, pdcs))
@pytest.mark.parametrize('model', ['sandia', 'adr', 'pvwatts'])
def test_PVSystem_get_ac_single_array_tuple_input(
model,
pvwatts_system_defaults,
cec_inverter_parameters,
adr_inverter_parameters):
vdcs = {
'sandia': pd.Series(np.linspace(0, 50, 3)),
'pvwatts': None,
'adr': pd.Series([135, 154, 390, 420, 551])
}
pdcs = {'adr': pd.Series([135, 1232, 1170, 420, 551]),
'sandia': pd.Series(np.linspace(0, 11, 3)) * vdcs['sandia'],
'pvwatts': 50}
inverter_parameters = {
'sandia': cec_inverter_parameters,
'adr': adr_inverter_parameters,
'pvwatts': pvwatts_system_defaults.inverter_parameters
}
expected = {
'adr': pd.Series([np.nan, 1161.5745, 1116.4459, 382.6679, np.nan]),
'sandia': pd.Series([-0.020000, 132.004308, 250.000000])
}
system = pvsystem.PVSystem(
arrays=[pvsystem.Array()],
inverter_parameters=inverter_parameters[model]
)
ac = system.get_ac(p_dc=(pdcs[model],), v_dc=(vdcs[model],), model=model)
if model == 'pvwatts':
assert ac < pdcs['pvwatts']
else:
assert_series_equal(ac, expected[model])
def test_PVSystem_get_ac_adr(adr_inverter_parameters, mocker):
mocker.spy(inverter, 'adr')
system = pvsystem.PVSystem(
inverter_parameters=adr_inverter_parameters,
)
vdcs = pd.Series([135, 154, 390, 420, 551])
pdcs = pd.Series([135, 1232, 1170, 420, 551])
pacs = system.get_ac('adr', pdcs, vdcs)
assert_series_equal(pacs, pd.Series([np.nan, 1161.5745, 1116.4459,
382.6679, np.nan]))
inverter.adr.assert_called_once_with(vdcs, pdcs,
system.inverter_parameters)
def test_PVSystem_get_ac_adr_multi(adr_inverter_parameters):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(), pvsystem.Array()],
inverter_parameters=adr_inverter_parameters,
)
pdcs = | pd.Series([135, 1232, 1170, 420, 551]) | pandas.Series |
import numpy as np
import pandas as pd
from tensorflow.keras import Input
from keras.layers.core import Dropout, Dense
from keras.layers import LSTM, Bidirectional, Concatenate
from keras.layers.embeddings import Embedding
from keras.models import Model
from tensorflow.keras.preprocessing.text import Tokenizer
from src.utils import *
from model import (do_padding,get_extra,preprocess_text,convert_cities,convert_countries)
train = pd.read_csv("data/train.csv")
test = | pd.read_csv("data/test.csv") | pandas.read_csv |
# License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from gators.feature_generation.elementary_arithmethics import ElementaryArithmetics
@pytest.fixture
def data_add():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"))
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A__-__B", "A__-__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), coef=-2.0, operator="+"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_float32_add():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"))
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A__-__B", "A__-__C"],
).astype(np.float32)
obj = ElementaryArithmetics(
columns_a=list("AA"),
columns_b=list("BC"),
coef=-2.0,
operator="+",
dtype=np.float32,
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_name_add():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A+B", "A+C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"),
columns_b=list("BC"),
coef=-2.0,
operator="+",
column_names=["A+B", "A+C"],
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_mult():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, 0.0, 0.0],
[3.0, 4.0, 5.0, 12.0, 15.0],
[6.0, 7.0, 8.0, 42.0, 48.0],
]
),
columns=["A", "B", "C", "A__*__B", "A__*__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), operator="*"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_div():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, 0.0, 0],
[3.0, 4.0, 5.0, 0.75, 0.59999988],
[6.0, 7.0, 8.0, 0.85714286, 0.7499999],
]
),
columns=["A", "B", "C", "A__/__B", "A__/__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), operator="/"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_add_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"))
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A__-__B", "A__-__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), coef=-2.0, operator="+"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_float32_add_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"))
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A__-__B", "A__-__C"],
).astype(np.float32)
obj = ElementaryArithmetics(
columns_a=list("AA"),
columns_b=list("BC"),
coef=-2.0,
operator="+",
dtype=np.float32,
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_name_add_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A+B", "A+C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"),
columns_b=list("BC"),
coef=-2.0,
operator="+",
column_names=["A+B", "A+C"],
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_mult_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, 0.0, 0.0],
[3.0, 4.0, 5.0, 12.0, 15.0],
[6.0, 7.0, 8.0, 42.0, 48.0],
]
),
columns=["A", "B", "C", "A__*__B", "A__*__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), operator="*"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_div_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, 0.0, 0],
[3.0, 4.0, 5.0, 0.75, 0.59999988],
[6.0, 7.0, 8.0, 0.85714286, 0.7499999],
]
),
columns=["A", "B", "C", "A__/__B", "A__/__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), operator="/"
).fit(X)
return obj, X, X_expected
def test_add_pd(data_add):
obj, X, X_expected = data_add
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_add_ks(data_add_ks):
obj, X, X_expected = data_add_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_add_pd_np(data_add):
obj, X, X_expected = data_add
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_add_ks_np(data_add_ks):
obj, X, X_expected = data_add_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_float32_add_pd(data_float32_add):
obj, X, X_expected = data_float32_add
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_float32_add_ks_ks(data_float32_add_ks):
obj, X, X_expected = data_float32_add_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_float32_add_pd_np(data_float32_add):
obj, X, X_expected = data_float32_add
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
| assert_frame_equal(X_new, X_expected) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 31 18:35:41 2021
@author: piyab
"""
import os
#os.chdir("D:/Saarland/NN TI/NNTI_WS2021_Project")
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import numpy as np
import pandas as pd
import TASK_1.py
from Task1_word_Embeddings.ipynb import *
SEED = 1234
#torch.manual_seed(SEED)
#torch.backends.cudnn.deterministic = True
#TEXT = data.Field(tokenize = 'spacy', tokenizer_language = 'en_core_web_sm')
#LABEL = data.LabelField(dtype = torch.float)
df = | pd.DataFrame.from_csv("hindi_hatespeech.tsv", sep="\t") | pandas.DataFrame.from_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 17:57:02 2020
@author: <NAME>
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn import neighbors
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
import pickle
weather_df=pd.read_csv("C:\\Users\\EE1303227\\Desktop\\flask app\\pavagada_nasa_dataset.csv")
weather_df.info()
weather_desc=pd.DataFrame(weather_df.describe())
weather_df['GENERATED_ENERGY'] = weather_df.apply(lambda row: row.ALLSKY_SFC_LW_DWN*1.6*15.6*0.75 , axis = 1)
weather_df.columns
df=weather_df[['PRECTOT', 'QV2M', 'RH2M', 'PS', 'TS','T2MDEW', 'T2MWET', 'T2M_MAX',
'T2M_MIN', 'T2M', 'WS10M', 'WS50M','WS10M_MAX', 'WS50M_MAX', 'WS50M_MIN',
'WS10M_MIN', 'GENERATED_ENERGY']]
df_corr=pd.DataFrame(df.corr())
X=df[['PRECTOT', 'QV2M', 'PS', 'T2M_MIN', 'T2M','WS10M_MAX']]
y=df['GENERATED_ENERGY']
X_corr=pd.DataFrame(X.corr())
Xtrain,Xtest,ytrain,ytest=train_test_split(X, y, test_size=0.3, random_state=100)
# LINEAR REGRESSION
lm=LinearRegression()
lm.fit(Xtrain,ytrain)
print(lm.intercept_)
print(lm.coef_)
X.columns
cdf=pd.DataFrame(lm.coef_,Xtrain.columns,columns=['coeff'])
predictions = lm.predict(Xtest)
plt.scatter(ytest,predictions)
sns.distplot((ytest-predictions)) # if normally distributed then the model is correct choice
metrics.mean_absolute_error(ytest,predictions)
metrics.mean_squared_error(ytest,predictions)
np.sqrt(metrics.mean_squared_error(ytest,predictions))
# KNN
scaler=StandardScaler()
scaler.fit(X)
scaled_features=scaler.transform(X)
X_feat=pd.DataFrame(scaled_features,columns=X.columns)
Xtrain,Xtest,ytrain,ytest=train_test_split(X_feat, y, test_size=0.3, random_state=0)
rmse_val = [] #to store rmse values for different k
for K in range(40):
K = K+1
model = neighbors.KNeighborsRegressor(n_neighbors = K)
model.fit(Xtrain, ytrain) #fit the model
pred=model.predict(Xtest) #make prediction on test set
error = np.sqrt(metrics.mean_squared_error(ytest,pred)) #calculate rmse
rmse_val.append(error) #store rmse values
print('RMSE value for k= ' , K , 'is:', error)
#plotting the rmse values against k values
curve = | pd.DataFrame(rmse_val) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import pydot
from sklearn import preprocessing, model_selection
from sklearn.tree import export_graphviz
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error, mean_absolute_error
from treeinterpreter import treeinterpreter as ti
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# Preparation of initial dataset
df_name1 = 'DatasetRF'
path = 'your path to dataset'
df_name2 = '.csv'
df_name3 = path + df_name1 + df_name2
datas = pd.read_csv(df_name3)
datas['date'] = pd.to_datetime(datas['time'], unit='s')
datas.drop(datas.tail(1).index,inplace=True)
datas['date'] = pd.to_datetime(datas['time'], unit='s')
list2 = datas.drop('time', axis=1)
date = list2['date']
list2.index = pd.MultiIndex.from_product([date])
list3 = list2.drop('date', axis=1)
forecast_col = 'Price_BTC'
price2 = list3
price2['label'] = list3[forecast_col]
df = price2
df.fillna(-99999, inplace=True)
# Technical indicators calculation on the dataset
close = df['Price_BTC']
open1 = df['Open_BTC']
low = df['Low_BTC']
high = df['High_BTC']
volume = df['Volume_BTC']
n = 7
n_slow = 14
mfm = ((close - low) - (high - close) / (high - low))
mfv = mfm * volume
adl = mfm.cumsum(axis=0)
df['ADL'] = adl
df['MA'] = pd.Series(close.rolling(n, min_periods=n).mean(), name='MA_' + str(12))
df['EMA'] = pd.Series(close.ewm(span=n, min_periods=n).mean(), name='EMA_' + str(n))
df['Average'] = (high+low)/2
df['Momentum'] = pd.Series(close.diff(n), name='Momentum_' + str(n))
M = close.diff(n - 1)
N = close.shift(n - 1)
df['ROC'] = pd.Series(M / N, name='ROC_' + str(n))
df['MSD'] = pd.Series(close.rolling(n, min_periods=n).std())
b1 = 4 * df['MSD'] / df['MA']
B1 = | pd.Series(b1, name='Bollinger') | pandas.Series |
import pandas as pd
import re
import Methods as m
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from spellchecker import SpellChecker
from nltk.tokenize.treebank import TreebankWordDetokenizer
from sklearn.feature_extraction.text import CountVectorizer
spell = SpellChecker(distance = 1)
text_set = []
corpus = []# Final corpus
#----collect dataSet----
print("reading dataset 1")
dataSet1 = pd.read_csv('venv/Data/newUpdate.csv', names=['id', 'text'], header=1)
for text in dataSet1["text"]:
text_set.append(text)
print("size of data" , len(text_set))
print("reading dataset 2")
dataSet2 = pd.read_csv('venv/Data/protest.csv', names=['id', 'text'], header=1)
for text in dataSet2["text"]:
text_set.append(text)
print("size of data" , len(text_set))
print("reading dataset 3")
dataSet3 = | pd.read_csv('venv/Data/corona.csv', names=['id', 'text'], header=1) | pandas.read_csv |
import pytest
import numpy as np
import pandas as pd
import geopandas
from shapely import geometry
from disarm_gears.frames import PointPattern
# Inputs
b_points_1 = np.random.uniform(0, 1, 10)
b_points_2 = np.random.uniform(0, 1, 30).reshape(10, 3)
g_points = np.random.uniform(0, 1, 20).reshape(10, -1)
b_attrib = np.random.random(25)
g_attrib_1 = np.random.random(10)
g_attrib_2 = np.random.random(40).reshape(10, -1)
g_attrib_3 = pd.DataFrame({li: ci for li,ci in zip(['a', 'b', 'c', 'd'], g_attrib_2.T)})
n_points = g_points.shape[0]
X = np.vstack([g_points.copy()[5:], np.array([10, 10])])
B = geopandas.GeoDataFrame({'id': [0], 'geometry': [geometry.Polygon(((0.2, 0.3), (0.2, 0.8),
(0.7, 0.8), (0.2, 0.3)))]})
B2 = geopandas.GeoDataFrame({'id': [0, 1], 'geometry': [geometry.Polygon(((0.2, 0.3), (0.2, 0.8),
(0.7, 0.8), (0.2, 0.3))),
geometry.Polygon(((0.2, 0.3), (0.7, 0.3),
(0.7, 0.8), (0.2, 0.3)))]})
def test_inputs():
# Check bad inputs
with pytest.raises(AssertionError):
PointPattern(points=0)
with pytest.raises(AssertionError):
PointPattern(points=b_points_1)
with pytest.raises(AssertionError):
PointPattern(points=b_points_2)
with pytest.raises(AssertionError):
PointPattern(points=g_points, attributes=b_attrib)
with pytest.raises(NotImplementedError):
PointPattern(points=g_points, attributes=None, crs=0)
with pytest.raises(ValueError):
PointPattern(points=g_points, attributes=list())
with pytest.raises(ValueError):
PointPattern(points=g_points, attributes=b_attrib.reshape(1, 1, -1))
def test_outputs():
# Check output types
sf_0 = PointPattern(points=pd.DataFrame(g_points), attributes=None, crs=None)
sf_1 = PointPattern(points=g_points, attributes=None, crs=None)
sf_2 = PointPattern(points=g_points, attributes=g_attrib_1, crs=None)
sf_3 = PointPattern(points=g_points, attributes=g_attrib_2, crs=None)
sf_4 = PointPattern(points=g_points, attributes=g_attrib_3, crs=None)
# Check sf.region is geopandas.GeoDataFrame
isinstance(sf_2.centroids, np.ndarray)
isinstance(sf_2.centroids, np.ndarray)
sf_4.centroids.shape[0] == n_points
isinstance(sf_1.region, geopandas.GeoDataFrame)
isinstance(sf_0.region, geopandas.GeoDataFrame)
isinstance(sf_3.region, geopandas.GeoDataFrame)
# Check sf.region shape
sf_1.region.ndim == 2
sf_1.region.shape[0] == n_points
sf_3.region.shape[0] == n_points
sf_1.region.shape[1] == 1
sf_2.region.shape[1] == 2
sf_4.region.shape[1] == 5
# Check sf.region.columns
'geometry' in sf_3.region.columns
'geometry' in sf_4.region.columns
# Check attribute names
np.array('var_%s' %i in sf_3.region.columns for i in range(4)).all()
np.array(v in sf_3.region.columns for v in ['a', 'b', 'c', 'd']).all()
# Check box type
isinstance(sf_2.box, pd.DataFrame)
sf_3.box.ndim == 2
sf_1.box.shape[0] == 2
sf_4.box.shape[1] == 2
def test_attributes_array():
sf_1 = PointPattern(points=g_points, attributes=None, crs=None)
sf_1.attributes_array() is None
_attr = np.random.uniform(0, 100, g_points.size).reshape(-1, 2)
sf_1 = PointPattern(points=g_points, attributes=_attr, crs=None)
sf_attr = sf_1.attributes_array()
isinstance(sf_attr, np.ndarray)
sf_attr.shape[0] == _attr.shape[0]
sf_attr.shape[1] == _attr.shape[1]
def test_set_boundary():
new_points = np.array([[.22, .68, .68, .22], [.32, .32, .78, .78]]).T
sf_1 = PointPattern(points=new_points, attributes=None, crs=None)
with pytest.raises(AssertionError):
sf_1.set_boundary(B=B.geometry[0])
sf_1.set_boundary(B=B)
assert isinstance(sf_1.boundary, geopandas.GeoDataFrame)
assert sf_1.box.loc[0, 'x'] == 0.2
assert sf_1.box.loc[1, 'x'] == 0.7
assert sf_1.box.loc[0, 'y'] == 0.3
assert sf_1.box.loc[1, 'y'] == 0.8
sf_2 = PointPattern(points=new_points, attributes=None, crs=None)
sf_2.set_boundary(B2)
assert isinstance(sf_2.boundary, geopandas.GeoDataFrame)
assert sf_1.region.shape[0] < sf_2.region.shape[0]
def test_make_grid():
sf_0 = PointPattern(points= | pd.DataFrame(g_points) | pandas.DataFrame |
from unittest import TestCase
import pandas as pd
from moonstone.normalization.counts.random_selection import (
RandomSelection, TaxonomyRandomSelection
)
class TestRandomSelection(TestCase):
def setUp(self):
self.raw_data = [
[199, 1, 48, 75],
[0, 24, 1, 0],
[1, 25, 1, 25],
]
self.column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
self.index = ['Gen_1', 'Gen_2', 'Gen_3']
self.raw_df = pd.DataFrame(self.raw_data, columns=self.column_names, index=self.index)
def test_normalize_default_threshold(self):
expected_data = [
[50, 1, 48, 40],
[0, 24, 1, 0],
[0, 25, 1, 10],
]
expected_df = pd.DataFrame(expected_data, columns=self.column_names, index=self.index).astype(float)
tested_normalization = RandomSelection(self.raw_df, random_seed=2935)
pd.testing.assert_frame_equal(tested_normalization.normalized_df, expected_df)
def test_normalize_threshold_20(self):
expected_data = [
[20, 0, 20, 16],
[0, 9, 0, 0],
[0, 11, 0, 4],
]
expected_df = pd.DataFrame(expected_data, columns=self.column_names, index=self.index).astype(float)
tested_normalization = RandomSelection(self.raw_df, threshold=20, random_seed=2935)
| pd.testing.assert_frame_equal(tested_normalization.normalized_df, expected_df) | pandas.testing.assert_frame_equal |
import os
import sys
import time
import datetime
import argparse
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
##custom modules required
sys.path.append('../pytorch_model_helpers')
import pytorch_utils
import pytorch_helpers
from pytorch_utils import initialize_weights,SmoothBCEwLogits,TrainDataset,TestDataset
from pytorch_utils import train_fn,valid_fn,inference_fn,SimpleNN_Model,seed_everything
from pytorch_helpers import drug_stratification,normalize,umap_factor_features,model_eval_results
from pytorch_helpers import preprocess,split_data,check_if_shuffle_data,save_to_csv
class cp_L1000_simplenn_moa_train_prediction:
"""
This function performs Simple NN model training on the combined Cell painting & L1000 level-4 profiles
and also performs prediction on the hold-out test set. The model training includes running 5-Kfold cross
validation on the train data for the purpose of tuning the hyperparameters, and making prediction on the
entire test dataset for every fold and then averaging out the predictions to get the final test predictions.
For more info:https://github.com/guitarmind/kaggle_moa_winner_hungry_for_gold/blob/main/final\
/Best%20LB/Training/3-stagenn-train.ipynb
Args:
data_dir: directory that contains train, test and moa target labels
(with their corresponding compounds) csv files.
model_pred_dir: directory where model predictions for train & test data will be stored
shuffle: True or False argument, to check if the train data is shuffled i.e. given to the wrong
target labels OR NOT
Epochs: A number that defines the number of times the Model will be trained on the entire training
dataset.
Batch_size: A number that defines number of samples to work through before updating the
internal model parameters. The number of training examples in one forward & backward pass.
learning_rate: A number that controls how much we are adjusting the weights of our Simple-NN network
with respect the loss gradient after every pass/iteration.
Output:
dataframes: train and hold-out test predictions are read in as csv files to the model_pred_dir
saved simple nn model: the Simple-NN model for every train fold and random seed is saved in a model directory
in the data_dir
"""
def __init__(self, data_dir=None, model_pred_dir=None, shuffle=None, Epochs=None, Batch_size=None, learning_rate=None):
self.data_dir = data_dir
self.model_pred_dir = model_pred_dir
self.shuffle = shuffle
self.EPOCHS = Epochs
self.BATCH_SIZE = Batch_size
self.LEARNING_RATE = learning_rate
def cp_L1000_nn_moa_train_prediction(self):
print("Is GPU Available?")
if torch.cuda.is_available():
print("Yes, GPU is Available!!")
else:
print("No, GPU is NOT Available!!", "\n")
DEVICE = ('cuda' if torch.cuda.is_available() else 'cpu')
no_of_compts = 25
no_of_dims = 25
IS_TRAIN = True
NSEEDS = 5
SEED = range(NSEEDS)
NFOLDS = 5
WEIGHT_DECAY = 1e-5
EARLY_STOPPING_STEPS = 10
EARLY_STOP = False
hidden_size=2048
##dir names
model_file_name = "cp_L1000_simplenn"
model_dir_name = "cp_L1000_simplenn"
trn_pred_name = 'cp_L1000_train_preds_simplenn'
tst_pred_name = 'cp_L1000_test_preds_simplenn'
model_file_name,model_dir_name,trn_pred_name,tst_pred_name = \
check_if_shuffle_data(self.shuffle, model_file_name, model_dir_name, trn_pred_name, tst_pred_name)
model_dir = os.path.join(self.data_dir, model_dir_name)
os.makedirs(model_dir, exist_ok=True)
if self.shuffle:
df_train = pd.read_csv(os.path.join(self.data_dir, 'train_shuffle_lvl4_data.csv.gz'),
compression='gzip',low_memory = False)
else:
df_train = pd.read_csv(os.path.join(self.data_dir, 'train_lvl4_data.csv.gz'),
compression='gzip',low_memory = False)
df_test = pd.read_csv(os.path.join(self.data_dir, 'test_lvl4_data.csv.gz'),
compression='gzip',low_memory = False)
df_targets = pd.read_csv(os.path.join(self.data_dir, 'target_labels.csv'))
metadata_cols = ['replicate_name', 'replicate_id', 'Metadata_broad_sample', 'Metadata_pert_id',
'Metadata_Plate', 'Metadata_Well', 'Metadata_broad_id', 'Metadata_moa', 'sig_id',
'pert_id', 'pert_idose', 'det_plate', 'det_well', 'pert_iname','moa', 'dose']
target_cols = df_targets.columns[1:]
df_train_x, df_train_y, df_test_x, df_test_y = split_data(df_train, df_test, metadata_cols, target_cols)
df_train_x, df_test_x = umap_factor_features(df_train_x, df_test_x, no_of_compts, no_of_dims)
features = df_train_x.columns.tolist()
num_features=len(features)
num_targets=len(target_cols)
df_train = drug_stratification(df_train,NFOLDS,target_cols,col_name='replicate_id',cpd_freq_num=24)
pos_weight = initialize_weights(df_train, target_cols, DEVICE)
def model_train_pred(fold, seed):
seed_everything(seed)
model_path = os.path.join(model_dir, model_file_name + f"_SEED{seed}_FOLD{fold}.pth")
trn_idx = df_train[df_train['fold'] != fold].index
val_idx = df_train[df_train['fold'] == fold].index
x_fold_train = df_train_x.loc[trn_idx].reset_index(drop=True).copy()
y_fold_train = df_train_y.loc[trn_idx].reset_index(drop=True).copy()
x_fold_val = df_train_x.loc[val_idx].reset_index(drop=True).copy()
y_fold_val = df_train_y.loc[val_idx].reset_index(drop=True).copy()
df_test_x_copy = df_test_x.copy()
x_fold_train, x_fold_val, df_test_x_copy = normalize(x_fold_train, x_fold_val, df_test_x_copy)
train_dataset = TrainDataset(x_fold_train.values, y_fold_train.values)
valid_dataset = TrainDataset(x_fold_val.values, y_fold_val.values)
trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=self.BATCH_SIZE, shuffle=True)
validloader = torch.utils.data.DataLoader(valid_dataset, batch_size=self.BATCH_SIZE, shuffle=False)
model = SimpleNN_Model(num_features=num_features, num_targets=num_targets, hidden_size=hidden_size)
model.to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), weight_decay=WEIGHT_DECAY, lr=self.LEARNING_RATE, eps=1e-9)
scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.2, div_factor=1e3,
max_lr=1e-2, epochs=self.EPOCHS, steps_per_epoch=len(trainloader))
loss_train = SmoothBCEwLogits(smoothing = 0.001, pos_weight=pos_weight)
loss_val = nn.BCEWithLogitsLoss()
early_stopping_steps = EARLY_STOPPING_STEPS
early_step = 0
oof = np.zeros(df_train_y.shape)
best_loss = np.inf
best_loss_epoch = -1
if IS_TRAIN:
for epoch in range(self.EPOCHS):
train_loss = train_fn(model, optimizer, scheduler, loss_train, trainloader, DEVICE)
valid_loss, valid_preds = valid_fn(model, loss_val, validloader, DEVICE)
if valid_loss < best_loss:
best_loss = valid_loss
best_loss_epoch = epoch
oof[val_idx] = valid_preds
torch.save(model.state_dict(), model_path)
elif (EARLY_STOP == True):
early_step += 1
if (early_step >= early_stopping_steps):
break
if epoch % 10 == 0 or epoch == self.EPOCHS-1:
print(f"seed: {seed}, FOLD: {fold}, EPOCH: {epoch},\
train_loss: {train_loss:.6f}, valid_loss: {valid_loss:.6f}, best_loss: {best_loss:.6f},\
best_loss_epoch: {best_loss_epoch}")
#--------------------- PREDICTION---------------------
testdataset = TestDataset(df_test_x_copy.values)
testloader = torch.utils.data.DataLoader(testdataset, batch_size=self.BATCH_SIZE, shuffle=False)
model = SimpleNN_Model(num_features=num_features, num_targets=num_targets, hidden_size=hidden_size)
model.load_state_dict(torch.load(model_path))
model.to(DEVICE)
if not IS_TRAIN:
valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, DEVICE)
oof[val_idx] = valid_preds
predictions = np.zeros(df_test_y.shape)
predictions = inference_fn(model, testloader, DEVICE)
return oof, predictions
def run_k_fold(folds, seed):
oof = np.zeros(df_train_y.shape)
predictions = np.zeros(df_test_y.shape)
for fold in range(folds):
oof_, pred_ = model_train_pred(fold, seed)
predictions += pred_ / folds
oof += oof_
return oof, predictions
oofs = np.zeros(df_train_y.shape)
predictions = np.zeros(df_test_y.shape)
time_start = time.time()
for seed in SEED:
oofs_, predictions_ = run_k_fold(NFOLDS, seed)
oofs += oofs_ / len(SEED)
predictions += predictions_ / len(SEED)
print(f"elapsed time: {time.time() - time_start}")
df_oofs = pd.DataFrame(oofs, columns=df_train_y.columns)
df_preds = | pd.DataFrame(predictions, columns=df_test_y.columns) | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
| tm.assert_equal(result, expected) | pandas._testing.assert_equal |
# pylint: disable=E1101
from datetime import datetime
import os
import warnings
import nose
import numpy as np
from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import read_stata, StataReader
import pandas.util.testing as tm
from pandas.util.misc import is_little_endian
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
# Unit test datasets for dta7 - dta9 (old stata formats 104, 105 and 107) can be downloaded from:
# http://stata-press.com/data/glmext.html
self.dirpath = tm.get_data_path()
self.dta1 = os.path.join(self.dirpath, 'stata1.dta')
self.dta2 = os.path.join(self.dirpath, 'stata2.dta')
self.dta3 = os.path.join(self.dirpath, 'stata3.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4 = os.path.join(self.dirpath, 'stata4.dta')
self.dta7 = os.path.join(self.dirpath, 'cancer.dta')
self.csv7 = os.path.join(self.dirpath, 'cancer.csv')
self.dta8 = os.path.join(self.dirpath, 'tbl19-3.dta')
self.csv8 = os.path.join(self.dirpath, 'tbl19-3.csv')
self.dta9 = os.path.join(self.dirpath, 'lbw.dta')
self.csv9 = os.path.join(self.dirpath, 'lbw.csv')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.dta1_13 = os.path.join(self.dirpath, 'stata1_v13.dta')
self.dta2_13 = os.path.join(self.dirpath, 'stata2_v13.dta')
self.dta3_13 = os.path.join(self.dirpath, 'stata3_v13.dta')
self.dta4_13 = os.path.join(self.dirpath, 'stata4_v13.dta')
def read_dta(self, file):
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_dta1(self):
reader = StataReader(self.dta1)
parsed = reader.data()
reader_13 = StataReader(self.dta1_13)
parsed_13 = reader_13.data()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed, expected)
tm.assert_frame_equal(parsed_13, expected)
def test_read_dta2(self):
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT')
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
with warnings.catch_warnings(record=True) as w:
parsed = self.read_dta(self.dta2)
parsed_13 = self.read_dta(self.dta2_13)
np.testing.assert_equal(
len(w), 1) # should get a warning for that format.
tm.assert_frame_equal(parsed, expected)
tm.assert_frame_equal(parsed_13, expected)
def test_read_dta3(self):
parsed = self.read_dta(self.dta3)
parsed_13 = self.read_dta(self.dta3_13)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int32)
expected['quarter'] = expected['quarter'].astype(np.int16)
tm.assert_frame_equal(parsed, expected)
tm.assert_frame_equal(parsed_13, expected)
def test_read_dta4(self):
parsed = self.read_dta(self.dta4)
parsed_13 = self.read_dta(self.dta4_13)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
tm.assert_frame_equal(parsed, expected)
tm.assert_frame_equal(parsed_13, expected)
def test_read_write_dta5(self):
if not is_little_endian():
raise nose.SkipTest("known failure of test_write_dta5 on "
"non-little endian")
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None, False)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
if not is_little_endian():
raise nose.SkipTest("known failure of test_write_dta6 on "
"non-little endian")
original = self.read_csv(self.csv3)
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None, False)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
@nose.tools.nottest
def test_read_dta7(self):
expected = read_csv(self.csv7, parse_dates=True, sep='\t')
parsed = self.read_dta(self.dta7)
tm.assert_frame_equal(parsed, expected)
@nose.tools.nottest
def test_read_dta8(self):
expected = | read_csv(self.csv8, parse_dates=True, sep='\t') | pandas.io.parsers.read_csv |
# This script performs the statistical analysis for the pollution growth paper
# Importing required modules
import pandas as pd
import numpy as np
import statsmodels.api as stats
from ToTeX import restab
# Reading in the data
data = pd.read_csv('C:/Users/User/Documents/Data/Pollution/pollution_data_kp.csv')
# Prepping data for pollution regression
# Data sets for ndividual pollutants
co2_data = data[['ln_co2', 'ln_co2_lag', 'ln_sk', 'ln_n5', 'ln_co2_intensity_rate', 'Country', 'Year']].dropna()#, 'ln_co2_intensity_lag']].dropna()
ch4_data = data[['ln_ch4', 'ln_ch4_lag3', 'ln_sk', 'ln_n5', 'ln_ch4_intensity_rate', 'Country', 'Year']].dropna()#, 'ln_ch4_intensity_lag3']].dropna()
nox_data = data[['ln_nox', 'ln_nox_lag', 'ln_sk', 'ln_n5', 'ln_nox_intensity_rate', 'Country', 'Year']].dropna()#, 'ln_nox_intensity_lag']].dropna()
ghg_data = data[['ln_ghg', 'ln_ghg_lag', 'ln_sk', 'ln_n5', 'ln_ghg_intensity_rate', 'Country', 'Year']].dropna()#, 'ln_ghg_intensity_lag']].dropna()
# Creating dummy variables for each pollutant
co2_national_dummies = pd.get_dummies(co2_data['Country'])
co2_year_dummies = pd.get_dummies(co2_data['Year'])
ch4_national_dummies = pd.get_dummies(ch4_data['Country'])
ch4_year_dummies = pd.get_dummies(ch4_data['Year'])
nox_national_dummies = pd.get_dummies(nox_data['Country'])
nox_year_dummies = | pd.get_dummies(nox_data['Year']) | pandas.get_dummies |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta, date, time
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
from pandas import Index
from pandas.compat import long, u, PY2
class TestInference(tm.TestCase):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
self.assertEqual(pd.lib.infer_dtype(arr), compare)
# object array of bytes
arr = arr.astype(object)
self.assertEqual(pd.lib.infer_dtype(arr), compare)
def test_isinf_scalar(self):
# GH 11352
self.assertTrue(lib.isposinf_scalar(float('inf')))
self.assertTrue(lib.isposinf_scalar(np.inf))
self.assertFalse(lib.isposinf_scalar(-np.inf))
self.assertFalse(lib.isposinf_scalar(1))
self.assertFalse(lib.isposinf_scalar('a'))
self.assertTrue(lib.isneginf_scalar(float('-inf')))
self.assertTrue(lib.isneginf_scalar(-np.inf))
self.assertFalse(lib.isneginf_scalar(np.inf))
self.assertFalse(lib.isneginf_scalar(1))
self.assertFalse(lib.isneginf_scalar('a'))
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = set(['', 'NULL', 'nan'])
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with tm.assertRaisesRegexp(ValueError, msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = set([-999, -999.0])
for coerce_type in (True, False):
out = lib.maybe_convert_numeric(data, nan_values, coerce_type)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
self.assertTrue(np.all(np.isnan(result)))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
class TestTypeInference(tm.TestCase):
_multiprocess_can_split_ = True
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
self.assertEqual(result, 'integer')
result = lib.infer_dtype([])
self.assertEqual(result, 'empty')
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
def test_string(self):
pass
def test_unicode(self):
pass
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'datetime64')
def test_date(self):
dates = [date(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'date')
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
class TestConvert(tm.TestCase):
def test_convert_objects(self):
arr = np.array(['a', 'b', np.nan, np.nan, 'd', 'e', 'f'], dtype='O')
result = lib.maybe_convert_objects(arr)
self.assertTrue(result.dtype == np.object_)
def test_convert_objects_ints(self):
# test that we can detect many kinds of integers
dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
for dtype_str in dtypes:
arr = np.array(list(np.arange(20, dtype=dtype_str)), dtype='O')
self.assertTrue(arr[0].dtype == np.dtype(dtype_str))
result = lib.maybe_convert_objects(arr)
self.assertTrue(issubclass(result.dtype.type, np.integer))
def test_convert_objects_complex_number(self):
for dtype in np.sctypes['complex']:
arr = np.array(list(1j * np.arange(20, dtype=dtype)), dtype='O')
self.assertTrue(arr[0].dtype == np.dtype(dtype))
result = lib.maybe_convert_objects(arr)
self.assertTrue(issubclass(result.dtype.type, np.complexfloating))
class Testisscalar(tm.TestCase):
def test_isscalar_builtin_scalars(self):
self.assertTrue(lib.isscalar(None))
self.assertTrue(lib.isscalar(True))
self.assertTrue(lib.isscalar(False))
self.assertTrue(lib.isscalar(0.))
self.assertTrue(lib.isscalar(np.nan))
self.assertTrue(lib.isscalar('foobar'))
self.assertTrue(lib.isscalar(b'foobar'))
self.assertTrue(lib.isscalar(u('efoobar')))
self.assertTrue(lib.isscalar(datetime(2014, 1, 1)))
self.assertTrue(lib.isscalar(date(2014, 1, 1)))
self.assertTrue(lib.isscalar(time(12, 0)))
self.assertTrue(lib.isscalar(timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.NaT))
def test_isscalar_builtin_nonscalars(self):
self.assertFalse(lib.isscalar({}))
self.assertFalse(lib.isscalar([]))
self.assertFalse(lib.isscalar([1]))
self.assertFalse(lib.isscalar(()))
self.assertFalse(lib.isscalar((1, )))
self.assertFalse(lib.isscalar(slice(None)))
self.assertFalse(lib.isscalar(Ellipsis))
def test_isscalar_numpy_array_scalars(self):
self.assertTrue(lib.isscalar(np.int64(1)))
self.assertTrue(lib.isscalar(np.float64(1.)))
self.assertTrue(lib.isscalar(np.int32(1)))
self.assertTrue(lib.isscalar(np.object_('foobar')))
self.assertTrue(lib.isscalar(np.str_('foobar')))
self.assertTrue(lib.isscalar(np.unicode_(u('foobar'))))
self.assertTrue(lib.isscalar(np.bytes_(b'foobar')))
self.assertTrue(lib.isscalar(np.datetime64('2014-01-01')))
self.assertTrue(lib.isscalar(np.timedelta64(1, 'h')))
def test_isscalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
self.assertFalse(lib.isscalar(zerodim))
self.assertTrue(lib.isscalar(lib.item_from_zerodim(zerodim)))
def test_isscalar_numpy_arrays(self):
self.assertFalse(lib.isscalar(np.array([])))
self.assertFalse(lib.isscalar(np.array([[]])))
self.assertFalse(lib.isscalar(np.matrix('1; 2')))
def test_isscalar_pandas_scalars(self):
self.assertTrue(lib.isscalar(pd.Timestamp('2014-01-01')))
self.assertTrue(lib.isscalar(pd.Timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.Period('2014-01-01')))
def test_lisscalar_pandas_containers(self):
self.assertFalse(lib.isscalar(pd.Series()))
self.assertFalse(lib.isscalar(pd.Series([1])))
self.assertFalse(lib.isscalar(pd.DataFrame()))
self.assertFalse(lib.isscalar(pd.DataFrame([[1]])))
self.assertFalse(lib.isscalar(pd.Panel()))
self.assertFalse(lib.isscalar(pd.Panel([[[1]]])))
self.assertFalse(lib.isscalar(pd.Index([])))
self.assertFalse(lib.isscalar(pd.Index([1])))
class TestParseSQL(tm.TestCase):
def test_convert_sql_column_floats(self):
arr = np.array([1.5, None, 3, 4.2], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_strings(self):
arr = np.array(['1.5', None, '3', '4.2'], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_unicode(self):
arr = np.array([u('1.5'), None, u('3'), u('4.2')],
dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')],
dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_ints(self):
arr = np.array([1, 2, 3, 4], dtype='O')
arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O')
result = lib.convert_sql_column(arr)
result2 = lib.convert_sql_column(arr2)
expected = np.array([1, 2, 3, 4], dtype='i8')
self.assert_numpy_array_equal(result, expected)
self.assert_numpy_array_equal(result2, expected)
arr = np.array([1, 2, 3, None, 4], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_longs(self):
arr = np.array([long(1), long(2), | long(3) | pandas.compat.long |
# coding: utf-8
# # VISIONS'18: Tucker trawl 2018-07-21
# Cruise number: RR1812 (R/V <NAME>)
#
# This notebook shows an estimation of where the time-depth trajectory of the Tucker trawl tow on 2018-07-21 was with respect to the animals in the water column (observed through ADCP).
# ## Loading ADCP raw beam data
# First let's load in some libraries we will need to read and plot the data.
# In[1]:
import os, re, glob
import numpy as np
import matplotlib.pyplot as plt
import datetime
import arlpy # ARL underwater acoustics toolbox
from mpl_toolkits.axes_grid1 import make_axes_locatable
# sys.path.append('/Users/wujung/adcpcode/programs')
from pycurrents.adcp.rdiraw import Multiread
import adcp_func
# Find out what are the available ADCP raw files.
# In[2]:
# Set up paths and params
pname_150 = '/Volumes/current_cruise/adcp/RR1812/raw/os150/'
fname_150 = glob.glob(pname_150+'rr2018_202*.raw')
fname_150.sort() # sort filename
fname_150
# It's a bit of a guess work to figure out which files contain the section during the net tow.
#
# We know the last number string in the filename are the number of seconds since 00:00 of the day. The net tow was in water around 03:26 UTC time = 12360 secs. This means files `rr2018_202_07200.raw` and `rr2018_202_14400.raw` should cover the section of the net tow.
#
# Let's give it a try!
# In[3]:
m_150,data_150,param_150 = adcp_func.load_raw_files([pname_150+'rr2018_202_07200.raw',pname_150+'rr2018_202_14400.raw'])
# Next we grab the time stamp from the ADCP raw data stream.
# In[216]:
# set up x-axis (time stamp) for ADCP data
ping_jump_150 = int(np.floor(data_150.dday.shape[0]/8))
ping_num_150 = np.arange(0,data_150.amp1.shape[0],ping_jump_150)
time_str_150 = [str('%02d'%data_150.rVL['Hour'][x])+':'+str('%02d'%data_150.rVL['Minute'][x]) for x in ping_num_150]
# Let's plot and check if the data make sense.
# In[217]:
val_mtx = data_150.amp1-param_150['absorption']-2*param_150['spreading_loss']
actual_depth_bin = np.round(param_150['range'],2)
fig = plt.figure(figsize=(15,4))
ax = fig.add_subplot(1,1,1)
im = ax.imshow(val_mtx.T,aspect='auto',interpolation='none', extent=[0,val_mtx.shape[0],actual_depth_bin[-1],actual_depth_bin[0]], vmin=160, vmax=260)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="1%", pad=0.05)
cbar = plt.colorbar(im,cax=cax)
cbar.ax.tick_params(labelsize=12)
ax.set_xticks(ping_num_150)
ax.set_xticklabels(time_str_150,fontsize=12)
ax.set_xlabel('UTC Time (hr:min)',fontsize=14)
ax.set_yticklabels(np.arange(0,400,50),fontsize=12)
ax.set_ylabel('Depth (m)',fontsize=14)
ax.set_ylim([350,0])
ax.set_title('ADCP 150 kHz "echogram"',fontsize=14)
plt.show()
# We can see a strong diel vertical migration (DVM) signal starting around 04:00 UTC time, which is about 19:00 local time, so the ADCP echogram makes sense. The Tucker trawl was in water during 03:26-04:13 UTC time, right around when the DVM happened.
# ## Loading net time-depth trajectory
# Let's now try putting the net tow time-depth trajectory onto the echogram to see which were the layers we actually sampled.
# In[218]:
import pandas as pd
from pytz import common_timezones
# In[219]:
csv_pname = '/Volumes/Transcend/Dropbox/Z_wjlee/20180719_ooi_cruise/net_tow/'
csv_fname = '20180721_EAO600m_tow.csv'
# In[220]:
net = pd.read_csv(csv_pname+csv_fname, names=['Index','Device_ID','File_ID', 'year','month','day','hour','minute','second', 'Offset','Pressure','Temperature'])
# In[221]:
net['second'] = net['Offset']
# ## Plotting net time-depth trajectory on ADCP echogram
# Now we mess around with the timestamps from the ADCP and the time-depth sensor on the net. The goal is to plot the time-depth trajectory directly on the ADCP echogram.
# First we create a `datetime` string for the time-depth sensor on the net.
# In[222]:
net_timestamp = pd.to_datetime(net.loc[:, 'year':'second'])
net_timestamp = net_timestamp.dt.tz_localize('US/Pacific').dt.tz_convert('UTC') # convert from Pacific to UTC
# In[223]:
net_depth = pd.Series((net['Pressure']-1013.25)*0.010197442889221,name='depth')
# In[224]:
net = pd.Series(net_depth.values,index=net_timestamp.values)
# And then we create a `datetime` string for the ADCP data.
# In[225]:
adcp_timestack = np.vstack((data_150.rVL['Year']+2000,data_150.rVL['Month'],data_150.rVL['Day'], data_150.rVL['Hour'],data_150.rVL['Minute'],data_150.rVL['Second'])).T
# In[226]:
adcp_timestamp = pd.to_datetime(pd.DataFrame(adcp_timestack,columns=['year','month','day','hour','minute','second']))
adcp_timestamp = adcp_timestamp.dt.tz_localize('UTC')
# Now we want to interpolate the net time-depth trajectory onto the same time indices as the ADCP data.
# In[227]:
x = pd.concat([net, pd.Series(index=adcp_timestamp)])
net_depth_on_adcp_timestamp = x.groupby(x.index).first().sort_index().interpolate(method='nearest')[adcp_timestamp]
# And then we are ready to plot them together!
# In[228]:
val_mtx = data_150.amp1-param_150['absorption']-2*param_150['spreading_loss']
actual_depth_bin = np.round(param_150['range'],2)
val_mtx.shape
# In[233]:
# Plotting
# ADCP echogram
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(1,1,1)
im = ax.imshow(val_mtx.T,aspect='auto',interpolation='none', extent=[500,5000,actual_depth_bin[-1],actual_depth_bin[0]], vmin=160, vmax=260)
# divider = make_axes_locatable(ax)
# cax = divider.append_axes("right", size="1%", pad=0.05)
# cbar = plt.colorbar(im,cax=cax)
# cbar.ax.tick_params(labelsize=14)
ax.set_xticks(ping_num_150)
ax.set_xticklabels(time_str_150,fontsize=16)
ax.set_xlabel('UTC Time (hr:min)',fontsize=18)
ax.set_yticklabels(np.arange(0,400,50),fontsize=16)
ax.set_ylabel('Depth (m)',fontsize=18)
ax.set_ylim([350,0])
ax.set_title('ADCP 150 kHz "echogram"',fontsize=18)
# Net tow trajectory
ax.plot(net_depth_on_adcp_timestamp.values,color='w',linewidth=3)
# Annotation
ax.text(x=2700,y=220,s='Net trajectory',color='w',fontsize=22)
# ax.annotate('Net trajectory', xy=(3000, 200), xytext=(3000, 230),
# arrowprops=dict(facecolor='w', edgecolor='w', shrink=0.05))
plt.savefig('/Volumes/Transcend/Dropbox/Z_wjlee/20180719_ooi_cruise/net_tow/2018-07-21-adcp-tow.png',dpi=150)
plt.show()
# ## Messing around with seaborn but it didn't quite work...
# In[26]:
adcp_depth = pd.Series(actual_depth_bin,name='depth')
# In[27]:
# Convert ADCP echogram to DataFrame
adcp_echogram = pd.DataFrame(val_mtx)
# In[28]:
adcp_echogram.shape
# In[29]:
adcp_echogram.columns = adcp_depth
adcp_echogram.index = adcp_timestamp.dt.strftime('%H:%M')
# In[30]:
adcp_echogram
# In[31]:
idx_jump = int(np.floor(adcp_echogram.shape[0]/8))
idx_cnt = np.arange(0,adcp_echogram.shape[0],idx_jump)
adcp_echogram.shape
# In[32]:
import seaborn as sns
sns.set()
# In[ ]:
fig = plt.figure(figsize=(16,4))
ax = fig.add_subplot(1,1,1)
g = sns.heatmap(adcp_echogram.T,ax=ax,cmap='viridis',vmax=260,vmin=160,xticklabels=1000,yticklabels=10)
g.set_xlabel('UTC Time (hr:min)',fontsize=16,fontweight='bold')
g.set_ylabel('Depth (m)',fontsize=16,fontweight='bold')
sns.set_style("ticks")
g.tick_params(labelsize=14)
sns.lineplot(data=net_td)
# net_td.plot(ax=ax)
# ax.plot(net_td.index,net_td.depth,color='w',linewidth=10)
# sns.lineplot(data=net_td,color='w')
# sns.lineplot(data=net_td,color='w',alpha=0.7)
plt.show()
# In[41]:
fig = plt.figure(figsize=(16,4))
ax = fig.add_subplot(1,1,1)
ax.plot(net_timestamp,net_td.depth,color='g')
# In[44]:
sns.tsplot(data=net_td.depth,time=net_td.index)
# In[45]:
net_td.plot()
# In[139]:
type(net_depth_on_adcp_timestamp)
# In[26]:
| pd.concat([data, ts]) | pandas.concat |
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
import datetime
import logging
import os
import shutil
from datetime import timedelta
from functools import partial
from pathlib import Path
from unittest.mock import patch
from unittest.mock import PropertyMock
import faker
import pandas as pd
from django.test import override_settings
from api.models import Provider
from api.utils import DateHelper
from masu.config import Config
from masu.processor.aws.aws_report_parquet_processor import AWSReportParquetProcessor
from masu.processor.azure.azure_report_parquet_processor import AzureReportParquetProcessor
from masu.processor.gcp.gcp_report_parquet_processor import GCPReportParquetProcessor
from masu.processor.ocp.ocp_report_parquet_processor import OCPReportParquetProcessor
from masu.processor.parquet.parquet_report_processor import CSV_EXT
from masu.processor.parquet.parquet_report_processor import CSV_GZIP_EXT
from masu.processor.parquet.parquet_report_processor import ParquetReportProcessor
from masu.processor.parquet.parquet_report_processor import ParquetReportProcessorError
from masu.processor.report_parquet_processor_base import ReportParquetProcessorBase
from masu.test import MasuTestCase
from masu.util.aws.common import aws_generate_daily_data
from masu.util.aws.common import aws_post_processor
from masu.util.azure.common import azure_generate_daily_data
from masu.util.azure.common import azure_post_processor
from masu.util.gcp.common import gcp_post_processor
from masu.util.ocp.common import ocp_generate_daily_data
from reporting.provider.aws.models import AWSEnabledTagKeys
from reporting.provider.azure.models import AzureEnabledTagKeys
from reporting.provider.gcp.models import GCPEnabledTagKeys
from reporting.provider.ocp.models import OCPEnabledTagKeys
class TestParquetReportProcessor(MasuTestCase):
"""Test cases for Parquet Report Processor."""
@classmethod
def setUpClass(cls):
"""Set up the class."""
super().setUpClass()
cls.fake = faker.Faker()
cls.fake_uuid = "d4703b6e-cd1f-4253-bfd4-32bdeaf24f97"
cls.today = DateHelper().today
cls.yesterday = cls.today - timedelta(days=1)
def setUp(self):
"""Set up shared test variables."""
super().setUp()
self.test_assembly_id = "882083b7-ea62-4aab-aa6a-f0d08d65ee2b"
self.test_etag = "fake_etag"
self.request_id = 1
self.account_id = self.schema[4:]
self.manifest_id = 1
self.report_name = "koku-1.csv.gz"
self.report_path = f"/my/{self.test_assembly_id}/{self.report_name}"
self.report_processor = ParquetReportProcessor(
schema_name=self.schema,
report_path=self.report_path,
provider_uuid=self.aws_provider_uuid,
provider_type=Provider.PROVIDER_AWS_LOCAL,
manifest_id=self.manifest_id,
context={"request_id": self.request_id, "start_date": DateHelper().today, "create_table": True},
)
def test_resolve_enabled_tag_keys_model(self):
"""
Test that the expected enabled tag keys model is resolved from each provider type.
"""
test_matrix = (
(Provider.PROVIDER_AWS, AWSEnabledTagKeys),
(Provider.PROVIDER_AWS_LOCAL, AWSEnabledTagKeys),
(Provider.PROVIDER_AZURE, AzureEnabledTagKeys),
(Provider.PROVIDER_AZURE_LOCAL, AzureEnabledTagKeys),
(Provider.PROVIDER_GCP, GCPEnabledTagKeys),
(Provider.PROVIDER_GCP_LOCAL, GCPEnabledTagKeys),
(Provider.PROVIDER_OCP, OCPEnabledTagKeys),
(Provider.PROVIDER_IBM, None),
(Provider.PROVIDER_IBM_LOCAL, None),
)
for provider_type, expected_tag_keys_model in test_matrix:
prp = ParquetReportProcessor(
schema_name="self.schema",
report_path="self.report_path",
provider_uuid="self.aws_provider_uuid",
provider_type=provider_type,
manifest_id="self.manifest_id",
context={},
)
self.assertEqual(prp.enabled_tags_model, expected_tag_keys_model)
def test_request_id(self):
"""Test that the request_id property is handled."""
self.assertIsNotNone(self.report_processor.request_id)
# Test with missing context
with self.assertRaises(ParquetReportProcessorError):
report_processor = ParquetReportProcessor(
schema_name=self.schema,
report_path=self.report_path,
provider_uuid=self.aws_provider_uuid,
provider_type=Provider.PROVIDER_AWS_LOCAL,
manifest_id=self.manifest_id,
context={},
)
report_processor.request_id
def test_start_date(self):
"""Test that the start_date property is handled."""
self.assertIsInstance(self.report_processor.start_date, datetime.date)
report_processor = ParquetReportProcessor(
schema_name=self.schema,
report_path=self.report_path,
provider_uuid=self.aws_provider_uuid,
provider_type=Provider.PROVIDER_AWS_LOCAL,
manifest_id=self.manifest_id,
context={"start_date": "2021-04-22"},
)
self.assertIsInstance(report_processor.start_date, datetime.date)
report_processor = ParquetReportProcessor(
schema_name=self.schema,
report_path=self.report_path,
provider_uuid=self.aws_provider_uuid,
provider_type=Provider.PROVIDER_AWS_LOCAL,
manifest_id=self.manifest_id,
context={"start_date": datetime.datetime.utcnow()},
)
self.assertIsInstance(report_processor.start_date, datetime.date)
with self.assertRaises(ParquetReportProcessorError):
report_processor = ParquetReportProcessor(
schema_name=self.schema,
report_path=self.report_path,
provider_uuid=self.aws_provider_uuid,
provider_type=Provider.PROVIDER_AWS_LOCAL,
manifest_id=self.manifest_id,
context={},
)
report_processor.start_date
def test_file_extension(self):
"""Test that the file_extension property is handled."""
self.assertEqual(self.report_processor.file_extension, CSV_GZIP_EXT)
report_processor = ParquetReportProcessor(
schema_name=self.schema,
report_path="file.csv",
provider_uuid=self.aws_provider_uuid,
provider_type=Provider.PROVIDER_AWS_LOCAL,
manifest_id=self.manifest_id,
context={"request_id": self.request_id, "start_date": DateHelper().today, "create_table": True},
)
self.assertEqual(report_processor.file_extension, CSV_EXT)
with self.assertRaises(ParquetReportProcessorError):
report_processor = ParquetReportProcessor(
schema_name=self.schema,
report_path="file.xlsx",
provider_uuid=self.aws_provider_uuid,
provider_type=Provider.PROVIDER_AWS_LOCAL,
manifest_id=self.manifest_id,
context={"request_id": self.request_id, "start_date": DateHelper().today, "create_table": True},
)
report_processor.file_extension
def test_post_processor(self):
"""Test that the post_processor property is handled."""
test_matrix = [
{
"provider_uuid": str(self.aws_provider_uuid),
"provider_type": Provider.PROVIDER_AWS,
"expected": aws_post_processor,
},
{
"provider_uuid": str(self.azure_provider_uuid),
"provider_type": Provider.PROVIDER_AZURE,
"expected": azure_post_processor,
},
{
"provider_uuid": str(self.gcp_provider_uuid),
"provider_type": Provider.PROVIDER_GCP,
"expected": gcp_post_processor,
},
]
for test in test_matrix:
report_processor = ParquetReportProcessor(
schema_name=self.schema,
report_path=self.report_path,
provider_uuid=test.get("provider_uuid"),
provider_type=test.get("provider_type"),
manifest_id=self.manifest_id,
context={"request_id": self.request_id, "start_date": DateHelper().today, "create_table": True},
)
self.assertEqual(report_processor.post_processor, test.get("expected"))
@patch("masu.processor.parquet.parquet_report_processor.os.path.exists")
@patch("masu.processor.parquet.parquet_report_processor.os.remove")
def test_convert_to_parquet(self, mock_remove, mock_exists):
"""Test the convert_to_parquet task."""
logging.disable(logging.NOTSET)
expected = "Skipping convert_to_parquet. Parquet processing is disabled."
with self.assertLogs("masu.processor.parquet.parquet_report_processor", level="INFO") as logger:
self.report_processor.convert_to_parquet()
self.assertIn(expected, " ".join(logger.output))
with patch.object(ParquetReportProcessor, "csv_path_s3", new_callable=PropertyMock) as mock_csv_path:
mock_csv_path.return_value = None
file_name, data_frame = self.report_processor.convert_to_parquet()
self.assertEqual(file_name, "")
self.assertTrue(data_frame.empty)
with patch("masu.processor.parquet.parquet_report_processor.enable_trino_processing", return_value=True):
with patch("masu.processor.parquet.parquet_report_processor.get_path_prefix"):
with patch(
"masu.processor.parquet.parquet_report_processor.remove_files_not_in_set_from_s3_bucket"
) as mock_remove:
with patch(
"masu.processor.parquet.parquet_report_processor.ParquetReportProcessor."
"convert_csv_to_parquet"
) as mock_convert:
with patch(
"masu.processor.parquet.parquet_report_processor."
"ReportManifestDBAccessor.get_s3_parquet_cleared",
return_value=False,
) as mock_get_cleared:
with patch(
"masu.processor.parquet.parquet_report_processor."
"ReportManifestDBAccessor.mark_s3_parquet_cleared"
) as mock_mark_cleared:
with patch.object(ParquetReportProcessor, "create_daily_parquet"):
mock_convert.return_value = "", pd.DataFrame(), True
self.report_processor.convert_to_parquet()
mock_get_cleared.assert_called()
mock_remove.assert_called()
mock_mark_cleared.assert_called()
expected = "Failed to convert the following files to parquet"
with patch("masu.processor.parquet.parquet_report_processor.enable_trino_processing", return_value=True):
with patch("masu.processor.parquet.parquet_report_processor.get_path_prefix"):
with patch(
"masu.processor.parquet.parquet_report_processor.ParquetReportProcessor.convert_csv_to_parquet",
return_value=("", pd.DataFrame(), False),
):
with patch.object(ParquetReportProcessor, "create_daily_parquet"):
with self.assertLogs(
"masu.processor.parquet.parquet_report_processor", level="INFO"
) as logger:
self.report_processor.convert_to_parquet()
self.assertIn(expected, " ".join(logger.output))
with patch("masu.processor.parquet.parquet_report_processor.enable_trino_processing", return_value=True):
with patch("masu.processor.parquet.parquet_report_processor.get_path_prefix"):
with patch(
"masu.processor.parquet.parquet_report_processor.ParquetReportProcessor.convert_csv_to_parquet",
return_value=("", pd.DataFrame(), False),
):
with patch.object(ParquetReportProcessor, "create_daily_parquet"):
self.report_processor.convert_to_parquet()
with patch("masu.processor.parquet.parquet_report_processor.enable_trino_processing", return_value=True):
with patch("masu.processor.parquet.parquet_report_processor.get_path_prefix"):
with patch(
"masu.processor.parquet.parquet_report_processor.ParquetReportProcessor.convert_csv_to_parquet",
return_value=("", | pd.DataFrame() | pandas.DataFrame |
"""
Written by <NAME> for COMP9418 assignment 2.
"""
import copy
import re
import math
from collections import OrderedDict as odict
from itertools import product
from functools import reduce
import numpy as np
import pandas as pd
from graphviz import Digraph, Graph
from tabulate import tabulate
class GraphicalModel:
def __init__(self):
self.net = dict()
self.factors = dict()
self.outcomeSpace = dict()
self.node_value = dict()
####################################
######################################
# Representation
########################################
##########################################
def load(self, FileName):
"""
Load and initiate model from file
input:
FileName: String
"""
self.__init__()
with open(FileName, 'r') as f:
content = f.read()
node_pattern = re.compile(
r'node (.+) \n\{\n states = \( \"(.+)\" \);\n\}')
potential_pattern = re.compile(
r'potential \( (.+) \) \n\{\n data = \((.+)\)[ ]?;\n\}')
nodes_records = node_pattern.findall(content)
data_records = potential_pattern.findall(content)
for record in nodes_records:
outcome = tuple(re.split(r'\" \"', record[1]))
self.insert(record[0], outcome)
for record in data_records:
splits = record[0].split(' | ')
node = splits[0]
parents = []
if len(splits) > 1:
parents = list(reversed(splits[1].split()))
data = [float(i) for i in re.findall(
r'[0-1][.][0-9]+', record[1])]
self.factorize(node, data, parents)
def connect(self, father, child):
"""
Connect Two nodes.
Inputs:
father: String, name of the father node
child: String, name of the child node
"""
if father in self.net and child in self.net and child not in self.net[father]:
self.net[father].append(child)
def disconnect(self, father, child):
"""
Disconnect Two nodes.
Inputs:
father: String, name of the father node
child: String, name of the child node
"""
if father in self.net and child in self.net and child in self.net[father]:
self.net[father].remove(child)
def factorize(self, node, data, parents=[]):
"""
Specify probabilities for a node.
data is a 1-d array or a simple list.
Inputs:
node: Sting, the node you want to specify
data: 1-D array like, the CPT of the node
parents: list of strings, parents of the node
"""
dom = parents + [node]
dom = tuple(dom)
for parent in parents:
self.connect(parent, node)
self.factors[node] = {'dom': dom, 'table': odict()}
outcome_product = product(*[self.outcomeSpace[node] for node in dom])
assert np.prod([len(self.outcomeSpace[node])
for node in dom]) == len(data), 'CPT length illegal'
for i, combination in enumerate(outcome_product):
self.factors[node]['table'][combination] = data[i]
def insert(self, Name, Outcome):
"""
simply insert a node to the graph
Inputs:
Outcome: a 1-D array-like, outcome space of this node
Name: String, the name of the node
"""
if Name not in self.net:
self.net[Name] = []
self.outcomeSpace[Name] = Outcome
else:
print(f'Already have node {Name}')
def remove(self, node):
if node in self.net:
if node in self.factors:
for child in self.net[node]:
if node in self.factors[child]['dom']:
self.sum_out(child, node)
self.factors.pop(node)
self.net.pop(node)
self.outcomeSpace.pop(node)
for other_node in self.net:
if node in self.net[other_node]:
self.net[other_node].remove(node)
def sum_out(self, node, victim):
"""
sum out the victim in the factor of node
Inputs:
node: String, name of the node
victim: String, name of the node to be sum out
"""
assert victim in self.factors[node]['dom'], 'the node to sum out is not one of the parents'
f = self.factors[node]
new_dom = list(f['dom'])
new_dom.remove(victim)
table = list()
for entries in product(*[self.outcomeSpace[node] for node in new_dom]):
s = 0
for val in self.outcomeSpace[victim]:
entriesList = list(entries)
entriesList.insert(f['dom'].index(victim), val)
p = f['table'][tuple(entriesList)]
s = s + p
table.append((entries, s))
self.factors[node] = {'dom': tuple(new_dom), 'table': odict(table)}
def save(self, fileName):
"""
save the graph to a file
Inputs:
fileNamea: String, the path of the file you want to save to.
"""
f = open(fileName, 'w')
f.write('net\n{\n}\n')
# first node domain part
for node, values in self.outcomeSpace.items():
outcome = " ".join(
['"' + value + '"' for value in values])
text = 'node %s \n{\n states = ( %s );\n}\n' % (node, outcome)
f.write(text)
# add data
for node, factor in self.factors.items():
potential = factor['dom'][-1]
data = " ".join([str(_) for _ in factor['table'].values()])
if len(factor['dom']) > 1:
parents = list(factor['dom'][:-1])
parents.reverse()
potential += ' | ' + " ".join(parents)
text = 'potential ( %s ) \n{\n data = ( %s );\n}\n' % (
potential, data)
f.write(text)
f.close()
def printFactor(self, node):
"""
print the factor table of the node
"""
f = self.factors[node]
table = list()
for key, item in f['table'].items():
k = list(key)
k.append(item)
table.append(k)
dom = list(f['dom'])
dom.append('Pr')
print(tabulate(table, headers=dom, tablefmt='orgtbl'))
def showGraph(self):
"""
Visualize the net graph.
"""
dot = Digraph()
dot.attr(overlap="False", splines="True")
for node, children in self.net.items():
dot.node(node)
for child in children:
dot.edge(node, child)
return dot
####################################
######################################
# Pruning and pre-processing techniques for inference
########################################
##########################################
def prune(self, query, **evidences):
"""
Prune the graph based of the query vcariables and evidences
Inputs:
query: list of strings, the query variables
evidences: dictionary, key: node, value: outcome of the node
Outputs:
a new graph
"""
evi_vars = list(evidences.keys())
qe = set(query + evi_vars)
assert all([_ in self.net for _ in qe])
newG = copy.deepcopy(self)
all_deleted = 0
# prune nodes
while not all_deleted:
all_deleted = 1
W = set()
for node, children in newG.net.items():
if node not in qe and not children:
W.add(node)
all_deleted = 0
for leaf in W:
newG.remove(leaf)
# clear the child who have been deleted
for node, children in newG.net.items():
newG.net[node] = [_ for _ in children if _ not in W]
# prune edge
for node, value in evidences.items():
for child in newG.net[node]:
newG.factors[child] = self.update(
newG.factors[child], node, value, newG.outcomeSpace)
newG.net[node] = []
newG.node_value[node] = value
netcopy = copy.deepcopy(newG.net)
reachable_from_q = self.spread(self.make_undirected(netcopy), query)
nodes = list(newG.net.keys())
for node in nodes:
if node not in reachable_from_q:
newG.remove(node)
return newG
@staticmethod
def update(factor, node, value, outcomeSpace):
"""
Specify a value to a node.
Inputs:
factor: the factor of the node
node: the node to update
value: the value that will be assigned to the node
outcomeSpace: Dictionary, the outcome space of all nodes
Return:
a new factor without node
"""
assert node in factor['dom'][:-1], 'such node is not in this CPT'
assert value in outcomeSpace[node], 'no such value for this node'
new_dom = copy.copy(factor['dom'])
factor_outcomeSpace = {node: outcomeSpace[node] for node in new_dom}
factor_outcomeSpace[node] = (value,)
node_index = new_dom.index(node)
new_dom_list = list(new_dom)
new_dom_list.remove(node)
new_dom = tuple(new_dom_list)
new_table = odict()
valid_records = product(*[_ for _ in factor_outcomeSpace.values()])
for record in valid_records:
record_list = list(record)
record_list.pop(node_index)
new_record = tuple(record_list)
new_table[new_record] = factor['table'][record]
return {'dom': new_dom, 'table': new_table}
def spread(self, graph, source):
"""
find all nodes reachable from source
Inputs:
graph: Dictionary, the graph
source: list of strings, the node where we start the spread
Return:
visted: a set of strings, the nodes reachabel from source.
"""
visited = set()
for node in source:
self.spread_help(graph, node, visited)
return visited
def spread_help(self, graph, node, visited):
visited.add(node)
for child in graph[node]:
if child not in visited:
self.spread_help(graph, child, visited)
def make_undirected(self, graph):
"""
Input:
graph: a directed graph
Return:
an undirected (bidirected) graph
"""
undirectG = graph.copy()
GT = self.transposeGraph(graph)
for node in graph:
undirectG[node] += GT[node]
return undirectG
@staticmethod
def transposeGraph(G):
"""
Input:
graph: a directed graph
Return:
a transposed graph
"""
GT = dict((v, []) for v in G)
for v in G:
for w in G[v]:
if w in GT:
GT[w].append(v)
else:
GT[w] = [v]
return GT
def min_degree_order(self):
"""
get the variable elimination from the graph based on min-degree heuristic
Return:
prefix: a list of strings, list of variables in the elimination order
width: the width of the order
"""
prefix = []
moral_graph = self.moralize()
moral_graph.factors = dict()
width = 0
while len(moral_graph.net) > 0:
low = math.inf
min_degree = math.inf
for node, neighbors in moral_graph.net.items():
fill_num = moral_graph.count_fill(node)
degree = len(moral_graph.net[node])
if degree < min_degree:
min_degree_node = node
low = fill_num
min_degree = degree
width = max(width, degree)
elif degree == min_degree:
if fill_num < low:
min_degree_node = node
low = fill_num
width = max(width, degree)
moral_graph.remove(min_degree_node)
prefix.append(min_degree_node)
return prefix, width
def min_fill_order(self):
"""
get the variable elimination from the graph based on min degree heuristic
Return:
prefix: a list of strings, list of variables in the elimination order
width: the width of the order
"""
prefix = []
moral_graph = self.moralize()
moral_graph.factors = dict()
width = 0
while len(moral_graph.net) > 0:
low = math.inf
min_degree = math.inf
for node, neighbors in moral_graph.net.items():
fill_num = moral_graph.count_fill(node)
degree = len(moral_graph.net[node])
if fill_num < low:
min_fill_node = node
low = fill_num
min_degree = degree
width = max(width, degree)
elif fill_num == low:
if degree < min_degree:
min_fill_node = node
min_degree = degree
width = max(width, degree)
moral_graph.remove(min_fill_node)
prefix.append(min_fill_node)
return prefix, width
def count_fill(self, node):
"""
count the fill in edges if eliminate node
Input:
node: string, the name of the node to be eliminate
Return:
int: fill-in edge count
"""
neighbors = self.net[node]
neighbor_num = len(neighbors)
before = 0
for neighbor in neighbors:
for neighbor_s_neighbor in self.net[neighbor]:
if neighbor_s_neighbor in neighbors:
before += 1
before //= 2
after = neighbor_num*(neighbor_num-1)//2
return after - before
def moralize(self):
"""
moralize the graph
return:
a new moral graph
"""
new_graph = copy.deepcopy(self)
graphT = self.transposeGraph(new_graph.net)
new_graph.net = self.make_undirected(new_graph.net)
for parents in graphT.values():
new_graph.connect_all(parents)
return new_graph
def connect_all(self, nodes):
"""
connect every node in nodes to every other node
"""
for father in nodes:
for child in nodes:
if father != child:
self.connect(father, child)
def show_moral_graph(self):
moral_graph = self.moralize()
dot = Graph(strict="True")
for node, children in moral_graph.net.items():
dot.node(node)
for child in children:
dot.edge(node, child)
return dot
####################################
######################################
# Exact inference
########################################
##########################################
def to_jointree(self, order):
"""
self must be a moral graph
Args:
order (list): elimination order
"""
for node, neighbors in self.net.items():
for neighbor in neighbors:
assert node in self.net[neighbor], 'the graph is not moral'
moral_graph = copy.deepcopy(self)
# 1. construct clusters
clusters = []
max_cluster_size = 0
for node in order:
cluster = set([node] + moral_graph.net[node])
moral_graph.connect_all(moral_graph.net[node])
moral_graph.remove(node)
if len(cluster) > max_cluster_size:
max_cluster_size = len(cluster)
clusters.append(cluster)
# 2. maitain RIP
cluster_seq = [tuple(_) for _ in clusters]
n = len(clusters)
for cluster in reversed(clusters):
if len(cluster) < max_cluster_size:
i = cluster_seq.index(tuple(cluster))
for pre in reversed(cluster_seq[:i]):
if cluster.issubset(pre):
cluster_seq.remove(tuple(cluster))
cluster_seq.insert(i, pre)
cluster_seq.remove(pre)
break
# 3. assembly
cluster_net = dict()
cluster_net[cluster_seq[-1]] = []
n = len(cluster_seq)
for i in range(n-2, -1, -1):
cluster_net[cluster_seq[i]] = []
edge = set(cluster_seq[i+1]).union(
*[set(_) for _ in cluster_seq[i+2:]]) & set(cluster_seq[i])
for other in cluster_seq[i+1:]:
if edge.issubset(other):
cluster_net[cluster_seq[i]].append(other)
break
# assign factors to jointree
factors = dict()
for cluster in cluster_seq:
factors[cluster] = self.join(
*[self.factors[node] for node in cluster])
return JoinTree(cluster_net, factors, self.outcomeSpace)
def join(self, *factors):
common_vars = list(reduce(lambda x, y: x | y, [
set(f['dom']) for f in factors]))
table = list()
for entries in product(*[self.outcomeSpace[node] for node in common_vars]):
entryDict = dict(zip(common_vars, entries))
p = 1
for f in factors:
f_entry = tuple(entryDict[var] for var in f['dom'])
pf = f['table'][f_entry]
p *= pf
table.append((entries, p))
return {'dom': tuple(common_vars), 'table': odict(table)}
####################################
######################################
# Approximate inference
########################################
##########################################
def gibbs_sampling(self, sample_num=100, chain_num=2, q_vars='all', **q_evis):
"""
gibbs sampling the graph based on query, sample_num and chain_num specified by the user
Input:
sample_num: # of samples
chain_num: # of chains
q_vars: list of strings, the query variables, defalt is 'all', which means all variables in the graph other than query evidences
q_evis: dictionary, the query evidences
Return:
samples: a list of dictionaries, each one is a sample contains the node and its value in query
"""
if q_vars == 'all':
q_vars = [_ for _ in self.net.keys() if _ not in q_evis]
prunned_graph = self.prune(q_vars, **q_evis)
chains = prunned_graph.burn_in(chain_num, **q_evis)
samples = []
# fisrt sample
sample = dict()
for var in q_vars:
sample[var] = chains[0].node_value[var]
samples.append(sample)
curr = 1
while curr < sample_num:
sample = dict()
for var in q_vars:
chain = chains[np.random.choice(chain_num)]
pre_value = samples[curr - 1][var]
value = chain.sample_once(var)
# A = chain.get_acceptance(var, pre_value, value)
# sample[var] = np.random.choice(
# [value, pre_value], 1, p=[A, 1-A])[0]
sample[var] = value
samples.append(sample)
curr += 1
return samples
def get_acceptance(self, node, pre, curr):
"""
compute the acceptande probability of this sample
Inputs:
node: string, the node waiting to be asigned
pre: string, the previous value assigned to this node
curr: string, the current value waiting to be assigned to this node
Return:
accpt_prob: float, the acceptande probability
"""
dom = self.factors[node]['dom']
parents = dom[: -1]
parents_value = [self.node_value[parent] for parent in parents]
ppre = self.factors[node]['table'][tuple(parents_value + [pre])]
pcurr = self.factors[node]['table'][tuple(parents_value + [curr])]
return min(1, pcurr/ppre)
def burn_in(self, chain_num, window_size=100, **evidences):
"""
generate chains and keep sampling until mixed
Inputs:
chain_num: int, # of chains
window_size: int, # of samples used to test if chains are mixed, defalt is 100
evidences: dictionary, the evidences of the query
Return:
chains: list of GraphicalModel objects, the list of mixed chains
"""
assert chain_num > 1, 'chain num is at least 2'
chains = []
chains_non_evis = []
for seed in range(chain_num):
np.random.seed(seed)
chain = copy.deepcopy(self)
# 1. fix evidence
chain.node_value = evidences.copy()
# 2: Initialize other variables
non_evis = dict()
for node, domain in self.outcomeSpace.items():
if node not in evidences:
value = np.random.choice(domain, 1)[0]
chain.node_value[node] = value
non_evis[node] = [value]
chains.append(chain)
chains_non_evis.append(non_evis)
sample_count = 1
while True:
if sample_count >= window_size:
if self.mixed(chains_non_evis, self.outcomeSpace):
break
# clear the chains_non_evis
chains_non_evis = [{
node: []
for node in chains_non_evis[i].keys()
} for i in range(chain_num)]
sample_count = 0
# 3: Choose a variable ordering O
O = np.random.permutation(list(chains_non_evis[0].keys()))
# 4: Repeat sample non_evis in the order O
for var in O:
for i, chain in enumerate(chains):
value = chain.sample_once(var)
chain.node_value[var] = value
chains_non_evis[i][var].append(value)
sample_count += 1
return chains
def sample_once(self, node):
"""
sample once for a particular node
Input:
node: string, name of the node to sample
Return:
a string, a value from this node's outcomeSpace
"""
dom = self.factors[node]['dom']
parents = dom[: -1]
parents_value = [self.node_value[parent] for parent in parents]
combinations = [tuple(parents_value + [node_value])
for node_value in self.outcomeSpace[node]]
prob_list = np.array([self.factors[node]['table'][combination]
for combination in combinations])
prob_list /= np.sum(prob_list)
return np.random.choice(self.outcomeSpace[node], 1, p=prob_list)[0]
@staticmethod
def convert(list_of_dict, outcomeSpace):
"""
convert the outcome string value from the outcomespace into float value between 0 and 1
Input:
list_of_dic: list of dictionary, each key in the dictionary is a variable and corresponding value is the history of its sample value
outcomeSpace: dictionary, the outcome space of all nodes
Return:
list_of_dic, converted list_of_dict
"""
mapping = dict()
for node, values in outcomeSpace.items():
mapping[node] = dict()
for value in values:
mapping[node][value] = (values.index(value)+1) / len(values)
for i, record in enumerate(list_of_dict):
list_of_dict[i] = {key: [mapping[key][value]
for value in item] for key, item in record.items()}
return list_of_dict
def mixed(self, chain_vars, outcomeSpace):
"""
to judge whether chain_vars are mixed up
Inputs:
chain_vars = [
{a:[...], b:[...] ...},
{a:[...], b:[...] ...}]
the history of samples' value
outcomeSpace: dictionary, the outcome space of all nodes
Return:
bool, whether chain_vars are mixed up
"""
# covert text value into num like value
chain_vars = self.convert(chain_vars, outcomeSpace)
parameters = list(chain_vars[0].keys())
P_hat = []
df_list = [ | pd.DataFrame(var_dic) | pandas.DataFrame |
# coding: utf-8
# In[71]:
import random as random
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from math import pi
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
#데이터 생성 단계입니다. (x*sin(x), x*cos(x)) 형태의 점들을 그러줍니다.
#그 점들에 약간의 랜덤값을 더해주었습니다.
seed=np.add(3,range(20))
rand=[]
for i in range(0,20):
rand.append(random.random())
x_plus=np.add(np.multiply(seed,np.sin(np.divide(seed,10/pi))),0.2*np.multiply(seed,rand))
y_plus=np.add(np.multiply(seed,np.cos(np.divide(seed,10/pi))),0.2*np.multiply(seed,rand))
x_minus=np.add(np.multiply(seed,np.sin(np.divide(seed,-10/pi))),0.2*np.multiply(seed,rand))
y_minus=np.add(np.multiply(seed,-np.cos(np.divide(seed,10/pi))),0.2*np.multiply(seed,rand))
class1= pd.concat([pd.DataFrame(x_plus),pd.DataFrame(y_plus)],axis=1)
class2= pd.concat([pd.DataFrame(x_minus),pd.DataFrame(y_minus)],axis=1)
tutorial_sample=class1.append(class2)
#tutorial_sample로 지정된 변수가 앞으로 학습시킬 데이터 입니다.
#Y가 라벨이며 (1이 파란색, 0이 빨간색) Y1은 1, -1로 변환시킨 라벨 입니다.
Y_sample=np.append(np.array([1]*20),np.array([0]*20))
Y1_sample=np.subtract(np.multiply(2,Y_sample), 1)
W=np.array([0.025]*40)
plt.plot(x_plus,y_plus, 'bo', x_minus,y_minus,'ro')
plt.grid()
plt.show()
# In[72]:
#단순하게 어떤 값 초과이면 1, 이하이면 0을 반환하는 분류기입니다.
#Input으로는 차례대로 분류할 데이터(X), 기준이 되는 변수 (여기서는 0=X, 1=Y), 기준점, 초과/미만일때 1 여부
#이며 Output은 X를 해당 기준으로 분류한 결과입니다.
def classifier (X,variable,thresh=3,plus=1):
classify=[]
if plus==1:
for i in range(0,len(X.index)):
if X.iloc[i,variable]>thresh:
classify.append(1)
else:
classify.append(0)
return classify
else:
for i in range(0,len(X.index)):
if X.iloc[i,variable]<=thresh:
classify.append(1)
else:
classify.append(0)
return classify
print(classifier(tutorial_sample,0,0,1))
# In[73]:
#선택된 변수가 지정된 Weight 하에서 error를 최소화 하는 지점을 찾아주는 함수입니다.
#Input으로는 독립변수,종속변수,Weight,i번째 독립변수들의 i값을 사용하며
#Output은 기준점 초과를 1이라고 판단했을 때 최적의 기준점, 그 때의 에러, 기준점 이하를 1이라고 판단했을 때
#최적의 기준점, 그 때의 에러입니다.
def part(X,Y,W,variable):
thresh=-20
correction=thresh+1
error_vec=[]
for i in range(0,40):
thresh=thresh+1
estimate = classifier(X, variable,thresh=thresh,plus=1)
False_Estimate = Y != estimate
error = sum(np.multiply(W, False_Estimate))
error_vec.append(error)
return np.array([np.argmin(error_vec)+correction,np.min(error_vec),np.argmax(error_vec)+correction,np.min(np.subtract(1,error_vec))])
print(part(tutorial_sample,Y_sample,W,0))
# In[74]:
#모든 독립변수들에 대해 최적의 기준점 및 그때의 에러를 반환해 주는 함수입니다. 지금은 X,Y에 해당하는 값을 반환합니다.
#Input은 앞으로 변화가 많이 없으니 설명을 생략하겠습니다.
def selector(X,Y,W):
stack=pd.DataFrame()
for i in range(0,2):
part_result=pd.DataFrame(part(X,Y,W,i))
stack=pd.concat([stack,part_result],axis=1)
stack.columns = ['X', 'Y']
stack.index = ['thresh_초과', 'error_초과', 'thresh_이하', 'error_이하']
return stack
print(selector(tutorial_sample,Y_sample,W))
# In[75]:
#selector 함수에서 error가 최소인 기준을 토대로 X를 한번 분류한 결과를 보여줍니다.
#Output은 분류결과, 분류에 사용된 변수 (0=X, 1=Y), 기준점, 초과/이하 여부 입니다
def stumped_tree(X,Y,W):
selected=selector(X,Y,W)
error_from_variables=selected.iloc[1,:].append(selected.iloc[3,:])
if np.min(error_from_variables)==selected.iloc[1,0]:
return[classifier(X,0,selected.iloc[0,0],1),0,selected.iloc[0,0],1]
elif np.min(error_from_variables)==selected.iloc[1,1]:
return [classifier(X,1,selected.iloc[0,1],1),1,selected.iloc[0,1],1]
elif np.min(error_from_variables)==selected.iloc[3,0]:
return [classifier(X,0,selected.iloc[2,0],0),0,selected.iloc[2,0],0]
else:
return [classifier(X,1,selected.iloc[2,1],0),1,selected.iloc[2,1],0]
print(stumped_tree(tutorial_sample,Y_sample,W))
# In[76]:
#분류를 한번 진행했을 때 Adaboost에 필요한 재료들을 반환하는 함수입니다.
#Output은 차례대로 변환된 Weight, alpha, epsilon, 1단계 분류 결과 입니다.
def result(X,Y,W):
True_False=stumped_tree(X,Y,W)[0]==Y
temp_result=np.subtract(np.multiply(2,stumped_tree(X,Y,W)[0]),1)
epsilon=1-sum(np.multiply(W,True_False))
alpha=0.5*np.log((1-epsilon)/epsilon)
True_False=np.subtract(np.multiply(2,True_False),1)
numerator=np.multiply(W,np.exp(np.multiply(-alpha,True_False)))
denominator=sum(numerator)
W2=np.divide(numerator,denominator)
return [W2,alpha,epsilon,temp_result]
print(result(tutorial_sample,Y_sample,W))
# In[77]:
#위 함수들을 결합하여 최종적으로 Adaboost를 실행하는 함수입니다.
#X를 나누는 기준을 학습시켜 A를 분류해주며 thresh값은 정확도를 어느 정도 까지 분류할지를 정해주는 parameter입니다.
#조금 시간이 걸려 learning step, predicting step단계를 print하게 만들었습니다.
#Output은 최종 분류결과/단계별 분류기준점/알파/분류기준 변수/초과 이하구분 입니다.
def Adaboost(X,Y,A,thresh=0.1):
W=[1/len(X.index)]*len(X.index)
estimate=np.multiply(0,result(X,Y,W)[3])
final_error = 0.5
variable = []
critical = []
plus_minus = []
alpha = []
predict=0
count = 0
A=np.reshape(A,(-1,2))
A=pd.DataFrame(A)
while final_error > thresh:
epsilon = result(X,Y, W)[2]
count = count + 1
alpha.append(result(X,Y, W)[1])
estimate = estimate + np.multiply(result(X,Y, W)[1], result(X,Y, W)[3])
final_classify = np.sign(estimate)
Y1 = np.subtract(np.multiply(2, Y), 1)
final_error = sum(final_classify != Y1) / len(Y1)
W = result(X,Y, W)[0]
print("step {} finished with error {} in learning".format(count, final_error))
W=[1/len(X.index)]*len(X.index)
for i in range(0,count):
variable.append(stumped_tree(X,Y,W)[1])
critical.append(stumped_tree(X,Y,W)[2])
plus_minus.append(stumped_tree(X,Y,W)[3])
W = result(X,Y, W)[0]
predict=predict+np.multiply(alpha[i],np.subtract(np.multiply(2,classifier(A,variable[i],critical[i],plus_minus[i])),1))
print("step {} finished in predicting".format(i+1))
XY=[]
for i in range(0,len(variable)):
if variable[i]==0:
XY.append("X")
else:
XY.append("Y")
PM=[]
for i in range(0, len(plus_minus)):
if plus_minus[i] == 1:
PM.append("초과이면 1")
else:
PM.append("이하이면 1")
return[np.sign(predict),np.add(critical,0.5),alpha,XY,PM]
#자기 자신을 학습한 결과로 자신을 test 하는 것이기 때문에 모두 TRUE가 나와야 정상입니다
print(Adaboost(tutorial_sample,Y_sample,tutorial_sample,0.01)[0]==Y1_sample)
# In[78]:
#시각화를 위해 조금 더 간단한 예제를 만들어 보겠습니다
#X로 TRAIN한 ADABOOST CLASSIFIER가 test를 잘 분류하는지 확인해보기 위한 데이터셋 구성 단계입니다.
A=pd.DataFrame([2,6,4,7,9,4,8,3,2,6])
B=pd.DataFrame([3,4,1,1,1,8,5,3,7,2])
Y=np.array([1,1,0,0,1,0,1,1,0,0])
X= | pd.concat([A,B],axis=1) | pandas.concat |
from __future__ import print_function, division, absolute_import
import os.path
import pandas as pd
import pytest
from sqlalchemy import create_engine
from sqlalchemy.engine import reflection
from sqlalchemy import MetaData, Table, String, Column
from sqlalchemy.sql import select, not_
from framequery import util
metadata = MetaData()
pg_namespace = Table(
'pg_namespace', metadata,
Column('nspname', String())
)
@pytest.mark.parametrize('qs', ['', '?model=dask'])
def test_create_engine_connect(qs):
engine = create_engine('framequery:///' + qs)
with engine.connect():
pass
def test_add_dataframe_query():
engine = create_engine('framequery:///')
engine.executor.update(foo=pd.DataFrame({'foo': [0, 1, 2]}))
assert engine.execute('select * from foo').fetchall() == [(0,), (1,), (2,)]
def test_duplicate_names():
engine = create_engine('framequery:///')
engine.executor.update(foo=pd.DataFrame({'foo': [0, 1, 2]}))
assert sorted(engine.execute('select * from foo as a, foo as b').fetchall()) == [
(0, 0), (0, 1), (0, 2),
(1, 0), (1, 1), (1, 2),
(2, 0), (2, 1), (2, 2),
]
def test_add_dataframe_query__transaction():
engine = create_engine('framequery:///')
engine.executor.update(foo=pd.DataFrame({'foo': [0, 1, 2]}))
with engine.begin() as conn:
assert conn.execute('select * from foo').fetchall() == [(0,), (1,), (2,)]
@pytest.mark.parametrize('qs', ['', '?model=dask'])
def test_scope_files(qs):
fname = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'scope.json'))
engine = create_engine('framequery:///' + fname + qs)
assert engine.table_names() == ['foo']
with engine.begin() as conn:
actual = conn.execute('select g, sum(i) from foo group by g').fetchall()
actual = sorted(actual)
assert actual == [(0, 6), (1, 9), (2, 6)]
@pytest.mark.parametrize('qs', [
'',
pytest.mark.xfail(reason='copy to not yet supported')('?model=dask'),
])
def test_scope_load_and_save(tmpdir, qs):
source = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'test.csv'))
target = os.path.join(str(tmpdir), 'test.csv')
engine = create_engine('framequery:///' + qs)
for q in [
"COPY foo FROM '{}' WITH delimiter ';', format 'csv' ".format(source),
"CREATE TABLE bar AS select g, sum(i) from foo group by g",
"COPY bar TO '{}' WITH delimiter ';', format 'csv'".format(target),
"DROP TABLE bar",
]:
engine.execute(q)
assert engine.table_names() == ['foo']
actual = | pd.read_csv(target, sep=";") | pandas.read_csv |
from typing import List, Dict
import json
import luigi.util
import pandas as pd
from .lr2ircache_tasks import _CallLr2irCacheApi, _DownloadRankings
from .safe_output_task import _SafeOutputTaskBase
from .cleanse_bms_table import cleanse_bms_table
class MakeBmsTablesJson(_SafeOutputTaskBase):
targets = luigi.DictParameter() # type: Dict[str, str]
def requires(self):
return {table_id: _CallLr2irCacheApi("/bms_tables?url=" + url)
for table_id, url in self.targets.items()}
def save(self, output_path: str):
bms_tables = [{"id": table_id,
"url": self.targets[table_id],
**task.load()}
for table_id, task in self.requires().items()]
json.dump(bms_tables, open(output_path, "w"), indent=2, ensure_ascii=False)
class MakeCleansedBmsTableJson(_SafeOutputTaskBase):
bms_tables_original_json = luigi.Parameter() # type: str
def save(self, output_path: str):
json.dump(
list(map(cleanse_bms_table, json.load(open(self.bms_tables_original_json)))),
open(output_path, "w"), indent=2, ensure_ascii=False
)
def load(self) -> dict:
return json.load(open(self.output().path))
class MakeItemCsv(_SafeOutputTaskBase):
bmsmd5s = luigi.ListParameter() # type: List[str]
def requires(self):
return [_CallLr2irCacheApi("/items/" + bmsmd5)
for bmsmd5 in self.bmsmd5s]
def save(self, output_path: str):
(pd.DataFrame([task.load() for task in self.requires()])
[["bmsmd5", "type", "lr2_id", "title"]]
.to_csv(output_path, index=False))
@luigi.util.requires(_DownloadRankings)
class MakeRecordsCsv(_SafeOutputTaskBase):
def save(self, output_path: str):
# 生成物は比較的大きくなることが予想されるので、一度にメモリ上に展開せず逐次的に読み書きをする方針
for i, (bmsmd5, ranking) in enumerate(self.requires().rankings()):
( | pd.DataFrame(ranking) | pandas.DataFrame |
import pandas as pd
import numpy as np
import jinja2
import math
import re
class Plate_Desc:
def __init__(self, title, descrip, serialnum, date_time, datafname):
self.title = title
self.descrip = descrip
self.serialnum = serialnum
self.date_time = date_time
self.datafname = datafname
def clear(self):
self.title = ""
self.descrip = ""
self.serialnum = ""
self.date_time = ""
self.datafname = ""
return self
def new(self):
self.__init__("", "", "", "", "" )
return self
# open the file
datafname = "Z:\Shared Folders\Data_Per\Prog\Proj-HaasMeas\Large_Top_755_772.out"
datafname_short = re.sub(r'\\.+\\', '', datafname)
datafname_short = re.sub(r'^(.*:)', '', datafname_short)
fin = open(datafname,"r")
rdplate_line = 0 #reading aline on a plate
plate_num = 0
plate_desc = Plate_Desc("","","","", datafname_short)
plate_hole_rec = [] # holds all the data for all the hole measurements on one plate
plate_hole_table = [] # all the holes for a single plate
plate_meas_table = [] # list of two dimension (plate desc + plate_holes_rec's)
hole_rec = []
hole_table = []
for line_in in fin:
line_out = ""
if line_in.find("%")>=0:
#nothing
line_out = ""
elif not line_in.strip():
#it was null
line_out = ""
elif line_in.find("()")>=0:
#it is the third line in
line_out = ""
else:
line_out=line_in.replace("\n","")
# anything but a blank line
if line_out != "":
if (rdplate_line==0):
if (line_out.find("HOLE ")>=0):
rdplate_line = 4 #there is another hole on the plate
else:
if plate_num ==0:
plate_num += 1
else:
#if not the first plate then must push to stack
plate_meas_rec = (plate_desc, plate_hole_table)
plate_meas_table.append(plate_meas_rec)
plate_desc = Plate_Desc("","","","", datafname_short)
plate_hole_table = []
plate_num += 1
# now, need to find out if a plate reading is in progress
if rdplate_line == 0:
#header
plate_desc = Plate_Desc("","","","", datafname_short)
rdplate_line = rdplate_line + 1
plate_desc.title = line_out.strip()
elif rdplate_line == 1:
#descrip #2
rdplate_line = rdplate_line + 1
plate_desc.descrip = line_out.strip()
elif rdplate_line == 2:
#serial number
if line_out.find("SERIAL")>= 0:
#it is serial number
plate_desc.serialnum = line_out.replace("SERIAL: ", "")
rdplate_line = rdplate_line + 1
elif rdplate_line == 3:
#time and date
tempstr = line_out.replace(" ", ",")
split_val_list = tempstr.split(",")
if len(split_val_list[1]) < 6:
split_val_list[1] = "0" + split_val_list[1]
date_str = split_val_list[0][2] + split_val_list[0][3] + "/" + split_val_list[0][4] + split_val_list[0][5] + "/" + "20" + split_val_list[0][0] + split_val_list[0][1]
time_str = split_val_list[1][0] + split_val_list[1][1] + ":" + split_val_list[1][2] + split_val_list[1][3] + ":" + split_val_list[1][4] + split_val_list[1][5]
plate_desc.date_time = date_str + " " + time_str
rdplate_line = rdplate_line + 1
elif rdplate_line == 4:
#hole number
if line_out.find("HOLE")>= 0:
#it is serial number
tempstr = line_out.replace("HOLE ", "")
plate_hole_rec = []
plate_hole_rec.append(tempstr)
rdplate_line = rdplate_line + 1
elif rdplate_line == 5:
#X pos
if line_out.find("X_TH")>= 0:
tempstr1 = line_out.replace("X_TH:", "")
tempstr2 = tempstr1.replace("X_MEA:", "")
tempstr3 = tempstr2.replace("X_DIF:", "")
tempstr4 = tempstr3.replace(" ", ",")
split_val_list = tempstr4.split(",")
plate_hole_rec.append(split_val_list[0])
plate_hole_rec.append(split_val_list[1])
plate_hole_rec.append(split_val_list[2])
rdplate_line = rdplate_line + 1
elif rdplate_line == 6:
#Y pos
if line_out.find("Y_TH")>= 0:
tempstr1 = line_out.replace("Y_TH:", "")
tempstr2 = tempstr1.replace("Y_MEA:", "")
tempstr3 = tempstr2.replace("Y_DIF:", "")
tempstr4 = tempstr3.replace(" ", ",")
split_val_list = tempstr4.split(",")
plate_hole_rec.append(split_val_list[0])
plate_hole_rec.append(split_val_list[1])
plate_hole_rec.append(split_val_list[2])
rdplate_line = rdplate_line + 1
elif rdplate_line == 7:
#Y pos
if line_out.find("Z_TH")>= 0:
tempstr1 = line_out.replace("Z_TH:", "")
tempstr2 = tempstr1.replace("Z_MEA:", "")
tempstr3 = tempstr2.replace("Z_DIF:", "")
tempstr4 = tempstr3.replace(" ", ",")
split_val_list = tempstr4.split(",")
plate_hole_rec.append(split_val_list[0])
plate_hole_rec.append(split_val_list[1])
plate_hole_rec.append(split_val_list[2])
rdplate_line = rdplate_line + 1
elif rdplate_line == 8:
#DIAM
if line_out.find("DIAM")>= 0:
tempstr1 = line_out.replace("DIAM:", "")
tempstr2 = tempstr1.replace("DIA_ERR:", "")
tempstr3 = tempstr2.replace(" ", ",")
split_val_list = tempstr3.split(",")
plate_hole_rec.append(split_val_list[0])
plate_hole_rec.append(split_val_list[1])
# last number read. next line blank but will be chopped up on top
plate_hole_table.append(plate_hole_rec)
rdplate_line = 0
else:
print(plate_desc)
print(plate_meas)
rdplate_line = 0
#print (line_out)
# lines all read in, store the last record in memory
plate_meas_rec = (plate_desc, plate_hole_table)
plate_meas_table.append(plate_meas_rec)
num_holes = len(plate_meas_table[0][1])
num_plates = len(plate_meas_table)
# summary at the top
col1 = pd.Index(['plates', 'descrip', 'data file', 'start', 'stop', 'operator', '# plates', 'start s/n', 'stop s/n','# holes'])
col2 = pd.Index([plate_meas_table[0][0].title, plate_meas_table[0][0].descrip, plate_meas_table[0][0].datafname, plate_meas_table[0][0].date_time, plate_meas_table[len(plate_meas_table)-1][0].date_time, '<NAME>', str(len(plate_meas_table)).strip(), plate_meas_table[0][0].serialnum, plate_meas_table[len(plate_meas_table)-1][0].serialnum, str(num_holes).strip() ])
df_head = pd.DataFrame(col2, columns=[''], index=col1)
print(df_head)
def color_negative_red(val):
#color = 'red' if val < 0 else 'black'
color = 'black'
return f'color: {color}'
def color_spec_dif_01(val):
color = 'red' if float(val) > 0.001 else 'black'
return f'color: {color}'
def color_spec_dif_02(val): #warning if above 0.002
color = 'blue' if float(val) > 0.002 or float(val) < -0.002 else 'black'
return f'color: {color}'
def color_spec_dif_03(val): #red if above 0.002
color = 'red' if float(val) > 0.002 or float(val) < -0.002 else 'black'
return f'color: {color}'
def color_spec_dia(val):
color = 'red' if float(val) > 0.4910 or float(val) < 0.4900 else 'black'
return f'color: {color}'
head_styler = df_head.style.applymap(color_negative_red)
#create serial numbers
meas_tab_serial_num = pd.Index(['spec'], dtype='object')
meas_tab_num = pd.Index([' '])
i=0
for lp in plate_meas_table:
meas_tab_serial_num = meas_tab_serial_num.append( | pd.Index([plate_meas_table[i][0].serialnum]) | pandas.Index |
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn import svm, tree, linear_model, neighbors, naive_bayes, ensemble, discriminant_analysis, gaussian_process
from xgboost import XGBClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score, GridSearchCV, train_test_split
from sklearn.linear_model import LogisticRegressionCV
from sklearn.feature_selection import RFECV
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, StandardScaler
from sklearn import feature_selection
from sklearn import metrics
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
def predict_round(pred_round):
def load_afl_data(pred_round):
df_2017 = pd.read_csv("../data/afl_results_2017.csv")
#print(df_2017.shape)
df_2018 = pd.read_csv("../data/afl_results_2018.csv")
#print(df_2018.shape)
df_2019 = pd.read_csv("../data/afl_results_2019.csv")
#print(df_2019.shape)
df_2020 = pd.read_csv("../data/afl_results_2020.csv")
#print(df_2020.shape)
df_2021 = pd.read_csv("../data/afl_results_2021.csv")
#print(df_2021.shape)
df_2022 = pd.read_csv("../data/afl_results_2022.csv")
pred_round_results = df_2022[df_2022['round.roundNumber'] == pred_round]
df_2022 = df_2022[df_2022['round.roundNumber'] < pred_round]
#print(df_2022.shape)
df_all = pd.concat([df_2017, df_2018, df_2019, df_2020, df_2021,df_2022], axis=0)
df_all['Date'] = pd.to_datetime(df_all['match.date']).dt.strftime("%Y-%m-%d")
df_players_2017 = pd.read_csv("../data/afl_players_stats_2017.csv")
#print(df_players_2017.shape)
df_players_2018 = pd.read_csv("../data/afl_players_stats_2018.csv")
#print(df_players_2018.shape)
df_players_2019 = pd.read_csv("../data/afl_players_stats_2019.csv")
#print(df_players_2019.shape)
df_players_2020 = pd.read_csv("../data/afl_players_stats_2020.csv")
#print(df_players_2020.shape)
df_players_2021 = pd.read_csv("../data/afl_players_stats_2021.csv")
#print(df_players_2021.shape)
df_players_2022 = pd.read_csv("../data/afl_players_stats_2022.csv")
df_players_2022 = df_players_2022[df_players_2022['Round'] < pred_round]
#print(df_players_2022.shape)
df_players = pd.concat([df_players_2017, df_players_2018, df_players_2019,df_players_2020,df_players_2021,df_players_2022], axis=0)
#print(df_players.shape)
#df_players.columns
df_fixture = pd.read_csv("../data/fixture_2022.csv")
df_next_games_teams = df_fixture[(df_fixture['round.roundNumber'] == pred_round)]
df_next_games_teams = df_next_games_teams[['home.team.name','away.team.name','venue.name','compSeason.year','round.roundNumber']]
df_next_games_teams = df_next_games_teams.rename(columns={'home.team.name': 'match.homeTeam.name', 'away.team.name': 'match.awayTeam.name','compSeason.year':'round.year'})
df_next_games_teams['match.matchId'] = np.arange(len(df_next_games_teams))
return df_all, df_players, df_fixture, df_next_games_teams, pred_round_results
def get_aggregate_player_stats(df=None):
agg_stats = (df.rename(columns={ # Rename columns to lowercase
'Home.team': 'match.homeTeam.name',
'Away.team': 'match.awayTeam.name',
})
.groupby(by=['Date', 'Season', 'match.homeTeam.name', 'match.awayTeam.name'], as_index=False) # Groupby to aggregate the stats for each game
.sum()
#.drop(columns=['DE', 'TOG', 'Match_id']) # Drop columns
.assign(date=lambda df: pd.to_datetime(df.Date, format="%Y-%m-%d")) # Create a datetime object
.sort_values(by='Date')
.reset_index(drop=True))
return agg_stats
df_all, df_players, df_fixture, df_next_games_teams, pred_round_results = load_afl_data(pred_round)
agg_player = get_aggregate_player_stats(df_players)
afl_df = df_all.merge(agg_player, on=['Date', 'match.homeTeam.name', 'match.awayTeam.name'], how='left')
# Add average goal diff for home and away team rolling 4 games
afl_df['HTGDIFF'] = afl_df['homeTeamScore.matchScore.goals'] - afl_df['awayTeamScore.matchScore.goals']
afl_df['ATGDIFF'] = afl_df['awayTeamScore.matchScore.goals'] - afl_df['homeTeamScore.matchScore.goals']
def from_dict_value_to_df(d):
"""
input = dictionary
output = dataframe as part of all the values from the dictionary
"""
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.externals import joblib
import warnings
warnings.filterwarnings("ignore")
# Choose GBDT Regression model as baseline
# my_model = GradientBoostingRegressor()
# Training Step
def my_train_func(station):
train_data = pd.read_csv('train-dataset/point_date_' + station + '.csv')
train_data_Y = train_data['actualPowerGeneration']
# Drop some non-relative factors
drop_columns = ['longitude', 'latitude', 'RadiationHorizontalPlane', 'Temperature', 'actualPowerGeneration',
'Humidity', 'atmosphericPressure', 'windDirection', 'scatteredRadiation']
train_data_X = train_data.drop(axis=1, columns=drop_columns)
train_data_X['month'] = pd.to_datetime(train_data_X.Time).dt.month
train_data_X['day'] = pd.to_datetime(train_data_X.Time).dt.day
train_data_X['hour'] = pd.to_datetime(train_data_X.Time).dt.hour
train_data_X = train_data_X.drop(axis=1, columns=['Time'])
# Validation
X_train, X_test, Y_train, Y_test = train_test_split(train_data_X, train_data_Y, test_size=0.2, random_state=40)
myGBR = GradientBoostingRegressor(n_estimators=500,max_depth=7)
myGBR.fit(X_train, Y_train)
Y_pred = myGBR.predict(X_test)
# Output model to global variation
# my_model = myGBR
_ = joblib.dump(myGBR, 'model/' + station + '_model.pkl', compress=9)
print('Training completed. MSE on validation set is {}'.format(mean_squared_error(Y_test, Y_pred)))
print('Factors below are used: \n{}'.format(list(X_train.columns)))
def my_spredict_func(station, input_file, output_file):
# Clean test data
columns = 'Time,longitude,latitude,directRadiation,scatterdRadiation,windSpeed,airTransparency,airDensity'
columns = list(columns.split(','))
test_data = pd.read_csv('test-dataset/' + input_file, names=columns)
drop_columns = ['longitude', 'latitude', 'airTransparency', 'airDensity']
test_data = test_data.drop(axis=1, columns=drop_columns)
test_data['month'] = pd.to_datetime(test_data.Time).dt.month
test_data['day'] = pd.to_datetime(test_data.Time).dt.day
test_data['hour'] = pd.to_datetime(test_data.Time).dt.hour
test_data['min'] = pd.to_datetime(test_data.Time).dt.minute
# Find the time point we need to start with
test_data = test_data.sort_values(by='Time')
# Find the latest time point
time_point = test_data[test_data['hour'] == 0][test_data['min'] == 0].index.tolist()[0]
start_point = test_data.loc[time_point]['Time']
observation_period = pd.date_range(start=start_point, periods=96, freq='15T').strftime("%Y-%m-%d %H:%M:%S").tolist()
test_data = test_data.drop(axis=1, columns=['Time', 'min'])
# Simply fill the NaN values, need more discussion
test_data = test_data.fillna(method='ffill')
test_data = test_data.fillna(0)
test_data = test_data.iloc[time_point:time_point + 96]
try:
my_model = joblib.load('model/' + station + '_model.pkl')
print('Find pretrained model!\n')
except:
print('Need train first!')
exit(0)
result = my_model.predict(test_data)
# result = [at_least(x) for x in result]
two_columns = ['Time', 'Short Predict']
result = pd.DataFrame(data={two_columns[0]: observation_period, two_columns[1]: result})
print('Short prediction of power generation in nearest 96 time points:\n{}'.format(result))
result.to_csv('output/short/' + station + '_' + output_file, sep=',')
def my_sspredict_func(station, input_file, output_file):
columns = 'Time,longitude,latitude,directRadiation,scatterdRadiation,windSpeed,airTransparency,airDensity'
columns = list(columns.split(','))
test_data = pd.read_csv('test-dataset/' + input_file, names=columns)
drop_columns = ['longitude', 'latitude', 'airTransparency', 'airDensity']
test_data = test_data.drop(axis=1, columns=drop_columns)
test_data['month'] = pd.to_datetime(test_data.Time).dt.month
test_data['day'] = pd.to_datetime(test_data.Time).dt.day
test_data['hour'] = pd.to_datetime(test_data.Time).dt.hour
test_data = test_data.sort_values(by='Time')
start_point = test_data.iloc[0]['Time']
observation_period = pd.date_range(start=start_point, periods=16, freq='15T').strftime(
"%Y-%m-%d %H:%M:%S").tolist()
test_data = test_data.drop(axis=1, columns=['Time'])
test_data = test_data.fillna(method='ffill')
test_data = test_data.fillna(0)
test_data = test_data[:16]
try:
my_model = joblib.load('model/' + station + '_model.pkl')
except:
print('Need train first!')
exit(0)
result = my_model.predict(test_data)
# result = [at_least(x) for x in result]
two_columns = ['Time', 'Short Predict']
result = | pd.DataFrame(data={two_columns[0]: observation_period, two_columns[1]: result}) | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03_key_driver_analysis.ipynb (unless otherwise specified).
__all__ = ['KeyDriverAnalysis']
# Cell
import numpy as np
import pandas as pd
| pd.set_option('display.max_columns', 500) | pandas.set_option |
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(
pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start="4/2/2012", periods=10, freq="B")
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(
year=years, month=months, freq="M", start=Period("2007-01", freq="M")
)
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range("2007-1-1", periods=500, freq="X")
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N"
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="N"),
Period(ordinal=2, freq="N"),
Period(ordinal=3, freq="N"),
Period(ordinal=4, freq="N"),
],
freq="N",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC")
def test_constructor_corner(self):
msg = "Not enough parameters to construct Period range"
with pytest.raises(ValueError, match=msg):
PeriodIndex(periods=10, freq="A")
start = Period("2007", freq="A-JUN")
end = Period("2010", freq="A-DEC")
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end)
msg = (
"Of the three parameters: start, end, and periods, exactly two"
" must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
with pytest.raises(ValueError, match=msg):
PeriodIndex(end=end)
result = period_range("2007-01", periods=10.5, freq="M")
exp = period_range("2007-01", periods=10, freq="M")
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range("2007-01", periods=20, freq="M")
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(idx._ndarray_values)
with pytest.raises(ValueError, match=msg):
PeriodIndex(list(idx._ndarray_values))
msg = "'Period' object is not iterable"
with pytest.raises(TypeError, match=msg):
PeriodIndex(data=Period("2007", freq="A"))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq="M")
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == "M"
result = PeriodIndex(idx, freq="2M")
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq="D")
exp = idx.asfreq("D", "e")
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype("M8[us]"))
msg = r"Wrong dtype: datetime64\[us\]"
with pytest.raises(ValueError, match=msg):
PeriodIndex(vals, freq="D")
@pytest.mark.parametrize("box", [None, "series", "index"])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
data = pd.date_range("2017", periods=4, freq="M")
if box is None:
data = data._values
elif box == "series":
data = pd.Series(data)
result = PeriodIndex(data, freq="D")
expected = PeriodIndex(
["2017-01-31", "2017-02-28", "2017-03-31", "2017-04-30"], freq="D"
)
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = PeriodIndex(["2013-01", "2013-03"], dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-03"], freq="M")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[M]"
idx = PeriodIndex(["2013-01-05", "2013-03-05"], dtype="period[3D]")
exp = PeriodIndex(["2013-01-05", "2013-03-05"], freq="3D")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[3D]"
# if we already have a freq and its not the same, then asfreq
# (not changed)
idx = PeriodIndex(["2013-01-01", "2013-01-02"], freq="D")
res = PeriodIndex(idx, dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-01"], freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
res = PeriodIndex(idx, freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
msg = "specified freq and dtype are different"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(["2011-01"], freq="M", dtype="period[D]")
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq="M")
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == "M"
with pytest.raises(ValueError, match="freq not specified"):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")]
)
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array([Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")])
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
[pd.NaT, pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
exp = PeriodIndex(["NaT", "NaT", "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array(
[
pd.NaT,
pd.NaT,
Period("2011-01", freq="M"),
Period("2011-01", freq="M"),
]
)
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex([pd.NaT, pd.NaT])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(["NaT", "NaT"])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array(["NaT", "NaT"]))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
)
# first element is pd.NaT
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
)
def test_constructor_mixed(self):
idx = PeriodIndex(["2011-01", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(["NaT", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["NaT", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period("2011-01-01", freq="D"), pd.NaT, "2012-01-01"])
exp = PeriodIndex(["2011-01-01", "NaT", "2012-01-01"], freq="D")
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range("2007-01", name="p", periods=2, freq="M")
result = idx._simple_new(idx, name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype("i8"), name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq="M", name="p")
result = idx._simple_new(idx, name="p", freq="M")
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
msg = r"PeriodIndex\._simple_new does not accept floats"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex._simple_new(floats, freq="M")
msg = "PeriodIndex does not allow floating point in construction"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex(floats, freq="M")
def test_constructor_nat(self):
msg = "start and end must not be NaT"
with pytest.raises(ValueError, match=msg):
period_range(start="NaT", end="2011-01-01", freq="M")
with pytest.raises(ValueError, match=msg):
period_range(start="2011-01-01", end="NaT", freq="M")
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ["%dQ%d" % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
@pytest.mark.parametrize(
"func, warning", [(PeriodIndex, FutureWarning), (period_range, None)]
)
def test_constructor_freq_mult(self, func, warning):
# GH #7811
with tm.assert_produces_warning(warning):
# must be the same, but for sure...
pidx = func(start="2014-01", freq="2M", periods=4)
expected = PeriodIndex(["2014-01", "2014-03", "2014-05", "2014-07"], freq="2M")
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(start="2014-01-02", end="2014-01-15", freq="3D")
expected = PeriodIndex(
["2014-01-02", "2014-01-05", "2014-01-08", "2014-01-11", "2014-01-14"],
freq="3D",
)
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(end="2014-01-01 17:00", freq="4H", periods=3)
expected = PeriodIndex(
["2014-01-01 09:00", "2014-01-01 13:00", "2014-01-01 17:00"], freq="4H"
)
tm.assert_index_equal(pidx, expected)
msg = "Frequency must be positive, because it" " represents span: -1M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="-1M")
msg = "Frequency must be positive, because it" " represents span: 0M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="0M")
msg = "Frequency must be positive, because it" " represents span: 0M"
with pytest.raises(ValueError, match=msg):
period_range("2011-01", periods=3, freq="0M")
@pytest.mark.parametrize("freq", ["A", "M", "D", "T", "S"])
@pytest.mark.parametrize("mult", [1, 2, 3, 4, 5])
def test_constructor_freq_mult_dti_compat(self, mult, freq):
freqstr = str(mult) + freq
pidx = period_range(start="2014-04-01", freq=freqstr, periods=10)
expected = date_range(start="2014-04-01", freq=freqstr, periods=10).to_period(
freqstr
)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ["1D1H", "1H1D"]:
pidx = PeriodIndex(["2016-01-01", "2016-01-02"], freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 00:00"], freq="25H")
for freq in ["1D1H", "1H1D"]:
pidx = | period_range(start="2016-01-01", periods=2, freq=freq) | pandas.period_range |
# Built in imports.
import json
import asyncio
# Third Party imports.
from channels.exceptions import DenyConnection
from channels.generic.websocket import AsyncWebsocketConsumer
from . import plotting
from analysis.analysis import Analysis
from analysis.util import *
from registry.util import get_samples, get_measurements
from registry.models import Signal, Chemical
from plotly.subplots import make_subplots
import plotly
import pandas as pd
import time
import math
group_fields = {
'Vector': 'Vector',
'Study': 'Study',
'Signal': 'Signal',
'Assay': 'Assay',
'Media': 'Media',
'Strain': 'Strain',
'Supplement': 'Supplement'
}
class PlotConsumer(AsyncWebsocketConsumer):
async def connect(self):
self.user = self.scope["user"]
print(self.user)
await self.accept()
await self.channel_layer.group_add(
"asd",
self.channel_name
)
async def plot(self, df,
mean=False,
std=False,
normalize=False,
groupby1=None,
groupby2=None,
font_size=10,
xlabel='Time',
ylabel='Measurement',
xcolumn='Time',
ycolumn='Measurement',
plot_type='timeseries'):
'''
Generate plot data for frontend plotly plot generation
'''
n_measurements = len(df)
if n_measurements == 0:
return None
traces = []
colors = {}
colidx = 0
subplot_index = 0
groupby1 = group_fields[groupby1]
groupby2 = group_fields[groupby2]
grouped = df.groupby(groupby1)
n_subplots = len(grouped)
ncolors = len(plotting.palette)
progress = 0
# Compute number of rows and columns
n_sub_plots = len(grouped)
rows,cols = plotting.optimal_grid(n_sub_plots)
# Construct subplots
start = time.time()
fig = make_subplots(
rows=rows, cols=cols,
subplot_titles=[name for name,g in grouped],
shared_xaxes=True, shared_yaxes=False,
vertical_spacing=0.1, horizontal_spacing=0.1
)
end = time.time()
# Add traces to subplots
print('make_subplots took %g'%(end-start), flush=True)
for name1,g1 in grouped:
for name2,g2 in g1.groupby(groupby2):
# Choose color and whether to show in legend
if name2 not in colors:
colors[name2] = plotting.palette[colidx%ncolors]
colidx += 1
show_legend_group = True
else:
show_legend_group = False
# Which position the subplot is in
row = 1 + subplot_index//cols
col = 1 + subplot_index%cols
# Decide color for line, use signal color if appropriate
color = colors[name2]
if groupby2=='Signal':
color_string = g2['Color'].values[0]
try:
color_int = int(color_string[1:], 16)
color = color_string
except:
if color_string in plotting.plotly_colors:
color = color_string
# Add traces to figure
if plot_type == 'timeseries':
fig = plotting.make_timeseries_traces(
fig,
g2,
color=color,
mean=mean,
std=std,
normalize=normalize,
show_legend_group=show_legend_group,
group_name=str(name2),
row=row, col=col,
ycolumn=ycolumn
)
elif plot_type == 'bar':
fig = plotting.make_bar_traces(
fig,
g2,
color=color,
mean=mean,
std=std,
normalize=normalize,
show_legend_group=show_legend_group,
group_name=str(name2),
row=row, col=col,
xcolumn=groupby2,
ycolumn=ycolumn
)
elif plot_type == 'induction':
fig = plotting.make_induction_traces(
fig,
g2,
color=color,
mean=mean,
std=std,
normalize=normalize,
show_legend_group=show_legend_group,
group_name=str(name2),
row=row, col=col,
ycolumn=ycolumn
)
elif plot_type == 'heatmap':
fig = plotting.make_heatmap_traces(
fig,
g2,
mean=mean,
std=std,
normalize=normalize,
show_legend_group=show_legend_group,
group_name=str(name2),
row=row, col=col,
ycolumn=ycolumn
)
elif plot_type == 'kymograph':
fig = plotting.make_kymograph_traces(
fig,
g2,
mean=mean,
std=std,
normalize=normalize,
show_legend_group=show_legend_group,
group_name=str(name2),
row=row, col=col,
ycolumn=ycolumn
)
else:
print('Unsupported plot type, ', plot_type, flush=True)
# Format axes
plotting.format_axes(fig,
row, col, rows,
xlabel=xlabel,
ylabel=ylabel,
font_size=font_size)
# Update progress bar
progress += len(g2)
await self.send(text_data=json.dumps({
'type': 'progress_update',
'data': {'progress': int(50 + 50 * progress / n_measurements)}
}))
await asyncio.sleep(0)
# Next subplot
subplot_index += 1
if plot_type=='kymograph' or plot_type=='induction':
xaxis_type, yaxis_type = 'log', None
elif plot_type=='heatmap':
xaxis_type, yaxis_type = 'log', 'log'
else:
xaxis_type, yaxis_type = None, None
plotting.layout_screen(fig, xaxis_type=xaxis_type, yaxis_type=yaxis_type, font_size=font_size)
return fig
async def run_analysis(self, df, analysis):
if len(df)==0:
return df
grouped = df.groupby('Sample')
result_dfs = []
n_samples = len(grouped)
progress = 0
for id,g in grouped:
result_df = analysis.analyze_data(g)
result_dfs.append(result_df)
progress += 1
await self.send(text_data=json.dumps({
'type': 'progress_update',
'data': {'progress': int(50 * progress / n_samples)}
}))
await asyncio.sleep(0)
df = | pd.concat(result_dfs) | pandas.concat |
#!/usr/bin/env python3
# Copyright Toolkit Authors (<NAME>)
import argparse
import pandas as pd
import os
import pickle
import shutil
def concat_df(pkl_files, out_pkl):
"""Concatinate some pickle files.
Args:
pkl_files (list): List of picke files to concatinate.
out_pkl (str): Output pickle file.
"""
for i, pkl in enumerate(pkl_files):
with open(pkl, "rb") as f:
df_dict = pickle.load(f)
if i == 0:
file_df = df_dict['file_df']
content_df = df_dict['content_df']
record_id_df = df_dict['record_id_df']
else:
file_df = pd.concat([file_df, df_dict['file_df']])
content_df = pd.concat([content_df, df_dict['content_df']])
record_id_df = | pd.concat([record_id_df, df_dict['record_id_df']]) | pandas.concat |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from catboost import CatBoostRegressor
from scipy.stats import skew
from sklearn.dummy import DummyRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.isotonic import IsotonicRegression
from sklearn.kernel_approximation import RBFSampler, Nystroem
from sklearn.model_selection.tests.test_validation import test_validation_curve_cv_splits_consistency
from sklearn.neighbors import KNeighborsRegressor, RadiusNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import Imputer, FunctionTransformer, StandardScaler, PolynomialFeatures
from sklearn.model_selection import train_test_split, GridSearchCV, learning_curve
from sklearn.ensemble import GradientBoostingRegressor, AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor, \
RandomForestRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error, make_scorer
import keras
from keras import Sequential
from keras.layers import Dense, Dropout, LeakyReLU, BatchNormalization, LSTM
from keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler, Imputer, StandardScaler
import sklearn
from sklearn.feature_selection import SelectFromModel, SelectKBest, f_regression
from sklearn.linear_model import LassoCV, BayesianRidge, LinearRegression, RidgeCV, LassoLarsCV, ElasticNet, \
ElasticNetCV, OrthogonalMatchingPursuitCV, ARDRegression, LogisticRegression, LogisticRegressionCV, SGDRegressor, \
PassiveAggressiveRegressor, RANSACRegressor, TheilSenRegressor, HuberRegressor
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import KFold
import os
import sys
import warnings
from sklearn.metrics import mean_squared_log_error, mean_squared_error, mean_absolute_error
from sklearn.svm import LinearSVR, NuSVR, SVR
from sklearn.tree import DecisionTreeRegressor
if not sys.warnoptions:
warnings.simplefilter("ignore")
from xgboost import XGBRegressor
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
from sklearn.model_selection import KFold
import lightgbm as lgb
from mlxtend.regressor import StackingRegressor
import seaborn as sns
print(os.listdir("data"))
def get_cat_cols(df):
return [col for col in df.columns if df[col].dtype == 'object']
def rmsle_cv(model, x, y):
kf = KFold(10, shuffle=True, random_state=1).get_n_splits(x)
rmse = np.sqrt(-cross_val_score(model, x, y, scoring="neg_mean_squared_error", cv=kf, verbose=0))
return (rmse)
train_data = pd.read_csv('data/train.csv')
test_data = pd.read_csv('data/test.csv')
to_str = ['YearBuilt','LotArea','MasVnrArea','BsmtFinSF1','1stFlrSF','2ndFlrSF','LotFrontage']
# to_str = ['YearBuilt']
to_few = ['Street','Utilities','LandSlope','Condition2']
for column in train_data.columns:
print(train_data[column].head(5))
if column == 'Id':
continue
df = pd.DataFrame(columns=[column, 'SalePrice'])
df['SalePrice'] = train_data.SalePrice
if train_data[column].dtype != 'object':
train_data[column] = train_data[column].fillna(train_data[column].mean())
if column in to_str:
plt.scatter(train_data[column], train_data.SalePrice)
plt.xlabel(column)
plt.ylabel('sale price')
plt.plot(np.linspace(min(train_data[column]), max(train_data[column]), len(train_data[column])),
np.linspace(min(train_data.SalePrice), max(train_data.SalePrice), len(train_data[column])),
color='black')
plt.show()
if train_data[column].dtype == 'float64':
train_data[column] = train_data[column].astype('int')
train_data[column] = train_data[column].astype('object')
if train_data[column].dtype == 'int64':
plt.scatter(train_data[column], train_data.SalePrice)
plt.xlabel(column)
plt.ylabel('sale price')
plt.plot(np.linspace(min(train_data[column]), max(train_data[column]), len(train_data[column])),
np.linspace(min(train_data.SalePrice), max(train_data.SalePrice), len(train_data[column])),
color='black')
plt.show()
train_data[column] = train_data[column].astype('object')
if train_data[column].dtype == 'object':
train_data[column] = train_data[column].fillna('NotAvailable')
df[column] = LabelEncoder().fit_transform(train_data[column])
else:
df[column] = train_data[column]
plt.scatter(df[column], df.SalePrice)
plt.xlabel(column)
plt.ylabel('sale price')
plt.plot(np.linspace(min(df[column]), max(df[column]), len(df[column])),
np.linspace(min(df.SalePrice), max(df.SalePrice), len(df[column])),
color='black')
plt.show()
exit(1)
y = np.log1p(train_data.SalePrice)
# test is meant for predictions and doesn't contain any price data. I need to provide it.
cand_train_predictors = train_data.drop(['Id', 'SalePrice'], axis=1)
cand_test_predictors = test_data.drop(['Id'], axis=1)
cat_cols = get_cat_cols(cand_train_predictors)
cand_train_predictors[cat_cols] = cand_train_predictors[cat_cols].fillna('NotAvailable')
cand_test_predictors[cat_cols] = cand_test_predictors[cat_cols].fillna('NotAvailable')
encoders = {}
for col in cat_cols:
encoders[col] = LabelEncoder()
val = cand_train_predictors[col].tolist()
val.extend(cand_test_predictors[col].tolist())
encoders[col].fit(val)
cand_train_predictors[col] = encoders[col].transform(cand_train_predictors[col])
cand_test_predictors[col] = encoders[col].transform(cand_test_predictors[col])
cand_train_predictors.fillna(cand_train_predictors.mean(), inplace=True)
cand_test_predictors.fillna(cand_test_predictors.mean(), inplace=True)
| pd.set_option("use_inf_as_na", True) | pandas.set_option |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functions for Hydra - learning ddGoffset values for free energy perturbations.
"""
# TF-related imports & some settings to reduce TF verbosity:
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1" # current workstation contains 4 GPUs; exclude 1st
import tensorflow as tf
from tensorflow import keras
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
# hyperparameter optimisation:
import skopt
from skopt import gp_minimize, forest_minimize
from skopt.space import Real, Categorical, Integer
from skopt.plots import plot_convergence
from skopt.plots import plot_objective, plot_evaluations
from tensorflow.python.keras import backend as K
from skopt.utils import use_named_args
# featurisation:
from mordred import Calculator, descriptors
from rdkit import Chem
from rdkit.Chem import AllChem, rdmolfiles
# general imports:
import pandas as pd
import numpy as np
import csv
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn import preprocessing, decomposition
from sklearn.model_selection import train_test_split
from scipy import stats
from tqdm import tqdm
import glob
import pickle
# global startpoint for SKOPT optimisation:
startpoint_error = np.inf
###################################################
###################################################
###################### UTILS ######################
###################################################
###################################################
def retrieveMoleculePDB(ligand_path):
"""
Returns RDKit molecule objects for requested path PDB file.
-- args
ligand_path (str): path leading to molecule pdb file
-- returns
RDKit molecule object
"""
mol = rdmolfiles.MolFromPDBFile(
ligand_path,
sanitize=True
)
return mol
def readHDF5Iterable(path_to_trainingset, chunksize):
"""
Read in a training set using pandas' HDF5 utility
--args
path_to_trainingset (str): path to training set (HDF5) to read from
chunksize (int): number of items to read in per increment (recommended 5000 for large datasets)
--returns
training_set (iterable)
"""
training_set = pd.DataFrame()
# use chunksize to save memory during reading:
training_set_iterator = pd.read_hdf(path_to_trainingset, chunksize=chunksize)
return training_set_iterator
###################################################
###################################################
################## FEATURISERS ####################
###################################################
###################################################
###################################################
### Molecular properties: ###
###
###
def computeLigMolProps(
transfrm_path="transformations/",
working_dir="features/MOLPROPS/",
target_columns=None,
verbose=False):
"""
Compute molecular properties for the molecules in given transfrm_path and write to file.
--args
transfrm_path (str): path to directory containing ligand files
working_dir (str): path to directory to pickle into
verbose (bool): whether or not to print featurisation info to stdout
--returns
molprops_set (pandas dataframe): set of molecules with molecular properties
"""
mol_paths = glob.glob(transfrm_path+"*")
# generate RDKit mol objects from paths:
mols_rdkit = [ retrieveMoleculePDB(mol) for mol in mol_paths ]
# generate molecule name from paths for indexing:
mols_names = [ mol.replace(transfrm_path, "").split(".")[0] for mol in mol_paths ]
# generate all descriptors available in mordred:
calc = Calculator(descriptors, ignore_3D=False)
print("Computing molecular properties:")
molprops_set = calc.pandas(mols_rdkit)
# remove columns with bools or strings (not fit for subtraction protocol):
if target_columns is not None:
# if variable is input the function is handling a testset and must
# keep the same columns as train dataset:
molprops_set = molprops_set[target_columns]
else:
# if making a training dataset, decide which columns to retain:
molprops_set = molprops_set.select_dtypes(include=["float64", "int64"])
molprops_set.index = mols_names
# pickle dataframe to specified directory:
molprops_set.to_pickle(working_dir+"molprops.pickle")
if verbose:
print(molprops_set)
return molprops_set
def computePertMolProps(
perturbation_paths,
molprops_set=None,
free_path="SOLVATED/",
working_dir="features/MOLPROPS/"):
"""
Read featurised FEP molecules and generate matches based on user input perturbations.
Writes each perturbation features by appending it to the features.csv file.
--args
perturbation_paths (list): nested list of shape [[A~B],[C~D]] with strings describing
the perturbations. These combinations will be used to make pairwise extractions
from molprops_set.
molprops_set (pandas dataframe; optional): dataframe object that contains the
featurised FEP dataset. If None, will attempt to pickle from working_dir
free_path (str): path to directory containing perturbation directories
working_dir (str): path to directory to pickle dataset from
--returns
None
"""
# test if input is there:
if molprops_set is None:
try:
molprops_set = pd.read_pickle(working_dir+"molprops.pickle")
except FileNotFoundError:
print("Unable to load pickle file with per-ligand molprop data in absence of molprops_set function input.")
# clean slate featurised perturbations dataset; write column names:
open(working_dir+"featurised_molprops.h5", "w").close()
store = | pd.HDFStore(working_dir+"featurised_molprops.h5") | pandas.HDFStore |
import pandas as pd
import os
import sys
def tail_1(f):
stdin,stdout = os.popen2("tail -n 1 "+f)
stdin.close()
lines = stdout.readlines()
stdout.close()
return lines[0]
def main(arg):
directory = arg
if arg[-1] == '/':
arg = arg[:-1]
print("[INFO] Directory: %s" %arg)
columns_names = ['skill_id', 'mean_acc', 'mean_auc', 'mean_log_loss']
df = pd.DataFrame(columns=columns_names)
for filename in os.listdir(directory):
if filename.endswith('output-estimation'):
print('[INFO] File %s' %filename)
skill_id = int(filename.split('-')[0].split('_')[-1])
print('[INFO] Skill id = %d' %skill_id)
line = tail_1(directory+'/'+filename)
if len(line.split(' '))==12:
accuracy = float(line.split(' ')[6])
auc = float(line.split(' ')[8])
log_loss = float(line.split(' ')[11][:-1])
print('[INFO] ACC %.2f, AUC %.2f, L_L %.2f' %(accuracy, auc, log_loss))
df = df.append( | pd.Series([skill_id, accuracy, auc, log_loss]) | pandas.Series |
#!/usr/bin/env python
import torch
from torchvision import transforms
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.preprocessing import MinMaxScaler
from sklearn import preprocessing
import xgboost as xgb
import matplotlib.pyplot as plt
import shap
import os
import json
import pickle
import os
import time
import sys
import argparse
import pandas as pd
import numpy as np
from AE import *
###Some help functions
# get the computation device
def get_device():
if torch.cuda.is_available():
device = 'cuda:0'
else:
device = 'cpu'
return device
def AE4EXP(epochs,reg_param,add_sparsity,tumor,learning_rate,batch_size,patience,pathwayID):
## Data preprocessing
tumors_list_stratify={"BRCA":True,"COAD":False,"KIRC":False,"LUAD":True,"PRAD":False,"THCA":True} ## Flase indcating there is only one sample in any of these groups
PATH_TO_DATA = '../data/TCGA_'+tumor+'_TPM_Regression_tumor.csv' # path to original data
genes_df = pd.read_csv(PATH_TO_DATA, index_col=0, header=0) # data quality control
phenos = np.array(genes_df)[:,-1]
if pathwayID != "null":
pathway_genes = pd.read_csv("../data/Pathways_GeneIDs_Overlapped_Genes.csv",header=0)
pathway_names = list(pathway_genes["PathwayID"])
one_pathway_genes_df = pathway_genes[pathway_genes["PathwayID"]==pathwayID]
one_pathway_genes_df_names = list(one_pathway_genes_df.columns)
one_pathway_genes_df_names_index = one_pathway_genes_df_names.index("Tumor_Genes_ID")
one_pathway_genes = one_pathway_genes_df.iloc[0,one_pathway_genes_df_names_index].split(";")
genes_pathway_df = genes_df[one_pathway_genes]
genes = np.array(genes_pathway_df)
genes_name_np = np.array(list(genes_pathway_df.columns))
else:
genes = np.array(genes_df)[:,:-1]
genes_name_np = np.array(list(genes_df.columns)[:-1])
genes_np = genes.astype(float)
if tumors_list_stratify.get(tumor):
genes_train, genes_test, phenos_train, phenos_test = train_test_split(genes_np, phenos, test_size=0.2, random_state=44, stratify=phenos)
else:
genes_train, genes_test, phenos_train, phenos_test = train_test_split(genes_np, phenos, test_size=0.2, random_state=44)
genes_train_median_above_1_column_mask = np.median(genes_train, axis=0) > 1
genes_train_median_above_1 = genes_train[:, genes_train_median_above_1_column_mask]
genes_test_median_above_1 = genes_test[:, genes_train_median_above_1_column_mask]
genes_train_test_median_above_1 = genes_np[:, genes_train_median_above_1_column_mask]
genes_name_np_above_1 = genes_name_np[genes_train_median_above_1_column_mask]
genes_train_log2TPM = np.log2(genes_train_median_above_1 + 0.25)
genes_test_log2TPM = np.log2(genes_test_median_above_1 + 0.25)
genes_train_test_log2TPM = np.log2(genes_train_test_median_above_1 + 0.25)
scaler = MinMaxScaler()
scaler.fit(genes_train_log2TPM)
genes_train_log2TPM_MinMaxScaler = scaler.transform(genes_train_log2TPM)
genes_test_log2TPM_MinMaxScaler = scaler.transform(genes_test_log2TPM)
genes_train_test_log2TPM_MinMaxScaler = scaler.transform(genes_train_test_log2TPM)
## Define some parameters
genes_num = genes_train_log2TPM_MinMaxScaler.shape[1]
train_batch_size = genes_train_log2TPM_MinMaxScaler.shape[0] if batch_size==0 else batch_size
test_batch_size = genes_test_log2TPM_MinMaxScaler.shape[0] if batch_size==0 else batch_size
train_test_batch_size = genes_train_test_log2TPM_MinMaxScaler.shape[0] if batch_size==0 else batch_size
genes_train_shape = genes_train_log2TPM_MinMaxScaler.shape
genes_test_shape = genes_test_log2TPM_MinMaxScaler.shape
genes_train_test_shape = genes_train_test_log2TPM_MinMaxScaler.shape
smallest_layer=32 if pathwayID == "null" else 8
genes_num = genes_train_shape[1]
early_stopping_epoch_count=0
best_res_r2=None
#trainloader
trainloader = DataLoader(genes_train_log2TPM_MinMaxScaler, batch_size=train_batch_size, shuffle=False, num_workers=2)
#testloader
testloader = DataLoader(genes_test_log2TPM_MinMaxScaler, batch_size=test_batch_size, shuffle=False, num_workers=2)
#train test loader
allloader = DataLoader(genes_train_test_log2TPM_MinMaxScaler,batch_size=train_test_batch_size, shuffle=False, num_workers=2)
#create AE model
device = get_device()
if pathwayID != "null":
if genes_num < 100:
model = Auto_PathM_Exp(genes_num).to(device)
else:
model = Auto_PathL_Exp(genes_num).to(device)
else:
model = Auto_Exp(genes_num).to(device)
#The loss function
distance = nn.MSELoss()
#The optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=500, gamma=0.8) # lr=lr*gamma*(epoch/step_size)
#Creat ae_res folder
output_dir = '../ae_res'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if pathwayID != "null":
log_fw=open("../ae_res/ae_train_test_"+tumor+"_"+pathwayID+".log","w")
else:
log_fw=open("../ae_res/ae_train_test_"+tumor+".log","w")
#Train and validate the autoencoder neural network
train_loss = []
val_loss = []
do_train=do_test=True
for epoch in range(epochs):
if do_train:
train_sum_loss = 0
model.train()
output_pheno_train = np.zeros(genes_train_shape)
input_pheno_train = np.zeros(genes_train_shape)
coder_train = np.zeros([genes_train_shape[0], smallest_layer])
for batch_count, geno_data in enumerate(trainloader):
train_geno = Variable(geno_data).float().to(device)
# =======forward========
train_output, coder = model.forward(train_geno)
mse_loss = distance(train_output, train_geno)
# =======add sparsity===
if add_sparsity :
model_children = list(model.children())
l1_loss=0
values=train_geno
for i in range(len(model_children)):
values = F.leaky_relu((model_children[i](values)))
l1_loss += torch.mean(torch.abs(values))
# add the sparsity penalty
train_loss = mse_loss + reg_param * l1_loss
else:
train_loss = mse_loss
train_sum_loss += train_loss.item()
# ======get coder and output======
train_output2 = train_output.cpu().detach().numpy()
start_ind = batch_count * train_batch_size
end_ind = batch_count * train_batch_size + train_output2.shape[0]
output_pheno_train[start_ind:end_ind] = train_output2
input_pheno_train[start_ind:end_ind] = geno_data.cpu().numpy()
coder_train[start_ind:end_ind] = coder.cpu().detach().numpy()
# ======backward========
optimizer.zero_grad()
train_loss.backward(retain_graph=True)
optimizer.step()
scheduler.step()
# ===========log============
log_fw.write('LR: {:.6f}\n'.format(float(scheduler.get_last_lr()[0])))
log_fw.write('epoch[{}/{}], train loss:{:.4f}\n'.format(epoch + 1, epochs, train_sum_loss))
train_r2 = r2_score(input_pheno_train, output_pheno_train) # r2_score(y_true, y_pred)
log_fw.write('The average R^2 between y and y_hat for train phenotypes is: {:.4f}\n'.format(train_r2))
# ===========test==========
if do_test:
test_sum_loss = 0
output_pheno_test = np.zeros(genes_test_shape)
input_pheno_test = np.zeros(genes_test_shape)
coder_test = np.zeros([genes_test_shape[0], smallest_layer])
for batch_count, geno_test_data in enumerate(testloader):
test_geno = Variable(geno_test_data).float().to(device)
# =======forward========
test_output, coder = model.forward(test_geno)
test_loss = distance(test_output, test_geno)
test_sum_loss += test_loss.item()
# ======get code and ae_res======
test_output2 = test_output.cpu().detach().numpy()
start_ind = batch_count * test_batch_size
end_ind = batch_count * test_batch_size + test_output2.shape[0]
output_pheno_test[start_ind:end_ind] = test_output2
input_pheno_test[start_ind:end_ind] = test_geno.cpu().numpy()
coder_test[start_ind:end_ind] = coder.cpu().detach().numpy()
log_fw.write('LR: {:.6f}\n'.format(float(scheduler.get_last_lr()[0])))
log_fw.write('epoch[{}/{}], test loss:{:.4f}\n'.format(epoch + 1, epochs, test_sum_loss))
test_r2 = r2_score(input_pheno_test, output_pheno_test) # r2_score(y_true, y_pred)
log_fw.write('The average R^2 between y and y_hat for test phenotypes is: {:.4f}\n'.format(test_r2))
##Early stopping
if best_res_r2 is None:
best_res_r2 = test_r2
if pathwayID != "null":
torch.save(model.state_dict(), "../ae_res/AE."+tumor+"."+pathwayID+".pt")
else:
torch.save(model.state_dict(), "../ae_res/AE."+tumor+".pt")
elif test_r2 <= best_res_r2-0.0005:
early_stopping_epoch_count+=1
if (early_stopping_epoch_count>=patience) and (epoch>=500):
log_fw.write("Stop training as the test R2 does not increase in "+str(patience)+" rounds\nSaving hidden codes")
break #stop training and testing process
else:
best_res_r2=test_r2
if pathwayID != "null":
torch.save(model.state_dict(), "../ae_res/AE."+tumor+"."+pathwayID+".pt")
else:
torch.save(model.state_dict(), "../ae_res/AE."+tumor+".pt")
early_stopping_epoch_count=0
log_fw.write('The current best R^2 is: {:.4f}\n'.format(best_res_r2))
#Save AE hiddencodes and inputs for downstream SHAP analysis
train_test_coders_ae_res = np.zeros([genes_train_test_shape[0], smallest_layer])
for batch_count, geno_train_test_data in enumerate(allloader):
train_test_geno = Variable(geno_train_test_data).float().to(device)
# =======forward========
train_test_output, train_test_coder = model.forward(train_test_geno)
# ======get code and ae_res======
train_test_output2 = train_test_output.cpu().detach().numpy()
start_ind = batch_count * train_test_batch_size
end_ind = batch_count * train_test_batch_size + train_test_output2.shape[0]
train_test_coders_ae_res[start_ind:end_ind] = train_test_coder.cpu().detach().numpy()
train_test_coders_ae_res_df = pd.DataFrame(train_test_coders_ae_res)
train_test_inputs_df = pd.DataFrame(genes_train_test_log2TPM_MinMaxScaler)
train_test_inputs_df.columns = list(genes_name_np_above_1)
if pathwayID != "null":
train_test_coders_ae_res_df.to_csv("../ae_res/AE.hiddencodes."+tumor+"."+pathwayID+".csv", header=False, index=False)
train_test_inputs_df.to_csv("../ae_res/AE.imputs."+tumor+"."+pathwayID+".csv", header=True, index=False)
else:
train_test_coders_ae_res_df.to_csv("../ae_res/AE.hiddencodes."+tumor+".csv", header=False, index=False)
train_test_inputs_df.to_csv("../ae_res/AE.imputs."+tumor+".csv", header=True, index=False)
log_fw.close()
end_time = time.time()
def XAI4AE(tumor,pathwayID,critical_bound):
if pathwayID != "null":
if not os.path.exists("../ae_res/AE.hiddencodes."+tumor+"."+pathwayID+".csv"):
print("No hidden code file present in this folder, please run XAI4Exp.py again")
exit(0)
else:
if not os.path.exists("../ae_res/AE.hiddencodes."+tumor+".csv"):
print("No hidden code file present in this folder, please run XAI4Exp.py again")
exit(0)
if not os.path.exists("../shap_res/"+tumor):
os.makedirs("../shap_res/"+tumor)
if pathwayID != "null":
#Input path
PATH_TO_DATA_GENE_NAME = "../ae_res/AE.imputs."+tumor+"."+pathwayID+".csv" # path to cleaned data with gene annotation (not gene id) (after quatlity control)
PATH_TO_AE_RESULT = "../ae_res/AE.hiddencodes."+tumor+"."+pathwayID+".csv" # path to AutoEncoder results, alwarys the last epoch result
#Output path
PATH_TO_SAVE_BAR = '../shap_res/'+tumor+'/'+pathwayID+".bar" # path to save SHAP bar chart
PATH_TO_SAVE_SCATTER = '../shap_res/'+tumor+'/'+pathwayID+".scatter" # path to save SHAP scatter chart
PATH_TO_SAVE_GENE_MODULE = '../shap_res/'+tumor+'/'+pathwayID+".summary" # path to save gene module
else:
#Input path
PATH_TO_DATA_GENE_NAME = "../ae_res/AE.imputs."+tumor+".csv" # path to cleaned data with gene annotation (not gene id) (after quatlity control)
PATH_TO_AE_RESULT = "../ae_res/AE.hiddencodes."+tumor+".csv" # path to AutoEncoder results, alwarys the last epoch result
#Output path
PATH_TO_SAVE_BAR = '../shap_res/'+tumor+'/all.bar' # path to save SHAP bar chart
PATH_TO_SAVE_SCATTER = '../shap_res/'+tumor+'/all.scatter' # path to save SHAP scatter chart
PATH_TO_SAVE_GENE_MODULE = '../shap_res/'+tumor+'/all.summary' # path to save gene module
#Load data
gene_df = pd.read_csv(PATH_TO_DATA_GENE_NAME, index_col=None,header=0)
gene_np = np.array(gene_df)
gene_column_num = gene_np.shape[1]
hidden_vars_np = np.array(pd.read_csv(PATH_TO_AE_RESULT, header = None))
hid_column_num = hidden_vars_np.shape[1]
hid_sample_num = hidden_vars_np.shape[0]
gene_id = list(gene_df.columns)
gene_id_name_dict = json.load(open("../data/gencode.v26.annotation.3genes.ENSG.Symbol.short.json","r"))
gene_name = []
for gene in gene_id:
if gene in gene_id_name_dict:
gene_name.append(gene_id_name_dict.get(gene))
else:
print(gene+" does not exist in dict")
gene_name_np = np.array(gene_name)
R2_list=[]
to_writer = True
if to_writer:
writer = pd.ExcelWriter(PATH_TO_SAVE_GENE_MODULE+'.xlsx',engine='xlsxwriter')
shap_values_mean_x_R2=np.zeros(gene_column_num)
for i in range(hid_column_num):
X_train, X_test, Y_train, Y_test = train_test_split(gene_np,hidden_vars_np[:,i],test_size=0.2,random_state=42)
my_model = xgb.XGBRegressor(booster="gbtree",max_depth=20, random_state=42, n_estimators=100,objective='reg:squarederror')
my_model.fit(X_train, Y_train)
Y_predict=my_model.predict(X_test)
R2 = sklearn.metrics.r2_score(Y_test,Y_predict)
tmp=[]
tmp.append("HiddenNode_"+str(i+1))
tmp.append(R2)
R2_list.append(tmp)
explainer = shap.TreeExplainer(my_model)
shap_values = explainer.shap_values(X_test)
## generate gene module
shap_values_mean = np.sum(abs(shap_values),axis=0)/hid_sample_num #calcaute absolute mean across samples
gene_module = pd.DataFrame({'gene_id':np.array(gene_id),'gene_name':np.array(gene_name),'shap_values_mean':np.array(shap_values_mean)}) #generate a datafram
gene_module["shap_values_mean_times_R2"] = np.array(gene_module['shap_values_mean'])*R2
shap_values_mean_x_R2=shap_values_mean_x_R2+np.array(gene_module['shap_values_mean']*R2)
#gene_module = gene_module[gene_module['shap_values_mean']!=0] #remove genes which mean value equals to 0
#gene_module = gene_module.sort_values(by='shap_values_mean',ascending=False) #descending order, we are intrested in large shap values
#gene_module["ln"] = np.log(np.array(gene_module['shap_values_mean'])) #ln helps visualize very small shap values
gene_module.to_excel(writer, sheet_name="HiddenNode_"+str(i+1), na_rep="null",index=False)
## generate bar chart
shap.summary_plot(shap_values, X_test, feature_names=gene_name_np, plot_type='bar', plot_size = (15,10))
plt.savefig(PATH_TO_SAVE_BAR+"_HN"+str(i+1)+'.png', dpi=100, format='png')
plt.close()
## generate scatter chart
shap.summary_plot(shap_values, X_test, feature_names=gene_name_np, plot_size = (15,10))
plt.savefig(PATH_TO_SAVE_SCATTER+"_HN"+str(i+1)+'.png', dpi=100, format='png')
plt.close()
R2_df= | pd.DataFrame(R2_list) | pandas.DataFrame |
import re
import warnings
import numpy as np
import pandas as pd
from Amplo.Utils import clean_keys
class DataProcesser:
def __init__(self,
target: str = None,
float_cols: list = None,
int_cols: list = None,
date_cols: list = None,
cat_cols: list = None,
include_output: bool = False,
missing_values: str = 'interpolate',
outlier_removal: str = 'clip',
z_score_threshold: int = 4,
version: int = 1,
verbosity: int = 0,
):
"""
Preprocessing Class. Cleans a dataset into a workable format.
Deals with Outliers, Missing Values, duplicate rows, data types (floats, categorical and
dates), Not a Numbers, Infinities.
Parameters
----------
target str: Column name of target variable
num_cols list: Numerical columns, all parsed to integers and floats
date_cols list: Date columns, all parsed to pd.datetime format
cat_cols list: Categorical Columns. Currently all one-hot encoded.
missing_values str: How to deal with missing values ('remove', 'interpolate' or 'mean')
outlier_removal str: How to deal with outliers ('clip', 'quantiles', 'z-score' or 'none')
z_score_threshold int: If outlierRemoval='z-score', the threshold is adaptable, default=4.
folder str: Directory for storing the output files
version int: Versioning the output files
mode str: classification / regression
"""
# Tests
mis_values_algo = ['remove_rows', 'remove_cols', 'interpolate', 'mean', 'zero']
assert missing_values in mis_values_algo, \
'Missing values algorithm not implemented, pick from {}'.format(', '.join(mis_values_algo))
out_rem_algo = ['quantiles', 'z-score', 'clip', 'none']
assert outlier_removal in out_rem_algo, \
'Outlier Removal algorithm not implemented, pick from {}'.format(', '.join(out_rem_algo))
# Arguments
self.version = version
self.includeOutput = include_output
self.target = target if target is None else re.sub("[^a-z0-9]", '_', target.lower())
self.float_cols = [] if float_cols is None else [re.sub('[^a-z0-9]', '_', fc.lower()) for fc in float_cols]
self.int_cols = [] if int_cols is None else [re.sub('[^a-z0-9]', '_', ic.lower()) for ic in int_cols]
self.num_cols = self.float_cols + self.int_cols
self.cat_cols = [] if cat_cols is None else [re.sub('[^a-z0-9]', '_', cc.lower()) for cc in cat_cols]
self.date_cols = [] if date_cols is None else [re.sub('[^a-z0-9]', '_', dc.lower()) for dc in date_cols]
if self.target in self.num_cols:
self.num_cols.remove(self.target)
# Algorithms
self.missing_values = missing_values
self.outlier_removal = outlier_removal
self.z_score_threshold = z_score_threshold
# Fitted Settings
self.dummies = {}
self._q1 = None
self._q3 = None
self._means = None
self._stds = None
# Info for Documenting
self.is_fitted = False
self.verbosity = verbosity
self.removedDuplicateRows = 0
self.removedDuplicateColumns = 0
self.removedOutliers = 0
self.imputedMissingValues = 0
self.removedConstantColumns = 0
def fit_transform(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Fits this data cleaning module and returns the transformed data.
Parameters
----------
data [pd.DataFrame]: Input data
Returns
-------
data [pd.DataFrame]: Cleaned input data
"""
if self.verbosity > 0:
print('[AutoML] Data Cleaning Started, ({} x {}) samples'.format(len(data), len(data.keys())))
# Clean Keys
data = clean_keys(data)
# Remove Duplicates
data = self.remove_duplicates(data)
# Infer data-types
self.infer_data_types(data)
# Convert data types
data = self.convert_data_types(data, fit_categorical=True)
# Remove outliers
data = self.remove_outliers(data, fit=True)
# Remove missing values
data = self.remove_missing_values(data)
# Remove Constants
data = self.remove_constants(data)
# Convert integer columns
data = self.convert_float_int(data)
# Clean target
data = self.clean_target(data)
# Finish
self.is_fitted = True
if self.verbosity > 0:
print('[AutoML] Processing completed, ({} x {}) samples returned'.format(len(data), len(data.keys())))
return data
def transform(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Function that takes existing settings (including dummies), and transforms new data.
Parameters
----------
data [pd.DataFrame]: Input data
Returns
-------
data [pd.DataFrame]: Cleaned input data
"""
assert self.is_fitted, "Transform only available for fitted objects, run .fit_transform() first."
# Clean Keys
data = clean_keys(data)
# Impute columns
data = self._impute_columns(data)
# Remove duplicates
data = self.remove_duplicates(data, rows=False)
# Convert data types
data = self.convert_data_types(data, fit_categorical=False)
# Remove outliers
data = self.remove_outliers(data, fit=False)
# Remove missing values
data = self.remove_missing_values(data)
# Convert integer columns
data = self.convert_float_int(data)
return data
def get_settings(self) -> dict:
"""
Get settings to recreate fitted object.
"""
assert self.is_fitted, "Object not yet fitted."
return {
'num_cols': self.num_cols,
'float_cols': self.float_cols,
'int_cols': self.int_cols,
'date_cols': self.date_cols,
'cat_cols': self.cat_cols,
'missing_values': self.missing_values,
'outlier_removal': self.outlier_removal,
'z_score_threshold': self.z_score_threshold,
'_means': None if self._means is None else self._means.to_json(),
'_stds': None if self._stds is None else self._stds.to_json(),
'_q1': None if self._q1 is None else self._q1.to_json(),
'_q3': None if self._q3 is None else self._q3.to_json(),
'dummies': self.dummies,
'fit': {
'imputed_missing_values': self.imputedMissingValues,
'removed_outliers': self.removedOutliers,
'removed_constant_columns': self.removedConstantColumns,
'removed_duplicate_rows': self.removedDuplicateRows,
'removed_duplicate_columns': self.removedDuplicateColumns,
}
}
def load_settings(self, settings: dict) -> None:
"""
Loads settings from dictionary and recreates a fitted object
"""
self.num_cols = settings['num_cols'] if 'num_cols' in settings else []
self.float_cols = settings['float_cols'] if 'float_cols' in settings else []
self.int_cols = settings['int_cols'] if 'int_cols' in settings else []
self.cat_cols = settings['cat_cols'] if 'cat_cols' in settings else []
self.date_cols = settings['date_cols'] if 'date_cols' in settings else []
self.missing_values = settings['missing_values'] if 'missing_values' in settings else []
self.outlier_removal = settings['outlier_removal'] if 'outlier_removal' in settings else []
self.z_score_threshold = settings['z_score_threshold'] if 'z_score_threshold' in settings else []
self._means = None if settings['_means'] is None else pd.read_json(settings['_means'])
self._stds = None if settings['_stds'] is None else pd.read_json(settings['_stds'])
self._q1 = None if settings['_q1'] is None else pd.read_json(settings['_q1'])
self._q3 = None if settings['_q3'] is None else | pd.read_json(settings['_q3']) | pandas.read_json |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Make sure the following dependencies are installed.
#!pip install albumentations --upgrade
#!pip install timm
#!pip install iterative-stratification
__author__ = 'MPWARE: https://www.kaggle.com/mpware'
# In[ ]:
# Configure HOME and DATA_HOME according to your setup
HOME = "./"
DATA_HOME = "./data/"
TRAIN_HOME = DATA_HOME + "train/"
TRAIN_IMAGES_HOME = TRAIN_HOME + "images/"
IMAGE_SIZE = 512 # Image size for training
RESIZED_IMAGE_SIZE = 384 # For random crop
COMPOSE = None # For RGBY support
# Set to True for interactive session
PT_SCRIPT = True # True
# In[ ]:
import sys, os, random, math
import numpy as np
import h5py
import cv2
import torch
import torch.nn as nn
import operator
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
import albumentations as A
import torch.nn.functional as F
import functools
from collections import OrderedDict
import torch.nn.functional as F
from torch.optim import Adam, SGD
import timm
import iterstrat
# In[ ]:
LABEL = "Label"
ID = "ID"
EID = "EID"
IMAGE_WIDTH = "ImageWidth"
IMAGE_HEIGHT = "ImageHeight"
META = "META"
TOTAL = "Total"
EXT = "ext"
DEFAULT = "default"
# 19 class labels. Some rare classes: Mitotic spindle (0.37%), Negative: (0.15%)
class_mapping = {
0: 'Nucleoplasm', 1: 'Nuclear membrane', 2: 'Nucleoli', 3: 'Nucleoli fibrillar center',
4: 'Nuclear speckles', 5: 'Nuclear bodies', 6: 'Endoplasmic reticulum', 7: 'Golgi apparatus', 8: 'Intermediate filaments',
9: 'Actin filaments', 10: 'Microtubules', 11: 'Mitotic spindle', 12: 'Centrosome', 13: 'Plasma membrane', 14: 'Mitochondria',
15: 'Aggresome', 16: 'Cytosol', 17: 'Vesicles and punctate cytosolic patterns', 18: 'Negative',
}
class_mapping_inv = {v:k for k,v in class_mapping.items()}
class_labels = [str(k) for k,v in class_mapping.items()]
class_names = [str(v) for k,v in class_mapping.items()]
LABELS_OHE_START = 3
# In[ ]:
def seed_everything(s):
random.seed(s)
os.environ['PYTHONHASHSEED'] = str(s)
np.random.seed(s)
# Torch
torch.manual_seed(s)
torch.cuda.manual_seed(s)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if torch.cuda.is_available():
torch.cuda.manual_seed_all(s)
# In[ ]:
def l1_loss(A_tensors, B_tensors):
return torch.abs(A_tensors - B_tensors)
class ComboLoss(nn.Module):
def __init__(self, alpha=1.0, beta=1.0, gamma=1.0, from_logits=True, **kwargs):
super().__init__(**kwargs)
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.from_logits = from_logits
print("alpha:", self.alpha, "beta:", self.beta, "gamma:", self.gamma)
self.loss_classification = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, y_pred, y_true, features_single=None, y_pred_tiles=None, features_tiles=None, y_pred_tiled_flatten=None):
loss_ = self.alpha * self.loss_classification(y_pred, y_true).mean()
if features_tiles is not None and self.beta > 0:
logits_reconstruction = y_pred_tiles
loss_tiles_class_ = self.loss_classification(logits_reconstruction, y_true).mean()
loss_ = loss_ + self.beta * loss_tiles_class_
if features_single is not None and features_tiles is not None and self.gamma > 0:
loss_reconstruction_ = l1_loss(features_single, features_tiles).mean()
loss_ = loss_ + self.gamma * loss_reconstruction_
return loss_
# In[ ]:
# Main configuration
class raw_conf:
def __init__(self, factory):
super().__init__()
self.inference = False
self.compose = COMPOSE
self.normalize = False if factory == "HDF5" else True
self.norm_value = None if factory == "HDF5" else 65535.0
# Dataset
self.image_size = None if factory == "HDF5" else IMAGE_SIZE
self.denormalize = 255
# Model
self.mtype = "siamese" # "regular"
self.backbone = 'seresnext50_32x4d' # 'gluon_seresnext101_32x4d' # 'cspresnext50' 'regnety_064'
self.pretrained_weights = "imagenet"
self.INPUT_RANGE = [0, 1]
self.IMG_MEAN = [0.485, 0.456, 0.406, 0.485] if self.compose is None else [0.485, 0.456, 0.406]
self.IMG_STD = [0.229, 0.224, 0.225, 0.229] if self.compose is None else [0.229, 0.224, 0.225]
self.num_classes = 19
self.with_cam = True
self.puzzle_pieces = 4
self.hpa_classifier_weights = None
self.dropout = None
# Model output
self.post_activation = "sigmoid"
self.output_key = "logits" if self.mtype == "regular" else "single_logits" # None
self.output_key_extra = "features" if self.mtype == "regular" else "single_features" # None
self.output_key_siamese = None if self.mtype == "regular" else "tiled_logits"
self.output_key_extra_siamese = None if self.mtype == "regular" else "tiled_features"
# Loss
self.alpha = 1.0 # Single image classification loss
self.beta = 0.0 if self.mtype == "regular" else 1.0 # Reconstructed image classification loss
self.gamma = 0.0 if self.mtype == "regular" else 0.5 # 0.25
self.loss = ComboLoss(alpha=self.alpha, beta=self.beta, gamma=self.gamma)
self.sampler = "prob"
self.sampler_cap = "auto" # None
self.fp16 = True
self.finetune = False
self.optimizer = "Adam" # "SGD"
self.scheduler = None if self.finetune is True or self.optimizer != "Adam" else "ReduceLROnPlateau" # "CosineAnnealingWarmRestarts"
self.scheduler_factor = 0.3
self.scheduler_patience = 8
self.lr = 0.0003
self.min_lr = 0.00005
self.beta1 = 0.9
self.train_verbose = True
self.valid_verbose = True
# Train parameters
self.L_DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.map_location = self.L_DEVICE
self.WORKERS = 0 if PT_SCRIPT is False else 8
self.BATCH_SIZE = 36 if self.mtype == "siamese" else 48
self.ITERATIONS_LOGS = 30
self.CYCLES = 1
self.EPOCHS_PER_CYCLE = 48 # 36
self.EPOCHS = self.CYCLES * self.EPOCHS_PER_CYCLE
self.WARMUP = 0
self.FOLDS = 4
self.METRIC_ = "min" # "max"
self.pin_memory = True
# In[ ]:
# Load CSV data, drop duplicates if any
def prepare_data(filename, ext_name=None):
train_pd = pd.read_csv(DATA_HOME + filename)
train_pd[LABEL] = train_pd[LABEL].apply(literal_eval)
train_pd[LABEL] = train_pd[LABEL].apply(lambda x: [int(l) for l in x])
if EXT not in train_pd.columns:
train_pd.insert(2, EXT, DEFAULT)
if ext_name is not None:
train_pd[EXT] = ext_name
train_pd = train_pd.drop_duplicates(subset=[ID]).reset_index(drop=True)
assert(np.argwhere(train_pd.columns.values == EXT)[0][0] == 2)
return train_pd
# In[ ]:
# Use PIL to support 16 bits, normalize=True to return [0-1.0] float32 image
def read_image(filename, compose=None, normalize=False, norm_value=65535.0, images_root=TRAIN_IMAGES_HOME):
filename = images_root + filename
filename = filename + "_red.png" if "_red.png" not in filename else filename
mt_, pi_, nu_, er_ = filename, filename.replace('_red', '_green'), filename.replace('_red', '_blue'), filename.replace('_red', '_yellow')
if compose is None:
mt = np.asarray(Image.open(mt_)).astype(np.uint16)
pi = np.asarray(Image.open(pi_)).astype(np.uint16)
nu = np.asarray(Image.open(nu_)).astype(np.uint16)
er = np.asarray(Image.open(er_)).astype(np.uint16)
ret = np.dstack((mt, pi, nu, er))
else:
if compose == "RGB":
mt = np.asarray(Image.open(mt_)).astype(np.uint16)
pi = np.asarray(Image.open(pi_)).astype(np.uint16)
nu = np.asarray(Image.open(nu_)).astype(np.uint16)
ret = np.dstack((mt, pi, nu))
elif compose == "RYB":
mt = np.asarray(Image.open(mt_)).astype(np.uint16)
er = np.asarray(Image.open(er_)).astype(np.uint16)
nu = np.asarray(Image.open(nu_)).astype(np.uint16)
ret = np.dstack((mt, er, nu))
elif compose == "RYGYB":
mt = np.asarray(Image.open(mt_))
pi = np.asarray(Image.open(pi_))
nu = np.asarray(Image.open(nu_))
er = np.asarray(Image.open(er_))
ret = np.dstack(((mt + er)/2.0, (pi + er/2)/1.5, nu))
else:
raise Exception("Unknown compose:", compose)
if normalize is True:
# Some images are np.uint16 but from 0-255 range!
if ret.max() > 255:
ret = (ret/norm_value).astype(np.float32)
else:
ret = (ret/255).astype(np.float32)
return ret
# Data available through raw PNG files
class DataFactory:
def __init__(self, paths, conf=None, verbose=False):
super().__init__()
self.paths = paths
self.conf = conf
self.verbose = verbose
print("PNGFile factory") if self.verbose is True else None
def read_image(self, uid, container=None):
images_path = self.paths
if container is not None and container != DEFAULT:
images_path = images_path.replace("images", container)
image = read_image(uid, compose=self.conf.compose, normalize=self.conf.normalize, norm_value=self.conf.norm_value, images_root=images_path)
return image
def cleanup(self):
pass
# Data available through HDF5 files
class HDF5DataFactory:
def __init__(self, paths, conf=None, verbose=False):
super().__init__()
self.paths = paths
self.hdf5_paths = None
self.conf = conf
self.verbose = verbose
self.initialized = False
print("HDF5 factory") if self.verbose is True else None
def initialize_hdf5(self):
if self.initialized is False:
self.hdf5_paths = h5py.File(self.paths, 'r') if isinstance(self.paths, str) else {k: h5py.File(v, 'r') for k, v in self.paths.items()}
self.initialized = True
print("initialize_hdf5", self.hdf5_paths) if self.verbose is True else None
def read_image(self, uid, container=DEFAULT):
self.initialize_hdf5()
hdf5_paths_ = self.hdf5_paths if isinstance(self.hdf5_paths, str) else self.hdf5_paths.get(container)
# Image is already resized, normalized 0-1.0 as float32
image = hdf5_paths_[uid][:,:,:]
if self.conf.compose is not None:
if self.conf.compose == "RGB":
image = image[:, :, [0,1,2]]
elif self.conf.compose == "RYB":
image = image[:, :, [0,3,2]]
elif self.conf.compose == "G":
image = np.dstack((image[:, :, 1], image[:, :, 1], image[:, :, 1]))
elif self.conf.compose == "RYGYB":
ret = np.dstack(((image[:, :, 0] + image[:, :, 3])/2.0, (image[:, :, 1] + image[:, :, 3]/2)/1.5, image[:, :, 2]))
else:
raise Exception("Unknown compose:", self.conf.compose)
return image
def cleanup(self):
if self.hdf5_paths is not None:
[v.close() for k, v in self.hdf5_paths.items()] if isinstance(self.hdf5_paths, dict) else self.hdf5_paths.close()
print("HDF5 factory cleaned") if self.verbose is True else None
# In[ ]:
# Dataset with all images
def zero(x, y=None):
return 0
class HPADataset(Dataset):
def __init__(self, df, factory, conf, subset="train", categoricals=None, augment=None, postprocess=None, modelprepare=None, classes=None, weights=False, dump=None, verbose=False):
super().__init__()
self.df = df
self.categoricals = categoricals
self.subset = subset
self.augment = augment
self.postprocess = postprocess
self.modelprepare = modelprepare
self.classes = classes
self.conf = conf
self.factory = factory
self.dump = dump
self.verbose = verbose
if subset == 'train':
self.get_offset = np.random.randint
elif subset == 'valid':
self.get_offset = zero
elif subset == 'ho':
self.get_offset = zero
elif subset == 'test':
self.get_offset = zero
else:
raise RuntimeError("Unknown subset")
# Compute weights
self.weights = self.compute_weights(self.df) if subset == "train" and weights is True else None
def prob_from_weight(self, labels_list, weights_dict_, cap=None):
labels_weights = np.array([weights_dict_[class_mapping[int(label_)]] for label_ in labels_list])
prob_ = np.nanmean(labels_weights)
if cap is not None:
prob_ = np.clip(prob_, 0, cap) # Clip to avoid too much single rare labels, for example: 95th percentile cut, or top K
return prob_
def compute_weights(self, df_):
weights_dict = {label: 1/df_[label].sum() for label in class_names}
cap_ = self.conf.sampler_cap
if cap_ is not None and cap_ == "auto":
top_weights = sorted(weights_dict.items(), key=operator.itemgetter(1), reverse=True)[:3]
print("top_weights", top_weights) if self.verbose is True else None
cap_ = top_weights[2][1] # Cap to the top 3rd weight
df_dist = df_[[ID, LABEL]].copy()
df_dist["prob"] = df_dist[LABEL].apply(lambda x: self.prob_from_weight(x, weights_dict, cap=cap_))
if self.verbose is True:
print("compute_weights completed, cap:", self.conf.sampler_cap, cap_)
for i, (k, v) in enumerate(weights_dict.items()):
print(i, k, v)
return df_dist[["prob"]]
def cleanup(self):
self.factory.cleanup()
def __len__(self):
return len(self.df)
def read_image(self, row):
uid = row[ID]
container = row[EXT]
# Load image
img = self.factory.read_image(uid, container=container)
# Scale image after cropping
if self.conf.image_size is not None and self.conf.image_size != img.shape[0]:
img = skimage.transform.resize(img, (self.conf.image_size, self.conf.image_size), anti_aliasing=True) # Works with float image
if self.conf.denormalize is not None:
img = (self.conf.denormalize * img).astype(np.uint8)
return img
def get_data(self, row, categoricals):
# Return image
img = self.read_image(row)
# Labels (OHE)
labels = np.zeros(self.conf.num_classes, dtype=np.uint8)
for l in row[LABEL]:
labels[l] = 1
sample = {
'image': img,
'label': labels,
}
if self.dump is not None:
sample[ID] = row[ID]
if EID in row:
sample[META] = np.array([row[EID], int(row[IMAGE_WIDTH]), int(row[IMAGE_HEIGHT])], dtype=np.int32)
# Optional augmentation on RGBY image (uint8)
if self.augment:
tmp = self.augment(image=sample['image'])
sample['image'] = tmp["image"] # Apply on full image
# Mandatory to feed model
if self.modelprepare: # Albumentations to normalize data
tmp = self.modelprepare(image=sample['image'])
sample['image'] = tmp["image"] # Apply on full image
return sample
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
row = self.df.iloc[idx]
sample = self.get_data(row, self.categoricals)
return sample
# In[ ]:
# (BS, CLASSES, 12, 12) - Between 0-1.0
# Adapted from: https://github.com/OFRIN/PuzzleCAM/blob/master/core/puzzle_utils.py
def make_cam(x, epsilon=1e-5):
x = F.relu(x) # (BS, CLASSES, 12, 12)
b, c, h, w = x.size() # (BS, CLASSES, 12, 21)
flat_x = x.view(b, c, (h * w)) # (BS, CLASSES, 12x12)
max_value = flat_x.max(axis=-1)[0].view((b, c, 1, 1))
return F.relu(x - epsilon) / (max_value + epsilon) # (BS, CLASSES, 12, 12)
# Input (BS, C, H, W), num_pieces = 4
# Return (BS*4, C, H//4, W//4)
# Adapted from: https://github.com/OFRIN/PuzzleCAM/blob/master/core/puzzle_utils.py
def tile_features(features, num_pieces):
_, _, h, w = features.size()
num_pieces_per_line = int(math.sqrt(num_pieces))
h_per_patch = h // num_pieces_per_line
w_per_patch = w // num_pieces_per_line
"""
+-----+-----+
| 1 | 2 |
+-----+-----+
| 3 | 4 |
+-----+-----+
+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 |
+-----+-----+-----+-----+
"""
patches = []
for splitted_features in torch.split(features, h_per_patch, dim=2):
for patch in torch.split(splitted_features, w_per_patch, dim=3):
patches.append(patch)
return torch.cat(patches, dim=0)
# Adapted from: https://github.com/OFRIN/PuzzleCAM/blob/master/core/puzzle_utils.py
def merge_features(features, num_pieces, batch_size):
"""
+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 |
+-----+-----+-----+-----+
+-----+-----+
| 1 | 2 |
+-----+-----+
| 3 | 4 |
+-----+-----+
"""
features_list = list(torch.split(features, batch_size))
num_pieces_per_line = int(math.sqrt(num_pieces))
index = 0
ext_h_list = []
for _ in range(num_pieces_per_line):
ext_w_list = []
for _ in range(num_pieces_per_line):
ext_w_list.append(features_list[index])
index += 1
ext_h_list.append(torch.cat(ext_w_list, dim=3))
features = torch.cat(ext_h_list, dim=2)
return features
# In[ ]:
# Add 4 channels support
def get_4channels_conv(stem_conv2d):
stem_conv2d_pretrained_weight = stem_conv2d.weight.clone()
stem_conv2d_ = nn.Conv2d(4,
stem_conv2d.out_channels, kernel_size=stem_conv2d.kernel_size, stride=stem_conv2d.stride, padding=stem_conv2d.padding, padding_mode=stem_conv2d.padding_mode, dilation=stem_conv2d.dilation,
bias=True if stem_conv2d.bias is True else False)
stem_conv2d_.weight = nn.Parameter(torch.cat([stem_conv2d_pretrained_weight, nn.Parameter(torch.mean(stem_conv2d_pretrained_weight, axis=1).unsqueeze(1))], axis=1))
return stem_conv2d_
class HPAModel(nn.Module):
def __init__(self, cfg):
super().__init__()
self.num_classes = cfg.num_classes
self.backbone = cfg.backbone
self.with_cam = cfg.with_cam
self.drop_rate = cfg.dropout
self.preprocess_input_fn = get_preprocessing_fn(cfg)
# Unpooled/NoClassifier (features only)
self.mfeatures = timm.create_model(self.backbone, pretrained=True, num_classes=0, global_pool='')
# Add one channel more
if cfg.compose is None:
if "regnet" in self.backbone:
self.mfeatures.stem.conv = get_4channels_conv(self.mfeatures.stem.conv)
elif "csp" in self.backbone:
self.mfeatures.stem[0].conv = get_4channels_conv(self.mfeatures.stem[0].conv)
elif "resnest" in self.backbone:
self.mfeatures.conv1[0] = get_4channels_conv(self.mfeatures.conv1[0])
elif "seresnext" in self.backbone:
self.mfeatures.conv1 = get_4channels_conv(self.mfeatures.conv1)
elif "densenet" in self.backbone:
self.mfeatures.features.conv0 = get_4channels_conv(self.mfeatures.features.conv0)
# Classifier
num_chs = self.mfeatures.feature_info[-1]['num_chs'] # 1296 # 2048
self.mclassifier = nn.Conv2d(num_chs, self.num_classes, 1, bias=False)
# self.mclassifier = timm.models.layers.linear.Linear(num_chs, self.num_classes, bias=True)
# Initialize weights
self.initialize([self.mclassifier])
print("Model %s, last channels: %d, classes: %d" % (cfg.backbone, num_chs, self.num_classes))
# Pooling
def adaptive_avgmax_pool2d(self, x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return 0.5 * (x_avg + x_max)
# Average pooling 2d
def global_average_pooling_2d(self, x, keepdims=False):
x = torch.mean(x.view(x.size(0), x.size(1), -1), -1)
if keepdims:
x = x.view(x.size(0), x.size(1), 1, 1)
return x
def gap(self, x, keepdims=False):
return self.global_average_pooling_2d(x, keepdims=keepdims)
def initialize(self, modules):
for m in modules:
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
# ([BS, C, H, W])
x = self.mfeatures(x) # (BS, num_chs, 12, 12)
features = None
if self.with_cam is True:
if self.drop_rate is not None and self.drop_rate > 0.0:
x = F.dropout(x, p=float(self.drop_rate), training=self.training)
features = self.mclassifier(x) # (BS, CLASSES, 12, 12)
logits = self.gap(features) # (BS, CLASSES)
else:
x = self.gap(x, keepdims=True) # (BS, num_chs, 1, 1)
if self.drop_rate is not None and self.drop_rate > 0.0:
x = F.dropout(x, p=float(self.drop_rate), training=self.training)
logits = self.mclassifier(x).view(-1, self.num_classes) # (BS, CLASSES)
return {"logits": logits, "features": features} # (BS, CLASSES), (BS, CLASSES, 12, 12)
class HPASiameseModel(nn.Module):
def __init__(self, cfg):
super().__init__()
self.num_classes = cfg.num_classes
self.backbone = cfg.backbone
self.with_cam = cfg.with_cam
self.puzzle_pieces = cfg.puzzle_pieces
self.preprocess_input_fn = get_preprocessing_fn(cfg)
self.cnn1 = HPAModel(cfg)
if cfg.hpa_classifier_weights is not None:
if os.path.exists(cfg.hpa_classifier_weights):
print("Load regular HPA weights from: %s" % cfg.hpa_classifier_weights)
self.cnn1.load_state_dict(torch.load(cfg.hpa_classifier_weights, map_location=cfg.map_location))
print("Model %s" % (cfg.mtype))
def forward_once(self, x):
x = self.cnn1(x)
return x # {"logits": logits, "features": features}
def forward(self, x):
# ([BS, C, H, W])
bs, _, _, _ = x.shape
# Full image
x1 = self.forward_once(x)
single_logits, single_features = x1["logits"], x1["features"]
# Tiled image
tiled_x = tile_features(x, self.puzzle_pieces) # (BS*puzzle_pieces, C, H//puzzle_pieces, W//puzzle_pieces) # 2x memory
x2 = self.forward_once(tiled_x) # Shared weights
tiled_logits, tiled_features = x2["logits"], x2["features"]
tiled_features = merge_features(tiled_features, self.puzzle_pieces, bs) # (BS, CLASSES, 12, 12)
tiled_logits_reconstructed = self.cnn1.gap(tiled_features) # (BS, CLASSES)
return {
"single_logits": single_logits, "single_features": single_features,
"tiled_logits_flatten": tiled_logits, "tiled_features": tiled_features,
"tiled_logits": tiled_logits_reconstructed,
}
# In[ ]:
def build_model(cfg, device, encoder_weights=None):
if cfg.mtype == "siamese":
model = HPASiameseModel(cfg)
else:
model = HPAModel(cfg)
# Load weights
if (encoder_weights is not None) and ("imagenet" not in encoder_weights):
if os.path.exists(encoder_weights):
print("Load weights before optimizer from: %s" % encoder_weights)
model.load_state_dict(torch.load(encoder_weights, map_location=cfg.map_location))
model = model.to(device)
if cfg.optimizer == "Adam":
optimizer = Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.lr, betas=(cfg.beta1, 0.999))
elif cfg.optimizer == "SGD":
optimizer = SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.lr, momentum=0.9)
# Loss
loss = cfg.loss
loss = loss.to(device)
return model, loss, optimizer
# In[ ]:
def format_logs(logs):
str_logs = ['{} - {:.4}'.format(k, v) for k, v in logs.items()]
s = ', '.join(str_logs)
return s
# Train loop
def train_loop_fn(batches, preprocessing, model, optimizer, criterion, tmp_conf, device, stage="Train", verbose=True, scaler=None):
model.train()
count, train_loss = 0, 0.0
all_predicted_probs, all_target_classes = None, None
with tqdm(batches, desc=stage, file=sys.stdout, disable=not(verbose)) as iterator:
for x, batch in enumerate(iterator, 1):
try:
for k, v in batch.items():
batch[k] = v.to(device)
samples_data, labels_data = batch.get("image"), batch.get("label")
optimizer.zero_grad() # reset gradient
# Model
with torch.cuda.amp.autocast(enabled=tmp_conf.fp16):
# Preprocessing
with torch.no_grad():
data = preprocessing(samples_data) if preprocessing is not None else samples_data
output = model(data) # forward pass
if tmp_conf.mtype == "siamese":
loss = criterion(output[tmp_conf.output_key], labels_data.float(),
features_single=output[tmp_conf.output_key_extra],
y_pred_tiles=output[tmp_conf.output_key_siamese],
features_tiles=output[tmp_conf.output_key_extra_siamese],
y_pred_tiled_flatten=output["tiled_logits_flatten"])
output = output[tmp_conf.output_key] if tmp_conf.output_key is not None else output
else:
output = output[tmp_conf.output_key] if tmp_conf.output_key is not None else output
# Compute loss
loss = criterion(output, labels_data.float())
if (tmp_conf.ITERATIONS_LOGS > 0) and (x % tmp_conf.ITERATIONS_LOGS == 0):
loss_value = loss.item()
if ~np.isnan(loss_value): train_loss += loss_value
else: print("Warning: NaN loss")
# backward pass
scaler.scale(loss).backward() if scaler is not None else loss.backward()
# Update weights
if scaler is not None:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
if (tmp_conf.ITERATIONS_LOGS > 0) and (x % tmp_conf.ITERATIONS_LOGS == 0):
# Labels predictions
predicted_probs = torch.sigmoid(output) if tmp_conf.post_activation == "sigmoid" else output
predicted_probs = predicted_probs.detach().cpu().numpy()
target_classes = labels_data.detach().cpu().numpy()
# Concatenate for all batches
all_predicted_probs = np.concatenate([all_predicted_probs, predicted_probs], axis=0) if all_predicted_probs is not None else predicted_probs
all_target_classes = np.concatenate([all_target_classes, target_classes], axis=0) if all_target_classes is not None else target_classes
count += 1
if verbose:
scores_str = {"train_%s" % m.__name__: m(all_target_classes, all_predicted_probs) for m in METRICS_PROBS}
scores_str["train_loss"] = (train_loss / count)
iterator.set_postfix_str(format_logs(scores_str))
except Exception as ex:
print("Training batch error:", ex)
scores = {"train_%s" % m.__name__: m(all_target_classes, all_predicted_probs) for m in METRICS_PROBS}
scores["train_loss"] = (train_loss / count)
return (scores, all_target_classes, all_predicted_probs)
# In[ ]:
# Valid loop
def valid_loop_fn(batches, preprocessing, model, criterion, tmp_conf, device, stage="Valid", verbose=True):
model.eval()
count, valid_loss = 0, 0.0
all_predicted_probs, all_target_classes = None, None
with tqdm(batches, desc=stage, file=sys.stdout, disable=not(verbose)) as iterator:
for batch in iterator:
try:
for k, v in batch.items():
batch[k] = v.to(device)
samples_data, labels_data = batch.get("image"), batch.get("label")
with torch.no_grad():
# NN model
with torch.cuda.amp.autocast(enabled=tmp_conf.fp16):
# Preprocessing
data = preprocessing(samples_data) if preprocessing is not None else samples_data
output = model(data) # forward pass
if tmp_conf.mtype == "siamese":
loss = criterion(output[tmp_conf.output_key], labels_data.float(),
features_single=output[tmp_conf.output_key_extra],
y_pred_tiles=output[tmp_conf.output_key_siamese],
features_tiles=output[tmp_conf.output_key_extra_siamese],
y_pred_tiled_flatten=output["tiled_logits_flatten"])
output = output[tmp_conf.output_key] if tmp_conf.output_key is not None else output
else:
output = output[tmp_conf.output_key] if tmp_conf.output_key is not None else output
# Compute loss
loss = criterion(output, labels_data.float())
loss_value = loss.item()
if ~np.isnan(loss_value): valid_loss += loss_value
else: print("Warning: NaN loss")
# Labels predictions
predicted_probs = torch.sigmoid(output) if tmp_conf.post_activation == "sigmoid" else output
predicted_probs = predicted_probs.detach().cpu().numpy()
target_classes = labels_data.detach().cpu().numpy()
# Concatenate for all batches
all_predicted_probs = np.concatenate([all_predicted_probs, predicted_probs], axis=0) if all_predicted_probs is not None else predicted_probs
all_target_classes = np.concatenate([all_target_classes, target_classes], axis=0) if all_target_classes is not None else target_classes
count += 1
if verbose:
scores_str = {"valid_%s" % m.__name__: m(all_target_classes, all_predicted_probs) for m in METRICS_PROBS}
scores_str["valid_loss"] = (valid_loss / count)
iterator.set_postfix_str(format_logs(scores_str))
except Exception as ex:
print("Validation batch error:", ex)
scores = {"valid_%s" % m.__name__: m(all_target_classes, all_predicted_probs) for m in METRICS_PROBS}
scores["valid_loss"] = (valid_loss / count)
return (scores, all_target_classes, all_predicted_probs)
# In[ ]:
# Train one fold
def run_stage(X_train, X_valid, stage, fold, device):
# Build model
snapshot_path = "%s/fold%d/%s/snapshots" % (MODEL_PATH, fold, stage)
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
cnn_model, criterion, optimizer = build_model(conf, device,
encoder_weights=os.path.join(snapshot_path.replace(stage, PRETRAINED_STAGE), MODEL_BEST) if PRETRAINED_STAGE is not None else None)
if RESUME == True:
resume_path = os.path.join(snapshot_path, MODEL_BEST)
if os.path.exists(resume_path):
cnn_model.load_state_dict(torch.load(resume_path, map_location=conf.map_location))
print("Resuming, model weights loaded: %s" % resume_path)
factory = DataFactory_(ALL_IMAGES, conf=conf)
# Datasets
train_dataset = HPADataset(X_train, factory, conf, subset="train", augment=image_augmentation_train, modelprepare=get_preprocessing(cnn_model.preprocess_input_fn), dump=None, weights=True, verbose=True)
valid_dataset = HPADataset(X_valid, factory, conf, subset="valid", augment=None, modelprepare=get_preprocessing(cnn_model.preprocess_input_fn), dump=None, verbose=False) if X_valid is not None else None
train_sampler = WeightedRandomSampler(weights=train_dataset.weights[conf.sampler].values, replacement=True, num_samples=len(train_dataset)) if conf.sampler is not None else None
print("Stage:", stage, "fold:", fold, "on:", device, "workers:", conf.WORKERS, "post_activation:", conf.post_activation, "batch size:", conf.BATCH_SIZE, "metric_:", conf.METRIC_,
"train dataset:", len(train_dataset), "valid dataset:", len(valid_dataset) if valid_dataset is not None else None, "num_classes:", conf.num_classes, "fp16:", conf.fp16, "aug:", image_augmentation_train,
"sampler:", train_sampler)
# Dataloaders
train_loader = DataLoader(train_dataset, batch_size=conf.BATCH_SIZE, sampler=train_sampler, num_workers=conf.WORKERS, drop_last = False, pin_memory=conf.pin_memory, shuffle=True if train_sampler is None else False)
valid_loader = DataLoader(valid_dataset, batch_size=conf.BATCH_SIZE, shuffle=False, num_workers=conf.WORKERS, drop_last = False, pin_memory=conf.pin_memory) if X_valid is not None else None
scheduler = None
if conf.scheduler is not None:
if conf.scheduler == "ReduceLROnPlateau":
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode=conf.METRIC_, factor=conf.scheduler_factor, min_lr=0.000001, patience=conf.scheduler_patience, verbose=True)
else:
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, conf.EPOCHS_PER_CYCLE, T_mult=1, eta_min=conf.min_lr)
print(criterion, optimizer, scheduler)
metric = METRIC_NAME
valid_loss_min = np.Inf
metric_loss_criterion = np.Inf if conf.METRIC_ == "min" else -np.Inf
history = []
scaler = torch.cuda.amp.GradScaler(enabled=conf.fp16) if conf.fp16 is True else None
for epoch in tqdm(range(1, conf.EPOCHS + 1)):
lr = optimizer.param_groups[0]['lr'] if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) else scheduler.get_last_lr()[0] if scheduler is not None else optimizer.param_groups[0]['lr'] if isinstance(optimizer, torch.optim.SGD) or isinstance(optimizer, torch.optim.Adam) else optimizer.get_last_lr()
info = "[%d], lr=%.7f" % (epoch, lr)
# Train loop
train_scores, _, _ = train_loop_fn(train_loader, None, cnn_model, optimizer, criterion, conf, device, stage="Train%s" % info, verbose=conf.train_verbose, scaler=scaler)
# Validation loop
valid_scores, _, all_predicted_probs_ = valid_loop_fn(valid_loader, None, cnn_model, criterion, conf, device, stage="Valid%s" % info, verbose=conf.valid_verbose) if valid_loader is not None else ({"valid_%s" % metric: 0, "valid_loss": 0}, None, None)
# Keep track of loss and metrics
history.append({"epoch":epoch, "lr": lr, **train_scores, **valid_scores})
if conf.scheduler is not None:
scheduler.step(valid_scores["valid_%s" % metric]) if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) else scheduler.step()
metric_loss = valid_scores["valid_%s" % metric]
if (conf.METRIC_ == "min" and metric_loss < metric_loss_criterion and epoch > 1) or (conf.METRIC_ == "max" and metric_loss > metric_loss_criterion and epoch > 1):
print("Epoch%s, Valid loss from: %.4f to %.4f, Metric improved from %.4f to %.4f, saving model ..." % (info, valid_loss_min, valid_scores["valid_loss"], metric_loss_criterion, metric_loss))
metric_loss_criterion = metric_loss
valid_loss_min = valid_scores["valid_loss"]
torch.save(cnn_model.state_dict(), os.path.join(snapshot_path, MODEL_BEST))
# Save per image OOF
oof_pd = pd.DataFrame(all_predicted_probs_)
oof_pd = oof_pd.set_index(X_valid[ID].values)
oof_pd.to_csv("%s/oof_%d.csv" % (snapshot_path, fold))
factory.cleanup()
if history:
# Plot training history
history_pd = | pd.DataFrame(history[1:]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 21:56:08 2020
@author: <NAME>
"""
# STEP1----------------- # Importing the libraries------------
#-------------------------------------------------------------
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import glob
import scipy.signal as ss
import csv
import sklearn
from quilt.data.ResidentMario import missingno_data
import missingno as msno
import seaborn as sns
from sklearn.impute import SimpleImputer
# STEP2------------------# Importing the DATASET ------------
#------------------------------------------------------------
# Loading data from the iMotions the path to csv file directory
os.chdir("\\ML4TakeOver\\Data\\RawData")
directory = os.getcwd()
dataFrame_takeover = pd.read_csv('takeover_Alarm_Eye_Car_Data_10sec.csv')
dataFrame_takeover = dataFrame_takeover.drop(['Unnamed: 0','Unnamed: 0.1',
'CurrentGear','GazeVelocityAngle','GazeRightx', 'GazeRighty',
'AutoGear','AutoBrake','GazeLeftx', 'GazeLefty'], axis=1)
## CHANGING FALSE ALARM TO TRUE ALARM FOR FIRST few participants CHECK IF THEY SHOULD HA
searchforSeries = ['004','005','006','007','008']
dataFrame_takeover.loc[(dataFrame_takeover['Name'].str.contains('|'.join(searchforSeries))), 'Coming_AlarmType'] = 'TA'
# STEP5============================ Adding NoneDriving Task column ======================
#========================================================================================
### creat Task column
# map task to the alarm
TaskAlarm = {'Reading' : [16,84,103,339],
'Cell': [5, 259, 284, 323],
'Talk': [137, 178, 185, 332],
'Question': [213, 254, 191]}
dataFrame_takeover['NDTask'] = 'XXX'
dataFrame_takeover.loc[dataFrame_takeover['Coming_Alarm'].isin(TaskAlarm['Reading']), 'NDTask'] = 'Reading' # reading task
dataFrame_takeover.loc[dataFrame_takeover['Coming_Alarm'].isin(TaskAlarm['Cell']), 'NDTask'] = 'Cell' # cell task
dataFrame_takeover.loc[dataFrame_takeover['Coming_Alarm'].isin(TaskAlarm['Talk']), 'NDTask'] = 'Talk' # talk task
dataFrame_takeover.loc[dataFrame_takeover['Coming_Alarm'].isin(TaskAlarm['Question']), 'NDTask'] = 'Question' # question task
#================= Visualizing "TakeOver/Not-takeover" for each Alarm type ========================
#==========================================================================================================
# Remove 000 from data for visualization
# we don't have zero alarm type anymore
dataFrame_Alarm = dataFrame_takeover
# check the number of user's per alarm
tmp_result = pd.DataFrame(dataFrame_Alarm.groupby(['Coming_Alarm']).agg({'Name': 'unique'}).reset_index())
[len(a) for a in tmp_result['Name']]
tmp2 = pd.DataFrame(dataFrame_Alarm.groupby(['Name']).agg({'Coming_Alarm': 'unique'}).reset_index())
[len(a) for a in tmp2['Coming_Alarm']]
# How many takeover and not-takeover per alarm?
dataFrame_Alarm.groupby(['Coming_AlarmType','Takeover']).size().plot(kind = 'barh', legend = False) # Frequency Based
plt.show()
dataFrame_Alarm.groupby(['Coming_AlarmType','Takeover']).agg({"Name": lambda x: x.nunique()}).plot(kind = 'barh', legend = False)
# Takeover frequency per individuals
tmp_dataframe = pd.DataFrame(dataFrame_Alarm.groupby(['Name', 'Coming_AlarmType','Takeover']).agg({"Coming_Alarm": lambda x: x.nunique()}))
tmp_dataframe.to_csv("UserComingAlarmType"+'.csv')
dataFrame_Alarm.groupby(['Name', 'Coming_AlarmType']).agg({"Takeover": lambda x: x.nunique()})
dataFrame_Alarm.groupby(['Name', 'Coming_AlarmType','Takeover']).size().unstack().plot(kind = 'bar', stacked = True)
dataFrame_AlarmIndividual = pd.DataFrame(dataFrame_Alarm.groupby(['Name', 'Coming_AlarmType','Takeover']).size().reset_index(name = 'frequency'))
pd.DataFrame(tmp_dataframe).transpose().to_csv("UserComingAlarmType_2"+'.csv')
dataframe_tmp = | pd.DataFrame(tmp_dataframe) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import plot,iplot
from scipy.stats import norm, kurtosis
import os
from scipy.signal import butter, lfilter, freqz
from scipy import signal
from sklearn.model_selection import train_test_split
from collections import Counter
import warnings
warnings.filterwarnings(action='once')
plt.rcParams["figure.figsize"] = 16,12
def create_labels():
labels = pd.read_csv('../data/RawData/labels.txt', sep=" ", header=None)
labels.columns = ['experiment','person','activity','start','end']
return labels
def read_data():
"""Read all data to a dataframe"""
list_df = [] #a list to collect the dataframes
for i in range(1,62):
if i < 10:
i = '0' + str(i)
else:
i = str(i)
for j in os.listdir('../data/RawData/'):
if "acc_exp" + i in j:
acc_path = "../data/RawData/" + j
elif "gyro_exp" + i in j:
gyro_path = "../data/RawData/" + j
acc_df = pd.read_csv(acc_path, sep = " ", names=['acc_x','acc_y','acc_z'])
gyro_df = | pd.read_csv(gyro_path, sep = " ", names=['gyro_x','gyro_y','gyro_z']) | pandas.read_csv |
import os
import json
import datetime
import pandas as pd
import boto3
from psycopg2 import connect
from predict import predict
from config import config
from utils.query import cursor, s3, con, pd_query
def score_predictions(validation, predictions, iou_thresh, concepts):
# Maintain a set of predicted objects to verify
detected_objects = []
obj_map = predictions.groupby("objectid", sort=False).label.max()
# group predictions by video frames
predictions = predictions.groupby("frame_num", sort=False)
predictions = [df for _, df in predictions]
# mapping frames to predictions index
frame_data = {}
for i, group in enumerate(predictions):
frame_num = group.iloc[0]["frame_num"]
frame_data[frame_num] = i
# group validation annotations by frames
validation = validation.groupby("frame_num", sort=False)
validation = [df for _, df in validation]
# initialize counters for each concept
true_positives = dict(zip(concepts, [0] * len(concepts)))
false_positives = dict(zip(concepts, [0] * len(concepts)))
false_negatives = dict(zip(concepts, [0] * len(concepts)))
# get true and false positives for each frame of validation data
for group in validation:
try: # get corresponding predictions for this frame
frame_num = group.iloc[0]["frame_num"]
predicted = predictions[frame_data[frame_num]]
except:
continue # False Negatives already covered
detected_truths = dict(zip(concepts, [0] * len(concepts)))
for index, truth in group.iterrows():
for index, prediction in predicted.iterrows():
if (
prediction.label == truth.label
and predict.compute_IOU(truth, prediction) > iou_thresh
and prediction.objectid not in detected_objects
):
detected_objects.append(prediction.objectid)
true_positives[prediction.label] += 1
detected_truths[prediction.label] += 1
# False Negatives (Missed ground truth predicitions)
counts = group.label.value_counts()
for concept in concepts:
count = counts[concept] if (concept in counts.index) else 0
false_negatives[concept] += count - detected_truths[concept]
# False Positives (No ground truth prediction at any frame for that object)
undetected_objects = set(obj_map.index) - set(detected_objects)
for obj in undetected_objects:
concept = obj_map[obj]
false_positives[concept] += 1
metrics = | pd.DataFrame() | pandas.DataFrame |
import hashlib
from io import StringIO
import os.path
from pathlib import Path
import requests
import pandas as pd
def file_hash(path_obj):
h = hashlib.sha256()
with path_obj.open(mode='rb', buffering=0) as f:
for b in iter(lambda: f.read(128 * 1024), b''):
h.update(b)
return str(h.hexdigest())
class Dataset:
def __init__(self, filename=None, nrows=None):
self.filename = filename
self.maybe_download()
self.dataframe = | pd.read_csv(self.filename, nrows=nrows) | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Set pandas display settings
#pd.set_option('display.max_row', 5)
#pd.set_option('display.max_columns', 120)
pd.set_option('display.width', 1000)
data_path = "./udacity_DLNF/Part1.NeuralNetworks/Proj_1/first-neural-network/Bike-Sharing-Dataset/hour.csv"
rides = pd.read_csv(data_path)
# data is between 2011-01-01 ~ 2012-12-31 by "dteday"
# - workingday = weekend is not workingday (0), the others are (1)
# - temp = temperature
# - atemp = ??
# - hum = humidity
# print(rides.head())
# print(rides.tail())
# print("distinct value of 'workingday': {0}".format(
# rides['workingday'].unique()))
# print("distinct value of 'atemp'?? : {0}".format(
# rides['atemp'].unique()))
# show plot of 10 days (2011-01-01 ~ 2011-01-10)
#rides[:24 * 10].plot(x='dteday', y='cnt')
# plt.show()
##################################
# Dummy variables
#####
# 특정 변수의 값을 바이너리(0 또는 1) 변수 매트릭스로 변경하는 작업.
# 예를 들어, season 변수의 값은 1~4 사이의 값을 가지는데 이를 바이너리 매트릭스로 변경하면,
# season --> (season1, season2, season3, season4)
# 1 --> (1, 0, 0, 0)
# 2 --> (0, 1, 0, 0)
# 3 --> (0, 0, 1, 0)
# 4 --> (0, 0, 0, 1)
# 이렇게 변경된다.
# rides[:].plot(x='dteday', y='season')
# rides['season'].value_counts().plot(kind='bar')
# dummy_season = pd.get_dummies(rides['season'], prefix='season')
# dummy_season = pd.concat([rides['dteday'], dummy_season], axis=1)
# print(dummy_season.head())
# dummy_season[:].plot(x='dteday', y='season_1')
# dummy_season[:].plot(x='dteday', y='season_2')
# dummy_season[:].plot(x='dteday', y='season_3')
# dummy_season[:].plot(x='dteday', y='season_4')
# plt.show()
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
#print("distinct value of '{0}': {1}".format(each, rides[each].unique()))
dummies = | pd.get_dummies(rides[each], prefix=each, drop_first=False) | pandas.get_dummies |
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 10:14:53 2020
@author: <NAME>
"""
# to remove the warning in my code
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import GaussianNB
# Reading the train and testing data
train_data = pd.read_csv("D:\\Studies\\Machine Learning\\Titanic Prediction\\data\\train.csv")
test_data = pd.read_csv("D:\\Studies\\Machine Learning\\Titanic Prediction\\data\\test.csv")
check=pd.read_csv("D:\\Studies\\Machine Learning\\Titanic Prediction\\data\\gender_submission.csv")
# calculating the null values
def print_null():
print("\nTRAIN")
print(train_data.isnull().sum())
print("\nTEST")
print(test_data.isnull().sum())
def print_shape():
print("Train:",train_data.shape)
print("\nTest:",test_data.shape)
def replacenull_train_embarked():
train_data['Embarked']=np.where((train_data.Pclass==1),'C',train_data.Embarked)
def fare_test_null():
test_data['Fare'].fillna((test_data['Fare'].mean()),inplace=True)
def process_age(df,cut_points,label_names):
df["Age"] = df["Age"].fillna(-0.5)
df["Age_categories"] = pd.cut(df["Age"],cut_points,labels=label_names)
return df
# we now drop the cabin which is of no use
def drop_Cabin():
test_data.drop(['Cabin'],axis=1)
train_data.drop(['Cabin'],axis=1)
def replace_malefemale(): # 1 is male and 0 is female
train_data['Sex']=np.where((train_data.Sex=='male'),1,train_data.Sex)
test_data['Sex']=np.where((test_data.Sex=='male'),1,test_data.Sex)
train_data['Sex']=np.where((train_data.Sex=='female'),0,train_data.Sex)
test_data['Sex']=np.where((test_data.Sex=='female'),0,test_data.Sex)
cut_points = [-1,0,5,12,18,35,60,100]
#label_names = ["Missing","Infant","Child","Teenager","Young Adult","Adult","Senior"]
label_names = [0,1,2,3,4,5,6]
train = process_age(train_data,cut_points,label_names)
test = process_age(test_data,cut_points,label_names)
def plot_agecategory():
pivot = train.pivot_table(index="Age_categories",values='Survived')
pivot.plot.bar()
plt.show()
def model_run():
fare_test_null()
drop_Cabin()
replacenull_train_embarked()
replace_malefemale()
# print_null()
# print_shape()
'''
Now we have our dataset free from the null values now we are going to
use various classifier by taking into an account of AGE, PClass ,Sex
'''
X=[]
model_run()
# Selecting the Age, pclass and sex from train and test as below
xtrain = train_data.iloc[:,[2,4,5,12]] # [2,4,5]
ytrain = train_data["Survived"]
xtest = test_data.iloc[:,[1,3,4,11]] # [1,3,5]
ytest = check["Survived"]
print(xtest.shape)
# Logistic Regression model
classifier = LogisticRegression(random_state = 0)
classifier.fit(xtrain, ytrain)
y_pred = classifier.predict(xtest)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(ytest, y_pred)
print ("Confusion Matrix : \n", cm)
from sklearn.metrics import accuracy_score
print ("Accuracy : ", accuracy_score(ytest, y_pred))
y_pred = | pd.DataFrame(y_pred, columns=['predictions']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import scipy
import scipy.io
import scipy.interpolate
import matplotlib.pyplot as plt # for DEBUG
import os
import glob
import pdb
import copy
INPUT_FILE = "./coutrot/coutrot_database1.mat"
OUTPUT_DIR = "./coutrot/clean_data_1/"
FREQUENCY = 25 # input frequence (Hz)
TARGET_FREQUENCY = 60 # 60 Hz
# 21 inch screen (47.6 x 26.8 cm),
# participant is 56 cm away
# Screen resolution (1024, 768)
# So, 1 visual degree is about 0.97748 cm on screen
# So, visual degree is 21 pixels per degree
VISUAL_DEGREES = 21 # pixels per visual angle
SCREEN_CENTER = (1024 // 2, 768 // 2)
OFF_SCREEN_MARGIN = 10 # number of degrees allowed to be off-screen
VIZ_DEBUG = False
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
NA_FLAG = np.nan
mat_data = scipy.io.loadmat(INPUT_FILE)
mat_data = mat_data["Coutrot_Database1"]
# X is in a complex nested array. We need DFS to extract the information.
# We know that actual data is in 3 dimension
fronts = [mat_data]
# There are 60 videos, and each is in 4 auditorial conditions
# So, there are total 60 * 4 = 240 stimuli
actual_data = []
count = 0
while len(fronts):
f = fronts.pop(0)
if type(f) is not np.ndarray and type(f) is not np.void:
continue
# If it is 3D, it is the actual data
if len(f.shape) == 3: # 3d
actual_data.append(copy.deepcopy(f))
continue
# Else, we need every elements in the array "f"
[fronts.append(_) for _ in f]
print("Count", count)
[print(_.shape) for _ in f]
count += 1
for i, data in enumerate(actual_data):
print("begin process", i)
data = data.astype(float)
# Screen Center to Zero
data[0, :, :] = data[0, :, :] - SCREEN_CENTER[0]
data[1, :, :] = data[1, :, :] - SCREEN_CENTER[1]
# Pixel to Visual Angle
data /= VISUAL_DEGREES
stimulus_time = np.round(data.shape[1] / FREQUENCY * 1000) # milisecond
# Extract Data, and Re-sample (interpolate) to 60 Hz
input_time_signal = np.arange(0, stimulus_time, step=1000 / FREQUENCY)
target_time_signal = np.arange(0,
input_time_signal.max(),
step=1000 / TARGET_FREQUENCY)
for pid in range(data.shape[2]):
x = scipy.interpolate.interp1d(x=input_time_signal, y=data[0, :, pid])
y = scipy.interpolate.interp1d(x=input_time_signal, y=data[1, :, pid])
x_signal = x(target_time_signal)
y_signal = y(target_time_signal)
# Create Output Data Frame
df_data = {"time": target_time_signal, "x": x_signal, "y": y_signal}
df = | pd.DataFrame(df_data) | pandas.DataFrame |
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not be necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype( | ensure_object(result) | pandas.core.dtypes.common.ensure_object |
import sys
import os
import numpy as np
import scipy.io
import scipy.sparse
import numba
import random
import multiprocessing as mp
import subprocess
import cytoolz as toolz
import collections
from itertools import chain
import regex as re
import yaml
import logging
import time
import gzip
import pandas as pd
from functools import partial
from typing import NamedTuple
from pysam import AlignmentFile
from .util import compute_edit_distance, read_gene_map_from_gtf
from .fastq_io import read_fastq
from .barcode import ErrorBarcodeHash, ErrorBarcodeHashConstraint
from .estimate_cell_barcode import get_cell_whitelist
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s: %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
def format_fastq(*fastq, config, method, fastq_out, cb_count,
num_thread=4, max_num_cell=1000000):
"""
Merging fastq reads by putting the cell barcodes and UMI sequences
to the headers of the cDNA reads
:param config: the config file
:param method: the library preparation protocol, e.g., can be one of
10X, Drop-seq, InDrop, Seq-Well, CEL-seq2, sci-RNA-seq, SPLiT-seq,
you can add protocol to the configure file easily
by specifying the read structures.
A template configuration file is provided in scumi/config.yaml
:param fastq: input fastq files
:param fastq_out: the output fastq file
:param cb_count: an output file containing the # reads for each cell barcode
:param num_thread: int
the number of cpu cores to use
:param max_num_cell: int
the maximum number of cells
"""
with open(config, 'r') as stream:
config_dict = yaml.safe_load(stream)
config_dict = config_dict[method]
num_read = config_dict['num_read']
num_fastq = len(fastq)
if num_fastq != num_read:
logger.error(f'Error: the number of input fastq files {num_fastq} is different '
f'from the number of fastq files {num_read} detected in the config file')
sys.exit(-1)
read_regex_str, barcode_filter, read_regex_str_qual = \
zip(*[_extract_input_read_template('read' + str(i), config_dict)
for i in range(1, num_read + 1)])
barcode_filter_dict = dict()
for d in barcode_filter:
barcode_filter_dict.update(d)
read_template = _infer_read_template(read_regex_str)
# select
read_regex_list = [re.compile(z) for z in read_regex_str_qual]
format_read = partial(_format_read, read_regex_list=read_regex_list,
read_template=read_template.read_template,
cb_tag=read_template.cb_tag,
ub_len=read_template.ub_len,
barcode_filter_dict=barcode_filter_dict)
chunk_size = 8000
fastq_reader = [read_fastq(fastq_i) for fastq_i in fastq]
chunks = toolz.partition_all(chunk_size, zip(*fastq_reader))
num_cpu = mp.cpu_count()
num_thread = num_thread if num_cpu > num_thread else num_cpu
seq_chunk_obj = toolz.partition_all(num_thread, chunks)
fastq_out_all = [fastq_out + str(x) + '.gz' for x in range(num_thread)]
[gzip.open(x, 'wb').close() for x in fastq_out_all]
cb_count_all = [cb_count + str(x) + '.csv' for x in range(num_thread)]
[open(x, 'wt').close() for x in cb_count_all]
fastq_info = collections.defaultdict(collections.Counter)
iteration = 0
results = []
time_start = time.time()
pool = mp.Pool(num_thread)
for fastq_chunk in seq_chunk_obj:
res = pool.starmap_async(format_read, zip(fastq_chunk, fastq_out_all, cb_count_all))
results.append(res)
if len(results) == num_thread * 10:
results[0].wait()
while results and results[0].ready():
iteration += 1
if not (iteration % 10):
logger.info(f'Processed {iteration * chunk_size * num_thread:,d} reads!')
res = results.pop(0)
chunk_info = res.get()
_update_fastq_info(fastq_info, chunk_info)
pool.close()
pool.join()
for res in results:
chunk_info = res.get()
_update_fastq_info(fastq_info, chunk_info)
with open('.fastq_count.tsv', 'w') as f:
for k, v in fastq_info['read'].most_common():
f.write(f'{k}\t{v}\n')
cmd_cat_fastq = ' '.join(['cat'] + fastq_out_all + ['>'] + [fastq_out])
try:
subprocess.check_output(cmd_cat_fastq, shell=True)
[os.remove(fastq_file) for fastq_file in fastq_out_all]
except subprocess.CalledProcessError:
logger.info(f'Errors in concatenate fastq files')
sys.exit(-1)
except OSError:
logger.info(f'Errors in deleting fastq files')
sys.exit(-1)
time_used = time.time() - time_start
logger.info(f'Formatting fastq done, taking {time_used/3600.0:.3f} hours')
if not cb_count:
cb_count = fastq_out + '.cb_count'
df = _count_cell_barcode_umi(cb_count_all[0])
for cb_file in cb_count_all[1:]:
df1 = _count_cell_barcode_umi(cb_file)
df = pd.concat([df, df1], axis=0)
df = df.groupby(df.index).sum()
if df.shape[0] > max_num_cell * 2:
df = df.sort_values(by=df.columns[0], ascending=False)
df = df.iloc[:max_num_cell, :]
try:
[os.remove(cb_file) for cb_file in cb_count_all]
except OSError:
logger.info(f'Errors in deleting cell barcode files')
sys.exit(-1)
df = df.sort_values(by=df.columns[0], ascending=False)
if df.shape[0] > 0:
df.columns = [str(x) for x in range(df.shape[1])]
df.index.name = 'cb'
column_name = list(df.columns.values)
column_name[0] = 'cb_count'
df.columns = column_name
df.to_csv(cb_count, sep='\t')
def _update_fastq_info(fastq_info, chunk_info):
for fastq_count in chunk_info:
fastq_info['read'].update(read_pass=fastq_count[0],
read_pass_barcode=fastq_count[1],
read_pass_polyt=fastq_count[2],
read_total=fastq_count[3])
def _count_cell_barcode_umi(cb_file, chunk_size=10 ** 7):
cb_reader = pd.read_csv(cb_file, header=None, iterator=True,
sep='\t', index_col=0)
chunks = cb_reader.get_chunk(chunk_size)
chunks = chunks.groupby(chunks.index).sum()
status = True
while status:
try:
chunk = cb_reader.get_chunk(chunk_size)
chunks = pd.concat([chunks, chunk], axis=0)
chunks = chunks.groupby(chunks.index).sum()
except StopIteration:
status = False
logger.info('Read cell barcode counts done.')
return chunks
def _extract_barcode_pos(barcode_dict, config):
barcode_reg = []
pos_all = []
barcode_filter = dict()
for barcode_and_pos in barcode_dict:
barcode, pos = barcode_and_pos
pos_all.append(pos)
barcode_reg.append('(?P<' + barcode + '>.{' +
str(pos[1] - pos[0] + 1) + '})')
try:
value = config[barcode + '_value']
barcode_filter.update({barcode: ErrorBarcodeHash(value, 1)})
except KeyError:
pass
return barcode_reg, pos_all, barcode_filter
def _extract_input_read_template(read, config):
read_name = '(@.*)\\n'
read_plus = '(\\+.*)\\n'
read_qual = '(.*)\\n'
filter_dict = dict()
seq = [(key, value) for key, value in config[read].items()
if key.startswith('cDNA')]
if seq:
read_name = '@(?P<name>.*)\\n'
read_seq = '(?P<seq>.*)\\n'
read_qual = '(?P<qual>.*)\\n'
read_template = read_name + read_seq + read_plus + read_qual
return read_template, filter_dict, read_template
cell_barcode = [(key, value) for key, value in config[read].items()
if key.startswith('CB') and not key.endswith('value')]
umi = [(key, value) for key, value in config[read].items()
if key.startswith('UMI')]
poly_t = [(key, value) for key, value in config[read].items()
if key.startswith('polyT')]
cb_reg, cb_pos, cb_filter = _extract_barcode_pos(cell_barcode, config[read])
filter_dict.update(cb_filter)
umi_reg, umi_pos, _ = _extract_barcode_pos(umi, config[read])
umi_reg = [z.replace('UMI', 'UB') for z in umi_reg]
pt_reg, pt_pos, _ = _extract_barcode_pos(poly_t, config[read])
read_pos_start = [z[0] for z in cb_pos]
read_pos_start += [z[0] for z in umi_pos]
read_pos_start += [z[0] for z in pt_pos]
read_pos_end = [z[1] for z in cb_pos]
read_pos_end += [z[1] for z in umi_pos]
read_pos_end += [z[1] for z in pt_pos]
idx = sorted(range(len(read_pos_start)),
key=lambda k: read_pos_start[k])
barcode_tag = cb_reg + umi_reg + pt_reg
read_pos_start = [read_pos_start[i] for i in idx]
read_pos_end = [read_pos_end[i] for i in idx]
barcode_tag = [barcode_tag[i] for i in idx]
idx_skip = [read_pos_start[i+1] - read_pos_end[i] - 1
for i in range(0, len(read_pos_start)-1)]
barcode_skip = ['[ACGTN]{' + str(i) + '}' for i in idx_skip]
read_seq = barcode_tag[0]
for i in range(len(read_pos_start)-1):
if idx_skip[i] == 0:
read_seq += barcode_tag[i+1]
else:
read_seq += barcode_skip[i]
read_seq += barcode_tag[i+1]
filter_dict.update(_filter_ploy_t(read_seq))
if read_pos_start[0] > 1:
read_seq = '[ACGTN]{' + str(read_pos_start[0]-1) + '}'
read_seq += '[ACGTN]*'
read_seq = read_seq + '\\n'
read_template = read_name + read_seq + read_plus + read_qual
read_qual = re.sub('>', r'_qual>', read_seq)
read_qual = re.sub('\[ACGTN\]', '.', read_qual)
read_template_qual = read_name + read_seq + read_plus + read_qual
return read_template, filter_dict, read_template_qual
def _filter_ploy_t(read_seq):
match = re.findall('\?P<polyT>\.{[0-9]+}', read_seq)
poly_t_count = [int(re.findall(r'\d+', z)[0]) for z in match]
poly_t_filter = {'polyT': ErrorBarcodeHash('T' * z, 1) for z in poly_t_count}
return poly_t_filter
def _replace_poly_t(read_seq):
match = re.findall('\?P<polyT>\.{[0-9]+}', read_seq)
poly_t_count = [int(re.findall(r'\d+', z)[0]) for z in match]
poly_t = ['(' + 'T'*z + ')' + '{s<=1}' for z in poly_t_count]
for z in range(len(match)):
read_seq = read_seq.replace(match[z], poly_t[z])
return read_seq
def _infer_read_template(reg_list):
class ReadInfo(NamedTuple):
cb: bool
cb_tag: list
cb_len: list
ub: bool
ub_tag: list
ub_len: list
read_template: str
cb = ub = False
cb_tag = ub_tag = []
cb_len = ub_len = []
read_template = '@'
reg = ''.join(k for k in reg_list)
if 'CB' in reg:
logger.info('Cell barcode in configure file')
cb = True
cb_seq_template = _accumulate_barcode('CB', reg)
cb_template = ':CB_' + cb_seq_template[1]
read_template += cb_template
cb_tag = cb_seq_template[0]
cb_len = cb_seq_template[2]
if 'UB' in reg:
logger.info('UMI in config file')
ub = True
ub_seq_template = _accumulate_barcode('UB', reg)
ub_template = ':UB_' + ub_seq_template[1]
read_template += ub_template
ub_tag = ub_seq_template[0]
ub_len = ub_seq_template[2]
read_template += ':{name}'
read_template += '\n{seq}\n+\n{qual}\n'
return ReadInfo(cb=cb, cb_tag=cb_tag, cb_len=cb_len,
ub=ub, ub_tag=ub_tag, ub_len=ub_len,
read_template=read_template)
def _accumulate_barcode(barcode, seq):
barcode_num = [sub_str[0] for sub_str in
seq.split('?P<' + re.escape(barcode))][1:]
status = '>' in barcode_num
barcode_num = ['0' if x == '>' else x for x in barcode_num]
barcode_num = sorted(barcode_num, key=int)
if status:
barcode_num[0] = ''
barcode_seq = [barcode + num for num in barcode_num]
barcode_template = ['{' + tag + '}' for tag in barcode_seq]
barcode_template = '-'.join(barcode_template)
str_split = 'P<' + barcode + '[0-9]*>.{'
barcode_len = [sub_str for sub_str in re.split(str_split, seq)][1:]
barcode_len = [int(re.findall(r'(\d+)', barcode_i)[0])
for barcode_i in barcode_len]
return barcode_seq, barcode_template, barcode_len
def _format_read(chunk, fastq_file, cb_count_file, read_regex_list,
read_template, cb_tag, ub_len, barcode_filter_dict):
reads = []
num_read = len(chunk)
num_read_pass = num_read_barcode = num_read_polyt = 0
num_regex = len(read_regex_list)
barcode_counter = collections.defaultdict(
partial(np.zeros, shape=(ub_len[0] + 1), dtype=np.uint32))
ignore_read = False
for read_i in chunk:
read_dict_list = []
for i, regex_i in enumerate(read_regex_list):
read_match = regex_i.match(read_i[i])
if not read_match:
ignore_read = True
break
read_dict_list.append(read_match.groupdict())
if ignore_read:
ignore_read = False
continue
read1_dict = read_dict_list[0]
if num_regex > 1:
for regex_id in range(1, num_regex):
read1_dict.update(read_dict_list[regex_id])
cb = [barcode_filter_dict[tag][read1_dict[tag]]
if tag in barcode_filter_dict.keys() else read1_dict[tag]
for tag in cb_tag]
if all(cb):
cb = '-'.join(cb)
num_read_barcode += 1
else:
ignore_read = True
ub = read1_dict['UB']
try:
poly_t = read1_dict['polyT']
if not barcode_filter_dict['polyT'][poly_t]:
ignore_read = True
else:
num_read_polyt += 1
except KeyError:
pass
if ignore_read:
ignore_read = False
continue
num_read_pass += 1
if len(read1_dict['seq']) >= 1:
read1_dict = read_template.format_map(read1_dict)
reads.append(read1_dict)
barcode_counter[cb] += [x == 'T' for x in 'T' + ub]
with gzip.open(fastq_file, 'ab') as fastq_hd:
for read in reads:
fastq_hd.write(bytes(read, 'utf8'))
df = pd.DataFrame.from_dict(barcode_counter, orient='index')
if df.shape[0] > 0:
df = df.sort_values(by=df.columns[0], ascending=False)
df.index.name = 'cb'
column_name = list(df.columns.values)
column_name[0] = 'cb_count'
df.columns = column_name
df.to_csv(cb_count_file, sep='\t', mode='a', header=False)
return num_read_pass, num_read_barcode, num_read_polyt, num_read
def _construct_barcode_regex(bam):
read_mode = 'r' if bam.endswith('.sam') else 'rb'
bam_file = AlignmentFile(bam, mode=read_mode)
first_alignment = next(bam_file)
bam_file.close()
barcodes = set()
for barcode in ['CB_', 'UB_']:
if barcode in first_alignment.qname:
barcodes.add(barcode)
barcode_parser = '.*'
if 'CB_' in barcodes:
barcode_parser += ':CB_(?P<CB>[A-Z\-]+)'
if 'UB_' in barcodes:
barcode_parser += ':UB_(?P<UB>[A-Z\-]+)'
if barcode_parser == '.*':
logger.error('Error: no cell barcodes and UMIs.')
sys.exit(-1)
barcode_parser += ':*'
barcode_parser = re.compile(barcode_parser)
match = barcode_parser.match(first_alignment.qname)
cb = _extract_tag(match, 'CB')
return barcode_parser, cb, read_mode
def _extract_tag(match, tag):
try:
tag = match.group(tag)
except IndexError:
tag = None
return tag
def count_feature(*cb, bam, molecular_info_h5, gtf, cb_count, feature_tag='XT:Z',
expect_cell=False, force_cell=False, all_cell=False,
depth_threshold=1, cell_barcode_whitelist=None):
"""
Count the number of reads/UMIs mapped to each gene
:param bam: the input sam/bam file
:param molecular_info_h5: output the molecular info
:param cb: the input cell barcode files, can be empty or None
:param cell_barcode_whitelist: a file contain the selected cell barcodes
:param gtf: a GTF file
:param cb_count: a file containing the number of reads mapped to each cell barcode,
output from format_fastq
:param feature_tag: the tag representing genes in the input bam file
:param depth_threshold: only considering UMIs that have at least
depth_threshold reads support
:param expect_cell: the expected number of cells in the bam file
:param force_cell: force to return the number of cells set by expect_cell
:param all_cell: keep all cell barcodes - can be very slow
"""
barcode_parser, first_cb, read_mode = _construct_barcode_regex(bam)
num_cb = len(first_cb.split('-'))
num_cb_file = len(cb)
if 0 == num_cb_file:
cb = [None] * num_cb
elif num_cb != num_cb_file:
logger.error(f'Error: the number of input cell barcodes files {num_cb_file} '
f'is different from the number of cell barcodes {num_cb} '
f'detected in the bam file')
if num_cb > num_cb_file:
cb = cb + [None] * (num_cb - num_cb_file)
else:
cb = cb[:num_cb]
# TODO: no cell barcodes detected
correct_cb_fun, cb_list, cb_remove = _construct_cb_filter(
cb_count, cb, expect_cell, force_cell, all_cell, cell_barcode_whitelist)
gene_map_dict = read_gene_map_from_gtf(gtf)
logger.info('Counting molecular info')
time_start_count = time.time()
sam_file = AlignmentFile(bam, mode=read_mode)
_count_feature_partial = partial(_count_feature,
gene_map_dict=gene_map_dict,
barcode_parser=barcode_parser,
correct_cb_fun=correct_cb_fun,
sam_file=sam_file,
feature_tag=feature_tag)
track = sam_file.fetch(until_eof=True)
map_info, read_in_cell, molecular_info = _count_feature_partial(track)
time_count = time.time() - time_start_count
logger.info(f'Counting molecular info done - {time_count/3600.0:.3f} hours, '
f'{int(3600.0 * map_info["num_alignment"]/time_count):,d} '
f'alignments/hour\n')
# TODO: still output results
if len(molecular_info) == 0:
logger.error('Error: no reads mapped to features.')
sys.exit(-1)
name = ['cell',
'gene',
'umi',
'depth',
]
logger.info('Converting to a dataframe')
convert_time = time.time()
molecular_info = pd.Series(molecular_info).reset_index()
molecular_info.columns = name
for col in name[:3]:
molecular_info.loc[:, col] = molecular_info[col].astype('category')
convert_time = time.time() - convert_time
logger.info(f'Converting to a dataframe done, '
f'taking {convert_time/60.0:.3f} minutes\n')
molecular_info.columns = name
if num_cb > 1 and cb_list:
molecular_info = molecular_info.loc[molecular_info['cell'].isin(cb_list), :]
if cb_remove:
molecular_info = molecular_info.loc[~molecular_info['cell'].isin(cb_remove), :]
molecular_info = molecular_info.loc[molecular_info['depth'] >= 0.95, :]
molecular_info['depth'] = \
np.floor(molecular_info['depth'].values + 0.5).astype('uint32')
molecular_info = molecular_info.sort_values(name[:3])
molecular_info = molecular_info.reset_index(drop=True)
map_info = pd.Series(map_info)
read_in_cell = pd.DataFrame.from_dict(read_in_cell, orient='index')
logger.info('Writing molecular info')
write_time = time.time()
feature = gene_map_dict.values()
feature = pd.Series(index=set(feature))
feature = feature.sort_index()
with pd.HDFStore(molecular_info_h5, mode='w') as hf:
hf.put('molecular_info', molecular_info, format='table', data_columns=True)
hf.put('map_info', map_info)
hf.put('feature', feature)
hf.put('read_in_cell', read_in_cell)
del molecular_info
write_time = time.time() - write_time
logger.info(f'Writings molecular info done, '
f'taking {write_time/60.0:.3f} minutes\n')
_convert_count_to_matrix(molecular_info_h5, molecular_info_h5,
depth_threshold=depth_threshold)
def _count_feature(track, gene_map_dict, barcode_parser,
correct_cb_fun, sam_file, feature_tag='XT:Z'):
search_undetermined = re.compile('N').search
read_name = None
feature_tag_value_pre = None
filt_multiple_gene_barcode = False
count_read = False
cb_i = feature_tag_value = ub_i = None
num_aln_read = 0
pass_filter = False
map_info = collections.defaultdict(int)
read_in_cell = collections.Counter()
molecular_info = collections.defaultdict(int)
for aln in track:
if map_info['num_alignment'] and not map_info['num_alignment'] % 10000000:
logger.info(f'Parsed {map_info["num_alignment"]:,d} alignments.')
logger.info(f'{map_info["num_unique_read"]:,d} unique reads, '
f'{map_info["num_count_read"]:,d} reads kept.')
logger.info(f'{map_info["num_unmapped_read"]:,d} unmapped reads were filtered.')
logger.info(f'{map_info["num_barcode_with_na"]:,d} reads '
f'were filtered for including NA in barcodes.\n')
num_aln_read_pre = num_aln_read
filter_read_unmapped = False
filter_read_na = False
filter_read_barcode = False
map_info['num_alignment'] += 1
num_aln_read = aln.get_tag('NH')
new_read = aln.qname != read_name
if new_read:
read_name = aln.qname
if count_read:
map_info['num_count_read'] += 1
record_tuple = (cb_i, feature_tag_value, ub_i)
molecular_info[record_tuple] += 1
elif pass_filter and (num_aln_read_pre > 1):
map_info['num_barcode_with_na'] += 1
pass_filter = False
count_read = False
filt_multiple_gene_barcode = True
feature_tag_value_pre = None
map_info['num_unique_read'] += 1
if num_aln_read == 0:
map_info['num_unmapped_read'] += 1
filter_read_unmapped = True
# check cb
match = barcode_parser.match(aln.qname)
cb_i = _extract_tag(match, 'CB')
cb_i_list = cb_i.split('-')
num_na_in_cb = _count_not_specified(cb_i_list)
if any(num_na_in_cb > 1) or sum(num_na_in_cb) > len(num_na_in_cb):
filter_read_na = True
cb_i = correct_cb_fun(cb_i_list)
if cb_i:
read_in_cell[cb_i] += 1
elif not aln.is_unmapped:
map_info['num_barcode_with_na'] += 1
filter_read_barcode = True
if filter_read_unmapped or filter_read_na or filter_read_barcode:
count_read = False
continue
ub_i = _extract_tag(match, 'UB')
if ub_i and search_undetermined(ub_i):
map_info['num_barcode_with_na'] += 1
continue
if ub_i and ub_i == len(ub_i) * ub_i[0]:
map_info['num_barcode_with_na'] += 1
continue
try:
feature_tag_value = aln.get_tag(feature_tag)
except KeyError:
feature_tag_value = sam_file.getrname(aln.reference_id)
if aln.get_tag('XS:Z') == 'Unassigned_Ambiguity':
map_info['num_barcode_with_na'] += 1
continue
pass_filter = True
filt_multiple_gene_barcode = False
try:
feature_tag_value = gene_map_dict[feature_tag_value]
except KeyError:
if num_aln_read == 1:
map_info['num_barcode_with_na'] += 1
continue
feature_tag_value_pre = feature_tag_value
count_read = True
else:
if filt_multiple_gene_barcode:
continue
try:
feature_tag_value = aln.get_tag(feature_tag)
except KeyError:
feature_tag_value = sam_file.getrname(aln.reference_id)
if aln.get_tag('XS:Z') == 'Unassigned_Ambiguity':
filt_multiple_gene_barcode = True
count_read = False
continue
try:
feature_tag_value = gene_map_dict[feature_tag_value]
except KeyError:
feature_tag_value = feature_tag_value_pre
continue
if feature_tag_value_pre and feature_tag_value_pre != feature_tag_value:
filt_multiple_gene_barcode = True
count_read = False
continue
feature_tag_value_pre = feature_tag_value
count_read = True # with valid feature_tag_value
if count_read:
map_info['num_count_read'] += 1
record_tuple = (cb_i, feature_tag_value, ub_i)
molecular_info[record_tuple] += 1
return map_info, read_in_cell, molecular_info
def _construct_cb_filter(cb_count, cb, expect_cell, force_cell,
all_cell, cell_barcode_whitelist):
cb_list = []
cb_remove = []
if all_cell:
correct_cb_fun = _filter_tag_fun(cb, max_distance=1, correct=True)
else:
if cell_barcode_whitelist:
with open(cell_barcode_whitelist, 'r') as file_handle:
cb_list = [line.strip('\n') for line in file_handle]
else:
cb_list, cb_remove = _get_candidate_barcode(cb_count, cb,
expect_cell=expect_cell,
force_cell=force_cell)
num_cell = len(cb_list)
logger.info(f'Detected {num_cell:,d} candidate cell barcodes.')
if num_cell <= 0:
sys.exit(-1)
cb_list_split = [cb.split('-') for cb in cb_list]
cb_df = pd.DataFrame(cb_list_split)
cb_list_split = [''] * len(cb_df.columns)
for cb in cb_df:
cb_list_split[cb] = cb_df[cb].unique()
if len(cb_df.columns) > 1:
barcode_hash = _create_barcode_hash(cb_list)
cb_hash = [ErrorBarcodeHashConstraint(cb, barcode_hash[idx])
for idx, cb in enumerate(cb_list_split)]
correct_cb_fun = partial(_filter_tag_multi, tag_hash=cb_hash, correct=True)
else:
cb_hash = [ErrorBarcodeHash(cb) for cb in cb_list_split]
correct_cb_fun = partial(_filter_tag, tag_hash=cb_hash, correct=True)
return correct_cb_fun, cb_list, cb_remove
def _count_not_specified(barcode):
if not barcode:
return np.array([0])
na_count = [barcode_i.count('N') for barcode_i in barcode]
return np.array(na_count)
def _get_candidate_barcode(cb_count_file, cb_file,
plot_prefix='.',
expect_cell=False,
force_cell=False):
cb = pd.read_csv(cb_count_file, sep='\t')
cb_name = cb['cb'].str.split('-', expand=True)
cb_len = [len(z) for z in cb_name.iloc[0, :]]
num_cb = len(cb_len)
idx = False
for cb_idx in range(num_cb):
filt_cb = [cb_char * cb_len[cb_idx] for cb_char in ['N', 'G']]
idx = cb_name.iloc[:, 0].isin(filt_cb) | idx
cb = cb.loc[~idx, :]
cb = cb.reset_index(drop=True)
cb_count = dict(zip(cb.cb, cb.cb_count))
candidate_cb_whitelist = get_cell_whitelist(cb_count,
plot_prefix=plot_prefix,
expect_cell=expect_cell,
force_cell=force_cell)
candidate_cb_whitelist_refine = \
_refine_whitelist(list(candidate_cb_whitelist), cb_file)
merge_barcode = [x != 'None' and x for x in cb_file]
if any(merge_barcode):
merge_barcode = False
cb_list, cb_remove = _merge_del_barcode(candidate_cb_whitelist_refine,
barcode_count=cb, min_distance=1,
merge_barcode=merge_barcode)
with open('._detected_cb.tsv', 'wt') as file_handle:
for cb_whitelist in np.setdiff1d(cb_list, cb_remove):
file_handle.write(f'{cb_whitelist}\n')
return cb_list, cb_remove
def _refine_whitelist(cb_whitelist, cb_file=None, max_na_per_cb=1):
cb_hash = []
if cb_file is not None:
cb_file = list(cb_file)
cb_file = [None if cb_i == 'None' else cb_i for cb_i in cb_file]
cb_hash = _construct_hash(cb_whitelist, cb_file)
num_cb = len(cb_whitelist[0].split('-'))
cb_whitelist_corrected = collections.defaultdict(set)
for cell_barcode in list(cb_whitelist):
cell_barcode_list = cell_barcode.split('-')
na_count = _count_not_specified(cell_barcode_list)
if any(na_count > max_na_per_cb) or sum(na_count) > num_cb:
continue
cb = cell_barcode
if any(cb_hash):
cb = _correct_cell_barcode(cell_barcode_list, cb_hash)
if any(cb):
cb = '-'.join(cb)
else:
continue
cb_whitelist_corrected[cell_barcode].add(cb)
return cb_whitelist_corrected
def _construct_hash(cb_whitelist, tag_file):
num_tag = len(tag_file)
tag_hash = [''] * num_tag
# Add comments
for i in range(num_tag):
if tag_file[i]:
with open(tag_file[i], 'r') as file_handle:
tag_i = [line.rstrip('\n') for line in file_handle]
if len(tag_i) > 5000:
cell_barcode_list = []
for cell_barcode in list(cb_whitelist):
cell_barcode_list.append(cell_barcode.split('-')[i])
white_list_map = \
_generate_barcode_whitelist_map(cell_barcode_list, tag_i, 1)
tag_i = [list(v)[0] for k, v in white_list_map.items()]
tag_i = list(set(tag_i))
tag_hash[i] = ErrorBarcodeHash(tag_i, edit_distance=1)
return tag_hash
def _correct_cell_barcode(cell_barcode, cb_hash):
num_cb = len(cb_hash)
cb_corrected = cell_barcode
for i in range(num_cb):
if cb_hash[i]:
candidate_cb = cb_hash[i][cell_barcode[i]]
if candidate_cb:
cb_corrected[i] = candidate_cb
else:
return [None]
return cb_corrected
def _generate_barcode_whitelist_map(barcode, whitelist, min_distance=1):
barcode_to_whitelist = collections.defaultdict(set)
whitelist = set([str(x).encode('utf-8') for x in whitelist])
num_cpu = mp.cpu_count()
pool = mp.Pool(num_cpu)
_partial_map_single_barcode_to_whitelist = \
partial(_map_single_barcode_to_whitelist, whitelist=whitelist,
min_distance=min_distance)
corrected_barcode = pool.map(_partial_map_single_barcode_to_whitelist, barcode)
for idx, barcode_i in enumerate(corrected_barcode):
if barcode_i is not None:
barcode_to_whitelist[barcode[idx]].add(barcode_i)
return barcode_to_whitelist
def _map_single_barcode_to_whitelist(barcode, whitelist, min_distance=1):
match = None
barcode_in_bytes = str(barcode).encode('utf-8')
for white_barcode in whitelist:
if barcode_in_bytes in whitelist:
match = barcode
break
if compute_edit_distance(barcode_in_bytes, white_barcode) <= min_distance:
if match is not None:
logging.info(f'Warning: barcode {str(barcode)} can be '
f'mapped to more than one candidate barcodes')
match = None
break
else:
match = white_barcode.decode('utf-8')
return match
def _merge_del_barcode(barcode_dict, barcode_count, min_distance=1, merge_barcode=False):
barcode_list = list(barcode_dict.keys())
idx = barcode_count.cb.isin(barcode_list)
barcode_count_filt = barcode_count.loc[idx, :]
barcode_corr = [barcode_dict[x] for x in barcode_count_filt.cb]
idx = [len(x) > 0 for x in barcode_corr]
barcode_count_filt = barcode_count_filt.iloc[idx, :]
barcode_corr = list(chain(*barcode_corr))
barcode_count_filt.cb = barcode_corr
barcode_count_filt = barcode_count_filt.groupby('cb').sum()
umi_len = barcode_count_filt.shape[1]
barcode_count_filt_ratio = barcode_count_filt.iloc[:, 1:umi_len].div(
barcode_count_filt.cb_count, axis=0)
idx = barcode_count_filt_ratio.gt(0.80, axis=0)
idx = idx | barcode_count_filt_ratio.lt(0.005, axis=0)
count_indel = idx.sum(axis=1)
if sum(count_indel == 0) <= 100000 and merge_barcode:
barcode_whitelist = \
_merge_corrected_barcode(barcode_count_filt.loc[count_indel == 0, :])
else:
barcode_whitelist = barcode_count_filt_ratio.index[count_indel == 0].tolist()
barcode_correctable = barcode_count_filt_ratio.index[count_indel == 1].tolist()
whitelist_remove = []
if len(barcode_correctable) > 0:
barcode_corrected = _correct_del_barcode(
barcode_count_filt.loc[barcode_correctable, :], min_distance)
barcode_corrected_list = list(barcode_corrected.keys())
barcode_corrected_list_mut = [x[:-1]+'N' for x in barcode_corrected_list]
whitelist_dist = [_find_neighbour_barcode(x, barcode_corrected_list_mut, 1)
for x in barcode_whitelist]
whitelist_remove = [barcode_whitelist[k] for k, v in enumerate(whitelist_dist)
if len(v[0]) > 0]
barcode_whitelist.extend(barcode_corrected_list)
return barcode_whitelist, whitelist_remove
# N**2 complexity
def _merge_corrected_barcode(barcode_count):
barcode_count = barcode_count.sort_values('cb_count', ascending=False)
barcode = barcode_count.index.astype(str)
barcode_coverage = dict(zip(barcode, barcode_count.cb_count))
barcode_list = collections.deque()
barcode_list.append(barcode[0])
num_barcode = len(barcode)
if num_barcode <= 1:
return barcode_list
for barcode_i in barcode[1:]:
idx = _find_neighbour_barcode(barcode_i, barcode_list)
num_neighbour = len(idx[0])
if num_neighbour == 0:
barcode_list.append(barcode_i)
continue
elif num_neighbour == 1:
candidate_barcode_idx = idx[0][0]
candidate_barcode_coverage = \
barcode_coverage[barcode_list[candidate_barcode_idx]]
if barcode_coverage[barcode_i] > candidate_barcode_coverage / 10.0:
barcode_list.append(barcode_i)
continue
return barcode_list
def _find_neighbour_barcode(barcode, barcode_list, min_distance=1):
edit_dist = np.array([compute_edit_distance(barcode.encode('utf-8'), x.encode('utf-8'))
for x in barcode_list])
idx_filt = np.where(edit_dist <= min_distance)
return idx_filt
def _correct_del_barcode(barcode_count, min_distance=1):
barcode_count = barcode_count.sort_values('cb_count', ascending=False)
barcode_all = np.asarray(barcode_count.index.tolist())
barcode_whitelist = collections.defaultdict(set)
while len(barcode_all):
barcode_i = barcode_all[0]
barcode_whitelist[barcode_i].add(barcode_i)
barcode_all = barcode_all[1:]
idx = _find_neighbour_barcode(barcode_i, barcode_all, min_distance)
if len(idx[0]):
barcode_ = barcode_all[idx]
barcode_whitelist[barcode_i].update(list(barcode_))
barcode_all = np.delete(barcode_all, idx)
return barcode_whitelist
def _create_barcode_hash(barcode):
barcode_split = [barcode_i.split('-') for barcode_i in barcode]
barcode_df = pd.DataFrame(barcode_split, barcode)
num_barcode = len(barcode_split[0])
barcode_split_uniq = [''] * len(barcode_df.columns)
for barcode_i in barcode_df:
barcode_split_uniq[barcode_i] = barcode_df[barcode_i].unique()
barcode_hash = [collections.defaultdict(list) for _ in range(num_barcode)]
for i in range(num_barcode):
barcode_i = barcode_split_uniq[i]
for barcode_ii in barcode_i:
idx = barcode_df[i] == barcode_ii
barcode_hash[i][barcode_ii] = set(barcode_df.index[idx].tolist())
return barcode_hash
def convert_count_to_matrix(molecular_info, out_prefix, depth_threshold):
_convert_count_to_matrix(molecular_info, out_prefix, depth_threshold)
def _convert_count_to_matrix(molecular_info, out_prefix, depth_threshold):
feature = pd.read_hdf(molecular_info, key='feature')
molecular_info = pd.read_hdf(molecular_info, key='molecular_info')
logger.info('Collapsing UMIs')
write_time = time.time()
molecular_info = _collapse_umi(molecular_info)
write_time = time.time() - write_time
logger.info(f'Collapsing UMIs done, taking {write_time/60.0:.3f} minutes')
df = _generate_fake_count(molecular_info.iloc[0, :],
feature, depth=depth_threshold+1)
molecular_info = pd.concat([df, molecular_info], ignore_index=True)
num_gene = len(feature)
_transform_write_sparse_matrix(molecular_info, num_gene,
sum_type='umi', out_prefix=out_prefix,
depth_threshold=depth_threshold)
_transform_write_sparse_matrix(molecular_info, num_gene,
sum_type='transcript', out_prefix=out_prefix,
depth_threshold=depth_threshold)
def _transform_write_sparse_matrix(molecular_info, num_gene,
sum_type, out_prefix, depth_threshold):
logger.info('Converting to sparse matrix')
convert_time = time.time()
query_filer = f'depth >= {depth_threshold}'
if 'umi' == sum_type:
base_name = out_prefix + '_read'
count_collapsed = molecular_info.groupby(
['cell', 'gene'])
count_collapsed = count_collapsed['depth'].sum()
count_collapsed[:num_gene] -= (depth_threshold + 1)
count_collapsed += 0.5
count_collapsed = count_collapsed.astype(int)
else:
base_name = out_prefix + '_depth_' + str(depth_threshold) + '_transcript'
count_collapsed = molecular_info.query(query_filer).groupby(
['cell', 'gene'])
count_collapsed = count_collapsed['umi'].size()
count_collapsed[:num_gene] -= 1
del molecular_info
count, count_row_name, count_column_name = _convert_to_coo(count_collapsed)
del count_collapsed
convert_time = time.time() - convert_time
logger.info(f'Converting to a sparse matrix done, '
f'taking {convert_time/60.0:.3f} minutes')
logger.info('Output results')
write_time = time.time()
pd.Series(count_row_name).to_csv(base_name + '_gene.tsv',
index=False, header=False)
pd.Series(count_column_name).to_csv(base_name + '_barcode.tsv',
index=False, header=False)
if 'umi' == sum_type:
with open(base_name + '.mtx', 'w+b') as out_handle:
scipy.io.mmwrite(out_handle, count)
else:
with open(base_name + '.mtx', 'w+b') as out_handle:
scipy.io.mmwrite(out_handle, count)
write_time = time.time() - write_time
logger.info(f'Writing final results done, '
f'taking {write_time/60.0:.3f} minutes')
def _map_barcode_to_whitelist(barcode, whitelist, min_distance=1):
whitelist = set([str(x).encode('utf-8') for x in whitelist])
iter_i = 0
for barcode_i in barcode:
match = barcode_i
barcode_in_bytes = str(barcode_i).encode('utf-8')
for white_barcode in whitelist:
if barcode_in_bytes in whitelist:
break
if compute_edit_distance(barcode_in_bytes, white_barcode) <= min_distance:
match = white_barcode.decode('utf-8')
break
barcode[iter_i] = match
iter_i += 1
return barcode
def _collapse_barcode_edit(barcode, value, min_distance=1):
id_srt = value.argsort()[::-1]
barcode = barcode[id_srt]
value = value[id_srt]
max_barcode = value[0]
threshold = max_barcode * 0.1
if threshold < 2:
threshold = 2
elif threshold > 5:
threshold = 5
id_whitelist = value > threshold
whitelist_candidate = barcode[id_whitelist]
noise_candidate = barcode[~id_whitelist]
if len(noise_candidate) > 0 and len(whitelist_candidate) > 0:
corrected_noise = _map_barcode_to_whitelist(noise_candidate,
whitelist_candidate,
min_distance)
barcode[~id_whitelist] = corrected_noise
return barcode, value
def _collapse_umi(x, min_distance=1):
id_start = x.duplicated(['cell', 'gene'])
id_start = id_start[id_start == False].index.tolist()
id_end = id_start[1:]
id_end.append(x.shape[0])
value = x['depth'].values
umi = x['umi'].values.astype('str')
for gene in np.arange(len(id_end)):
id_gene = np.arange(id_start[gene], id_end[gene])
if len(id_gene) <= 1:
continue
umi_gene = umi[id_gene]
value_gene = value[id_gene]
umi_gene, _ = _collapse_barcode_edit(umi_gene, value_gene, min_distance)
umi[id_gene] = umi_gene
x['umi'] = pd.Categorical(umi)
x = x.groupby(['cell', 'gene', 'umi'], observed=True)['depth'].sum()
x = x.reset_index(drop=False)
return x
def _convert_to_coo(data_series):
data_sp = data_series.astype('Sparse')
data_sp, row_name, column_name = data_sp.sparse.to_coo(
column_levels=['cell'],
row_levels=['gene']
)
data_sp.eliminate_zeros()
data_sp = data_sp.astype(int)
coo_tuple = collections.namedtuple('coo_tuple', ['x', 'row_name', 'column_name'])
return coo_tuple(data_sp, row_name, column_name)
def _generate_fake_count(row_of_df, feature, depth=1.5):
index_name = row_of_df.index
df = pd.DataFrame(columns=index_name)
df['gene'] = feature.index.values
df['umi'] = 'N' * len(row_of_df['umi'])
df['depth'] = depth
for index_name_other in list(set(index_name) - {'gene', 'umi', 'depth'}):
df[index_name_other] = row_of_df[index_name_other]
return df
def down_sample(molecular_info, total_read=None, total_cell=None, mean_read=None,
out_prefix='.', depth_threshold=1, seed=0):
"""
Down-sampling the molecular_info such that each library has the same number of reads
:param molecular_info: molecular_info: the input molecular info data frame
:param total_read: the total number of reads for these libraries
:param total_cell: the total number of cells
:param mean_read: the expected number of reads per cell after down-sampling
:param out_prefix: the prefix of the output matrices
:param depth_threshold: the coverage threshold to consider
:param seed used for random sampling
"""
feature = pd.read_hdf(molecular_info, key='feature')
output_molecular_info = pd.read_hdf(molecular_info, key='molecular_info')
for col in output_molecular_info.columns[:3]:
output_molecular_info.loc[:, col] = output_molecular_info[col].astype('category')
output_molecular_info = output_molecular_info.loc[
output_molecular_info['depth'] >= 1, :]
output_molecular_info.reset_index(drop=True, inplace=True)
map_info = pd.read_hdf(molecular_info, key='map_info')
if total_read is None:
total_read = map_info['num_unique_read']
else:
total_read = max(total_read, map_info['num_unique_read'])
read_in_cell = pd.read_hdf(molecular_info, key='read_in_cell')
if total_cell is None:
total_cell = read_in_cell.shape[0]
else:
total_cell = min(total_cell, read_in_cell.shape[0])
if mean_read is None:
mean_read = (10000, )
elif not isinstance(mean_read, tuple):
mean_read = (mean_read, )
cell_vec = output_molecular_info['depth'].copy()
for mean_read_i in mean_read:
random.seed(seed)
seed += 1
_down_sample(output_molecular_info, feature, total_read, total_cell,
mean_read_i, out_prefix, depth_threshold=depth_threshold)
output_molecular_info['depth'] = cell_vec
def _down_sample(molecular_info, feature, total_read, total_cell, mean_read,
out_prefix, depth_threshold=1):
expect_read = mean_read * total_cell
if expect_read > total_read:
return ()
cell_vec = molecular_info['depth']
value = cell_vec.tolist()
id_end = np.cumsum(value)
id_start = np.append(0, id_end)
id_start = id_start[:-1]
read_num_subsample = (id_end[-1] / total_read * 1.0) * expect_read
read_num_subsample = int(read_num_subsample + 0.5)
id_keep = sorted(random.sample(range(id_end[-1]), read_num_subsample))
expanded_count = np.zeros(id_end[-1], dtype=np.int32)
expanded_count[id_keep] = 1
value = _add_umis(expanded_count, id_start, id_end)
molecular_info['depth'] = value
output_molecular_info = molecular_info.loc[molecular_info['depth'] >= 1, :].copy()
output_molecular_info.reset_index(drop=True, inplace=True)
logger.info('Collapsing UMIs')
write_time = time.time()
output_molecular_info = _collapse_umi(output_molecular_info)
write_time = time.time() - write_time
logger.info(f'Collapsing UMIs done, taking {write_time / 60.0:.3f} minutes')
out_prefix = out_prefix + '_sample_' + str(mean_read)
_calculate_cell_gene_matrix(output_molecular_info, feature,
out_prefix=out_prefix,
depth_threshold=depth_threshold)
def down_sample_cell(molecular_info, expect_read=None, out_prefix='', depth_threshold=1):
"""
Down-sampling the molecular_info such that each cell has the same number of reads
:param molecular_info: the input molecular info data frame
:param expect_read: each cell to have expect_read
:param out_prefix: the prefix of the output matrices
:param depth_threshold: the coverage threshold to consider
"""
feature = pd.read_hdf(molecular_info, key='feature')
output_molecular_info = pd.read_hdf(molecular_info, key='molecular_info')
for col in output_molecular_info.columns[:3]:
output_molecular_info.loc[:, col] = output_molecular_info[col].astype('category')
output_molecular_info = output_molecular_info.loc[
output_molecular_info['depth'] >= 1, :]
name = output_molecular_info.columns.tolist()
name = name[:-1]
output_molecular_info = output_molecular_info.sort_values(name) # already sorted?
read_in_cell = | pd.read_hdf(molecular_info, key='read_in_cell') | pandas.read_hdf |
#############################################################################################################################################################################################################
### PRINTED CIRCUIT BOARDS PRODUCTION - A TWO-STAGE HYBRID FLOWSHOP SCHEDULING PROBLEM ######################################################################################################################
#############################################################################################################################################################################################################
### Author: <NAME> (<EMAIL>) #########################################################################################################################################################
#############################################################################################################################################################################################################
### Libraries ###############################################################################################################################################################################################
import salabim as sim
import pandas as pd
import math
#############################################################################################################################################################################################################
### Global Variables ########################################################################################################################################################################################
# Visualization
ENV_W = 1200
ENV_H = 600
REF_WIDTH = 110
GLOB_BUFFER_WIDTH = REF_WIDTH
GLOB_PROCESS_WIDTH = REF_WIDTH
REF_HEIGHT = 60
GLOB_SOURCE_DRAIN_RADIUS = (REF_HEIGHT) / 2
GLOB_BUFFER_HEIGHT = REF_HEIGHT - 20
GLOB_PROCESS_HEIGHT = REF_HEIGHT
GLOB_FONTSIZE = 12
X_0 = 50
Y_0 = 300
Y_GLOB_SOURCE_DRAIN = Y_0 + GLOB_SOURCE_DRAIN_RADIUS
Y_GLOB_BUFFER = Y_0 + ((REF_HEIGHT - GLOB_BUFFER_HEIGHT) / 2)
Y_GLOB_PROCESS = Y_0 + ((REF_HEIGHT - GLOB_PROCESS_HEIGHT) / 2)
#############################################################################################################################################################################################################
### Modeling Objects ########################################################################################################################################################################################
class Job(sim.Component):
def setup(
self,
model,
job_id,
duedate,
family,
t_smd,
t_aoi,
duedate_scaled,
family_scaled,
t_smd_scaled,
t_aoi_scaled,
alloc_to_smd,
):
# model
self.model = model
# initial attributes
self.id = job_id
self.duedate = duedate
self.family = family
self.t_smd = t_smd
self.t_aoi = t_aoi
self.duedate_scaled = duedate_scaled
self.family_scaled = family_scaled
self.t_smd_scaled = t_smd_scaled
self.t_aoi_scaled = t_aoi_scaled
self.alloc_to_smd = alloc_to_smd
# flags
self.selected_smd = None
self.selected_aoi = None
# visualization
if self.model.env.animate() == True:
self.img = sim.AnimateCircle(
radius=10,
x=X_0,
y=Y_GLOB_SOURCE_DRAIN,
fillcolor="limegreen",
linecolor="black",
text=str(self.id),
fontsize=15,
textcolor="black",
parent=self,
screen_coordinates=True,
)
def process(self):
# enter job buffer
self.enter(self.model.job_buffer)
if self.model.env.animate() == True:
self.model.job_buffer.set_job_pos()
yield self.passivate()
# select and enter SMD buffer
self.model.job_buffer.remove(self)
if self.model.env.animate() == True:
self.model.job_buffer.set_job_pos()
self.selected_smd = self.model.smd_allocation_method(job=self)
self.model.unseized_smd_workload -= self.t_smd
self.selected_smd.buffer.workload += self.t_smd
if (
len(self.selected_smd.buffer) == 0
and self.family != self.selected_smd.setuptype
) or self.family != self.selected_smd.buffer[-1].family:
self.selected_smd.buffer.workload += 65
else:
self.selected_smd.buffer.workload += 20
if self.model.post_sequencing_function:
self.model.post_sequencing_method(job=self, smd=self.selected_smd)
else:
self.enter(self.selected_smd.buffer)
if self.model.env.animate() == True:
self.selected_smd.buffer.set_job_pos()
if self.selected_smd.ispassive() and self.selected_smd.state != "waiting":
self.selected_smd.activate()
yield self.passivate()
# select and enter AOI buffer
self.selected_aoi = self.model.aoi_allocation_method(job=self)
self.enter(self.selected_aoi.buffer)
self.selected_aoi.buffer.workload += 25 + self.t_aoi
if self.model.env.animate() == True:
self.selected_aoi.buffer.set_job_pos()
if self.selected_aoi.ispassive():
self.selected_aoi.activate()
yield self.passivate()
# calculate objectives and destroy job
self.model.jobs_processed += 1
if self.model.env.now() > self.duedate:
self.model.total_tardiness += self.model.env.now() - self.duedate
if self.model.env.animate() == True:
self.model.info_tardiness.text = "Total Tardiness: " + str(
self.model.total_tardiness
)
if self.model.jobs_processed == self.model.num_jobs:
self.model.makespan = self.model.env.now()
# Workaround for a bug in NEAT:
# * NEAT outputs an assertion error if the fitness is not of type float or integer
# * Due to the use of pandas, some KPIs (e.g. the Total Tardiness) have a numpy data type
# * Therefore, we cast all KPIs that could be relevant to measure the fitness to float or integer
self.model.makespan = float(self.model.makespan)
self.model.total_tardiness = float(self.model.total_tardiness)
self.model.num_major_setups = int(self.model.num_major_setups)
if self.model.env.animate() == True:
self.model.info_makespan.text = "Makespan: " + str(self.model.makespan)
if self.model.freeze_window_at_endsim:
self.model.env.an_menu()
if self.model.env.animate() == True:
self.img.x = self.model.drain.img[0].x
self.img.y = self.model.drain.img[0].y
yield self.hold(0)
del self
class Source(sim.Component):
def setup(
self,
model,
img_w=GLOB_SOURCE_DRAIN_RADIUS,
img_h=GLOB_SOURCE_DRAIN_RADIUS,
img_x=X_0,
img_y=Y_GLOB_SOURCE_DRAIN,
):
# model
self.model = model
# visualization
if self.model.env.animate() == True:
self.img_w = img_w
self.img_h = img_h
self.img_x = img_x
self.img_y = img_y
self.img = [
sim.AnimateCircle(
radius=img_w,
x=img_x,
y=img_y,
fillcolor="white",
linecolor="black",
linewidth=2,
layer=2,
arg=(img_x + img_w, img_y + img_h),
screen_coordinates=True,
),
sim.AnimateCircle(
radius=0.3 * img_w,
x=img_x,
y=img_y,
fillcolor="black",
linecolor="black",
layer=1,
screen_coordinates=True,
),
]
def process(self):
# generate jobs
for job in self.model.sequence:
Job(
model=self.model,
job_id=job["id"],
duedate=job["due date"],
family=job["family"],
t_smd=job["t_smd"],
t_aoi=job["t_aoi"],
duedate_scaled=job["scaled due date"],
family_scaled=job["scaled family"],
t_smd_scaled=job["scaled t_smd"],
t_aoi_scaled=job["scaled t_aoi"],
alloc_to_smd=job["alloc_to_smd"],
)
yield self.hold(0)
# step-by-step mode
if self.model.step_by_step_execution:
self.model.env.an_menu()
# activate jobs
for job in self.model.job_buffer:
job.activate()
yield self.hold(0)
class Queue(sim.Queue):
def setup(
self,
model,
predecessors,
img_w=None,
img_h=None,
img_x=None,
img_y=None,
img_slots=None,
):
# model
self.model = model
# initial attributes
self.predecessors = predecessors
self.img_w = img_w
self.img_h = img_h
self.img_x = img_x
self.img_y = img_y
self.img_slots = img_slots
# flags
self.workload = 0
# visualization
if self.model.env.animate() == True:
self.img = [
[
sim.AnimateRectangle(
spec=(0, 0, (self.img_w / self.img_slots), self.img_h),
x=self.img_x + i * (self.img_w / self.img_slots),
y=self.img_y,
fillcolor="white",
linecolor="white",
linewidth=1,
layer=2,
arg=(
self.img_x
+ (i * (self.img_w / self.img_slots))
+ (self.img_w / (self.img_slots * 2)),
self.img_y + (self.img_h / 2),
),
screen_coordinates=True,
)
for i in range(self.img_slots)
],
sim.AnimateRectangle(
spec=(0, 0, self.img_w, self.img_h),
x=self.img_x,
y=self.img_y,
fillcolor="white",
linecolor="black",
linewidth=2,
layer=1,
screen_coordinates=True,
),
]
self.predecessor_connections = [
sim.AnimateLine(
spec=(
predecessor.img_x + predecessor.img_w,
predecessor.img_y
if predecessor.__class__.__name__ == "Source"
else predecessor.img_y + predecessor.img_h / 2,
self.img_x,
self.img_y + self.img_h / 2,
),
linecolor="black",
linewidth=2,
layer=2,
screen_coordinates=True,
)
for predecessor in self.predecessors
]
self.info = sim.AnimateText(
text="# products: 0",
x=self.img[0][0].x,
y=self.img[0][0].y - 20,
fontsize=18,
textcolor="black",
screen_coordinates=True,
)
def set_job_pos(self):
if len(self) == 0:
self.info.text = "# products: 0"
else:
for job, spot in zip(self, reversed(self.img[0])):
job.img.visible = True
job.img.x = spot.arg[0]
job.img.y = spot.arg[1]
self.info.text = "# products: " + str(len(self))
if len(self) >= len(self.img[0]):
for i in range(len(self.img[0]), len(self)):
self[i].img.visible = False
self[i].img.x = self.img[0][0].arg[0]
self[i].img.y = self.img[0][0].arg[1]
class SMD(sim.Component):
def setup(
self,
model,
buffer,
img_x=None,
img_y=None,
img_w=GLOB_PROCESS_WIDTH,
img_h=GLOB_PROCESS_HEIGHT,
info_x=None,
info_y=None,
):
# model
self.model = model
# initial attributes
self.buffer = buffer
self.img_x = img_x
self.img_y = img_y
self.img_w = img_w
self.img_h = img_h
self.info_x = info_x
self.info_y = info_y
# flags
self.job = None
self.setuptype = 0
self.setuptype_scaled = 0
self.setup_to = 0
self.state = "idle"
# visualization
if model.env.animate() == True:
self.img = sim.AnimatePolygon(
spec=(
0,
0,
self.img_w - (self.img_w / 300) * 50,
0,
self.img_w,
self.img_h / 2,
self.img_w - (self.img_w / 300) * 50,
self.img_h,
0,
self.img_h,
(self.img_w / 300) * 50,
self.img_h / 2,
0,
0,
),
x=self.img_x,
y=self.img_y,
fillcolor="white",
linecolor="black",
linewidth=2,
text=self.name() + "\n\nsetuptype = 0\nidle",
fontsize=GLOB_FONTSIZE,
textcolor="black",
layer=1,
screen_coordinates=True,
)
self.buffer_connection = sim.AnimateLine(
spec=(
self.buffer.img_x + self.buffer.img_w,
self.buffer.img_y + self.buffer.img_h / 2,
self.img_x + 50,
self.img_y + self.img_h / 2,
),
linecolor="black",
linewidth=2,
layer=2,
screen_coordinates=True,
)
def process(self):
while True:
# idle state
if len(self.buffer) == 0:
self.state = "idle"
if (
self.setuptype != 0
and self.model.waiting_for_setuptype[self.setuptype]
):
released_setuptype = self.setuptype
self.setuptype = 0
self.model.waiting_for_setuptype[released_setuptype].pop(
0
).activate()
if self.model.env.animate() == True:
self.set_status(status=self.state)
yield self.passivate()
# pick next job from SMD buffer
self.job = self.buffer.pop()
self.buffer.workload -= self.job.t_smd
if self.model.env.animate() == True:
self.buffer.set_job_pos()
self.job.img.x = self.img_x + (self.img_w / 2)
self.job.img.y = self.img_y + (self.img_h / 2)
# setup state
if self.setuptype == self.job.family:
self.state = "minor setup"
self.buffer.workload -= 20
if self.model.env.animate() == True:
self.set_status(status=self.state)
yield self.hold(20)
else:
#####################################################################################################
# the following routine ensures that a specific setup kit is only mounted on SMD at the time
# (1) release current setup kit
# (2) reassign current setup kit, if other machine waits on it
# (3) check if other machine is right now using the demanding setup kit
# (3.1) if true: put SMD to waiting line for the demanded setup kit and passivate SMD
# (3.2) if false: eventually release setup kit from other SMD in idle state
released_setuptype = self.setuptype # (1)
self.setuptype = 0 # (1)
if (
released_setuptype != 0
and self.model.waiting_for_setuptype[released_setuptype]
): # (2)
self.model.waiting_for_setuptype[released_setuptype].pop(
0
).activate() # (2)
for smd in self.model.smds: # (3)
if (
smd.setuptype == self.job.family
or smd.setup_to == self.job.family
): # (3)
if smd.job is not None or len(smd.buffer) > 0: # (3.1)
self.model.waiting_for_setuptype[self.job.family].append(
self
) # (3.1)
self.state = "waiting" # (3.1)
if self.model.env.animate() == True: # (3.1)
self.set_status(status=self.state) # (3.1)
yield self.passivate() # (3.1)
break # (3.1)
else: # (3.2)
smd.setuptype = 0 # (3.2)
break # (3.2)
#####################################################################################################
self.state = "major setup"
self.buffer.workload -= 65
self.model.num_major_setups += 1
if self.model.env.animate() == True:
self.set_status(status=self.state)
self.model.info_setups.text = "Major Setups: " + str(
self.model.num_major_setups
)
self.setup_to = self.job.family
# for-loop to check setup violations (more than one machine is mounted with the same setup kit) due setup process
for smd in self.model.smds:
if self.name != smd.name:
if (
self.setup_to == smd.setuptype
or self.setup_to == smd.setup_to
):
self.model.n_setup_violations += 1
yield self.hold(65)
self.setuptype = self.job.family
self.setup_to = 0
# for-loop to check setup violations (more than one machine is mounted with the same setup kit) due setup process
for smd in self.model.smds:
if self.name != smd.name:
if (
self.setuptype == smd.setuptype
or self.setuptype == smd.setup_to
):
self.model.n_setup_violations += 1
# active state
self.state = "active"
if self.model.env.animate() == True:
self.set_status(status=self.state)
yield self.hold(self.job.t_smd)
self.job.activate()
self.job = None
def set_status(self, status):
dict_status = {
"idle": "white",
"active": "lime",
"minor setup": "yellow",
"major setup": "tomato",
"waiting": "violet",
}
self.img.fillcolor = dict_status.get(status)
self.img.text = (
self.name() + "\n\nsetuptype = " + str(self.setuptype) + "\n" + status
)
def calc_workload(self, called_from=None):
if self.state == "idle":
return self.buffer.workload
elif self.state == "active":
return self.remaining_duration() + self.buffer.workload
else:
return self.remaining_duration() + self.job.t_smd + self.buffer.workload
class AOI(sim.Component):
def setup(
self,
model,
buffer,
img_x=None,
img_y=None,
img_w=GLOB_PROCESS_WIDTH,
img_h=GLOB_PROCESS_HEIGHT,
):
# model
self.model = model
# initial attributes
self.buffer = buffer
self.img_x = img_x
self.img_y = img_y
self.img_w = img_w
self.img_h = img_h
# flags
self.job = None
self.state = "idle"
# visualization
if self.model.env.animate() == True:
self.img = sim.AnimatePolygon(
spec=(
0,
0,
self.img_w - (self.img_w / 300) * 50,
0,
self.img_w,
self.img_h / 2,
self.img_w - (self.img_w / 300) * 50,
self.img_h,
0,
self.img_h,
(self.img_w / 300) * 50,
self.img_h / 2,
0,
0,
),
x=self.img_x,
y=self.img_y,
fillcolor="white",
linecolor="black",
linewidth=2,
text=self.name() + "\n\nidle",
fontsize=GLOB_FONTSIZE,
textcolor="black",
layer=1,
screen_coordinates=True,
)
self.buffer_connection = sim.AnimateLine(
spec=(
self.buffer.img_x + self.buffer.img_w,
self.buffer.img_y + self.buffer.img_h / 2,
self.img_x + 50,
self.img_y + self.img_h / 2,
),
linecolor="black",
linewidth=2,
layer=2,
screen_coordinates=True,
)
def process(self):
while True:
# idle state
if len(self.buffer) == 0:
self.state = "idle"
if self.model.env.animate() == True:
self.set_status(status=self.state)
yield self.passivate()
# pick next job from AOIbuffer
self.job = self.buffer.pop()
self.buffer.workload -= self.job.t_aoi + 25
if self.model.env.animate() == True:
self.buffer.set_job_pos()
self.job.img.x = self.img_x + (self.img_w / 2)
self.job.img.y = self.img_y + (self.img_h / 2)
# setup state
self.state = "setting-up"
if self.model.env.animate() == True:
self.set_status(status=self.state)
yield self.hold(25)
# active state
self.state = "active"
if self.model.env.animate() == True:
self.set_status(status=self.state)
yield self.hold(self.job.t_aoi)
self.job.activate()
self.job = None
def set_status(self, status):
dict_status = {"idle": "white", "active": "lime", "setting-up": "yellow"}
self.img.fillcolor = dict_status.get(status)
self.img.text = self.name() + "\n\n" + status
def calc_workload(self):
if self.state == "idle":
return self.buffer.workload
elif self.state == "active":
return self.remaining_duration() + self.buffer.workload
else:
return self.remaining_duration() + self.job.t_aoi + self.buffer.workload
class Drain:
def __init__(
self,
model,
predecessors,
img_x=None,
img_y=None,
img_w=GLOB_SOURCE_DRAIN_RADIUS,
img_h=GLOB_SOURCE_DRAIN_RADIUS,
):
# initial attributes
self.model = model
self.predecessors = predecessors
self.img_x = img_x
self.img_y = img_y
self.img_w = img_w
self.img_h = img_h
# visualization
self.img = [
sim.AnimateCircle(
radius=img_w,
x=self.img_x,
y=self.img_y,
fillcolor="white",
linecolor="black",
linewidth=2,
layer=1,
screen_coordinates=True,
),
sim.AnimateLine(
spec=(
img_w * math.cos(math.radians(45)) * (-1),
img_w * math.sin(math.radians(45)) * (-1),
img_w * math.cos(math.radians(45)),
img_w * math.sin(math.radians(45)),
),
x=self.img_x,
y=self.img_y,
linecolor="black",
linewidth=2,
layer=1,
arg=(self.img_x + img_w, self.img_y + img_w),
screen_coordinates=True,
),
sim.AnimateLine(
spec=(
img_w * math.cos(math.radians(45)) * (-1),
img_w * math.sin(math.radians(45)),
img_w * math.cos(math.radians(45)),
img_w * math.sin(math.radians(45)) * (-1),
),
x=self.img_x,
y=self.img_y,
linecolor="black",
linewidth=2,
layer=1,
screen_coordinates=True,
),
]
self.predecessor_connections = [
sim.AnimateLine(
spec=(
predecessor.img_x + predecessor.img_w,
predecessor.img_y + predecessor.img_h / 2,
self.img_x - self.img_w,
self.img_y,
),
linecolor="black",
linewidth=2,
layer=2,
screen_coordinates=True,
)
for predecessor in self.predecessors
]
#############################################################################################################################################################################################################
### Simulation Model ########################################################################################################################################################################################
class Model:
def __init__(
self,
sequence,
dataset,
smd_allocation_function,
aoi_allocation_function,
post_sequencing_function=None,
smd_allocation_ann=None,
post_sequencing_ann=None,
animation=False,
step_by_step_execution=False,
freeze_window_at_endsim=False,
tracing=False,
):
# input data
self.sequence = sequence
self.dataset = dataset
# allocation functions
if smd_allocation_function is not None:
setattr(
self,
"smd_allocation_method",
smd_allocation_function.__get__(self, self.__class__),
)
self.smd_allocation_function = True
else:
self.smd_allocation_function = False
if aoi_allocation_function is not None:
setattr(
self,
"aoi_allocation_method",
aoi_allocation_function.__get__(self, self.__class__),
)
self.aoi_allocation_function = True
else:
self.aoi_allocation_function = False
# sequencing function
if post_sequencing_function is not None:
setattr(
self,
"post_sequencing_method",
post_sequencing_function.__get__(self, self.__class__),
)
self.post_sequencing_function = True
else:
self.post_sequencing_function = False
# artificial neural networks
self.smd_allocation_ann = smd_allocation_ann
self.post_sequencing_ann = post_sequencing_ann
# visualization
self.animation = animation
self.step_by_step_execution = step_by_step_execution
self.freeze_window_at_endsim = freeze_window_at_endsim
self.tracing = tracing
# component lists
self.smds = []
self.aois = []
# create queue dictionary for smds waiting for setuptype
setuptypes = (
| pd.DataFrame(dataset) | pandas.DataFrame |
import numpy as np
import sys
import pandas as pd
import glob
import os
# This script expects two command line arguments:
# - a glob-style pattern indicating which directories to analyze
# (remember to put quotation marks around it)
# - the prefix filename to store data in (main data fill get stored in filename.csv,
# trait data will get stored in filename_traits.csv)
# It will extract the path (x, y, and z coordinates) taken by the fittest
# lineage from the phylogeny_5000.csv file from each directory matched by
# the glob pattern provided in the first command-line argument.
def main():
glob_pattern = sys.argv[1]
outfilename = sys.argv[2]
update = sys.argv[3]
frames = []
trait_frames = []
# print(glob_pattern, glob.glob(glob_pattern))
print(glob.glob(glob_pattern))
for dirname in glob.glob(glob_pattern):
run_log = dirname + "/run.log"
print(dirname, run_log)
if not (os.path.exists(run_log)):
print("run log not found")
continue
local_data = {}
with open(run_log) as run_log_file:
for line in run_log_file:
if line.startswith("0"):
break
elif not line.startswith("set"):
continue
line = line.split()
local_data[line[1]] = line[2]
# Create function
fitness_df = pd.read_csv(dirname+"/fitness.csv", index_col="update")
systematics_df = pd.read_csv(dirname+"/systematics.csv", index_col="update")
population_df = pd.read_csv(dirname+"/population.csv", index_col="update")
trait_df = pd.read_csv(dirname+"/traits.dat")
if update != "all":
trait_df = trait_df[trait_df["update"] == int(update)]
# dominant_df = pd.read_csv(dirname+"/dominant.csv", index_col="update")
# lin_df = pd.read_csv(dirname+"/lineage_mutations.csv", index_col="update")
df = | pd.concat([fitness_df, systematics_df, population_df], axis=1) | pandas.concat |
"""ML-Experiments"""
import os
import pandas
from zipfile import ZipFile
class experiment:
def __init__(self, kaggle_api, dataset, dataset_target,
download_directory):
"""Experiment encapsulates a ML experiment
Arguments:
kaggle_api {KaggleApi} -- Instance of KaggleApi
dataset {str} -- <owner/resource>
dataset_target {str} -- <filename>.<ext>
download_directory {str} -- Path to place the downloaded dataset (relative to $VIRTUAL_ENV)
"""
self.kaggle_api = kaggle_api
self.dataset = dataset
self.dataset_target = dataset_target
self.download_directory = os.path.join(os.environ["VIRTUAL_ENV"],
download_directory)
self.dataset_file = os.path.join(self.download_directory,
dataset_target)
self.df = self.initialize_dataframe()
def initialize_dataframe(self):
"""Initialize a DataFrame from a Kaggle dataset"""
if not os.path.exists(self.dataset_file):
self.kaggle_api.dataset_download_file(
self.dataset,
self.dataset_target,
path=self.download_directory)
extract_target = f"{self.dataset_file}.zip"
ZipFile(extract_target).extractall(self.download_directory)
os.unlink(extract_target)
return pandas.read_csv(self.dataset_file)
def reassign_attribute(self, attribute, series):
"""Reassigns column in dataset using best practices
Arguments:
attribute {str} -- The attribute to reassign
series {Series} -- The Series to use for reassignment
"""
self.df.loc[:, attribute] = series.values
def identitify_non_numeric(self, attribute):
"""Identifies non-numeric values in given an attribute in the dataset
Arguments:
attribute {str} -- The attribute to investigate
Returns:
DataFrame -- Non-numeric DataFrame
"""
not_numeric = self.df[~self.df[attribute].str.isnumeric()]
return not_numeric
def convert_to_numeric(self, attribute):
"""Convert data associated to attribute to numeric
Arguments:
attribute {str} -- The attribute to target
"""
self.reassign_attribute(attribute,
| pandas.to_numeric(self.df[attribute]) | pandas.to_numeric |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unbale to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
from sqlalchemy import create_engine
import pandas as pd
import datetime
import config
import pmdarima as pm
import numpy as np
import arch
import statistics
import traceback
pd.set_option('display.max_columns', None)
def initializer(symbol):
# Get Data
engine = create_engine(config.psql)
num_data_points = 255
one_year_ago = (datetime.datetime.utcnow().date() - datetime.timedelta(days=num_data_points * 1.45)).strftime("%Y-%m-%d")
query = f"select distinct * from stockdata_hist where symbol = '{symbol}' and tdate > '{one_year_ago}' AND (CAST(tdate AS TIME) = '20:00') limit {num_data_points}"
df = pd.read_sql_query(query, con=engine).sort_values(by='tdate', ascending=True)
# Get Forecast Range
steps = 5
today = df['tdate'].iloc[-1]
end_prediction_date = today + datetime.timedelta(days=steps)
end_friday = end_prediction_date + datetime.timedelta((4-end_prediction_date.weekday()) % 7)
tomorrow = today+datetime.timedelta(days=1)
date_range = | pd.date_range(tomorrow, end_friday, freq="B") | pandas.date_range |
#pip install --upgrade pip
#pip install openpyxl # Manipular hojas de excel
#pip install pandas
#pip install xlwings
import os
import pandas as pd
import shutil
import xlwings as xw
import string
import random
import traceback
from AutomatizacionData import AutomatizacionData
class AutomatizacionEmpleado:
ruta = ''
indice = ''
files = []
def __init__(self, input: str, indice):
# @param: input tipo str; Obtiene ruta de la carpeta a procesar
### Inicializa variables globales con lista de archivos ordenados por nombre
self.ruta = input
self.files = os.listdir(self.ruta)
if indice == '':
self.copyXlsm(self.ruta)
else:
self.indice = indice
def separatePath(self, files):
"""
@param: files (List)
@return: nombres, extensiones ambos de tipo List
@modules: os
"""
nombres = []
extensiones = []
for x in files:
nombres.append(os.path.splitext(x)[0])
extensiones.append(os.path.splitext(x)[1])
return nombres, extensiones
def renameFiles(self, files, nombresExtensiones, ruta):
"""
@param: files, nombresExtensiones (List), ruta (string)
@modules: os
"""
for i in range(len(files)):
fulldirct = os.path.join(ruta, files[i])
if os.path.exists(fulldirct):
os.rename(fulldirct, os.path.join(ruta, nombresExtensiones[i]))
else:
try:
#number_of_strings = 3
length_of_string = 3
os.rename(ruta + chr(92) + files[i], ruta + chr(92) + os.path.splitext(nombresExtensiones[i])[0] + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length_of_string)) + os.path.splitext(nombresExtensiones[i])[1])
except:
print("Excepcion presentada: \n")
def copyXlsm(self, rutaFinal):
"""
@param: rutaFinal tipo string; contiene ruta expediente
@modules: os, shutil
"""
ruta = os.path.dirname(os.path.abspath(__file__)) + r"\assets\0000IndiceElectronicoC0.xlsm"
shutil.copy(ruta, rutaFinal)
self.indice = os.path.join(rutaFinal, '0000IndiceElectronicoC0.xlsm')
# Función pendiente de actualizar
def createDataFrame(self, files, ruta):
"""
@return: df (contiene los metadatos)
@modules: pandas
- Formatea nombres y los almacena en varias variables
- Renombra los archivos de la carpeta a procesar
- Obtiene metadatos de los archivos y carpetas a procesar en un df
- Adiciona informacion en columna de observaciones para el anexo que conste de un carpeta
- Renombra archivos de carpetas dentro del expediente
- Crea df con los datos a registrar en xlsm
"""
#*********************************************
#Separar instrucciones en funcion a parte
nombresExtensiones, nombres, extensiones, numeraciones, ban = self.obj1.formatNames(ruta, files)
if ban:
self.renameFiles(files, nombresExtensiones, ruta)
fullFilePaths = self.fullFilePath(nombresExtensiones, ruta)
fechamod, tama, cantidadpag, observaciones = self.obj1.getMetadata(fullFilePaths)
#*********************************************
df = | pd.DataFrame() | pandas.DataFrame |
# coding: utf-8
# # Imports
# In[1]:
import pandas as pd
import os
import numpy as np
from rdkit.Chem import AllChem as Chem
import sys
sys.path.append('..')
# In[2]:
WORK_DIR = os.getcwd()
DATA_DIR = os.path.join(WORK_DIR,'original')
data_processed_dir = os.path.join(WORK_DIR,'processed')
if not os.path.exists(data_processed_dir):
os.makedirs(data_processed_dir)
# # Read Raw Data
# Read the raw data files, rename columns, drop NaN and extraneous information
# ### ChemFluor
# In[3]:
data_location = os.path.join(DATA_DIR, 'chem_fluor/Alldata_SMILES.xlsx')
chemfluor_df = pd.read_excel(data_location)
chemfluor_df.rename(columns={'SMILES':'smiles',"Absorption/nm":'peakwavs_max'}, inplace=True)
chemfluor_df = chemfluor_df[['smiles','solvent','peakwavs_max']].copy()
chemfluor_df.dropna(inplace=True)
chemfluor_df['source'] = 'chemfluor'
chemfluor_df
# ### DSSCDB
# In[4]:
data_location = os.path.join(DATA_DIR, 'dsscdb')
xlsx_files = [x for x in os.listdir(data_location) if x.endswith('.xlsx')]
dsscdb_df = pd.DataFrame()
for file in xlsx_files:
file_location = os.path.join(data_location, file)
dsscdb_df = dsscdb_df.append(pd.read_excel(file_location), ignore_index=True, sort=True)
dsscdb_df.rename(columns={'SMILES':'smiles','SOLVENT':'solvent','ABSORPTION_MAXIMA':'peakwavs_max'}, inplace=True)
dsscdb_df = dsscdb_df[['smiles','solvent','peakwavs_max']].copy()
dsscdb_df.dropna(inplace=True)
dsscdb_df['source'] = 'dsscdb'
dsscdb_df
# ### DyeAgg
# In[5]:
data_location = os.path.join(DATA_DIR, 'dye_agg/new_dssc_Search_results.csv')
dyeagg_df = pd.read_csv(data_location, sep=';')
dyeagg_df.rename(columns={'STRUCTURE':'smiles','SOLVENT':'solvent','PEAK_ABSORPTION_SOLUTION':'peakwavs_max'},
inplace=True)
dyeagg_df = dyeagg_df[['smiles','solvent','peakwavs_max']].copy()
dyeagg_df.dropna(inplace=True)
dyeagg_df['source'] = 'dyeagg'
dyeagg_df
# ### CDEx
# In[6]:
data_location = os.path.join(DATA_DIR, 'jcole/paper_allDB.csv')
jcole_df = | pd.read_csv(data_location) | pandas.read_csv |
import pandas as pd
import pyopenms as oms
from numpy import mean, max, min, abs
class OpenMSFFMetabo():
def __init__(self, progress_callback=None):
self._feature_map = None
self._progress = 0
self._progress_callback = progress_callback
def fit(self, filenames, max_peaks_per_file=1000):
try:
feature_map = oms.FeatureMap()
except:
pass
n_files = len(filenames)
for i, fn in enumerate(filenames):
self.progress = 100*(i+1)/n_files
feature_map += oms_ffmetabo_single_file(
fn, max_peaks_per_file=max_peaks_per_file
)
self._feature_map = feature_map
@property
def progress(self):
return self._progress
@progress.setter
def progress(self, x):
self._progress = x
if self._progress_callback is not None:
self._progress_callback(x)
def transform(self, min_quality=1e-3, condensed=True,
max_delta_mz_ppm=10, max_delta_rt=0.1):
features = []
n_total = self._feature_map.size()
for i, feat in enumerate(self._feature_map):
self.progress = 100*(i+1)/n_total
quality = feat.getOverallQuality()
if ( min_quality is not None ) and ( quality < min_quality ):
continue
mz = feat.getMZ()
rt = feat.getRT() / 60
rt_width = feat.getWidth() / 60
rt_min = max([0, (rt - rt_width)])
rt_max = (rt + rt_width)
data = {'peak_label': f'mz:{mz:07.4f}-rt:{rt:03.1f}',
'mz_mean': mz, 'mz_width': 10,
'rt_min': rt_min, 'rt_max': rt_max, 'rt': rt,
'intensity_threshold': 0,
'oms_quality': quality,
'peaklist': 'OpenMSFFMetabo'}
features.append(data)
peaklist = pd.DataFrame(features)
peaklist = peaklist.reset_index(drop=True)
if condensed:
peaklist = condense_peaklist(peaklist,
max_delta_mz_ppm=max_delta_mz_ppm,
max_delta_rt=max_delta_rt,
progress_callback=self._progress_callback)
return peaklist
def fit_transform(self, filenames, max_peaks_per_file=1000,
min_quality=1e-3, condensed=True,
max_delta_mz_ppm=10, max_delta_rt=0.1,
progress_callback=None):
self.fit(filenames, max_peaks_per_file=max_peaks_per_file)
return self.transform(min_quality=1e-3, condensed=True,
max_delta_mz_ppm=10, max_delta_rt=0.1)
def oms_ffmetabo_single_file(filename, max_peaks_per_file=5000):
feature_map = oms.FeatureMap()
mass_traces = []
mass_traces_split = []
mass_traces_filtered = []
exp = oms.MSExperiment()
peak_map = oms.PeakMap()
options = oms.PeakFileOptions()
options.setMSLevels([1])
if filename.lower().endswith('.mzxml'):
fh = oms.MzXMLFile()
elif filename.lower().endswith('.mzml'):
fh = oms.MzMLFile()
else:
assert False, filename
fh.setOptions(options)
# Peak map
fh.load(filename, exp)
#for chrom in exp.getChromatograms():
# peak_map.addChrom(chrom)
for spec in exp.getSpectra():
peak_map.addSpectrum(spec)
mass_trace_detect = oms.MassTraceDetection()
mass_trace_detect.run(peak_map, mass_traces, max_peaks_per_file)
elution_peak_detection = oms.ElutionPeakDetection()
elution_peak_detection.detectPeaks(mass_traces, mass_traces_split)
feature_finding_metabo = oms.FeatureFindingMetabo()
feature_finding_metabo.run(
mass_traces_split,
feature_map,
mass_traces_filtered)
feature_map.sortByOverallQuality()
return feature_map
def condense_peaklist(peaklist, max_delta_mz_ppm=10, max_delta_rt=0.1, progress_callback=None):
cols = ['mz_mean', 'rt_min', 'rt_max', 'rt']
peaklist = peaklist.sort_values(cols)[cols]
n_before = len(peaklist)
n_after = None
while n_before != n_after:
n_before = len(peaklist)
new_peaklist = | pd.DataFrame(columns=cols) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 28 14:43:33 2021
@author: <NAME>
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from matplotlib.animation import FuncAnimation, PillowWriter
def makeTrajMovie2D(traj, sideLen, filename = 'LJtraj.gif'):
fig = plt.figure(figsize=(5,5))
plt.xlim(0 - .5, sideLen+.5)
plt.ylim(0 - .5, sideLen+.5)
graph, = plt.plot([], [], 'o')
def animate(i):
x = traj[i][:,0]
y = traj[i][:,1]
graph.set_data(x, y)
return(graph,)
ani = FuncAnimation(fig, animate, frames=len(traj), blit=True)
writergif = PillowWriter(fps=30)
ani.save(filename, writer=writergif)
def plotKEtotals(filepath):
KEdf = pd.read_csv(filepath, header=0)
KEdf.plot(x='t', y='KE')
def plotPEtotals(filepath):
PEdf = pd.read_csv(filepath, header=0)
PEdf.plot(x='t', y='PE')
def plotKEandPE(filepathKE, filepathPE):
KEdf = pd.read_csv(filepathKE, header=0)
PEdf = pd.read_csv(filepathPE, header=0)
df = pd.concat([KEdf, PEdf.PE], axis=1)
df.plot(x='t', y=['KE','PE'])
def plotTotalEnergy(filepathKE, filepathPE):
KEdf = | pd.read_csv(filepathKE, header=0) | pandas.read_csv |
from .model import Model
import pandas as pd
import sklearn
import pickle
import zlib
from algoneer.dataset.pandas import PandasDataset
from algoneer.dataset import Dataset
from algoneer.algorithm import Algorithm
from typing import Dict, Any, Union, Optional
class SklearnModel(Model):
def __init__(
self,
algorithm: Algorithm,
dataset: Optional[Dataset],
estimator: sklearn.base.BaseEstimator,
):
super().__init__(algorithm=algorithm, dataset=dataset)
self._estimator = estimator
def _predict_raw(self, dataset: Any) -> Any:
"""
Directly calls the `predict` method of the underlying estimator with an
arbitrary argument and returns the result without wrapping it into a
dataset. This is useful for interoperability with
"""
return self._estimator.predict(dataset)
@property
def data(self) -> Dict[str, Any]:
return {"pickle": zlib.compress(pickle.dumps(self._estimator))}
def predict(self, dataset: Union[Dataset, Any]) -> Dataset:
if not isinstance(dataset, Dataset):
# if we don't get a dataset we return the raw value
return self._predict_raw(dataset)
pd_dataset = PandasDataset.from_dataset(dataset)
# we get the attributes that have the "x" role assigned to them
x = pd_dataset.roles.x
columns = list(dataset.roles.y.schema.attributes.keys())
yr = self._estimator.predict(x.df)
# if the estimator returns a 1D array we reshape it
if len(yr.shape) == 1:
yr = yr.reshape((yr.shape[0], 1))
# we predict the value using an sklearn estimator
y = | pd.DataFrame(yr, columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_series_equal
from numpy.testing import assert_allclose
from pvlib import temperature
@pytest.fixture
def sapm_default():
return temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
def test_sapm_cell(sapm_default):
default = temperature.sapm_cell(900, 20, 5, sapm_default['a'],
sapm_default['b'], sapm_default['deltaT'])
assert_allclose(default, 43.509, 3)
def test_sapm_module(sapm_default):
default = temperature.sapm_module(900, 20, 5, sapm_default['a'],
sapm_default['b'])
assert_allclose(default, 40.809, 3)
def test_sapm_ndarray(sapm_default):
temps = np.array([0, 10, 5])
irrads = np.array([0, 500, 0])
winds = np.array([10, 5, 0])
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = np.array([0., 23.06066166, 5.])
expected_module = np.array([0., 21.56066166, 5.])
assert_allclose(expected_cell, cell_temps, 3)
assert_allclose(expected_module, module_temps, 3)
def test_sapm_series(sapm_default):
times = pd.date_range(start='2015-01-01', end='2015-01-02', freq='12H')
temps = pd.Series([0, 10, 5], index=times)
irrads = pd.Series([0, 500, 0], index=times)
winds = pd.Series([10, 5, 0], index=times)
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = | pd.Series([0., 23.06066166, 5.], index=times) | pandas.Series |
from sklearn.metrics import mean_squared_error
from sklearn.impute import KNNImputer
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd
def missing_val(df, method):
"""
Handles missing values.
Parameters
----------
df : pandas dataframe
Dataframe with missing values.
method : string
Method to handle missing values.
'delete', deletes row with missing values
'mean', replaces missing values with the averages
'knn', replaces missing values with nearest neighbour
Returns
-------
pandas dataframe
The dataframe without missing values.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 2, 3], [np.NaN, 5, 6], [7, 8, 9]]),
columns=['a', 'b', 'c'])
>>> missing_val(df, 'knn')
a b c
0 1 2 3
1 1 5 6
2 7 8 9
"""
# tests
if method not in ['delete', 'mean', 'knn']:
raise ValueError(
'valid methods only include "delete", "mean", and "knn"')
if not isinstance(
df,
pd.DataFrame) and not isinstance(
df,
np.ndarray) and not isinstance(
df,
pd.Series):
raise TypeError('df must be a dataframe, series, or array')
if df.empty: # edge case
raise ValueError('dataframe cannot be empty')
if all(df.dtypes != np.number): # edge case
raise ValueError('dataframe must have at least one numerical column')
for i in range(len(df.columns)): # edge case
if df.iloc[:, i].isnull().sum() == len(df):
raise ValueError('dataframe cannot columns with all NaN values')
# function
if method == 'delete':
df = df.dropna()
if method == 'mean':
df_num = df.select_dtypes(include=np.number)
df_cat = df.select_dtypes(exclude=np.number)
df_num = df_num.fillna(df.mean())
df = pd.concat([df_num, df_cat], axis=1)
if method == 'knn':
df_num = df.select_dtypes(include=np.number)
df_cat = df.select_dtypes(exclude=np.number)
imputer = KNNImputer(n_neighbors=2, weights="uniform")
df_num = pd.DataFrame(imputer.fit_transform(df_num))
df = pd.concat([df_num, df_cat], axis=1)
return df
def fit_and_report(model, X, y, Xv, yv, m_type='regression'):
"""
Fits a model and returns the train and validation errors as a list
Parameters
---------
model : sklearn classifier model
The sklearn model
X : numpy.ndarray
The features of the training set
y : numpy.ndarray
The target of the training set
Xv : numpy.ndarray
The feature of the validation set
yv : numpy.ndarray
The target of the validation set
m_type : str
The type for calculating error (default = 'regression')
Returns
-------
errors : list
A list containing train (on X, y) and validation (on Xv, yv) errors
Examples
--------
>>> iris = datasets.load_iris(return_X_y = True)
>>> knn_c = KNeighborsClassifier()
>>> knn_r = KNeighborsRegressor()
>>> X = iris[0][1:100]
>>> y =iris[1][1:100]
>>> Xv = iris[0][100:]
>>> yv = iris[1][100:]
>>> fit_and_report(knn_r, X,y, Xv,yv, 'regression')
[0.0, 1.0]
"""
if not isinstance(m_type, str):
raise TypeError('Input should be a string')
if "sklearn" not in str(type(model)):
raise TypeError('model should be from sklearn package')
if "numpy.ndarray" not in str(type(X)):
raise TypeError('Input X should be a numpy array')
if "numpy.ndarray" not in str(type(y)):
raise TypeError('Input y should be a numpy array')
if "numpy.ndarray" not in str(type(Xv)):
raise TypeError('Input Xv should be a numpy array')
if "numpy.ndarray" not in str(type(yv)):
raise TypeError('Input yv should be a numpy array')
model.fit(X, y)
if m_type.lower().startswith('regress'):
errors = [
mean_squared_error(
y, model.predict(X)), mean_squared_error(
yv, model.predict(Xv))]
if m_type.lower().startswith('classif'):
errors = [1 - model.score(X, y), 1 - model.score(Xv, yv)]
return errors
def ForSelect(
model,
data_feature,
data_label,
max_features=None,
problem_type='regression',
cv=3):
"""
Implementation of forward selection algorithm.
Search and score with mean cross validation score
using feature candidates and
add features with the best score each step.
Uses mean squared error for regression,
accuracy for classification problem.
Parameters
--------
model : object
sklearn model object
data_feature : object
pandas DataFrame object (features/predictors)
data_label : object
pandas Series object (labels)
max_features : integer
number of maximum features to select
problem_type : string
problem type {"classification", "regression"}
cv : integer
k for k-fold-cross-validation
Returns
--------
list
a list of selected column/feature names
Example
--------
>>> rf = RandomForestClassifier()
>>> iris = datasets.load_iris(return_X_y = True)
>>> X_train = pd.DataFrame(iris[0][1:100])
>>> y_train = pd.Series(iris[1][1:100])
>>> ForSelect(rf,
data_feature=X_train,
data_label=y_train,
max_features=5,
problem_type="classification",
cv=2)
"""
# Test Input Types
if "sklearn" not in str(type(model)):
raise TypeError("Your Model should be sklearn model")
if (not isinstance(max_features, int)) and (max_features is not None):
raise TypeError("Your max number of features should be an integer")
if not isinstance(cv, int):
raise TypeError("Your cross validation number should be an integer")
if not isinstance(data_feature, pd.DataFrame):
raise TypeError("Your data_feature must be a pd.DataFrame object")
if not isinstance(data_label, pd.Series):
raise TypeError("Your data_label must be a pd.Series object")
if problem_type not in ["classification", "regression"]:
raise ValueError(
"Your problem should be 'classification' or 'regression'")
if data_feature.shape[0] != data_label.shape[0]:
raise IndexError(
"Number of rows are different in training feature and label")
# Create Empty Feature list
ftr_ = []
# Define maximum amount of features
if max_features is None:
max_features = data_feature.shape[1]
# total list of features
total_ftr = list(range(0, data_feature.shape[1]))
# define scoring
if problem_type == "regression":
scoring = 'neg_mean_squared_error'
else:
scoring = 'accuracy'
# initialize error score
best_score = -np.inf
i = 0
while len(ftr_) < max_features:
# remove already selected features
features_unselected = list(set(total_ftr) - set(ftr_))
# Initialize potential candidate feature to select
candidate = None
# Iterate
for feature in features_unselected:
ftr_candidate = ftr_ + [feature]
eval_score = np.mean(
cross_val_score(
model,
data_feature[ftr_candidate],
data_label,
cv=cv,
scoring=scoring))
# If computed error score is better than our current best score
if eval_score > best_score:
best_score = eval_score # Overwrite the best_score
candidate = feature # Consider the feature as candidate
# Add the selected feature
if candidate is not None:
ftr_.append(candidate)
# Report Progress
i = i + 1
else:
# End process
break
# End Process
print("Final selected features: {}".format(ftr_))
return ftr_
def feature_splitter(data):
"""
Splits dataset column names into a tuple of categorical and numerical lists
Parameters
----------
x : DateFrame
Returns
-------
tuple
tuple of two lists
Example
-------
>>> df = {'Name': ['John', 'Micheal', 'Lindsey', 'Adam'],
'Age': [40, 22, 39, 15],
'Height(m)': [1.70, 1.82, 1.77, 1.69],
'Anual Salary(USD)': [40000, 65000, 70000, 15000],
'Nationality': ['Canada', 'USA', 'Britain', 'Australia'],
'Marital Status': ['Married', 'Single', 'Maried', 'Single']}
>>> df = pd.DataFrame(df)
>>> feature_splitter(df)
(['Age', 'Height(m)', 'Anual Salary(USD)'],
['Name', 'Nationality', 'Marital Status'])
"""
# Identify the categorical and numeric columns
assert data.shape[1] > 1 and data.shape[0] > 1, "Your data file in not valid, dataframe should have at least\
one column and one row"
if not isinstance(data, pd.DataFrame):
raise Exception('the input data should be a data frame')
d_types = data.dtypes
categorical = []
numerical = []
for data_type, features in zip(d_types, d_types.index):
if data_type == "object":
categorical.append(features)
else:
numerical.append(features)
assert len(numerical) + \
len(categorical) == data.shape[1], "categorical and numerical variable list must match\
df shape"
numerical = pd.DataFrame(numerical, columns=['Numerical'])
categorical = | pd.DataFrame(categorical, columns=['Categorical']) | pandas.DataFrame |
import glob
import json
import numpy as np
import pandas as pd
INDEX = ['Name', 'display_name', 'genre', 'title', 'ruler', 'date', 'provenience', 'line_count', 'word_count',
"Personal_Names", "aproximate_breaks", "number_of_dates", "number_of_eponyms", "places_of_dates_in_text", 'Relative_Path']
TEXTUS = pd.DataFrame(columns=INDEX, index=["Name"])
EXCEL_PATH = "data/Finished/PreWork/textdata.xlsx"
CSV_PATH = "data/Finished/PreWork/textdata.csv"
def set_project(project_list: list = ["rinap", "saao"], *project: str):
global METADATA, FILE_LIST, FILE_DICT
if project_list is None:
for name in project:
for gl in [*glob.glob(f"jsons_unzipped/{name}/catal*.json"),
*glob.glob(f"jsons_unzipped/{name}/*/catal*.json")]:
with open(gl, encoding="utf_8") as file:
METADATA.update(json.load(file)["members"])
FILE_LIST.append(glob.glob(f"jsons_unzipped/{name}/*/corpusjson/*.json", recursive=True))
elif len(project_list):
for name in project_list:
for gl in [*glob.glob(f"jsons_unzipped/{name}/catal*.json"),
*glob.glob(f"jsons_unzipped/{name}/*/catal*.json")]:
with open(gl, encoding="utf_8") as file:
METADATA.update(json.load(file)["members"])
FILE_LIST += glob.glob(f"jsons_unzipped/{name}/*/corpusjson/*.json", recursive=True)
else:
project_list += list(project)
for name in project_list:
for gl in [*glob.glob(f"jsons_unzipped/{name}/catal*.json"),
*glob.glob(f"jsons_unzipped/{name}/*/catal*.json")]:
with open(gl, encoding="utf_8") as file:
METADATA.update(json.load(file)["members"])
FILE_LIST += glob.glob(f"jsons_unzipped/{name}/*/corpusjson/*.json", recursive=True)
FILE_DICT = {filus[filus.rfind("\\")+1:filus.find(".json")]: filus for filus in FILE_LIST}
METADATA = {}
FILE_LIST = []
FILE_DICT = {}
def __from_json(data):
'''HELPER: reads the json data, if the __From_JSON didn't work very well
recursive function
:param data: the data from the call. can be a list or a dictionary, and this will try to extract from it
:type data: dict,list
:return: a __from_json(data) value, if it has 'cdl' inside the dictionary, or in the dictionaries in the list.
if it has no 'cdl' value, it returns a list of the words from the json
:rtype: dict, list
'''
if isinstance(data, list):
for pos in data:
if "cdl" in data:
return __from_json(pos)
return data
elif isinstance(data, dict):
return __from_json(data["cdl"])
def __From_JSON(file):
'''HELPER: reads the json data as a dictionary and tries to return the list of elements
:param file: json file that was open
:type file: dict
:return: list of the elements in the json, if it works, otherwise, tries __from_json
:rtype: list, __from_json
'''
try:
return file['cdl'][0]['cdl'][-1]['cdl'][0]['cdl']
except KeyError:
return __from_json(file)
def _get_data(file):
'''HELPER: reads the json data and return its word list
:param file: a json file path to read
:type file: str
:return: the list of elements from the json file
:rtype: list
'''
try:
with open(file, "r", encoding="utf_8") as file:
Json = __From_JSON(json.load(file))
return Json
except json.JSONDecodeError:
pass
def read_text(file: str):
'''reads the file list, and count the number of lines and words
:param file: the json file path
:type file: str
:return: counter of the words and the lines in the file
:rtype: list
'''
counter = [0, 0, ""]
try:
for j in _get_data(file):
if "label" in j:
counter[0] += 1
counter[2] = j["label"]
else:
counter[1] += 1
except TypeError:
pass
finally:
return counter
def count_personal_names(word: dict) -> int:
'''count the personal names in the file
:param word: a dictionary that might be personal name
:type word: dict
:return: 0 if the word is not personal name, and 1 if it is
:rtype: int
'''
try:
return 1 if word.get("f") and word["f"].get("pos") and word["f"]["pos"] == "PN" else 0
except TypeError:
pass
def aproximate_breaks(word: dict) -> int:
'''this function counts the APROXIMATE breaks in the texts. It does not concider the lenght of the break, size and shape, only if there is any
:param word: a file to check
:type file: str
:return: count of the breaks
:rtype: int
'''
try:
return 1 if word.get("f") and word["f"].get("gdl") and word["f"]["gdl"][0].get("break") else 0
except TypeError:
pass
def __checkable_date_word(word: dict) -> bool:
'''HELPER:__checkable_date_word checks if the word is checkable for get date
:param word: a word to check if all the parameters in the dictionary are included
:type word: dict
:return: true if everything is fine for checking, false otherwise
:rtype: bool
'''
return word.get("f") and word["f"].get("gw") and word["f"].get("pos")
def _detect_date(word: dict) -> int:
'''HELPER: _detect_date detects if there is a date paricle in the text. if there is, it returns what date particle it is.
:param word: a word dictionary to check
:type word: dict
:return: index of date particle, in the tuple (day, month, year), if it is a date, else, it returns false
:rtype: int
'''
if __checkable_date_word(word):
if word["f"]["gw"] == "day":
return 0
elif word["f"]["pos"] == "MN":
return 1
elif "eponym" in word['f']['gw']:
return 2
return False
def count_date_info(word: dict) -> np.array:
'''count_date_info counts the dates in the text
:param word: dict to check
:type word: dict
:return: the number of dates in the text in the array format of (day,month,year) as np.array
:rtype: np.array
'''
counter_date = np.array([0, 0, 0])
try:
if word.get("f") and word["f"].get("gw") and word["f"].get("pos") and _detect_date(word):
date_part = _detect_date(word)
counter_date[date_part] += 1 if date_part else 0
return counter_date
except TypeError:
pass
def place_of_dates(word: dict, line: str, last_line: str) -> str:
'''place_of_dates checks where the dates appear in the text
:param word: word to check if it is date
:type word: dict
:param line: line in the text
:type line: str
:param last_line: last line of the text
:type last_line: str
:return: place of the date in the line if it is not at the end, -1 if it is in the end
:rtype: str
'''
'''place_of_dates
:param file: file to check
:type file: str
:return: list of places of dates in the text. if there is a date in the last line of the text, it will be converted to -1
:rtype: list
'''
try:
if word.get("f") and word["f"].get("gw") and word["f"].get("pos") and _detect_date(word):
return line if line != last_line else -1
except TypeError:
pass
def count_eponyms(word: dict) -> int:
'''count_eponyms counts the appearnce of the eponyms in the text, except in līmu lists, which are eponym lists
:param word: word to check the number of eponyms
:type word: dict
:return: count of the eponyms mention in the text
:rtype: int
'''
return 1 if _detect_date(word) == 2 else 0
def _ratio(var1: int, var2: int) -> float:
'''ratio calculates the ratio between the two varibales
:param var1: the first variable
:type var1: int
:param var2: secound variable
:type var2: int
:return: the ratio if var2 != 0, 0 otherwise
:rtype: float
'''
return float(var1/var2) if var2 != 0 else 0
def main_data_loop(file: str) -> dict:
'''main_data_loop is the main loop that runs over the file, and categorize and manipulatize it, for editing and entering to the data table
:param file: path of the file for manipulize
:type file: str
:return: a dicitionary that contains all the data
:rtype: dict
'''
text_counter = read_text(file)
line_count = text_counter[0]
word_count = text_counter[1]
places_of_dates_in_text = []
eponyms = 0
number_of_dates = np.array([0, 0, 0])
Aproximate_breaks = 0
personal_names = 0
last_line = text_counter[2]
current_line = ""
data = _get_data(file) if _get_data(file) else []
for word in data:
if word.get("label"):
current_line = word["label"]
personal_names += count_personal_names(word)
Aproximate_breaks += aproximate_breaks(word)
if current_line and place_of_dates(word, current_line, last_line):
places_of_dates_in_text.append(place_of_dates(word, current_line, last_line))
if _detect_date(word):
number_of_dates += count_date_info(word)
eponyms += count_eponyms(word)
places_of_dates_in_text = places_of_dates_in_text if places_of_dates_in_text else None
return {"line_count": line_count, "word_count": word_count, "places_of_dates_in_text": places_of_dates_in_text,
"number_of_eponyms": eponyms, "number_of_dates": max(number_of_dates), "aproximate_breaks": Aproximate_breaks,
"Personal_Names": personal_names, 'Word_ratio': _ratio(personal_names, word_count), 'Line_ratio': _ratio(personal_names, line_count),
'Break_ratio': _ratio(personal_names, Aproximate_breaks), 'Fragmentary': _ratio(word_count, Aproximate_breaks)}
def dict_a_file(Key: str):
'''the function gets an entry from the user or the function: a key from the files.
the function adds the data to a temporal dict to fulfill the entry with any metadata and calculations of the words, lines,
personal names, and the ratio between the PN and the lines, and the PN and the words. finally the function will add the relative path
of the file to the dictionary, and will call to add the temporal_dictionary to the DataFrame TEXTUS.
if the key is not in the catalogue, it will skip the metadata import
:param Key: the name of the text
:type Key: str
'''
global METADATA, FILE_DICT, INDEX
data_dict = METADATA[Key] if METADATA.get(Key) and METADATA and FILE_DICT else None
temporal_dictionary = {ind: data_dict[ind] for ind in INDEX if ind in data_dict} if data_dict is not None else {}
temporal_dictionary.update(main_data_loop(FILE_DICT[Key]))
temporal_dictionary.update({"Name": Key})
temporal_dictionary['Relative_Path'] = FILE_DICT[Key].split("ancient-text-processing")[-1][1:].replace(
"\\", "/") if "ancient-text-processing" in FILE_DICT[Key] else FILE_DICT[Key].replace("\\", "/")
append_textus(temporal_dictionary)
def dict_file():
'''This function creates a loop that going throu all of the keys of the FILE_DICT, and calls "dict_a_file" function
'''
for filename in FILE_DICT:
dict_a_file(filename)
def dict_a_metadata(data: str):
'''the function gets an entry from the user or the function: a key from the metadata, which is NOT in the files.
the function adds the data to fulfill the entry with the metadata. finally the function adds the key name to the temporal_dictionary
and call the append_textus function to add it to the TEXTUS DataFrame
:param data: a key that in the METADATA dictionary
:type data: str
'''
global METADATA, INDEX, FILE_DICT
if data not in FILE_DICT:
metadata = METADATA[data]
temporal_dictionary = {ind: metadata[ind] for ind in INDEX if ind in metadata}
temporal_dictionary["Name"] = data
append_textus(temporal_dictionary)
def dict_metadata():
'''this function creates a loop that running throu all the keys of the METADATA dicionary and calls "dict_a_metadata" function
'''
global METADATA, INDEX, FILE_DICT
for data in METADATA:
dict_a_metadata(data)
def append_textus(dict_from_file: dict):
'''this function gets a dictionary, and appends it to the TEXTUS DataFrame
:param dict_from_file: a dictionary of the desired values to add to the DataFrame
:type dict_from_file: dict
'''
global TEXTUS
TEXTUS = TEXTUS.append(dict_from_file, True)
if __name__ == "__main__":
set_project()
dict_file()
dict_metadata()
path = TEXTUS['Relative_Path']
TEXTUS.drop('Relative_Path', 1, inplace=True)
TEXTUS.insert(TEXTUS.columns.size, 'Relative_Path', path)
with | pd.ExcelWriter(EXCEL_PATH) | pandas.ExcelWriter |
"""Daycount calculations, Schedule creation, and so on.
Only a small number of required daycount conventions are included here.
For full list, see the following:
https://developers.opengamma.com/quantitative-research/Interest-Rate-Instruments-and-Market-Conventions.pdf
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from pandas import Series
from datetime import date
def act365_fixed(start, end):
"""Compute accrual fraction using convention: Actual/365 Fixed"""
if isinstance(start, pd.DatetimeIndex):
start = Series(start)
if isinstance(end, pd.DatetimeIndex):
end = Series(end)
if isinstance(start, Series) or isinstance(end, Series):
return (end - start) / np.timedelta64(365, 'D')
else:
return (end - start).days / 365.0
def year_30360(dt):
"""Helper function for thirty360"""
return dt.year + dt.month / 12 + dt.day / 360
def thirty360(start, end):
"""Compute accrual fraction using convention: 30/360 Unadjusted
This version does not apply any End-Of-Month (EOM) rules.
==> It is rarely used in practice!
It is fast and simple, so valuable for development and testing.
"""
if isinstance(start, date):
start = year_30360(start)
else:
start = Series(start).map(year_30360)
if isinstance(end, date):
end = year_30360(end)
else:
end = Series(end).map(year_30360)
return end - start
daycount_conventions = {
'ACT365FIXED': act365_fixed,
'30360': thirty360,
}
def daycounter(name=None):
"""Function to compute accrual, given name from daycount_conventions"""
return daycount_conventions.get(name, thirty360)
# TODO - What is the following used for?
FREQ_TO_YEAR_FRAC = {
'D': 1/365,
'W': 1/52,
'WS': 1/52,
'M': 1/12,
'MS': 1/12,
'Q': 1/4,
'QS': 1/4,
'A': 1,
'AS': 1
}
def freq_to_frac(freq):
return FREQ_TO_YEAR_FRAC[freq]
def date_range(start, end, freq):
"""
Generate range of dates.
Parameters
----------
start: str, date, datetime
start date of range (exclusive)
end: str, date, datetime
end date of range(inclusive)
freq: str
D, W, M, Q, A for end (WS, MS, QS, AS for start)
If None, return DatetimeIndex with end.
Returns
-------
`DatetimeIndex`
"""
if freq is None:
return pd.DatetimeIndex([end])
if isinstance(end, str):
end = pd.to_datetime(end)
return pd.date_range(start, end, freq=freq)
def to_offset(s):
"""Pass in a string to get a date offset.
Arguments
---------
s: str
offset string which has format "<int> <freq>" where
frequency can be one of "days", "months", or "years".
"""
amount, freq = s.split(' ')
kwargs = {freq: int(amount)}
return pd.tseries.offsets.DateOffset(**kwargs)
if __name__ == '__main__':
# TODO Move this into tests
today = | pd.to_datetime('today') | pandas.to_datetime |
import ast
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score, accuracy_score
df = | pd.read_csv('sanitycheck/perfect_prune/result.csv') | pandas.read_csv |
"""
Once the CSV files of source_ids, ages, and references are assembled,
concatenate and merge them.
Date: May 2021.
Background: Created for v0.5 target catalog merge, to simplify life.
Contents:
AGE_LOOKUP: manual lookupdictionary of common cluster ages.
get_target_catalog
assemble_initial_source_list
verify_target_catalog
"""
import numpy as np, pandas as pd
import os
from glob import glob
from cdips.utils.gaiaqueries import (
given_source_ids_get_gaia_data, given_votable_get_df
)
from cdips.paths import DATADIR, LOCALDIR
clusterdatadir = os.path.join(DATADIR, 'cluster_data')
localdir = LOCALDIR
agefmt = lambda x: np.round(np.log10(x),2)
AGE_LOOKUP = {
# Rizzuto+17 clusters. Hyades&Praesepe age from Brandt and Huang 2015,
# following Rebull+17's logic.
'Hyades': agefmt(8e8),
'Praesepe': agefmt(8e8),
'Pleiades': agefmt(1.25e8),
'PLE': agefmt(1.25e8),
'Upper Sco': agefmt(1.1e7),
'Upper Scorpius': agefmt(1.1e7),
'Upper Sco Lit.': agefmt(1.1e7),
# Furnkranz+19
'ComaBer': agefmt(4e8),
'ComaBerNeighborGroup': agefmt(7e8),
# EsplinLuhman, and other
'Taurus': agefmt(5e6),
'TAU': agefmt(5e6),
# assorted
'PscEri': agefmt(1.25e8),
'LCC': agefmt(1.1e7),
'ScoOB2': agefmt(1.1e7),
'ScoOB2_PMS': agefmt(1.1e7),
'ScoOB2_UMS': agefmt(1.1e7),
# Meingast 2021
'Blanco 1': agefmt(140e6),
'IC 2391': agefmt(36e6),
'IC 2602': agefmt(40e6),
'Melotte 20': agefmt(87e6),
'Melotte 22': agefmt(125e6),
'NGC 2451A': agefmt(44e6),
'NGC 2516': agefmt(170e6),
'NGC 2547': agefmt(30e6),
'NGC 7092': agefmt(310e6),
'Platais 9': agefmt(100e6),
# Gagne2018 moving groups
'118TAU': agefmt(10e6),
'ABDMG': agefmt(149e6),
'CAR': agefmt(45e6),
'CARN': agefmt(200e6),
'CBER': agefmt(400e6),
'COL': agefmt(42e6),
'EPSC': agefmt(4e6),
'ETAC': agefmt(8e6),
'HYA': agefmt(750e6),
'IC2391': agefmt(50e6),
'IC2602': agefmt(40e6),
'LCC': agefmt(15e6),
'OCT': agefmt(35e6),
'PL8': agefmt(60e6),
'ROPH': agefmt(2e6),
'THA': agefmt(45e6),
'THOR': agefmt(22e6),
'Tuc-Hor': agefmt(22e6),
'TWA': agefmt(10e6),
'UCL': agefmt(16e6),
'CRA': agefmt(10e6),
'UCRA': agefmt(10e6),
'UMA': agefmt(414e6),
'USCO': agefmt(10e6),
'XFOR': agefmt(500e6),
'{beta}PMG': agefmt(24e6),
'BPMG': agefmt(24e6),
# CantatGaudin2019 vela
'cg19velaOB2_pop1': agefmt(46e6),
'cg19velaOB2_pop2': agefmt(44e6),
'cg19velaOB2_pop3': agefmt(40e6),
'cg19velaOB2_pop4': agefmt(35e6),
'cg19velaOB2_pop5': agefmt(25e6),
'cg19velaOB2_pop6': agefmt(20e6),
'cg19velaOB2_pop7': agefmt(11e6),
}
def assemble_initial_source_list(catalog_vnum):
"""
Given LIST_OF_LISTS_STARTER_v0.5.csv , exported from
/doc/list_of_cluster_member_lists.ods, clean and concatenate the cluster
members. Flatten the resulting list on source_ids, joining the cluster,
age, and bibcode columns into comma-separated strings.
"""
metadf = pd.read_csv(
os.path.join(clusterdatadir, 'LIST_OF_LISTS_STARTER_V0.6.csv')
)
metadf['bibcode'] = metadf.ads_link.str.extract("abs\/(.*)\/")
N_stars_in_lists = []
Nstars_with_age_in_lists = []
dfs = []
# for each table, concatenate into a dataframe of source_id, cluster,
# log10age ("age").
for ix, r in metadf.iterrows():
print(79*'-')
print(f'Beginning {r.reference_id}...')
csvpath = os.path.join(clusterdatadir, r.csv_path)
assert os.path.exists(csvpath)
df = pd.read_csv(csvpath)
df['reference_id'] = r.reference_id
df['reference_bibcode'] = r.bibcode
if 'HATSandHATNcandidates' in r.reference_id:
df['reference_bibcode'] = 'JoelHartmanPrivComm'
colnames = df.columns
#
# every CSV file needs a Gaia DR2 "source_id" column
#
if "source" in colnames:
df = df.rename(
columns={"source":"source_id"}
)
#
# every CSV file needs a "cluster name" name column
#
if "assoc" in colnames:
df = df.rename(
columns={"assoc":"cluster"} # moving groups
)
colnames = df.columns
if "cluster" not in colnames:
msg = (
f'WRN! for {r.reference_id} did not find "cluster" column. '+
f'Appending the reference_id ({r.reference_id}) as the cluster ID.'
)
print(msg)
df['cluster'] = r.reference_id
#
# every CSV file needs an "age" column, which can be null, but
# preferably is populated.
#
if "age" not in colnames:
if r.reference_id in [
'CantatGaudin2018a', 'CantatGaudin2020a', 'CastroGinard2020',
'GaiaCollaboration2018lt250', 'GaiaCollaboration2018gt250'
]:
# get clusters and ages from CG20b; use them as the reference
cg20bpath = os.path.join(
clusterdatadir,
"v05/CantatGaudin20b_cut_cluster_source_age.csv"
)
df_cg20b = pd.read_csv(cg20bpath)
cdf_cg20b = df_cg20b.drop_duplicates(subset=['cluster','age'])[
['cluster', 'age']
]
# cleaning steps
if r.reference_id == 'CastroGinard2020':
df['cluster'] = df.cluster.str.replace('UBC', 'UBC_')
elif r.reference_id in [
'GaiaCollaboration2018lt250',
'GaiaCollaboration2018gt250'
]:
df['cluster'] = df.cluster.str.replace('NGC0', 'NGC_')
df['cluster'] = df.cluster.str.replace('NGC', 'NGC_')
df['cluster'] = df.cluster.str.replace('IC', 'IC_')
df['cluster'] = df.cluster.str.replace('Stock', 'Stock_')
df['cluster'] = df.cluster.str.replace('Coll', 'Collinder_')
df['cluster'] = df.cluster.str.replace('Trump02', 'Trumpler_2')
df['cluster'] = df.cluster.str.replace('Trump', 'Trumpler_')
_df = df.merge(cdf_cg20b, how='left', on=['cluster'])
assert len(_df) == len(df)
df['age'] = _df['age']
print(
f'For {r.reference_id} got {len(df[~pd.isnull(df.age)])}/{len(df)} finite ages via CantatGaudin2020b crossmatch on cluster ID.'
)
del _df
elif (
('Zari2018' in r.reference_id)
or
('Oh2017' in r.reference_id)
or
('Ujjwal2020' in r.reference_id)
or
('CottenSong' in r.reference_id)
or
('HATSandHATNcandidates' in r.reference_id)
or
('SIMBAD' in r.reference_id)
or
('Gagne2018' in r.reference_id)
):
age = np.ones(len(df))*np.nan
df['age'] = age
else:
age_mapper = lambda k: AGE_LOOKUP[k]
age = df.cluster.apply(age_mapper)
df['age'] = age
N_stars_in_lists.append(len(df))
Nstars_with_age_in_lists.append(len(df[~pd.isnull(df.age)]))
dfs.append(df)
assert (
'source_id' in df.columns
and
'cluster' in df.columns
and
'age' in df.columns
)
metadf["Nstars"] = N_stars_in_lists
metadf["Nstars_with_age"] = Nstars_with_age_in_lists
# concatenation.
nomagcut_df = pd.concat(dfs)
assert np.sum(metadf.Nstars) == len(nomagcut_df)
# clean ages
sel = (nomagcut_df.age == -np.inf)
nomagcut_df.loc[sel,'age'] = np.nan
nomagcut_df['age'] = np.round(nomagcut_df.age,2)
#
# merge duplicates, and ','-join the cluster id strings, age values
#
scols = ['source_id', 'cluster', 'age', 'reference_id', 'reference_bibcode']
nomagcut_df = nomagcut_df[scols].sort_values(by='source_id')
for c in nomagcut_df.columns:
nomagcut_df[c] = nomagcut_df[c].astype(str)
print(79*'-')
print('Beginning aggregation (takes ~2-3 minutes for v0.5)...')
_ = nomagcut_df.groupby('source_id')
df_agg = _.agg({
"cluster": list,
"age": list,
"reference_id": list,
"reference_bibcode": list
})
u_sourceids = np.unique(nomagcut_df.source_id)
N_sourceids = len(u_sourceids)
assert len(df_agg) == N_sourceids
df_agg["source_id"] = df_agg.index
# turn the lists to comma separated strings.
outdf = pd.DataFrame({
"source_id": df_agg.source_id,
"cluster": [','.join(map(str, l)) for l in df_agg['cluster']],
"age": [','.join(map(str, l)) for l in df_agg['age']],
"mean_age": [np.round(np.nanmean(np.array(l).astype(float)),2) for l in df_agg['age']],
"reference_id": [','.join(map(str, l)) for l in df_agg['reference_id']],
"reference_bibcode": [','.join(map(str, l)) for l in df_agg['reference_bibcode']],
})
outpath = os.path.join(
clusterdatadir, f'list_of_lists_keys_paths_assembled_v{catalog_vnum}.csv'
)
metadf.to_csv(outpath, index=False)
print(f'Made {outpath}')
outpath = os.path.join(
clusterdatadir, f'cdips_targets_v{catalog_vnum}_nomagcut.csv'
)
outdf.to_csv(outpath, index=False)
print(f'Made {outpath}')
def verify_target_catalog(df, metadf):
"""
Check that each entry in the (pre magnitude cut) target catalog has
a source_id that matches the original catalog. (i.e., ensure that no
int/int64/str lossy conversion bugs have happened).
"""
print(79*'-')
print('Beginning verification...')
print(79*'-')
for ix, r in metadf.sort_values('Nstars').iterrows():
print(f'{r.reference_id} (Nstars={r.Nstars})...')
sel = df.reference_id.str.contains(r.reference_id)
df_source_ids = np.array(df.loc[sel, 'source_id']).astype(np.int64)
csvpath = os.path.join(clusterdatadir, r.csv_path)
df_true = pd.read_csv(csvpath)
if 'source_id' not in df_true.columns:
df_true = df_true.rename(columns={"source":"source_id"})
true_source_ids = (
np.unique(np.array(df_true.source_id).astype(np.int64))
)
np.testing.assert_array_equal(
np.sort(df_source_ids), np.sort(true_source_ids)
)
print('Verified that the pre-mag cut target catalog has source_ids that '
'correctly match the original. ')
print(79*'-')
def verify_gaia_xmatch(df, gdf, metadf):
"""
Check that each entry in the target catalog has a Gaia xmatch source_id
that matches the original catalog. For any that do not, understand why not.
"""
print(79*'-')
print('Beginning Gaia xmatch verification...')
print(79*'-')
gdf_source_ids = np.unique(np.array(gdf.source_id).astype(np.int64))
for ix, r in metadf.sort_values('Nstars').iterrows():
print(f'{r.reference_id} (Nstars={r.Nstars})...')
sel = df.reference_id.str.contains(r.reference_id)
df_source_ids = np.array(df.loc[sel, 'source_id']).astype(np.int64)
int1d = np.intersect1d(df_source_ids, gdf_source_ids)
if not len(int1d) == len(df_source_ids):
msg = f'\tWRN! {r.reference_id} only got {len(int1d)} Gaia xmatches.'
print(msg)
if 'NASAExoArchive' in r.reference_id:
csvpath = os.path.join(clusterdatadir, r.csv_path)
df_true = pd.read_csv(csvpath)
missing = df_source_ids[
~np.in1d(df_source_ids, gdf_source_ids)
]
# NOTE: should not be raised.
print('Verified that the pre-mag cut target catalog has source_ids that '
'match the original (or close enough). ')
print(79*'-')
def get_target_catalog(catalog_vnum, VERIFY=1):
"""
1. Assemble the target catalog (down to arbitrary brightness; i.e, just
clean and concatenate).
2. Manually async query the Gaia database based on those source_ids.
3. Verify the result, and merge and write it.
"""
csvpath = os.path.join(
clusterdatadir, f'cdips_targets_v{catalog_vnum}_nomagcut.csv'
)
if not os.path.exists(csvpath):
assemble_initial_source_list(catalog_vnum)
df = pd.read_csv(csvpath)
# made by assemble_initial_source_list above.
metapath = os.path.join(
clusterdatadir, f'list_of_lists_keys_paths_assembled_v{catalog_vnum}.csv'
)
metadf = pd.read_csv(metapath)
if VERIFY:
# one-time verification
verify_target_catalog(df, metadf)
# e.g., cdips_v05_1-result.vot.gz
votablepath = os.path.join(
clusterdatadir, f'cdips_v{str(catalog_vnum).replace(".","")}_1-result.vot.gz'
)
if not os.path.exists(votablepath):
temppath = os.path.join(clusterdatadir, f'v{str(catalog_vnum).replace(".","")}_sourceids.csv')
print(f'Wrote {temppath}')
df['source_id'].to_csv(
temppath,
index=False
)
querystr = (
"SELECT top 2000000 g.source_id, g.ra, g.dec, g.parallax, "+
"g.parallax_error, g.pmra, g.pmdec, g.phot_g_mean_mag, "+
"g.phot_rp_mean_mag, g.phot_bp_mean_mag FROM "+
f"user_lbouma.v{str(catalog_vnum).replace('.','')}_sourceids as u, gaiadr2.gaia_source AS g WHERE "+
"u.source_id=g.source_id "
)
print('Now you must go to https://gea.esac.esa.int/archive/, login, and run')
print(querystr)
assert 0
# # NOTE: the naive implementation below doesn't work, probably because of a
# # sync/async issue. given_source_ids_get_gaia_data now raises an
# # error # if n_max exceeds 5e4, because the ~70k items that WERE
# # returned are duds.
# cols = (
# 'g.source_id, g.ra, g.dec, g.parallax, g.parallax_error, g.pmra, '
# 'g.pmdec, g.phot_g_mean_mag, g.phot_rp_mean_mag, g.phot_bp_mean_mag'
# )
# gdf = given_source_ids_get_gaia_data(
# np.array(df.source_id.astype(np.int64)),
# f'cdips_targets_v{catalog_vnum}',
# n_max=int(2e6), overwrite=False,
# enforce_all_sourceids_viable=True, whichcolumns=cols,
# gaia_datarelease='gaiadr2'
# )
gdf = given_votable_get_df(votablepath, assert_equal='source_id')
if not len(gdf) == len(df):
print(79*"*")
print('WRN!')
print(f'Expected {len(df)} matches in Gaia DR2')
print(f'Got {len(gdf)} matches in Gaia DR2')
print(79*"*")
verify_gaia_xmatch(df, gdf, metadf)
# every queried source_id should have a result. the two that do not are
# EsplinLuhman2019, 377 matches to 443 stars, and Gagne2018c, 914 matches
# to 916 stars. this is 60 missing stars out of 1.5 million. we'll be okay.
# so, do the merge using the GAIA xmatch results as the base.
mdf = gdf.merge(df, on='source_id', how='left')
#
# update metadf with new info.
#
N_stars_in_lists = []
Nstars_with_age_in_lists = []
N_sel0 = []
N_sel1 = []
N_sel2 = []
for ix, r in metadf.iterrows():
csvpath = os.path.join(clusterdatadir, r.csv_path)
assert os.path.exists(csvpath)
_df = pd.read_csv(csvpath)
if 'source_id' not in _df.columns:
_df = _df.rename(columns={"source":"source_id"})
_sel = mdf.source_id.isin(_df.source_id)
N_stars_in_lists.append(len(mdf[_sel]))
_selage = (~ | pd.isnull(mdf.age) | pandas.isnull |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.