prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
import pandas as pd
import numpy as np
import numpy as np
from fredapi import Fred
api_key = '...'
fred = Fred(api_key=api_key)
ticker_tcoes_Rdata=pd.read_excel(...)
ticker=ticker_tcoes_Rdata.iloc[:,0] #data tickers
tcodes=ticker_tcoes_Rdata.iloc[:,1] #transform codes
raw_data.to_excel('/home/emre/Masaüstü/Replication of Fred-MD Python/raw_data.xlsx')
#HWI Help-Wanted Index for United States
#HWIURATIO Ratio of Help Wanted/No. Unemployed
#S&P: indust S&P’s Common Stock Price Index: Industrial
#S&P div yield S&P’s Composite Common Stock: Dividend Yield
#S&P PE ratio S&P’s Composite Common Stock: Price-Earnings Ratio
#CONSPI Nonrevolving consumer credit to Personal Income
'''
mising_ticker=[]
data={}
for i in range(len(ticker)):
try:
dum1=fred.get_series(ticker.loc[i],observation_start='1959-01-01',observation_END='2020-01-01',frequency='m')
data[ticker[i]]=dum1.copy()
except:
print(ticker[i])
mising_ticker.append(ticker[i])
raw_data=pd.DataFrame(data)
raw_data.to_excel('/home/emre/Masaüstü/Replication of Fred-MD Python/raw_data.xlsx')
###############################################################################################################################
'''
#% -------------------------------------------------------------------------
#% INPUT:
#% rawdata = raw data
#% tcode = transformation codes for each series
#%
#% OUTPUT:
#% yt = transformed data
#%
#% -------------------------------------------------------------------------
#% SUBFUNCTION:
#% transxf: transforms a single series as specified by a
#% given transfromation code
#%
#% =========================================================================
#%
###############################################################################################################################33
#APPLY TRANSFORMATION:
# Initialize output variable
yt=[] #Initialize output variable
N=raw_data.shape[1] #Number of series kept
for i in range(N):
dum=transxf(raw_data.iloc[:,i].values,tcodes[i])
yt.append(dum)
trans_data=pd.DataFrame(yt).T
trans_data.columns=raw_data.columns
trans_data.index = raw_data.index
trans_data.to_excel('/home/emre/Masaüstü/Replication of Fred-MD Python/trans_data.xlsx')
###############################################################################################################################33
def transxf(x,tcode) :
#=========================================================================
#DESCRIPTION:
#This function transforms a single series (in a column vector)as specified
#by a given transfromation code.
#
#-------------------------------------------------------------------------
#INPUT:
# x = series (in a column vector) to be transformed
# tcode = transformation code (1-7)
#
# OUTPUT:
# y = transformed series (as a column vector)
#
# =========================================================================
# SETUP:
# Number of observations (including missing values)
#Number of observations (including missing values)
n=x.size
#Value close to zero
small=1e-6
#Allocate output variable
y=np.nan*np.ones(n);
y1=np.nan*np.ones(n);
# global result
if tcode==1: # no transformation): x(t)
y=x
result=y
elif tcode==2: # First difference: x(t)-x(t-1)
y[2:n]=x[2:n]-x[1:n-1];
result= y
elif tcode==3: #Second difference: (x(t)-x(t-1))-(x(t-1)-x(t-2))
y[3:n]=x[3:n]-2*x[2:n-1]+x[1:n-2]
result=y
elif tcode==4: #Natural log: ln(x)
if min(x) < small:
y=np.nan
else :
y=np.log(x)
result=y
elif tcode==5: #First difference of natural log: ln(x)-ln(x-1)
if min(x[ | pd.notnull(x) | pandas.notnull |
##############################################################################
#Copyright 2019 Google LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
################################################################################
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
import warnings
warnings.filterwarnings("ignore")
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from numpy import inf
from sklearn.linear_model import LassoCV, RidgeCV
from sklearn.linear_model import Lasso, Ridge
from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pylab as plt
get_ipython().magic(u'matplotlib inline')
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 10, 6
from sklearn.metrics import classification_report, confusion_matrix
from functools import reduce
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import RepeatedKFold, RepeatedStratifiedKFold
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from autoviml.QuickML_Stacking import QuickML_Stacking
from autoviml.Transform_KM_Features import Transform_KM_Features
from autoviml.QuickML_Ensembling import QuickML_Ensembling
from autoviml.Auto_NLP import Auto_NLP, select_top_features_from_SVD
import xgboost as xgb
import sys
##################################################################################
def find_rare_class(classes, verbose=0):
######### Print the % count of each class in a Target variable #####
"""
Works on Multi Class too. Prints class percentages count of target variable.
It returns the name of the Rare class (the one with the minimum class member count).
This can also be helpful in using it as pos_label in Binary and Multi Class problems.
"""
counts = OrderedDict(Counter(classes))
total = sum(counts.values())
if verbose >= 1:
print(' Class -> Counts -> Percent')
for cls in counts.keys():
print("%6s: % 7d -> % 5.1f%%" % (cls, counts[cls], counts[cls]/total*100))
if type(pd.Series(counts).idxmin())==str:
return pd.Series(counts).idxmin()
else:
return int(pd.Series(counts).idxmin())
###############################################################################
def return_factorized_dict(ls):
"""
###### Factorize any list of values in a data frame using this neat function
if your data has any NaN's it automatically marks it as -1 and returns that for NaN's
Returns a dictionary mapping previous values with new values.
"""
factos = pd.unique(pd.factorize(ls)[0])
categs = pd.unique(pd.factorize(ls)[1])
if -1 in factos:
categs = np.insert(categs,np.where(factos==-1)[0][0],np.nan)
return dict(zip(categs,factos))
#############################################################################################
from sklearn.metrics import confusion_matrix
def balanced_accuracy_score(y_true, y_pred, sample_weight=None,
adjusted=False):
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
with np.errstate(divide='ignore', invalid='ignore'):
per_class = np.diag(C) / C.sum(axis=1)
if np.any(np.isnan(per_class)):
warnings.warn('y_pred contains classes not in y_true')
per_class = per_class[~np.isnan(per_class)]
score = np.mean(per_class)
if adjusted:
n_classes = len(per_class)
chance = 1 / n_classes
score -= chance
score /= 1 - chance
return score
#############################################################################################
import os
def check_if_GPU_exists():
GPU_exists = False
try:
from tensorflow.python.client import device_lib
dev_list = device_lib.list_local_devices()
print('Number of GPUs = %d' %len(dev_list))
for i in range(len(dev_list)):
if 'GPU' == dev_list[i].device_type:
GPU_exists = True
print('%s available' %dev_list[i].device_type)
except:
print('')
if not GPU_exists:
try:
os.environ['NVIDIA_VISIBLE_DEVICES']
print('GPU available on this device')
return True
except:
print('No GPU available on this device')
return False
else:
return True
#############################################################################################
def analyze_problem_type(train, targ,verbose=0):
"""
This module analyzes a Target Variable and finds out whether it is a
Regression or Classification type problem
"""
if train[targ].dtype != 'int64' and train[targ].dtype != float :
if train[targ].dtype == object:
if len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
else:
if len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
elif train[targ].dtype == 'int64' or train[targ].dtype == float :
if len(train[targ].unique()) == 1:
print('Error in data set: Only one class in Target variable. Check input and try again')
sys.exit()
elif len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
elif train[targ].dtype == object:
if len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
elif train[targ].dtype == bool:
model_class = 'Binary_Classification'
elif train[targ].dtype == 'int64':
if len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
else :
model_class = 'Regression'
return model_class
#######
def convert_train_test_cat_col_to_numeric(start_train, start_test, col,str_flag=True):
"""
#### This is the easiest way to label encode object variables in both train and test
#### This takes care of some categories that are present in train and not in test
### and vice versa
"""
start_train = copy.deepcopy(start_train)
start_test = copy.deepcopy(start_test)
missing_flag = False
new_missing_col = ''
if start_train[col].isnull().sum() > 0:
missing_flag = True
if str_flag:
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[col].isnull(),new_missing_col]=1
start_train[col] = start_train[col].fillna("NA", inplace=False).astype(str)
else:
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[col].isnull(),new_missing_col]=1
start_train[col] = start_train[col].fillna("NA", inplace=False).astype('category')
if len(start_train[col].apply(type).value_counts()) > 1:
print(' Alert! Mixed Data Types in Train data set %s column with %d data types. Fixing it...' %(
col, len(start_train[col].apply(type).value_counts())))
train_categs = start_train[col].value_counts().index.tolist()
else:
train_categs = np.unique(start_train[col]).tolist()
if not isinstance(start_test,str) :
if start_test[col].isnull().sum() > 0:
#### IN some rare cases, Test data has missing values while Train data doesn.t
#### This section is take care of those rare cases. We need to create a missing col
#### We need to create that missing flag column in both train and test in that case
if not missing_flag:
missing_flag = True
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
##### THis is to take care of Missing_Flag in start_test data set!!
start_test[new_missing_col] = 0
start_test.loc[start_test[col].isnull(),new_missing_col]=1
if str_flag:
start_test[col] = start_test[col].fillna("NA", inplace=False).astype(str)
else:
start_test[col] = start_test[col].fillna("NA", inplace=False).astype('category')
else:
#### In some rare cases, there is missing values in train but not in test data!
#### In those cases, we need to create a new_missing_col in test data in addition to train
start_test[new_missing_col] = 0
if len(start_test[col].apply(type).value_counts()) > 1:
print(' Alert! Mixed Data Types in Test data set %s column with %d data types. Fixing it...' %(
col, len(start_test[col].apply(type).value_counts())))
test_categs = start_test[col].value_counts().index.tolist()
test_categs = [x if isinstance(x,str) else str(x) for x in test_categs]
start_test[col] = start_test[col].astype(str).values
else:
test_categs = np.unique(start_test[col]).tolist()
if not isinstance(start_test,str) :
categs_all = np.unique( train_categs + test_categs).tolist()
dict_all = return_factorized_dict(categs_all)
else:
dict_all = return_factorized_dict(train_categs)
start_train[col] = start_train[col].map(dict_all)
if not isinstance(start_test,str) :
start_test[col] = start_test[col].map(dict_all)
return start_train, start_test, missing_flag, new_missing_col
#############################################################################################################
def flatten_list(list_of_lists):
final_ls = []
for each_item in list_of_lists:
if isinstance(each_item,list):
final_ls += each_item
else:
final_ls.append(each_item)
return final_ls
#############################################################################################################
import scipy as sp
def Auto_ViML(train, target, test='',sample_submission='',hyper_param='RS', feature_reduction=True,
scoring_parameter='logloss', Boosting_Flag=None, KMeans_Featurizer=False,
Add_Poly=0, Stacking_Flag=False, Binning_Flag=False,
Imbalanced_Flag=False, verbose=0):
"""
#########################################################################################################
############# This is not an Officially Supported Google Product! #########################
#########################################################################################################
#### Automatically Build Variant Interpretable Machine Learning Models (Auto_ViML) ######
#### Developed by <NAME> ######
###### Version 0.1.652 #######
##### GPU UPGRADE!! Now with Auto_NLP. Best Version to Download or Upgrade. May 15,2020 ######
###### Auto_VIMAL with Auto_NLP combines structured data with NLP for Predictions. #######
#########################################################################################################
#Copyright 2019 Google LLC #######
# #######
#Licensed under the Apache License, Version 2.0 (the "License"); #######
#you may not use this file except in compliance with the License. #######
#You may obtain a copy of the License at #######
# #######
# https://www.apache.org/licenses/LICENSE-2.0 #######
# #######
#Unless required by applicable law or agreed to in writing, software #######
#distributed under the License is distributed on an "AS IS" BASIS, #######
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #######
#See the License for the specific language governing permissions and #######
#limitations under the License. #######
#########################################################################################################
#### Auto_ViML was designed for building a High Performance Interpretable Model With Fewest Vars. ###
#### The "V" in Auto_ViML stands for Variant because it tries Multiple Models and Multiple Features ###
#### to find the Best Performing Model for any data set.The "i" in Auto_ViML stands " Interpretable"###
#### since it selects the fewest Features to build a simpler, more interpretable model. This is key. ##
#### Auto_ViML is built mostly using Scikit-Learn, Numpy, Pandas and Matplotlib. Hence it should run ##
#### on any Python 2 or Python 3 Anaconda installations. You won't have to import any special ####
#### Libraries other than "SHAP" library for SHAP values which provides more interpretability. #####
#### But if you don't have it, Auto_ViML will skip it and show you the regular feature importances. ###
#########################################################################################################
#### INPUTS: ###
#########################################################################################################
#### train: could be a datapath+filename or a dataframe. It will detect which is which and load it.####
#### test: could be a datapath+filename or a dataframe. If you don't have any, just leave it as "". ###
#### submission: must be a datapath+filename. If you don't have any, just leave it as empty string.####
#### target: name of the target variable in the data set. ####
#### sep: if you have a spearator in the file such as "," or "\t" mention it here. Default is ",". ####
#### scoring_parameter: if you want your own scoring parameter such as "f1" give it here. If not, #####
#### it will assume the appropriate scoring param for the problem and it will build the model.#####
#### hyper_param: Tuning options are GridSearch ('GS'), RandomizedSearch ('RS')and now HyperOpt ('HO')#
#### Default setting is 'GS'. Auto_ViML with HyperOpt is approximately 3X Faster than Auto_ViML###
#### feature_reduction: Default = 'True' but it can be set to False if you don't want automatic ####
#### feature_reduction since in Image data sets like digits and MNIST, you get better #####
#### results when you don't reduce features automatically. You can always try both and see. #####
#### KMeans_Featurizer = True: Adds a cluster label to features based on KMeans. Use for Linear. #####
#### False (default) = For Random Forests or XGB models, leave it False since it may overfit.####
#### Boosting Flag: you have 3 possible choices (default is False): #####
#### None = This will build a Linear Model #####
#### False = This will build a Random Forest or Extra Trees model (also known as Bagging) #####
#### True = This will build an XGBoost model #####
#### Add_Poly: Default is 0. It has 2 additional settings: #####
#### 1 = Add interaction variables only such as x1*x2, x2*x3,...x9*10 etc. #####
#### 2 = Add Interactions and Squared variables such as x1**2, x2**2, etc. #####
#### Stacking_Flag: Default is False. If set to True, it will add an additional feature which #####
#### is derived from predictions of another model. This is used in some cases but may result#####
#### in overfitting. So be careful turning this flag "on". #####
#### Binning_Flag: Default is False. It set to True, it will convert the top numeric variables #####
#### into binned variables through a technique known as "Entropy" binning. This is very #####
#### helpful for certain datasets (especially hard to build models). #####
#### Imbalanced_Flag: Default is False. If set to True, it will downsample the "Majority Class" #####
#### in an imbalanced dataset and make the "Rare" class at least 5% of the data set. This #####
#### the ideal threshold in my mind to make a model learn. Do it for Highly Imbalanced data.#####
#### verbose: This has 3 possible states: #####
#### 0 = limited output. Great for running this silently and getting fast results. #####
#### 1 = more charts. Great for knowing how results were and making changes to flags in input. #####
#### 2 = lots of charts and output. Great for reproducing what Auto_ViML does on your own. #####
#########################################################################################################
#### OUTPUTS: #####
#########################################################################################################
#### model: It will return your trained model #####
#### features: the fewest number of features in your model to make it perform well #####
#### train_modified: this is the modified train dataframe after removing and adding features #####
#### test_modified: this is the modified test dataframe with the same transformations as train #####
################# A D D I T I O N A L N O T E S ###########
#### Finally, it writes your submission file to disk in the current directory called "mysubmission.csv"
#### This submission file is ready for you to show it clients or submit it to competitions. #####
#### If no submission file was given but as long as you give it a test file name, it will create #####
#### a submission file for you named "mySubmission.csv". #####
#### Auto_ViML works on any Multi-Class, Multi-Label Data Set. So you can have many target labels #####
#### You don't have to tell Auto_ViML whether it is a Regression or Classification problem. #####
#### Suggestions for a Scoring Metric: #####
#### If you have Binary Class and Multi-Class in a Single Label, Choose Accuracy. It will ######
#### do very well. If you want something better, try roc_auc even for Multi-Class which works. ######
#### You can try F1 or Weighted F1 if you want something complex or for Multi-Class. ######
#### Note that For Imbalanced Classes (<=5% classes), it automatically adds Class Weights. ######
#### Also, Note that it handles Multi-Label automatically so you can send Train data ######
#### with multiple Labels (Targets) and it will automatically predict for each Label. ######
#### Finally this is Meant to Be a Fast Algorithm, so use it for just quick POCs ######
#### This is Not Meant for Production Problems. It produces great models but it is not Perfect! ######
######################### HELP OTHERS! PLEASE CONTRIBUTE! OPEN A PULL REQUEST! ##########################
#########################################################################################################
"""
##### These copies are to make sure that the originals are not destroyed ####
CPU_count = os.cpu_count()
test = copy.deepcopy(test)
orig_train = copy.deepcopy(train)
orig_test = copy.deepcopy(test)
train_index = train.index
if not isinstance(test, str):
test_index = test.index
start_test = copy.deepcopy(orig_test)
####### These are Global Settings. If you change them here, it will ripple across the whole code ###
corr_limit = 0.70 #### This decides what the cut-off for defining highly correlated vars to remove is.
scaling = 'MinMax' ### This decides whether to use MinMax scaling or Standard Scaling ("Std").
first_flag = 0 ## This is just a setting to detect which is
seed= 99 ### this maintains repeatability of the whole ML pipeline here ###
subsample=0.7 #### Leave this low so the models generalize better. Increase it if you want overfit models
col_sub_sample = 0.7 ### Leave this low for the same reason above
poly_degree = 2 ### this create 2-degree polynomial variables in Add_Poly. Increase if you want more degrees
booster = 'gbtree' ### this is the booster for XGBoost. The other option is "Linear".
n_splits = 5 ### This controls the number of splits for Cross Validation. Increasing will take longer time.
matplotlib_flag = True #(default) This is for drawing SHAP values. If this is False, initJS is used.
early_stopping = 20 #### Early stopping rounds for XGBoost ######
encoded = '_Label_Encoded' ### This is the tag we add to feature names in the end to indicate they are label encoded
catboost_limit = 0.4 #### The catboost_limit represents the percentage of num vars in data. ANy lower, CatBoost is used.
cat_code_limit = 100 #### If the number of dummy variables to create in a data set exceeds this, CatBoost is the default Algorithm used
one_hot_size = 500 #### This determines the max length of one_hot_max_size parameter of CatBoost algrithm
Alpha_min = -3 #### The lowest value of Alpha in LOGSPACE that is used in CatBoost
Alpha_max = 2 #### The highest value of Alpha in LOGSPACE that is used in Lasso or Ridge Regression
Cs = [0.001,0.005,0.01,0.05,0.1,0.25,0.5,1,2,4,6,10,20,30,40,50,100,150,200,400,800,1000,2000]
#Cs = np.logspace(-4,3,40) ### The list of values of C used in Logistic Regression
tolerance = 0.001 #### This tolerance is needed to speed up Logistic Regression. Otherwise, SAGA takes too long!!
#### 'lbfgs' is the fastest one but doesnt provide accurate results. Newton-CG is slower but accurate!
#### SAGA is extremely slow. Even slower than Newton-CG. Liblinear is the fastest and as accurate as Newton-CG!
solvers = ['liblinear'] ### Other solvers for Logistic Regression model: ['newton-cg','lbfgs','saga','liblinear']
solver = 'liblinear' ### This is the next fastest solver after liblinear. Useful for Multi-class problems!
penalties = ['l2','l1'] ### This is to determine the penalties for LogisticRegression
n_steps = 6 ### number of estimator steps between 100 and max_estims
max_depth = 10 ##### This limits the max_depth used in decision trees and other classifiers
max_features = 10 #### maximum number of features in a random forest model or extra trees model
warm_start = True ### This is to set the warm_start flag for the ExtraTrees models
bootstrap = True #### Set this flag to control whether to bootstrap variables or not.
n_repeats = 1 #### This is for repeated KFold and StratifiedKFold - this changes the folds every time
Bins = 30 ### This is for plotting probabilities in a histogram. For small data sets, 30 is enough.
top_nlp_features = 100 ### This sets a limit on the number of features added by each NLP transformer!
removed_features_threshold = 5 #### This triggers the Truncated_SVD if number of removed features from XGB exceeds this!
calibrator_flag = False ### In Multi-class data sets, a CalibratedClassifier works better than regular classifiers!
max_class_length = 1 ### It turns out the number of classes is directly correlated to Estimated Time. Hence this!
print('############## D A T A S E T A N A L Y S I S #######################')
########## I F CATBOOST IS REQUESTED, THEN CHECK IF IT IS INSTALLED #######################
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
from catboost import CatBoostClassifier, CatBoostRegressor
#### Similarly for Random Forests Model, it takes too long with Grid Search, so MAKE IT RandomizedSearch!
if not Boosting_Flag: ### there is also a chance Boosting_Flag is None - This is to eliminate that chance!
if orig_train.shape[0] >= 10000:
hyper_param = 'RS'
print('Changing hyperparameter search to RS. Otherwise, Random Forests will take too long for 10,000+ rows')
elif Boosting_Flag: ### there is also a chance Boosting_Flag is None - This is to eliminate that chance!
if not isinstance(Boosting_Flag, str):
if orig_train.shape[0] >= 10000:
hyper_param = 'RS'
print('Changing hyperparameter search to RS. Otherwise XGBoost will take too long for 10,000+ rows.')
########### T H I S I S W H E R E H Y P E R O P T P A R A M S A R E S E T #########
if hyper_param == 'HO':
########### HyperOpt related objective functions are defined here #################
from hyperopt import hp, tpe
from hyperopt.fmin import fmin
from hyperopt import Trials
from autoviml.custom_scores_HO import accu, rmse, gini_sklearn, gini_meae
from autoviml.custom_scores_HO import gini_msle, gini_mae, gini_mse, gini_rmse
from autoviml.custom_scores_HO import gini_accuracy, gini_bal_accuracy, gini_roc
from autoviml.custom_scores_HO import gini_precision, gini_average_precision, gini_weighted_precision
from autoviml.custom_scores_HO import gini_macro_precision, gini_micro_precision
from autoviml.custom_scores_HO import gini_samples_precision, gini_f1, gini_weighted_f1
from autoviml.custom_scores_HO import gini_macro_f1, gini_micro_f1, gini_samples_f1,f2_measure
from autoviml.custom_scores_HO import gini_log_loss, gini_recall, gini_weighted_recall
from autoviml.custom_scores_HO import gini_samples_recall, gini_macro_recall, gini_micro_recall
else:
from autoviml.custom_scores import accu, rmse, gini_sklearn, gini_meae
from autoviml.custom_scores import gini_msle, gini_mae, gini_mse, gini_rmse
from autoviml.custom_scores import gini_accuracy, gini_bal_accuracy, gini_roc
from autoviml.custom_scores import gini_precision, gini_average_precision, gini_weighted_precision
from autoviml.custom_scores import gini_macro_precision, gini_micro_precision
from autoviml.custom_scores import gini_samples_precision, gini_f1, gini_weighted_f1
from autoviml.custom_scores import gini_macro_f1, gini_micro_f1, gini_samples_f1,f2_measure
from autoviml.custom_scores import gini_log_loss, gini_recall, gini_weighted_recall
from autoviml.custom_scores import gini_samples_recall, gini_macro_recall, gini_micro_recall
###### If hyper_param = 'GS', it takes a LOOOONG TIME with "SAGA" solver for LogisticRegression.
#### Hence to speed it up you need to change the tolerance threshold to something bigger
if hyper_param == 'GS':
tolerance = 0.01 #### This tolerance is bigger to speed up Logistic Regression. Otherwise, SAGA takes too long!!
########## This is where some more default parameters are set up ######
data_dimension = orig_train.shape[0]*orig_train.shape[1] ### number of cells in the entire data set .
if data_dimension > 1000000:
### if data dimension exceeds 1 million, then reduce no of params
no_iter=30
early_stopping = 10
test_size = 0.20
max_iter = 10000
Bins = 100
top_nlp_features = 300
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 5000
else:
max_estims = 400
else:
max_estims = 400
else:
if orig_train.shape[0] <= 1000:
no_iter=20
test_size = 0.1
max_iter = 4000
top_nlp_features = 250
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 3000
else:
max_estims = 300
else:
max_estims = 300
early_stopping = 4
else:
no_iter=30
test_size = 0.15
max_iter = 7000
top_nlp_features = 200
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 4000
else:
max_estims = 350
else:
max_estims = 350
early_stopping = 6
#### The warnings from Sklearn are so annoying that I have to shut it off ####
import warnings
warnings.filterwarnings("ignore")
def warn(*args, **kwargs):
pass
warnings.warn = warn
### First_Flag is merely a flag for the first time you want to set values of variables
if scaling == 'MinMax':
SS = MinMaxScaler()
elif scaling == 'Std':
SS = StandardScaler()
else:
SS = MinMaxScaler()
### Make target into a list so that we can uniformly process the target label
if not isinstance(target, list):
target = [target]
model_label = 'Single_Label'
elif isinstance(target, list):
if len(target)==1:
model_label = 'Single_Label'
elif len(target) > 1:
model_label = 'Multi_Label'
else:
print('Target variable is neither a string nor a list. Please check input and try again!')
return
##### This is where we run the Traditional models to compare them to XGB #####
start_time = time.time()
####################################################################################
##### Set up your Target Labels and Classes Properly Here #### Label Encoding #####
#### This is for Classification Problems Only where you do Label Encoding of Target
mldict = lambda: defaultdict(mldict)
label_dict = mldict()
first_time = True
print('Training Set Shape = {}'.format(orig_train.shape))
print(' Training Set Memory Usage = {:.2f} MB'.format(orig_train.memory_usage().sum() / 1024**2))
if not isinstance(orig_test,str):
print('Test Set Shape = {}'.format(orig_test.shape))
print(' Test Set Memory Usage = {:.2f} MB'.format(orig_test.memory_usage().sum() / 1024**2))
print('%s Target: %s' %(model_label,target))
###### Now analyze what problem we have here ####
try:
modeltype = analyze_problem_type(train, target[0],verbose)
except:
print('Cannot find the Target variable in data set. Please check input and try again')
return
for each_target in target:
#### Make sure you don't move these 2 lines: they need to be reset for every target!
#### HyperOpt will not do Trials beyond max_evals - so only if you reset here, it will do it again.
if hyper_param == 'HO':
params_dict = {}
bayes_trials = Trials()
############ THIS IS WHERE OTHER DEFAULT PARAMS ARE SET ###############
c_params = dict()
r_params = dict()
if modeltype == 'Regression':
scv = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed)
eval_metric = 'rmse'
objective = 'reg:squarederror'
model_class = 'Regression'
start_train = copy.deepcopy(orig_train)
else:
if len(np.unique(train[each_target])) == 2:
model_class = 'Binary-Class'
elif len(np.unique(train[each_target])) > 2:
model_class = 'Multi-Class'
##### If multi-class happens, then you absolutely need to do SMOTE. Otherwise, you don't get good results!
#### Unfortunately SMOTE blows up when the data set is large -> so better to turn it off!
print('ALERT! Setting Imbalanced_Flag to True in Auto_ViML for Multi_Classification problems improves results!')
#Imbalanced_Flag = True
else:
print('Target label %s has less than 2 classes. Stopping' %each_target)
return
### This is for Classification Problems Only ########
print('Shuffling the data set before training')
start_train = orig_train.sample(frac=1.0, random_state=seed)
scv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed)
if modeltype != 'Regression':
rare_class_orig = find_rare_class(orig_train[each_target].values,verbose=1)
### Perfrom Label Transformation only for Classification Problems ####
classes = np.unique(orig_train[each_target])
if first_time:
if hyper_param == 'GS':
print('Using GridSearchCV for Hyper Parameter Tuning. This is slow. Switch to RS for faster tuning...')
elif hyper_param == 'RS':
print('Using RandomizedSearchCV for Hyper Parameter Tuning. This is 3X faster than GridSearchCV...')
else:
print('Using HyperOpt which is approximately 3X Faster than GridSearchCV but results vary...')
first_time = False
if len(classes) > 2:
##### If Boosting_Flag = True, change it to False here since Multi-Class XGB is VERY SLOW!
max_class_length = len(classes)
if Boosting_Flag:
print('CAUTION: In Multi-Class Boosting (2+ classes), TRAINING WILL TAKE A LOT OF TIME!')
objective = 'multi:softmax'
eval_metric = "mlogloss"
else:
max_class_length = 2
eval_metric="logloss"
objective = 'binary:logistic'
### Do Label Encoding when the Target Classes in each Label are Strings or Multi Class ###
if type(start_train[each_target].values[0])==str or str(start_train[each_target].dtype
)=='category' or sorted(np.unique(start_train[each_target].values))[0] != 0:
### if the class is a string or if it has more than 2 classes, then use Factorizer!
label_dict[each_target]['values'] = start_train[each_target].values
#### Factorizer is the easiest way to convert target in train and predictions in test
#### This takes care of some classes that are present in train and not in predictions
### and vice versa. Hence it is better than Label Encoders which breaks when above happens.
train_targ_categs = list(start_train[each_target].value_counts().index)
if len(train_targ_categs) == 2:
majority_class = [x for x in train_targ_categs if x != rare_class_orig]
dict_targ_all = {majority_class[0]: 0, rare_class_orig: 1}
else:
dict_targ_all = return_factorized_dict(train_targ_categs)
start_train[each_target] = start_train[each_target].map(dict_targ_all)
label_dict[each_target]['dictionary'] = copy.deepcopy(dict_targ_all)
label_dict[each_target]['transformer'] = dict([(v,k) for (k,v) in dict_targ_all.items()])
label_dict[each_target]['classes'] = copy.deepcopy(train_targ_categs)
class_nums = list(dict_targ_all.values())
label_dict[each_target]['class_nums'] = copy.deepcopy(class_nums)
print('String or Multi Class target: %s transformed as follows: %s' %(each_target,dict_targ_all))
rare_class = find_rare_class(start_train[each_target].values)
else:
### Since the each_target here is already numeric, you don't have to modify it
start_train[each_target] = start_train[each_target].astype(int).values
rare_class = find_rare_class(start_train[each_target].values)
label_dict[each_target]['values'] = start_train[each_target].values
label_dict[each_target]['classes'] = np.unique(start_train[each_target].values)
class_nums = np.unique(start_train[each_target].values)
label_dict[each_target]['class_nums'] = copy.deepcopy(class_nums)
label_dict[each_target]['transformer'] = []
label_dict[each_target]['dictionary'] = dict(zip(classes,classes))
print(' Target %s is already numeric. No transformation done.' %each_target)
if rare_class != 1:
print('Alert! Rare Class is not 1 but %s in this data set' %rare_class)
else:
#### In Regression problems, max_class_length is artificially set to one.
#### It turns out that Estimated Time is correlated to number of classes in data set. Hence we use this!
max_class_length = 1
###########################################################################################
#### This is where we start doing the iterative hyper tuning parameters #####
params_dict = defaultdict(list)
accu_mean = []
error_rate = []
###### This is where we do the training and hyper parameter tuning ########
orig_preds = [x for x in list(orig_train) if x not in target]
count = 0
################# CLASSIFY COLUMNS HERE ######################
var_df = classify_columns(orig_train[orig_preds], verbose)
##### Classify Columns ################
id_cols = var_df['id_vars']
nlp_columns = var_df['nlp_vars']
date_cols = var_df['date_vars']
del_cols = var_df['cols_delete']
factor_cols = var_df['factor_vars']
numvars = var_df['continuous_vars']+var_df['int_vars']
cat_vars = var_df['string_bool_vars']+var_df['discrete_string_vars']+var_df[
'cat_vars']+var_df['factor_vars']+var_df['num_bool_vars']
num_bool_vars = var_df['num_bool_vars']
#######################################################################################
preds = [x for x in orig_preds if x not in id_cols+del_cols+date_cols+target]
if len(id_cols+del_cols+date_cols)== 0:
print(' No variables removed since no ID or low-information variables found in data set')
else:
print(' %d variables removed since they were ID or low-information variables'
%len(id_cols+del_cols+date_cols))
################## This is where real code begins ###################################################
GPU_exists = check_if_GPU_exists()
###### This is where we set the CPU and GPU parameters for XGBoost
param = {}
if Boosting_Flag:
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
model_name = 'CatBoost'
hyper_param = None
else:
model_name = 'XGBoost'
else:
model_name = 'XGBoost'
elif Boosting_Flag is None:
model_name = 'Linear'
else:
model_name = 'Forests'
##### Set the Scoring Parameters here based on each model and preferences of user ##############
cpu_params = {}
if model_name == 'XGBoost':
##### WE should keep CPU params as backup in case GPU fails!
cpu_params['nthread'] = -1
cpu_params['tree_method'] = 'hist'
cpu_params['grow_policy'] = 'depthwise'
cpu_params['max_depth'] = max_depth
cpu_params['max_leaves'] = 0
cpu_params['verbosity'] = 0
cpu_params['gpu_id'] = 0
cpu_params['updater'] = 'grow_colmaker'
cpu_params['predictor'] = 'cpu_predictor'
cpu_params['num_parallel_tree'] = 1
if GPU_exists:
param['nthread'] = -1
param['tree_method'] = 'gpu_hist'
param['grow_policy'] = 'depthwise'
param['max_depth'] = max_depth
param['max_leaves'] = 0
param['verbosity'] = 0
param['gpu_id'] = 0
param['updater'] = 'grow_gpu_hist' #'prune'
param['predictor'] = 'gpu_predictor'
param['num_parallel_tree'] = 1
else:
param = copy.deepcopy(cpu_params)
validation_metric = copy.deepcopy(scoring_parameter)
elif model_name.lower() == 'catboost':
if model_class == 'Binary-Class':
catboost_scoring = 'Accuracy'
validation_metric = 'Accuracy'
loss_function='Logloss'
elif model_class == 'Multi-Class':
catboost_scoring = 'AUC'
validation_metric = 'AUC:type=Mu'
loss_function='MultiClass'
else:
loss_function = 'RMSE'
validation_metric = 'RMSE'
catboost_scoring = 'RMSE'
else:
validation_metric = copy.deepcopy(scoring_parameter)
########## D A T A P R E P R O C E S S I N G H E R E ##########################
print('############# D A T A P R E P A R A T I O N #############')
if start_train.isnull().sum().sum() > 0:
print('Filling missing values with "missing" placeholder and adding a column for missing_flags')
else:
print('No Missing Values in train data set')
copy_preds = copy.deepcopy(preds)
missing_flag_cols = []
if len(copy_preds) > 0:
dict_train = {}
for f in copy_preds:
if f in nlp_columns:
#### YOu have to skip this for NLP columns ##############
continue
missing_flag = False
if start_train[f].dtype == object:
#### This is the easiest way to label encode object variables in both train and test
#### This takes care of some categories that are present in train and not in test
### and vice versa
start_train, start_test,missing_flag,new_missing_col = convert_train_test_cat_col_to_numeric(start_train, start_test,f,True)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
elif start_train[f].dtype == np.int64 or start_train[f].dtype == np.int32 or start_train[f].dtype == np.int16:
### if there are integer variables, don't scale them. Leave them as is.
fill_num = start_train[f].min() - 1
if start_train[f].isnull().sum() > 0:
missing_flag = True
new_missing_col = f + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[f].isnull(),new_missing_col]=1
start_train[f] = start_train[f].fillna(fill_num).astype(int)
if type(orig_test) != str:
if missing_flag:
start_test[new_missing_col] = 0
if start_test[f].isnull().sum() > 0:
start_test.loc[start_test[f].isnull(),new_missing_col]=1
start_test[f] = start_test[f].fillna(fill_num).astype(int)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
elif f in factor_cols:
start_train, start_test,missing_flag,new_missing_col = convert_train_test_cat_col_to_numeric(start_train, start_test,f,False)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
else:
### for all numeric variables, fill missing values with 1 less than min.
fill_num = start_train[f].min() - 1
if start_train[f].isnull().sum() > 0:
missing_flag = True
new_missing_col = f + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[f].isnull(),new_missing_col]=1
start_train[f] = start_train[f].fillna(fill_num)
if type(orig_test) != str:
if missing_flag:
start_test[new_missing_col] = 0
if start_test[f].isnull().sum() > 0:
start_test.loc[start_test[f].isnull(),new_missing_col]=1
start_test[f] = start_test[f].fillna(fill_num)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
###########################################################################################
if orig_train.isnull().sum().sum() > 0:
### If there are missing values in remaining features print it here ####
top5 = orig_train.isnull().sum().sort_values(ascending=False).index.tolist()[:5]
print(' Columns with most missing values: %s' %(
[x for x in top5 if orig_train[x].isnull().sum()>0]))
print(' and their missing value totals: %s' %([orig_train[x].isnull().sum() for x in
top5 if orig_train[x].isnull().sum()>0]))
if start_train[copy_preds].isnull().sum().sum() == 0:
print('Completed missing value Imputation. No more missing values in train.')
if verbose >= 1:
print(' %d new missing value columns added: %s' %(len(missing_flag_cols),missing_flag_cols))
else:
print('Error: Unable to complete missing value imputation in train. Exiting...')
return
####################################################################################
if type(orig_test) != str:
if start_test[copy_preds].isnull().sum().sum() > 0:
print('Test data still has some missing values. Fix it. Exiting...')
return
else:
print('Test data has no missing values. Continuing...')
###########################################################################################
else:
print(' Could not find any variables in your data set. Please check your dataset and try again')
return
###########################################################################################
print('Completed Label Encoding and Filling of Missing Values for Train and Test Data')
### This is a minor test to make sure that Boolean vars are Integers if they are Numeric!
if len(num_bool_vars) > 0:
### Just make sure that numeric Boolean vars are set as Integer type -> otherwise CatBoost will blow up
for each_bool_num in var_df['num_bool_vars']:
start_train[each_bool_num] = start_train[each_bool_num].astype(int)
if type(start_test) != str:
start_test[each_bool_num] = start_test[each_bool_num].astype(int)
######################################################################################
######### Set your Refit Criterion here - if you want to maximize Precision or Recall do it here ##
if modeltype == 'Regression':
if scoring_parameter in ['log_loss', 'neg_mean_squared_error','mean_squared_error']:
refit_metric = 'rmse'
else:
refit_metric = 'mae'
else:
if scoring_parameter in ['precision', 'precision_score','average_precision']:
refit_metric = 'precision'
elif scoring_parameter in ['logloss', 'log_loss']:
refit_metric = 'log_loss'
elif scoring_parameter in ['recall', 'recall_score']:
refit_metric = 'recall'
elif scoring_parameter in ['f1', 'f1_score','f1_weighted']:
refit_metric = 'f1'
elif scoring_parameter in ['accuracy', 'balanced_accuracy','balanced-accuracy']:
refit_metric = 'balanced_accuracy'
else:
refit_metric = 'balanced_accuracy'
print('%s problem: hyperparameters are being optimized for %s' %(modeltype,refit_metric))
###########################################################################################
### Make sure you remove variables that are highly correlated within data set first
rem_vars = left_subtract(preds,numvars)
if len(numvars) > 0 and feature_reduction:
numvars = remove_variables_using_fast_correlation(start_train,numvars, 'pearson',
corr_limit,verbose)
### Reduced Preds are now free of correlated variables and hence can be used for Poly adds
red_preds = rem_vars + numvars
#### You need to save a copy of this red_preds so you can later on create a start_train
#### with it after each_target cycle is completed. Very important!
orig_red_preds = copy.deepcopy(red_preds)
for each_target in target:
print('\n############# PROCESSING T A R G E T = %s ##########################' %each_target)
######## D E F I N I N G N E W T R A I N and N E W T E S T here #########################
#### This is where we set the orig train data set with multiple labels to the new start_train
#### start_train has the new features added or reduced with the multi targets in one cycle
### That way, we start each train with one target, and then reset it with multi target
#############################################################################################
train = start_train[[each_target]+red_preds]
if type(orig_test) != str:
test = start_test[red_preds]
###### Add Polynomial Variables and Interaction Variables to Train ######
if Add_Poly >= 1:
if Add_Poly == 1:
print('\nAdding only Interaction Variables. This may result in Overfitting!')
elif Add_Poly == 2:
print('\nAdding only Squared Variables. This may result in Overfitting!')
elif Add_Poly == 3:
print('\nAdding Both Interaction and Squared Variables. This may result in Overfitting!')
## Since the data is already scaled, we set scaling to None here ##
### For train data we have to set the fit_flag to True ####
if len(numvars) > 1:
#### train_red contains reduced numeric variables with original and substituted poly/intxn variables
train_sel, lm, train_red,md,fin_xvars,feature_xvar_dict = add_poly_vars_select(train,numvars,
each_target,modeltype,poly_degree,Add_Poly,md='',
corr_limit=corr_limit, scaling='None',
fit_flag=True,verbose=verbose)
#### train_red contains reduced numeric variables with original and substituted poly/intxn variables
if len(left_subtract(train_sel,numvars)) > 0:
#### This means that new intxn and poly vars were added. In that case, you can use them as is
#### Since these vars were alread tested for correlation, there should be no high correlation!
### SO you can take train_sel as the new list of numeric vars (numvars) going forward!
addl_vars = left_subtract(train_sel,numvars)
#numvars = list(set(numvars).intersection(set(train_sel)))
##### Print the additional Interxn and Poly variables here #######
if verbose >= 1:
print(' Intxn and Poly Vars are: %s' %addl_vars)
train = train_red[train_sel].join(train[rem_vars+[each_target]])
red_preds = [x for x in list(train) if x not in [each_target]]
if type(test) != str:
######### Add Polynomial and Interaction variables to Test ################
## Since the data is already scaled, we set scaling to None here ##
### For Test data we have to set the fit_flag to False ####
_, _, test_x_df,_,_,_ = add_poly_vars_select(test,numvars,each_target,
modeltype,poly_degree,Add_Poly,md,
corr_limit, scaling='None', fit_flag=False,
verbose=verbose)
### we need to convert x_vars into text_vars in test_x_df using feature_xvar_dict
test_x_vars = test_x_df.columns.tolist()
test_text_vars = [feature_xvar_dict[x] for x in test_x_vars]
test_x_df.columns = test_text_vars
#### test_red contains reduced variables with orig and substituted poly/intxn variables
test_red = test_x_df[train_sel]
#### we should now combined test_red with rem_vars so that it is the same shape as train
test = test_red.join(test[rem_vars])
#### Now we should change train_sel to subst_vars since that is the new list of vars going forward
numvars = copy.deepcopy(train_sel)
else:
#### NO new variables were added. so we can skip the rest of the stuff now ###
#### This means the train_sel is the new set of numeric features selected by add_poly algorithm
red_preds = train_sel+rem_vars
print(' No new variable was added by polynomial features...')
else:
print('\nAdding Polynomial vars ignored since no numeric vars in data')
train_sel = copy.deepcopy(numvars)
else:
### if there are no Polynomial vars, then all numeric variables are selected
train_sel = copy.deepcopy(numvars)
################ A U T O N L P P R O C E S S I N G B E G I N S H E R E !!! ####
if len(nlp_columns) > 0:
for nlp_column in nlp_columns:
nlp_column_train = train[nlp_column].values
if not isinstance(orig_test, str):
nlp_column_test = test[nlp_column].values
train1, test1, best_nlp_transformer,max_features_limit = Auto_NLP(nlp_column,
train, test, each_target, refit_metric,
modeltype, top_nlp_features, verbose,
build_model=False)
########################################################################
if KMeans_Featurizer:
start_time1 = time.time()
##### Do a clustering of word vectors from each NLP_column. This gives great results!
tfidf_term_array = create_tfidf_terms(nlp_column_train, best_nlp_transformer,
is_train=True, max_features_limit=max_features_limit)
print ('Creating word clusters using term matrix of size: %d for Train data set...' %len(tfidf_term_array['terms']))
num_clusters = int(np.sqrt(len(tfidf_term_array['terms']))/2)
if num_clusters < 2:
num_clusters = 2
##### Always set verbose to 0 since we KMEANS running is too verbose!
km = KMeans(n_clusters=num_clusters, random_state=seed, verbose=0)
kme, cluster_labels = return_cluster_labels(km, tfidf_term_array, num_clusters,
is_train=True)
if isinstance(nlp_column, str):
cluster_col = nlp_column + '_word_cluster_label'
else:
cluster_col = str(nlp_column) + '_word_cluster_label'
train1[cluster_col] = cluster_labels
print ('Created one new column: %s using selected NLP technique...' %cluster_col)
if not isinstance(orig_test, str):
tfidf_term_array_test = create_tfidf_terms(nlp_column_test, best_nlp_transformer,
is_train=False, max_features_limit=max_features_limit)
_, cluster_labels_test = return_cluster_labels(kme, tfidf_term_array_test, num_clusters,
is_train=False)
test1[cluster_col] = cluster_labels_test
print ('Created word clusters using same sized term matrix for Test data set...')
print(' Time Taken for creating word cluster labels = %0.0f seconds' %(time.time()-start_time1) )
####### Make sure you include the above new columns created in the predictor variables!
red_preds = [x for x in list(train1) if x not in [each_target]]
train = train1[red_preds+[each_target]]
if not isinstance(orig_test, str):
test = test1[red_preds]
################ A U T O N L P P R O C E S S I N G E N D S H E R E !!! ####
###### We have to detect float variables again since we have created new variables using Auto_NLP!!
train_sel = np.array(red_preds)[(train[red_preds].dtypes==float).values].tolist()
######### A D D D A T E T I M E F E A T U R E S ####################
if len(date_cols) > 0:
#### Do this only if date time columns exist in your data set!
for date_col in date_cols:
print('Processing %s column for date time features....' %date_col)
date_df_train = create_time_series_features(orig_train, date_col)
if not isinstance(date_df_train, str):
date_col_adds = date_df_train.columns.tolist()
print(' Adding %d columns from date time column %s' %(len(date_col_adds),date_col))
train = train.join(date_df_train)
else:
date_col_adds = []
if not isinstance(orig_test, str):
date_df_test = create_time_series_features(orig_test, date_col)
if not isinstance(date_df_test, str):
test = test.join(date_df_test)
red_preds = [x for x in list(train) if x not in [each_target]]
train_sel = train_sel + date_col_adds
######### SELECT IMPORTANT FEATURES HERE #############################
if feature_reduction:
important_features,num_vars, imp_cats = find_top_features_xgb(train,red_preds,train_sel,
each_target,
modeltype,corr_limit,verbose)
else:
important_features = copy.deepcopy(red_preds)
num_vars = copy.deepcopy(numvars)
#### we need to set the rem_vars in case there is no feature reduction #######
imp_cats = left_subtract(important_features,num_vars)
#####################################################################################
if len(important_features) == 0:
print('No important features found. Using all input features...')
important_features = copy.deepcopy(red_preds)
num_vars = copy.deepcopy(numvars)
#### we need to set the rem_vars in case there is no feature reduction #######
imp_cats = left_subtract(important_features,num_vars)
### Training an XGBoost model to find important features
train = train[important_features+[each_target]]
######################################################################
if type(orig_test) != str:
test = test[important_features]
############## F E A T U R E E N G I N E E R I N G S T A R T S N O W ##############
###### From here on we do some Feature Engg using Target Variable with Data Leakage ############
### To avoid Model Leakage, we will now split the Data into Train and CV so that Held Out Data
## is Pure and is unadulterated by learning from its own Target. This is known as Data Leakage.
###################################################################################################
print('Starting Feature Engineering now...')
X = train[important_features]
y = train[each_target]
################ I M P O R T A N T ##################################################
### The reason we don't use train_test_split is because we want only a partial train entropy binned
### If we use the whole of Train for entropy binning then there will be data leakage and our
### cross validation test scores will not be so accurate. So don't change the next 5 lines here!
################ I M P O R T A N T ##################################################
if modeltype == 'Regression':
skf = KFold(n_splits=n_splits, random_state=seed)
else:
skf = StratifiedKFold(n_splits=n_splits, random_state=seed, shuffle=True)
cv_train_index, cv_index = next(skf.split(X, y))
################ TRAIN CV TEST SPLIT HERE ##################################################
try:
#### Sometimes this works but other times, it gives an error!
X_train, X_cv = X.loc[cv_train_index], X.loc[cv_index]
y_train, y_cv = y.loc[cv_train_index], y.loc[cv_index]
### The reason we don't use train_test_split is because we want only a partial train entropy binned
part_train = train.loc[cv_train_index]
part_cv = train.loc[cv_index]
except:
#### This works when the above method gives an error!
X_train, X_cv = X.iloc[cv_train_index], X.iloc[cv_index]
y_train, y_cv = y.iloc[cv_train_index], y.iloc[cv_index]
### The reason we don't use train_test_split is because we want only a partial train entropy binned
part_train = train.iloc[cv_train_index]
part_cv = train.iloc[cv_index]
print('Train CV Split completed with', "TRAIN rows:", cv_train_index.shape[0], "CV rows:", cv_index.shape[0])
################ IMPORTANT ENTROPY BINNING FIRST TIME #####################################
############ Add Entropy Binning of Continuous Variables Here ##############################
num_vars = np.array(important_features)[(train[important_features].dtypes==float)].tolist()
saved_important_features = copy.deepcopy(important_features) ### these are original features without '_bin' added
#### saved_num_vars is an important variable: it contains the orig_num_vars before they were binned
saved_num_vars = copy.deepcopy(num_vars) ### these are original numeric features without '_bin' added
############### BINNING FIRST TIME ##################################################
if Binning_Flag and len(saved_num_vars) > 0:
#### Do binning only when there are numeric features ####
#### When we Bin the first time, we set the entropy_binning flag to False so
#### no numeric variables are removed. But next time, we will remove them later!
part_train, num_vars, important_features, part_cv = add_entropy_binning(part_train,
each_target, saved_num_vars,
saved_important_features, part_cv,
modeltype, entropy_binning=False,verbose=verbose)
#### In saved_num_vars we send in all the continuous_vars but we bin only the top few vars.
### Those that are binned are removed from saved_num_vars and the remaining become num_vars
### Our job is to find the names of those original numeric variables which were binned.
### orig_num_vars contains original num vars. num_vars contains binned versions of those vars.
### Those binned variables have now become categorical vars and must be added to imp_cats.
### you get the name of the original vars which were binned here in this orig_num_vars variable!
orig_num_vars = left_subtract(saved_num_vars,num_vars)
#### you need to know the name of the binner variables. This is where you get it!
binned_num_vars = left_subtract(num_vars,saved_num_vars)
imp_cats += binned_num_vars
#### Also note that important_features does not contain orig_num_vars which have been erased.
else:
print(' Binning_Flag set to False or there are no numeric vars in data set to be binned')
####################### KMEANS FIRST TIME ############################
### Now we add another Feature tied to KMeans clustering using Predictor and Target variables ###
if KMeans_Featurizer and len(saved_num_vars) > 0:
### DO KMeans Featurizer only if there are numeric features in the data set!
print(' Adding one Feature named "KMeans_Clusters" based on KMeans_Featurizer_Flag=True...')
km_label = 'KMeans_Clusters'
if modeltype != 'Regression':
#### Make the number of clusters as the same as log10 of number of rows in Train
num_clusters = int(np.round(max(2,np.log10(train.shape[0]))))
#### Make the number of clusters as the same as log10 of number of rows in Train
train_clusters, cv_clusters = Transform_KM_Features(part_train[
important_features], part_train[each_target],
part_cv[important_features], num_clusters)
else:
### If it is Regression, you don't have to specify the number of clusters
train_clusters, cv_clusters = Transform_KM_Features(part_train[
important_features], part_train[each_target],
part_cv[important_features])
#### Since this is returning the each_target in X_train, we need to drop it here ###
print(' Used KMeans to naturally cluster Train predictor variables into %d clusters' %num_clusters)
part_train[km_label] = train_clusters
part_cv[km_label] = cv_clusters
#X_train.drop(each_target,axis=1,inplace=True)
imp_cats.append(km_label)
for imp_cat in imp_cats:
part_train[imp_cat] = part_train[imp_cat].astype(int)
part_cv[imp_cat] = part_cv[imp_cat].astype(int)
####### The features are checked again once we add the cluster feature ####
important_features.append(km_label)
else:
print(' KMeans_Featurizer set to False or there are no numeric vars in data')
km_label = ''
####################### STACKING FIRST TIME ############################
######### This is where you do Stacking of Multi Model Results into One Column ###
if Stacking_Flag:
#### In order to join, you need X_train to be a Pandas Series here ##
print('Alert! Stacking can produce Highly Overfit models on Training Data...')
### In order to avoid overfitting, we are going to learn from a small sample of data
### That is why we are using X_train to train on and using it to predict on X_cv!
addcol, stacks1 = QuickML_Stacking(part_train[important_features],part_train[
each_target],part_train[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
addcol, stacks2 = QuickML_Stacking(part_train[important_features],part_train[
each_target],part_cv[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
part_train = part_train.join(pd.DataFrame(stacks1,index=cv_train_index,
columns=addcol))
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
part_cv = part_cv.join(pd.DataFrame(stacks2,index=cv_index,
columns=addcol))
print(' Adding %d Stacking feature(s) to training data' %len(addcol))
###### We make sure that we remove any new features that are highly correlated ! #####
#addcol = remove_variables_using_fast_correlation(X_train,addcol,corr_limit,verbose)
important_features += addcol
###############################################################################
#### part train contains the unscaled original train. It also contains binned and orig_num_vars!
#### DO NOT DO TOUCH part_train and part_cv -> we need it to recrate train later!
####################### Now do Feature Scaling Here #################################
part_train_scaled, part_cv_scaled = perform_scaling_numeric_vars(part_train, important_features,
part_cv, model_name, SS)
#### part_train_scaled has both predictor and target variables. Target must be removed!
important_features = find_remove_duplicates(important_features)
X_train = part_train_scaled[important_features]
X_cv = part_cv_scaled[important_features]
#### Remember that the next 2 lines are crucial: if X and y are dataframes, then predict_proba
### will return dataframes or series. Otherwise it will return Numpy array's.
## Be consistent when using dataframes with XGB. That's the best way to keep feature names!
print('############### M O D E L B U I L D I N G B E G I N S ####################')
print('Rows in Train data set = %d' %X_train.shape[0])
print(' Features in Train data set = %d' %X_train.shape[1])
print(' Rows in held-out data set = %d' %X_cv.shape[0])
data_dim = X_train.shape[0]*X_train.shape[1]
### Setting up the Estimators for Single Label and Multi Label targets only
if modeltype == 'Regression':
metrics_list = ['neg_mean_absolute_error' ,'neg_mean_squared_error',
'neg_mean_squared_log_error','neg_median_absolute_error']
eval_metric = "rmse"
if scoring_parameter == 'neg_mean_absolute_error' or scoring_parameter =='mae':
meae_scorer = make_scorer(gini_meae, greater_is_better=False)
scorer = meae_scorer
elif scoring_parameter == 'neg_mean_squared_error' or scoring_parameter =='mse':
mse_scorer = make_scorer(gini_mse, greater_is_better=False)
scorer = mse_scorer
elif scoring_parameter == 'neg_mean_squared_log_error' or scoring_parameter == 'log_error':
msle_scorer = make_scorer(gini_msle, greater_is_better=False)
print(' Log Error is not recommended since predicted values might be negative and error')
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
elif scoring_parameter == 'neg_median_absolute_error' or scoring_parameter == 'median_error':
mae_scorer = make_scorer(gini_mae, greater_is_better=False)
scorer = mae_scorer
elif scoring_parameter =='rmse' or scoring_parameter == 'root_mean_squared_error':
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
else:
scoring_parameter = 'rmse'
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
#### HYPER PARAMETERS FOR TUNING ARE SETUP HERE ###
if hyper_param == 'GS':
r_params = {
"Forests": {
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#"criterion" : ['mse','mae'],
},
"Linear": {
'alpha': np.logspace(-5,3),
},
"XGBoost": {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
},
"CatBoost": {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
},
}
else:
import scipy as sp
r_params = {
"Forests": {
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion" : ['mse','mae'],
},
"Linear": {
'alpha': sp.stats.uniform(scale=1000),
},
"XGBoost": {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(2, 10),
},
"CatBoost": {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
},
}
if Boosting_Flag:
if model_name.lower() == 'catboost':
xgbm = CatBoostRegressor(verbose=1,iterations=max_estims,random_state=99,
one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBRegressor(seed=seed,n_jobs=-1,random_state=seed,subsample=subsample,
colsample_bytree=col_sub_sample,n_estimators=max_estims,
objective=objective)
xgbm.set_params(**param)
elif Boosting_Flag is None:
#xgbm = Lasso(max_iter=max_iter,random_state=seed)
xgbm = Lasso(max_iter=max_iter,random_state=seed)
else:
xgbm = RandomForestRegressor(
**{
'bootstrap': bootstrap, 'n_jobs': -1, 'warm_start': warm_start,
'random_state':seed,'min_samples_leaf':2,
'max_features': "sqrt"
})
else:
#### This is for Binary Classification ##############################
classes = label_dict[each_target]['classes']
metrics_list = ['accuracy_score','roc_auc_score','logloss', 'precision','recall','f1']
# Create regularization hyperparameter distribution with 50 C values ####
if hyper_param == 'GS':
c_params['XGBoost'] = {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
}
c_params["CatBoost"] = {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
}
if Imbalanced_Flag:
c_params['Linear'] = {
'C': Cs,
'solver' : solvers,
'penalty' : penalties,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': Cs,
'class_weight':[None, 'balanced'],
'penalty' : penalties,
'solver' : solvers,
}
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#'max_features': [1,2,5, max_features],
#"criterion":['gini','entropy'],
}
else:
import scipy as sp
c_params['XGBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
}
c_params["CatBoost"] = {
'learning_rate': sp.stats.uniform(scale=1),
}
if Imbalanced_Flag:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'solver' : solvers,
'penalty' : penalties,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'penalty' : penalties,
'solver' : solvers,
}
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion":['gini','entropy'],
#'max_features': ['log', "sqrt"] ,
#'class_weight':[None,'balanced']
}
# Create regularization hyperparameter distribution using uniform distribution
if len(classes) == 2:
objective = 'binary:logistic'
if scoring_parameter == 'accuracy' or scoring_parameter == 'accuracy_score':
accuracy_scorer = make_scorer(gini_accuracy, greater_is_better=True, needs_proba=False)
scorer =accuracy_scorer
elif scoring_parameter == 'gini':
gini_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer =gini_scorer
elif scoring_parameter == 'auc' or scoring_parameter == 'roc_auc' or scoring_parameter == 'roc_auc_score':
roc_scorer = make_scorer(gini_roc, greater_is_better=True, needs_threshold=True)
scorer =roc_scorer
elif scoring_parameter == 'log_loss' or scoring_parameter == 'logloss':
scoring_parameter = 'neg_log_loss'
logloss_scorer = make_scorer(gini_log_loss, greater_is_better=False, needs_proba=False)
scorer =logloss_scorer
elif scoring_parameter=='balanced_accuracy' or scoring_parameter=='balanced-accuracy' or scoring_parameter=='average_accuracy':
bal_accuracy_scorer = make_scorer(gini_bal_accuracy, greater_is_better=True,
needs_proba=False)
scorer = bal_accuracy_scorer
elif scoring_parameter == 'precision' or scoring_parameter == 'precision_score':
precision_scorer = make_scorer(gini_precision, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =precision_scorer
elif scoring_parameter == 'recall' or scoring_parameter == 'recall_score':
recall_scorer = make_scorer(gini_recall, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =recall_scorer
elif scoring_parameter == 'f1' or scoring_parameter == 'f1_score':
f1_scorer = make_scorer(gini_f1, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =f1_scorer
elif scoring_parameter == 'f2' or scoring_parameter == 'f2_score':
f2_scorer = make_scorer(f2_measure, greater_is_better=True, needs_proba=False)
scorer =f2_scorer
else:
logloss_scorer = make_scorer(gini_log_loss, greater_is_better=False, needs_proba=False)
scorer =logloss_scorer
#f1_scorer = make_scorer(gini_f1, greater_is_better=True, needs_proba=False,
# pos_label=rare_class)
#scorer = f1_scorer
### DO NOT USE NUM CLASS WITH BINARY CLASSIFICATION ######
if Boosting_Flag:
if model_name.lower() == 'catboost':
xgbm = CatBoostClassifier(verbose=1,iterations=max_estims,
random_state=99,one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample,gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=max_estims,
n_jobs=-1, nthread=None, objective=objective,
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
seed=1)
xgbm.set_params(**param)
elif Boosting_Flag is None:
#### I have set the Verbose to be False here since it produces too much output ###
xgbm = LogisticRegression(random_state=seed,verbose=False,n_jobs=-1,solver=solver,
fit_intercept=True, tol=tolerance,
warm_start=warm_start, max_iter=max_iter)
else:
xgbm = RandomForestClassifier(
**{
'bootstrap': bootstrap, 'n_jobs': -1, 'warm_start': warm_start,
'random_state':seed,'min_samples_leaf':2,'oob_score':True,
'max_features': "sqrt"
})
else:
##### This is for MULTI Classification ##########################
objective = 'multi:softmax'
eval_metric = "mlogloss"
if scoring_parameter == 'gini':
gini_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer = gini_scorer
elif scoring_parameter=='balanced_accuracy' or scoring_parameter=='balanced-accuracy' or scoring_parameter=='average_accuracy':
bal_accuracy_scorer = make_scorer(gini_bal_accuracy, greater_is_better=True,
needs_proba=False)
scorer = bal_accuracy_scorer
elif scoring_parameter == 'roc_auc' or scoring_parameter == 'roc_auc_score':
roc_auc_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer = roc_auc_scorer
elif scoring_parameter == 'average_precision' or scoring_parameter == 'mean_precision':
average_precision_scorer = make_scorer(gini_average_precision,
greater_is_better=True, needs_proba=True)
scorer = average_precision_scorer
elif scoring_parameter == 'samples_precision':
samples_precision_scorer = make_scorer(gini_samples_precision,
greater_is_better=True, needs_proba=True)
scorer = samples_precision_scorer
elif scoring_parameter == 'weighted_precision' or scoring_parameter == 'weighted-precision':
weighted_precision_scorer = make_scorer(gini_weighted_precision,
greater_is_better=True, needs_proba=True)
scorer = weighted_precision_scorer
elif scoring_parameter == 'macro_precision':
macro_precision_scorer = make_scorer(gini_macro_precision,
greater_is_better=True, needs_proba=True)
scorer = macro_precision_scorer
elif scoring_parameter == 'micro_precision':
scorer = micro_precision_scorer
micro_precision_scorer = make_scorer(gini_micro_precision,
greater_is_better=True, needs_proba=True)
elif scoring_parameter == 'samples_recall':
samples_recall_scorer = make_scorer(gini_samples_recall, greater_is_better=True, needs_proba=True)
scorer = samples_recall_scorer
elif scoring_parameter == 'weighted_recall' or scoring_parameter == 'weighted-recall':
weighted_recall_scorer = make_scorer(gini_weighted_recall,
greater_is_better=True, needs_proba=True)
scorer = weighted_recall_scorer
elif scoring_parameter == 'macro_recall':
macro_recall_scorer = make_scorer(gini_macro_recall,
greater_is_better=True, needs_proba=True)
scorer = macro_recall_scorer
elif scoring_parameter == 'micro_recall':
micro_recall_scorer = make_scorer(gini_micro_recall, greater_is_better=True, needs_proba=True)
scorer = micro_recall_scorer
elif scoring_parameter == 'samples_f1':
samples_f1_scorer = make_scorer(gini_samples_f1,
greater_is_better=True, needs_proba=True)
scorer = samples_f1_scorer
elif scoring_parameter == 'weighted_f1' or scoring_parameter == 'weighted-f1':
weighted_f1_scorer = make_scorer(gini_weighted_f1,
greater_is_better=True, needs_proba=True)
scorer = weighted_f1_scorer
elif scoring_parameter == 'macro_f1':
macro_f1_scorer = make_scorer(gini_macro_f1,
greater_is_better=True, needs_proba=True)
scorer = macro_f1_scorer
elif scoring_parameter == 'micro_f1':
micro_f1_scorer = make_scorer(gini_micro_f1,
greater_is_better=True, needs_proba=True)
scorer = micro_f1_scorer
else:
weighted_f1_scorer = make_scorer(gini_weighted_f1,
greater_is_better=True, needs_proba=True)
scorer = weighted_f1_scorer
import scipy as sp
if Boosting_Flag:
# Create regularization hyperparameter distribution using uniform distribution
if hyper_param == 'GS':
c_params['XGBoost'] = {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
}
c_params["CatBoost"] = {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
}
else:
import scipy as sp
c_params['XGBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100, max_estims),
'max_depth': sp.stats.randint(1, 10)
}
c_params['CatBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
}
if model_name.lower() == 'catboost':
xgbm = CatBoostClassifier(verbose=1,iterations=max_estims,
random_state=99,one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample, gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=max_estims,
n_jobs=-1, nthread=None, objective=objective,
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
num_class= len(classes),
seed=1)
xgbm.set_params(**param)
elif Boosting_Flag is None:
if hyper_param == 'GS':
if Imbalanced_Flag:
c_params['Linear'] = {
'C': Cs,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': Cs,
}
else:
if Imbalanced_Flag:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
}
#### I have set the Verbose to be False here since it produces too much output ###
xgbm = LogisticRegression(random_state=seed,verbose=False,n_jobs=-1,solver=solver,
fit_intercept=True, tol=tolerance, multi_class='auto',
max_iter=max_iter, warm_start=False,
)
else:
if hyper_param == 'GS':
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#"criterion":['gini','entropy'],
}
else:
c_params["Forests"] = {
##### I have set these to avoid OverFitting which is a problem for small data sets ###
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion":['gini','entropy'],
#'class_weight':[None,'balanced']
}
xgbm = RandomForestClassifier(bootstrap=bootstrap, oob_score=True,warm_start=warm_start,
n_estimators=100,max_depth=3,
min_samples_leaf=2,max_features='auto',
random_state=seed,n_jobs=-1)
###### Now do RandomizedSearchCV using # Early-stopping ################
if modeltype == 'Regression':
#scoreFunction = {"mse": "neg_mean_squared_error", "mae": "neg_mean_absolute_error"}
#### I have set the Verbose to be False here since it produces too much output ###
if hyper_param == 'GS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = GridSearchCV(xgbm,param_grid=r_params[model_name],
scoring = scorer,
n_jobs=-1,
cv = scv,
refit = refit_metric,
return_train_score = True,
verbose=0)
elif hyper_param == 'RS':
gs = RandomizedSearchCV(xgbm,
param_distributions = r_params[model_name],
n_iter = no_iter,
scoring = scorer,
refit = refit_metric,
return_train_score = True,
random_state = seed,
cv = scv,
n_jobs=-1,
verbose = 0)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
gs = copy.deepcopy(xgbm)
else:
if hyper_param == 'GS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = GridSearchCV(xgbm,param_grid=c_params[model_name],
scoring = scorer,
return_train_score = True,
n_jobs=-1,
refit = refit_metric,
cv = scv,
verbose=0)
elif hyper_param == 'RS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = RandomizedSearchCV(xgbm,
param_distributions = c_params[model_name],
n_iter = no_iter,
scoring = scorer,
refit = refit_metric,
return_train_score = True,
random_state = seed,
n_jobs=-1,
cv = scv,
verbose = 0)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
gs = copy.deepcopy(xgbm)
#trains and optimizes the model
eval_set = [(X_train,y_train),(X_cv,y_cv)]
print('Finding Best Model and Hyper Parameters for Target: %s...' %each_target)
##### Here is where we put the part_train and part_cv together ###########
if modeltype != 'Regression':
### Do this only for Binary Classes and Multi-Classes, both are okay
baseline_accu = 1-(train[each_target].value_counts(1).sort_values())[rare_class]
print(' Baseline Accuracy Needed for Model = %0.2f%%' %(baseline_accu*100))
print('CPU Count = %s in this device' %CPU_count)
if modeltype == 'Regression':
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(3000000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(50000.*CPU_count)))
elif Boosting_Flag is None:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(80000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(40000.*CPU_count)))
else:
if hyper_param == 'GS':
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(300000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(10000.*CPU_count)))
elif Boosting_Flag is None:
#### A Linear model is usually the fastest ###########
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(50000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(16000.*CPU_count)))
else:
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(3000000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(40000.*CPU_count)))
elif Boosting_Flag is None:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(100000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(25000.*CPU_count)))
##### Since we are using Multiple Models each with its own quirks, we have to make sure it is done this way
##### ############ TRAINING MODEL FIRST TIME WITH X_TRAIN AND TESTING ON X_CV ############
model_start_time = time.time()
################################################################################################################################
##### BE VERY CAREFUL ABOUT MODIFYING THIS NEXT LINE JUST BECAUSE IT APPEARS TO BE A CODING MISTAKE. IT IS NOT!! #############
################################################################################################################################
#######
if Imbalanced_Flag:
if modeltype == 'Regression':
########### In case someone sets the Imbalanced_Flag mistakenly to True and it is Regression, you must set it to False ######
Imbalanced_Flag = False
else:
####### Imbalanced with Classification #################
try:
print('############## Imbalanced Flag on: Training model with SMOTE Oversampling method ###########')
#### The model is the downsampled model Trained on downsampled data sets. ####
model, X_train, y_train = training_with_SMOTE(X_train,y_train,eval_set, gs,
Boosting_Flag, eval_metric,
modeltype, model_name,training=True,
minority_class=rare_class,imp_cats=imp_cats,
calibrator_flag=calibrator_flag,
GPU_exists=GPU_exists, params = cpu_params,
verbose=verbose)
if isinstance(model, str):
model = copy.deepcopy(gs)
#### If d_model failed, it will just be an empty string, so you try the regular model ###
print('Error in training Imbalanced model first time. Trying regular model..')
Imbalanced_Flag = False
if Boosting_Flag:
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
try:
model.fit(X_train, y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=0)
except:
#### On Colab, even though GPU exists, many people don't turn it on.
#### In that case, XGBoost blows up when gpu_predictor is used.
#### This is to turn it back to cpu_predictor in case GPU errors!
if GPU_exists:
print('Error: GPU exists but it is not turned on. Using CPU for predictions...')
model.estimator.set_params(**cpu_params)
model.fit(X_train,y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=False)
else:
model.fit(X_train,y_train,
eval_metric=eval_metric, verbose=False)
else:
try:
model.fit(X_train, y_train,
cat_features=imp_cats,eval_set=(X_cv,y_cv), use_best_model=True,plot=True)
except:
model.fit(X_train, y_train, cat_features=imp_cats,use_best_model=False,plot=False)
else:
model.fit(X_train, y_train)
#### If downsampling succeeds, it will be used to get the best score and can become model again ##
if hyper_param == 'RS' or hyper_param == 'GS':
best_score = model.best_score_
else:
val_keys = list(model.best_score_.keys())
best_score = model.best_score_[val_keys[-1]][validation_metric]
except:
print('Error in training Imbalanced model first time. Trying regular model..')
Imbalanced_Flag = False
best_score = 0
################################################################################################################################
####### Though this next step looks like it is a Coding Mistake by Me, don't change it!!! ###################
####### This is for case when Imbalanced with Classification succeeds, this next step is skipped ############
################################################################################################################################
if not Imbalanced_Flag:
########### This is for both regular Regression and regular Classification Model Training. It is not a Mistake #############
########### In case Imbalanced training fails, this method is also tried. That's why we test the Flag here!! #############
try:
model = copy.deepcopy(gs)
if Boosting_Flag:
if model_name == 'XGBoost':
try:
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X_train, y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=0)
except:
#### On Colab, even though GPU exists, many people don't turn it on.
#### In that case, XGBoost blows up when gpu_predictor is used.
#### This is to turn it back to cpu_predictor in case GPU errors!
if GPU_exists:
print('Error: GPU exists but it is not turned on. Using CPU for predictions...')
model.estimator.set_params(**cpu_params)
model.fit(X_train,y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=False)
else:
model.fit(X_train,y_train,
eval_metric=eval_metric, verbose=False)
else:
try:
model.fit(X_train, y_train, cat_features=imp_cats,
eval_set=(X_cv,y_cv), use_best_model=True, plot=True)
except:
model.fit(X_train, y_train, cat_features=imp_cats, use_best_model=False, plot=False)
else:
model.fit(X_train, y_train)
except:
print('Training regular model first time is Erroring: Check if your Input is correct...')
return
try:
if hyper_param == 'RS' or hyper_param == 'GS':
best_score = model.best_score_
validation_metric = copy.deepcopy(scoring_parameter)
else:
val_keys = list(model.best_score_.keys())
if 'validation' in val_keys:
validation_metric = list(model.best_score_['validation'].keys())[0]
best_score = model.best_score_['validation'][validation_metric]
else:
validation_metric = list(model.best_score_['learn'].keys())[0]
best_score = model.best_score_['learn'][validation_metric]
except:
print('Error: Not able to print validation metrics. Continuing...')
## TRAINING OF MODELS COMPLETED. NOW GET METRICS on CV DATA ################
print(' Actual training time (in seconds): %0.0f' %(time.time()-model_start_time))
print('########### S I N G L E M O D E L R E S U L T S #################')
if modeltype != 'Regression':
############## This is for Classification Only !! ########################
if scoring_parameter in ['logloss','neg_log_loss','log_loss','log-loss','']:
print('{}-fold Cross Validation {} = {}'.format(n_splits, 'logloss', best_score))
elif scoring_parameter in ['accuracy','balanced-accuracy','balanced_accuracy','roc_auc','roc-auc',
'f1','precision','recall','average-precision','average_precision',
'weighted_f1','weighted-f1','AUC']:
print('%d-fold Cross Validation %s = %0.1f%%' %(n_splits,scoring_parameter, best_score*100))
else:
print('%d-fold Cross Validation %s = %0.1f' %(n_splits,validation_metric, best_score))
else:
######### This is for Regression only ###############
if best_score < 0:
best_score = best_score*-1
if scoring_parameter == '':
print('%d-fold Cross Validation %s Score = %0.4f' %(n_splits,'RMSE', best_score))
else:
print('%d-fold Cross Validation %s Score = %0.4f' %(n_splits,validation_metric, best_score))
#### We now need to set the Best Parameters, Fit the Model on Full X_train and Predict on X_cv
### Find what the order of best params are and set the same as the original model ###
if hyper_param == 'RS' or hyper_param == 'GS':
best_params= model.best_params_
print(' Best Parameters for Model = %s' %model.best_params_)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
#### CatBoost does not need too many iterations. Just make sure you set the iterations low after the first time!
if model.get_best_iteration() == 0:
### In some small data sets, the number of iterations becomes zero, hence we set it as a default number
best_params = dict(zip(['iterations','learning_rate'],[1000,model.get_all_params()['learning_rate']]))
else:
best_params = dict(zip(['iterations','learning_rate'],[model.get_best_iteration(),model.get_all_params()['learning_rate']]))
print(' %s Best Parameters for Model: Iterations = %s, learning_rate = %0.2f' %(
model_name, model.get_best_iteration(), model.get_all_params()['learning_rate']))
if hyper_param == 'RS' or hyper_param == 'GS':
#### In the case of CatBoost, we don't do any Hyper Parameter tuning #########
gs = copy.deepcopy(model)
model = gs.best_estimator_
if modeltype == 'Multi_Classification':
try:
if X_cv.shape[0] <= 1000:
# THis works well for small data sets and is similar to parametric
method= 'sigmoid' # 'isotonic' # #
else:
# THis works well for large data sets and is non-parametric
method= 'isotonic'
model = CalibratedClassifierCV(model, method=method, cv="prefit")
model.fit(X_train, y_train)
print('Using a Calibrated Classifier in this Multi_Classification dataset to improve results...')
calibrator_flag = True
except:
calibrator_flag = False
pass
### Make sure you set this flag as False so that when ensembling is completed, this flag is True ##
if model_name.lower() == 'catboost':
print('Best Model selected and its parameters are:\n %s' %model.get_all_params())
else:
print('Best Model selected and its parameters are:\n %s' %model)
performed_ensembling = False
if modeltype != 'Regression':
m_thresh = 0.5
y_proba = model.predict_proba(X_cv)
y_pred = model.predict(X_cv)
if len(classes) <= 2:
print('Finding Best Threshold for Highest F1 Score...')
precision, recall, thresholds = precision_recall_curve(y_cv, y_proba[:,rare_class])
#precision, recall, thresholds = precision_recall_curve(y_cv, y_proba[:,1])
try:
f1 = (2*precision*recall)/(precision+recall)
f1 = np.nan_to_num(f1)
m_idx = np.argmax(f1)
m_thresh = thresholds[m_idx]
best_f1 = f1[m_idx]
except:
best_f1 = f1_score(y_cv, y_pred)
m_thresh = 0.5
# retrieve just the probabilities for the positive class
pos_probs = y_proba[:, rare_class]
if verbose >= 1:
# create a histogram of the predicted probabilities for the Rare Class since it will help decide threshold
plt.figure(figsize=(6,6))
plt.hist(pos_probs, bins=Bins, color='g')
plt.title("Model's Predictive Probability Histogram for Rare Class=%s with suggested threshold in red" %rare_class_orig)
plt.axvline(x=m_thresh, color='r', linestyle='--')
plt.show();
print(" Using threshold=0.5. However, %0.3f provides better F1=%0.2f for rare class..." %(m_thresh,best_f1))
###y_pred = (y_proba[:,rare_class]>=m_thresh).astype(int)
predicted = copy.deepcopy(y_proba)
predicted [:,0] = (predicted [:,0] >= (1-m_thresh)).astype('int')
predicted [:,1] = (predicted [:,1] > m_thresh).astype('int')
if m_thresh != 0.5:
y_pred = predicted[:,rare_class]
else:
y_proba = model.predict_proba(X_cv)
y_pred = model.predict(X_cv)
else:
y_pred = model.predict(X_cv)
### This is where you print out the First Model's Results ########
print('########################################################')
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values
if isinstance(y_cv,pd.Series):
y_cv = y_cv.values
print('%s Model Prediction Results on Held Out CV Data Set:' %model_name)
if modeltype == 'Regression':
rmsle_calculated_m = rmse(y_cv, y_pred)
print_regression_model_stats(y_cv, y_pred,'%s Model: Predicted vs Actual for %s'%(model_name,each_target))
else:
if model_name == 'Forests':
if calibrator_flag:
print(' OOB Score = %0.3f' %model.base_estimator.oob_score_)
else:
print(' OOB Score = %0.3f' %model.oob_score_)
rmsle_calculated_m = balanced_accuracy_score(y_cv,y_pred)
if len(classes) == 2:
print(' Regular Accuracy Score = %0.1f%%' %(accuracy_score(y_cv,y_pred)*100))
y_probas = model.predict_proba(X_cv)
rmsle_calculated_m = print_classification_model_stats(y_cv, y_probas, m_thresh)
else:
###### Use a nice classification matrix printing module here #########
print(' Balanced Accuracy Score = %0.1f%%' %(rmsle_calculated_m*100))
print(classification_report(y_cv,y_pred))
print(confusion_matrix(y_cv, y_pred))
###### SET BEST PARAMETERS HERE ######
### Find what the order of best params are and set the same as the original model ###
## This is where we set the best parameters from training to the model ####
if modeltype == 'Regression':
if not Stacking_Flag:
print('################# E N S E M B L E M O D E L ##################')
try:
cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype=modeltype, Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
else:
subm[new_col] = cv_ensembles[:,each]
cols.append(new_col)
if len(cols) == 5:
print(' Displaying results of weighted average ensemble of %d regressors' %len(cols))
ensem_pred = subm[cols[-1]]*0.5+0.125*(subm[cols[0]]+subm[
cols[1]]+subm[cols[2]]+subm[cols[3]])
else:
print(' Calculating regular average ensemble of %d regressors' %len(cols))
ensem_pred = (subm[cols].mean(axis=1))
print('#############################################################################')
performed_ensembling = True
#### Since we have a new ensembled y_pred, make sure it is series or array before printing it!
if isinstance(y_pred,pd.Series):
print_regression_model_stats(y_cv, ensem_pred.values,'Ensemble Model: Model Predicted vs Actual for %s' %each_target)
else:
print_regression_model_stats(y_cv, ensem_pred,'Ensemble Model: Model Predicted vs Actual for %s' %each_target)
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
## This is for Classification Problems Only #
### Find what the order of best params are and set the same as the original model ###
## This is where we set the best parameters from training to the model ####
if not Stacking_Flag:
print('################# E N S E M B L E M O D E L ##################')
#### We do Ensembling only if the Stacking_Flag is False. Otherwise, we don't!
try:
classes = label_dict[each_target]['classes']
cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
if len(classes) == 2:
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
else:
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
else:
subm[new_col] = cv_ensembles[:,each]
cols.append(new_col)
if len(cols) == 5:
print(' Displaying results of weighted average ensemble of %d classifiers' %len(cols))
ensem_pred = np.round(subm[cols[-1]]*0.5+0.125*(subm[cols[0]]+subm[
cols[1]]+subm[cols[2]]+subm[cols[3]])).astype(int)
else:
print(' Calculating regular average ensemble of %d classifiers' %len(cols))
ensem_pred = (subm[cols].mean(axis=1)).astype(int)
print('#############################################################################')
performed_ensembling = True
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
print('No Ensembling of models done since Stacking_Flag = True ')
if verbose >= 1:
if len(classes) == 2:
plot_classification_results(model,X_cv, y_cv, y_pred, classes, class_nums, each_target )
else:
try:
Draw_ROC_MC_ML(model, X_cv, y_cv, each_target, model_name, verbose)
Draw_MC_ML_PR_ROC_Curves(model,X_cv,y_cv)
except:
print('Could not plot PR and ROC curves. Continuing...')
#### In case there are special scoring_parameter requests, you can print it here!
if scoring_parameter == 'roc_auc' or scoring_parameter == 'auc':
if len(classes) == 2:
print(' ROC AUC Score = %0.1f%%' %(roc_auc_score(y_cv, y_proba[:,rare_class])*100))
else:
print(' No ROC AUC score for multi-class problems')
elif scoring_parameter == 'jaccard':
accu_all = jaccard_singlelabel(y_cv, y_pred)
print(' Mean Jaccard Similarity = {:,.1f}%'.format(
accu_all*100))
## This is for multi-label problems ##
if count == 0:
zipped = copy.deepcopy(y_pred)
count += 1
else:
zipped = zip(zipped,y_pred)
count += 1
elif scoring_parameter == 'basket_recall':
if count == 0:
zipped = copy.deepcopy(y_pred)
count += 1
else:
zipped = zip(zipped,y_pred)
count += 1
if not Stacking_Flag and performed_ensembling:
if modeltype == 'Regression':
rmsle_calculated_f = rmse(y_cv, y_pred)
print('After multiple models, Ensemble Model Results:')
print(' RMSE Score = %0.5f' %(rmsle_calculated_f,))
print('#############################################################################')
if rmsle_calculated_f < rmsle_calculated_m:
print('Ensembling Models is better than Single Model for this data set.')
error_rate.append(rmsle_calculated_f)
else:
print('Single Model is better than Ensembling Models for this data set.')
error_rate.append(rmsle_calculated_m)
else:
rmsle_calculated_f = balanced_accuracy_score(y_cv,y_pred)
print('After multiple models, Ensemble Model Results:')
rare_pct = y_cv[y_cv==rare_class].shape[0]/y_cv.shape[0]
print(' Balanced Accuracy Score = %0.3f%%' %(
rmsle_calculated_f*100))
print(classification_report(y_cv,y_pred))
print(confusion_matrix(y_cv,y_pred))
print('#############################################################################')
if rmsle_calculated_f > rmsle_calculated_m:
print('Ensembling Models is better than Single Model for this data set.')
error_rate.append(rmsle_calculated_f)
else:
print('Single Model is better than Ensembling Models for this data set.')
error_rate.append(rmsle_calculated_m)
if verbose >= 1:
if Boosting_Flag:
try:
if model_name.lower() == 'catboost':
plot_xgb_metrics(model,catboost_scoring,eval_set,modeltype,'%s Results' %each_target,
model_name)
else:
plot_xgb_metrics(gs.best_estimator_,eval_metric,eval_set,modeltype,'%s Results' %each_target,
model_name)
except:
print('Could not plot Model Evaluation Results Metrics')
else:
try:
plot_RS_params(gs.cv_results_, scoring_parameter, each_target)
except:
print('Could not plot Cross Validation Parameters')
print(' Time taken for this Target (in seconds) = %0.0f' %(time.time()-start_time))
print('Training model on complete Train data and Predicting using give Test Data...')
################ I M P O R T A N T: C O M B I N I N G D A T A ######################
#### This is Second time: we combine train and CV into Train and Test Sets #################
train = part_train.append(part_cv)
important_features = [x for x in list(train) if x not in [each_target]]
############################################################################################
###### Now that we have used partial data to make stacking predictors, we can remove them from consideration!
if Stacking_Flag:
important_features = left_subtract(important_features, addcol)
try:
train.drop(addcol,axis=1, inplace=True)
except:
pass
###### Similarly we will have to create KMeans_Clusters again using full Train data!
if KMeans_Featurizer:
important_features = left_subtract(important_features, km_label)
try:
train.drop(km_label,axis=1, inplace=True)
except:
pass
########################## BINNING SECOND TIME ###############################
new_num_vars = np.array(important_features)[(train[important_features].dtypes==float)].tolist()
## Now we re-use the saved_num_vars which contained a list of num_vars for binning now!
###### Once again we do Entropy Binning on the Full Train Data Set !!
########################## BINNING SECOND TIME ###############################
if Binning_Flag and len(saved_num_vars) > 0:
### when you bin the second time, you have to send in important_features with original
### numeric variables so that it works on binning only those. Otherwise it will fail.
### Do Entropy Binning only if there are numeric variables in the data set! #####
#### When we Bin the second first time, we set the entropy_binning flag to True so
#### that all numeric variables that are binned are removed. This way, only bins remain.
train, num_vars, important_features, test = add_entropy_binning(train, each_target,
orig_num_vars, important_features, test,
modeltype, entropy_binning=True,verbose=verbose)
#### In saved_num_vars we send in all the continuous_vars but we bin only the top few vars.
### Those that are binned are removed from saved_num_vars and the remaining become num_vars
### Our job is to find the names of those original numeric variables which were binned.
### orig_num_vars contains original num vars. num_vars contains binned versions of those vars.
### Those binned variables have now become categorical vars and must be added to imp_cats.
#### Also note that important_features does not contain orig_num_vars which have been erased.
else:
print(' Binning_Flag set to False or there are no numeric vars in data set to be binned')
### Now we add another Feature tied to KMeans clustering using Predictor and Target variables ###
####################### KMEANS SECOND TIME ############################
if KMeans_Featurizer and len(saved_num_vars) > 0:
#### Perform KMeans Featurizer only if there are numeric variables in data set! #########
print('Adding one feature named "KMeans_Clusters" using KMeans_Featurizer...')
km_label = 'KMeans_Clusters'
if modeltype != 'Regression':
#### Make the number of clusters as the same as log10 of number of rows in Train
train_cluster, test_cluster = Transform_KM_Features(train[important_features], train[each_target], test[important_features], num_clusters)
else:
train_cluster, test_cluster = Transform_KM_Features(train[important_features], train[each_target], test[important_features])
#### Now make sure that the cat features are either string or integers ######
print(' Used KMeans to naturally cluster Train predictor variables into %d clusters' %num_clusters)
train[km_label] = train_cluster
if not isinstance(test, str):
test[km_label] = test_cluster
#X_train.drop(each_target,axis=1,inplace=True)
for imp_cat in imp_cats:
train[imp_cat] = train[imp_cat].astype(int)
if not isinstance(test, str):
test[imp_cat] = test[imp_cat].astype(int)
saved_num_vars.append(km_label) ### You need to add it to this variable list for Scaling later!
important_features.append(km_label)
########################## STACKING SECOND TIME ###############################
######### This is where you do Stacking of Multi Model Results into One Column ###
if Stacking_Flag:
#### In order to join, you need X_train to be a Pandas Series here ##
print('CAUTION: Stacking can produce Highly Overfit models on Training Data...')
### In order to avoid overfitting, we are going to learn from a small sample of data
### That is why we are using X_cv to train on and using it to predict on X_train!
addcol, stacks1 = QuickML_Stacking(train[important_features],train[each_target],'',
modeltype, Boosting_Flag, scoring_parameter,verbose)
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
#### The reason we add the word "Partial_Train" is to show that these Stacking results are from Partial Train data!
addcols = copy.deepcopy(addcol)
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
train = train.join(pd.DataFrame(stacks1,index=train.index,
columns=addcols))
##### Leaving multiple columns for Stacking is best! Do not do the average of predictions!
print(' Adding %d Stacking feature(s) to training data' %len(addcols))
if not isinstance(orig_test, str):
### In order to avoid overfitting, we are going to learn from a small sample of data
### That is why we are using X_train to train on and using it to predict on X_test
_, stacks2 = QuickML_Stacking(train[important_features],train[each_target],test[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
test = test.join(pd.DataFrame(stacks2,index=test.index,
columns=addcols))
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
#test = test.join(pd.DataFrame(stacks2.mean(axis=1).round().astype(int),
# columns=[addcol],index=test.index))
###### We make sure that we remove too many features that are highly correlated ! #####
#addcol = remove_variables_using_fast_correlation(train,addcol,corr_limit,verbose)
important_features += addcols
saved_num_vars.append(addcol) ### You need to add it for binning later!
############################################################################################
if len(important_features) == 0:
print('No important features found. Using all input features...')
important_features = copy.deepcopy(saved_important_features)
#important_features = copy.deepcopy(red_preds)
############################################################################################
if model_name.lower() == 'catboost':
print(' Setting best params for CatBoost model from Initial State since you cannot change params to a fitted Catboost model ')
model = xgbm.set_params(**best_params)
print(' Number of Categorical and Integer variables used in CatBoost training = %d' %len(imp_cats))
#### Perform Scaling of Train data a second time using FULL TRAIN data set this time !
#### important_features keeps track of all variables that we need to ensure they are scaled!
train, test = perform_scaling_numeric_vars(train, important_features, test,
model_name, SS)
################ T R A I N I N G M O D E L A S E C O N D T I M E ###################
### The next 2 lines are crucial: if X and y are dataframes, then next 2 should be df's
### They should not be df.values since they will become numpy arrays and XGB will error.
trainm = train[important_features+[each_target]]
red_preds = copy.deepcopy(important_features)
X = trainm[red_preds]
y = trainm[each_target]
eval_set = [()]
##### ############ TRAINING MODEL SECOND TIME WITH FULL_TRAIN AND PREDICTING ON TEST ############
model_start_time = time.time()
if modeltype != 'Regression':
if Imbalanced_Flag:
try:
print('################## Imbalanced Flag Set ############################')
print('Imbalanced Class Training using SMOTE Rare Class Oversampling method...')
model, X, y = training_with_SMOTE(X,y, eval_set, model,
Boosting_Flag, eval_metric,modeltype, model_name,
training=False, minority_class=rare_class,
imp_cats=imp_cats, calibrator_flag=calibrator_flag,
GPU_exists=GPU_exists, params=cpu_params,
verbose=verbose)
if isinstance(model, str):
#### If downsampling model failed, it will just be an empty string, so you can try regular model ###
model = copy.deepcopy(best_model)
print('Error in training Imbalanced model second time. Trying regular model..')
Imbalanced_Flag = False
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
#### Set the Verbose to 0 since we don't want too much output ##
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
except:
print('Error in training Imbalanced model second time. Trying regular model..')
Imbalanced_Flag = False
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
else:
try:
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
### Since second time we don't have X_cv, we remove it
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
except:
print('Training regular model second time erroring: Check if Input is correct...')
return
else:
try:
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, use_best_model=False, plot=False)
else:
model.fit(X, y)
except:
print('Training model second time is Erroring: Check if Input is correct...')
return
print('Actual Training time taken in seconds = %0.0f' %(time.time()-model_start_time))
## TRAINING OF MODELS COMPLETED. NOW START PREDICTIONS ON TEST DATA ################
#### new_cols is to keep track of new prediction columns we are creating #####
new_cols = []
if not isinstance(orig_test, str):
### If there is a test data frame, then let us predict on it #######
### The next 3 lines are crucial: if X and y are dataframes, then next 2 should be df's
### They should not be df.values since they will become numpy arrays and XGB will error.
try:
#### We need the id columns to carry over into the predictions ####
testm = orig_test[id_cols].join(test[red_preds])
except:
### if for some reason id columns are not available, then do without it
testm = test[red_preds]
X_test = testm[red_preds]
else:
##### If there is no Test file, then do a final prediction on Train itself ###
orig_index = orig_train.index
trainm = train.reindex(index = orig_index)
testm = orig_train[id_cols].join(trainm[red_preds])
X_test = testm[red_preds]
if modeltype == 'Regression':
y_pred = model.predict(X_test)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values
######## This is for Regression Problems Only ###########
###### If Stacking_ Flag is False, then we do Ensembling #######
if not Stacking_Flag:
try:
new_cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
#### In Test data verbose is set to zero since no results can be obtained!
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype=modeltype, Boosting_Flag=Boosting_Flag,
scoring='', verbose=0)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
testm[new_col] = y_pred
else:
subm[new_col] = ensembles[:,each]
testm[new_col] = ensembles[:,each]
new_cols.append(new_col)
### After this, y_pred is a Series from now on. You need y_pred.values ####
if len(new_cols) == 5:
print(' Calculating weighted average ensemble of %d regressors' %len(new_cols))
ensem_pred = subm[new_cols[-1]]*0.5+0.125*(subm[new_cols[0]]+subm[
new_cols[1]]+subm[new_cols[2]]+subm[new_cols[3]])
else:
print(' Calculating regular average ensemble of %d regressors' %len(cols))
ensem_pred = (subm[new_cols].mean(axis=1))
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
ensem_pred = ensem_pred.values
new_col = each_target+'_Ensembled_predictions'
testm[new_col] = ensem_pred
new_cols.append(new_col)
print('Completed Ensemble predictions on held out data')
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
stack_cols, stacksfinal = QuickML_Stacking(X, y, X_test,
modeltype, Boosting_Flag,
scoring_parameter,verbose=verbose)
new_col = each_target+'_Stacked_'+stack_cols[0].split("_")[0]+'_predictions'
if len(stack_cols) == 1:
testm[new_col] = stacksfinal
else:
#### Just average the predictions from each stacked model into a final pred
testm[new_col] = stacksfinal.mean(axis=1)
if not isinstance(sample_submission, str):
sample_submission[each_target] = y_pred
#### If there is a test file, it probably doesn't have target, so add predictions to it!
testm[each_target+'_predictions'] = y_pred
else:
proba_cols = []
######## This is for both Binary and Multi Classification Problems ###########
y_proba = model.predict_proba(X_test)
y_pred = model.predict(X_test)
predicted = copy.deepcopy(y_proba)
if len(classes) <= 2:
predicted [:,0] = (predicted [:,0] >= (1-m_thresh)).astype('int')
predicted [:,1] = (predicted [:,1] > m_thresh).astype('int')
if predicted[:,rare_class].mean()==0 or predicted[:,rare_class].mean()==1:
### If the model is predicting all 0's or all 1's, you need to use a regular threshold
m_thresh = 0.5
print(' Making test Data predictions using regular Threshold = %0.3f' %m_thresh)
else:
### If the model is good with the modified threshold, then you use the modified threshold!
print(' Making test Data predictions using modified Threshold = %0.3f' %m_thresh)
y_pred = predicted[:,rare_class]
else:
##### For multi-class, just make predictions of multiple classes here #######
y_pred = model.predict(X_test)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values.astype(int)
else:
### In a small number of cases, it's an array but has a shape of 1.
### This causes errors later. Hence I have to make it a singleton array.
try:
if y_pred.shape[1] == 1:
y_pred = y_pred.ravel()
except:
y_pred = y_pred.astype(int)
if len(label_dict[each_target]['transformer']) == 0:
######### NO T R A N S F O R M E R L O G I C B E G I N S H E R E ! #####################
### if there is no transformer, then leave the predicted classes as is
classes = label_dict[each_target]['classes']
##### If there is no transformer, you can just predict the classes as is and save it here ###
testm[each_target+'_predictions'] = y_pred
###### If Stacking_Flag is False, then we do Ensembling #######
if not Stacking_Flag:
### Ensembling is not done when the model name is CatBoost ####
new_cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
#### In Test data verbose is set to zero since no results can be obtained!
if len(classes) == 2:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=0)
else:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=0)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
testm[new_col] = y_pred
else:
subm[new_col] = ensembles[:,each]
testm[new_col] = ensembles[:,each]
new_cols.append(new_col)
### You will need to create probabilities for each class here ####
for each_class in classes:
if isinstance(each_class, str):
proba_col = each_target+'_proba_'+each_class
else:
proba_col = each_target+'_proba_'+str(each_class)
count = int(label_dict[each_target]['dictionary'][each_class])
testm[proba_col] = y_proba[:,count]
proba_cols.append(proba_col)
if not Stacking_Flag:
new_col = each_target+'_Ensembled_predictions'
if len(new_cols) == 5:
print(' Calculating weighted average ensemble of %d classifiers' %len(new_cols))
ensem_pred = np.round(subm[new_cols[-1]]*0.5+0.125*(subm[new_cols[0]]+subm[
new_cols[1]]+subm[new_cols[2]]+subm[new_cols[3]])).astype(int)
else:
print(' Calculating average ensemble of %d classifiers' %len(new_cols))
ensem_pred = (subm[new_cols].mean(axis=1)).astype(int)
else:
stack_cols, stacksfinal = QuickML_Stacking(X, y, X_test,
modeltype, Boosting_Flag,scoring_parameter,verbose)
new_col = each_target+'_Stacked_'+stack_cols[0].split("_")[0]+'_predictions'
ensem_pred = np.argmax(stacksfinal,axis=1)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
testm[new_col] = ensem_pred
new_cols.append(new_col)
if not isinstance(sample_submission, str):
sample_submission[each_target] = y_pred
else:
######### T R A N S F O R M E R L O G I C B E G I N S H E R E ! #####################
### if there is a transformer, then you must convert the predicted classes to orig classes
classes = label_dict[each_target]['classes']
dic = label_dict[each_target]['dictionary']
transformer = label_dict[each_target]['transformer']
class_nums = label_dict[each_target]['class_nums']
##### If there is a transformer, you must convert predictions to original classes
testm[each_target+'_predictions'] = pd.Series(y_pred).map(transformer).values
for each_class in classes:
if isinstance(each_class, str):
proba_col = each_target+'_proba_'+each_class
else:
proba_col = each_target+'_proba_'+str(each_class)
count = label_dict[each_target]['dictionary'][each_class]
testm[proba_col] = y_proba[:,count]
proba_cols.append(proba_col)
###### If Stacking_ Flag is False, then we do Ensembling #######
if not Stacking_Flag:
subm = pd.DataFrame()
#### This is for Ensembling Only #####
if len(classes) == 2:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
else:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
testm[new_col] = pd.Series(y_pred).map(transformer).values
else:
subm[new_col] = ensembles[:,each]
testm[new_col] = pd.Series(ensembles[:,each]).map(transformer).values
new_cols.append(new_col)
### After this, y_pred is a Series from now on. You need y_pred.values ####
if len(cols) == 5:
print(' Calculating weighted average ensemble of %d classifiers' %len(new_cols))
ensem_pred = np.round(subm[new_cols[-1]]*0.5+0.125*(subm[new_cols[0]]+subm[
new_cols[1]]+subm[new_cols[2]]+subm[new_cols[3]])).astype(int)
else:
print(' Calculating regular average ensemble of %d classifiers' %len(new_cols))
ensem_pred = (subm[new_cols].mean(axis=1)).astype(int)
print('########################################################')
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
print('Completed Ensemble predictions on held out data')
new_col = each_target+'_Ensembled_predictions'
else:
stack_cols, stacksfinal = QuickML_Stacking(X, y, X_test,
modeltype, Boosting_Flag,scoring_parameter,verbose)
new_col = each_target+'_Stacked_'+stack_cols[0].split("_")[0]+'_predictions'
ensem_pred = np.argmax(stacksfinal,axis=1)
print('########################################################')
print('Completed Stacked predictions on held out data')
testm[new_col] = pd.Series(ensem_pred).map(transformer).values
new_cols.append(new_col)
if not isinstance(sample_submission, str):
sample_submission[each_target] = pd.Series(y_pred).map(transformer).values
##################### P L O T F E A T U R E I M P O R T A N C E S H E R E ###################
if calibrator_flag:
plot_model = model.base_estimator
else:
plot_model = copy.deepcopy(model)
try:
if Boosting_Flag is None:
### If you don't use absolute values, you won't get the right set of features in order. Make sure!
imp_features_df = pd.DataFrame(abs(plot_model.coef_[0]),
columns=['Feature Importances'],index=important_features).sort_values(
'Feature Importances',ascending=False)
else:
if model_name.lower() == 'xgboost':
##### SHAP requires this step: XGBoost models must have been "predicted"
_ = plot_model.predict(X_test)
### It is possible that in some cases, XGBoost has fewer features than what was sent in.
### In those cases, we need to identify and know which features in XGBoost are in and which are out
#### In that case, we need to find those features and then do a feature importance
dictf = plot_model.get_booster().get_score(importance_type='gain')
if len(left_subtract(plot_model.get_booster().feature_names,important_features)) > 0:
#### If feature names from XGBoost and important_features are not same,you must transform dictf like this!
dicta = dict(zip(plot_model.get_booster().feature_names,important_features))
featdict = dict([(x,dicta[x]) for x in dictf.keys()])
featdict2 = dict([(dicta[x],dictf[x]) for x in featdict.keys()])
imp_features_df = pd.DataFrame(featdict2.values(),index=featdict2.keys(),
columns = ['Feature Importances']).sort_values('Feature Importances',
ascending=False)
else:
#### If the feature names from XGBoost and the important_features are same,
### you can plot dictf immediately!
imp_features_df = pd.DataFrame(dictf.values(),index=dictf.keys(),
columns = ['Feature Importances']).sort_values('Feature Importances',
ascending=False)
elif model_name == 'Forests':
imp_features_df = pd.DataFrame(plot_model.feature_importances_, columns=['Feature Importances'],
index=important_features).sort_values('Feature Importances',
ascending=False)
elif model_name.lower() == 'catboost':
from catboost import Pool
imp_features_df = pd.DataFrame(plot_model.get_feature_importance(
Pool(X_cv, label=y_cv,cat_features=imp_cats)),
columns=['Feature Importances'],
index=important_features).sort_values(
'Feature Importances',ascending=False)
### Now draw the feature importances using the data frame above!
height_size = 5
width_size = 10
color_string = 'byrcmgkbyrcmgkbyrcmgkbyrcmgk'
print('Plotting Feature Importances to explain the output of model')
imp_features_df[:15].plot(kind='barh',title='Feature Importances for predicting %s' %each_target,
figsize=(width_size, height_size), color=color_string);
except:
print('Could not draw feature importance plot due to an error')
########### D R A W SHAP VALUES USING TREE BASED MODELS. THE REST WILL NOT GET SHAP ############
if verbose >= 2:
print('Trying to plot SHAP values if SHAP is installed in this machine...')
try:
if model_name.lower() == 'catboost':
if verbose > 0:
import shap
from catboost import Pool
shap.initjs()
plt.figure()
shap_values = plot_model.get_feature_importance(Pool(X_cv, label=y_cv,cat_features=imp_cats),type="ShapValues")
shap_df = pd.DataFrame(np.c_[X_cv.values,y_cv],columns=[list(X_cv)+[each_target]])
if modeltype == 'Multi_Classification':
for each_i in range(len(classes)):
### This is needed for Catboost models but it is very cumbersome!
### You need to cycle through multiple values of classes from 0 to n_classes-1.
### There is no way to force it in an Ax => so you are stuck printing multiple charts
shap.summary_plot(shap_values[:,each_i,:], shap_df, plot_type="violin")
else:
shap.summary_plot(shap_values, shap_df, plot_type="violin")
else:
import shap
shap.initjs()
#### This works well for RFC and XGBoost for multiclass problems #####
#### This plots a violin plot that is different from the bar chart above!
#### This does not work for CatBoost so try something else!
if model_name.lower() == 'linear':
explainer = shap.LinearExplainer(plot_model, X_test, feature_dependence="independent")
shap_values = explainer.shap_values(X_test)
plt.figure()
shap.summary_plot(shap_values, X_test, plot_type="bar")
if modeltype != 'Regression':
plt.figure()
shap.summary_plot(shap_values, X_test)
elif model_name.lower() == 'forests':
#### This works well for RFC and XGBoost for multiclass problems #####
### It works for both binary and multi-class problems ########
### However, it does NOT work for CatBoost models!
explainer = shap.TreeExplainer(plot_model)
shap_values = explainer.shap_values(X_test)
plt.figure()
shap.summary_plot(shap_values, X_test, plot_type="bar")
### There is no single violin plot for Random Forests in SHAP
#### It actually has multiple outputs so you can loop through it for each class
if modeltype != 'Regression':
for each_i in range(len(classes)):
plt.figure()
shap.summary_plot(shap_values[each_i], X_test)
elif model_name.lower() == 'xgboost':
#### This works well for RFC and XGBoost for multiclass problems #####
### It works for both binary and multi-class problems ########
### However, it does NOT work for CatBoost models!
explainer = shap.TreeExplainer(plot_model)
shap_values = explainer.shap_values(X_test)
plt.figure()
shap.summary_plot(shap_values, X_test, plot_type="bar")
if modeltype != 'Regression':
plt.figure()
shap.summary_plot(shap_values, X_test)
except:
print('Could not plot SHAP values since SHAP is not installed or could not import SHAP in this machine')
print('############### P R E D I C T I O N O N T E S T C O M P L E T E D #################')
print(' Time taken for this Target (in seconds) = %0.0f' %(time.time()-start_time))
## Write the test and submission files to disk ###
print('Writing Output files to disk...')
#############################################################################################
if not isinstance(testm, str):
try:
write_file_to_folder(testm, each_target, each_target+'_'+modeltype+'_'+'test_modified.csv')
##### D R A W K D E P L O T S FOR PROBABILITY OF PREDICTIONS - very useful! #########
if modeltype != 'Regression':
if verbose >= 2:
testm[proba_cols].plot(kind='kde',figsize=(10,6),
title='Predictive Probability Density Chart with suggested threshold in red')
plt.axvline(x=m_thresh, color='r', linestyle='--');
except:
print(' Error: Not able to save test modified file. Skipping...')
#############################################################################################
if isinstance(sample_submission, str):
sample_submission = testm[id_cols+[each_target+'_predictions']]
try:
write_file_to_folder(sample_submission, each_target, each_target+'_'+modeltype+'_'+'submission.csv')
except:
print(' Error: Not able to save submission file. Skipping...')
#############################################################################################
try:
#### Bring trainm back to its original index ###################
orig_index = orig_train.index
trainm = train.reindex(index = orig_index)
write_file_to_folder(trainm, each_target, each_target+'_'+modeltype+'_'+'train_modified.csv')
except:
print(' Error: Not able to save train modified file. Skipping...')
### In case of multi-label models, we will reset the start train and test dataframes to contain new features created
start_train = start_train[target].join(start_train[orig_red_preds])
if not isinstance(orig_test, str):
start_test = start_test[orig_red_preds]
#### Once each target cycle is over, reset the red_preds to the orig_red_preds so we can start over
red_preds = copy.deepcopy(orig_red_preds)
#### Perform Final Multi-Label Operations here since all Labels are finished by now ###
#### Don't change the target here to each_target since this is for multi-label situations only ###
if (scoring_parameter == 'basket_recall' or scoring_parameter == 'jaccard') and modeltype != 'Regression':
y_preds = np.array(list(zipped))
_,_,_,y_actuals = train_test_split(train[red_preds], train[target].values,
test_size=test_size, random_state=seed)
print('Shape of Actuals: %s and Preds: %s' %(y_actuals.shape[0], y_preds.shape[0]))
if y_actuals.shape[0] == y_preds.shape[0]:
if scoring_parameter == 'basket_recall' and len(target) > 1:
accu_all = basket_recall(y_actuals, y_preds).mean()
print(' Mean Basket Recall = {:,.1f}%'.format(
accu_all*100))
elif scoring_parameter == 'jaccard' and len(target) > 1:
## This shows similarity in multi-label situations ####
accu_all = jaccard_multilabel(y_actuals, y_preds)
print(' Mean Jaccard Similarity = %s' %(
accu_all))
## END OF ONE LABEL IN A MULTI LABEL DATA SET ! WHEW ! ###################
print('############### C O M P L E T E D ################')
print('Time Taken in mins = %0.1f for the Entire Process' %((time.time()-start_time)/60))
#return model, imp_features_df.index.tolist(), trainm, testm
return model, important_features, trainm, testm
###############################################################################
def plot_SHAP_values(m,X,modeltype,Boosting_Flag=False,matplotlib_flag=False,verbose=0):
import shap
# load JS visualization code to notebook
if not matplotlib_flag:
shap.initjs();
# explain the model's predictions using SHAP values
explainer = shap.TreeExplainer(m)
shap_values = explainer.shap_values(X)
if not Boosting_Flag is None:
if Boosting_Flag:
# visualize the first prediction's explanation (use matplotlib=True to avoid Javascript)
if verbose > 0 and modeltype != 'Multi_Classification':
shap.summary_plot(shap_values, X, plot_type="violin");
if verbose >= 1:
shap.summary_plot(shap_values, X, plot_type="bar");
else:
shap.summary_plot(shap_values, X, plot_type="bar");
################################################################################
################ Find top features using XGB ###################
################################################################################
from xgboost.sklearn import XGBClassifier
from xgboost.sklearn import XGBRegressor
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, mutual_info_regression, mutual_info_classif
def find_top_features_xgb(train,preds,numvars,target,modeltype,corr_limit,verbose=0):
"""
This is a fast utility that uses XGB to find top features. You
It returns a list of important features.
Since it is XGB, you dont have to restrict the input to just numeric vars.
You can send in all kinds of vars and it will take care of transforming it. Sweet!
"""
import xgboost as xgb
###################### I M P O R T A N T ##############################################
###### This top_num decides how many top_n features XGB selects in each iteration.
#### There a total of 5 iterations. Hence 5x10 means maximum 50 featues will be selected.
##### If there are more than 50 variables, then maximum 5*25 = 125 variables will be selected
if len(preds) <= 50:
top_num = 10
else:
top_num = 25
###################### I M P O R T A N T ##############################################
#### If there are more than 30 categorical variables in a data set, it is worth reducing features.
#### Otherwise. XGBoost is pretty good at finding the best features whether cat or numeric !
n_splits = 5
max_depth = 8
max_cats = 5
###################### I M P O R T A N T ##############################################
train = copy.deepcopy(train)
preds = copy.deepcopy(preds)
numvars = copy.deepcopy(numvars)
subsample = 0.7
col_sub_sample = 0.7
train = copy.deepcopy(train)
start_time = time.time()
test_size = 0.2
seed = 1
early_stopping = 5
####### All the default parameters are set up now #########
kf = KFold(n_splits=n_splits, random_state=33)
rem_vars = left_subtract(preds,numvars)
catvars = copy.deepcopy(rem_vars)
############ I M P O R T A N T ! I M P O R T A N T ! ######################
##### Removing the Cat Vars selection using Linear Methods since they fail so often.
##### Linear methods such as Chi2 or Mutual Information Score are not great
#### for feature selection since they can't handle large data and provide
#### misleading results for large data sets. Hence I am using XGBoost alone.
#### Also, another method of using Spearman Correlation for CatVars with 100's
#### of variables is very slow. Also, is not very clear is effective: only 3-4 vars
#### are removed. Hence for now, I am not going to use Spearman method. Perhaps later.
##############################################################################
#if len(catvars) > max_cats:
# start_time = time.time()
# important_cats = remove_variables_using_fast_correlation(train,catvars,'spearman',
# corr_limit,verbose)
# if verbose >= 1:
# print('Time taken for reducing highly correlated Categorical vars was %0.0f seconds' %(time.time()-start_time))
#else:
important_cats = copy.deepcopy(catvars)
print('No categorical feature reduction done. All %d Categorical vars selected ' %(len(catvars)))
if len(numvars) > 1:
final_list = remove_variables_using_fast_correlation(train,numvars,'pearson',
corr_limit,verbose)
else:
final_list = copy.deepcopy(numvars)
print(' Adding %s categorical variables to reduced numeric variables of %d' %(
len(important_cats),len(final_list)))
if isinstance(final_list,np.ndarray):
final_list = final_list.tolist()
preds = final_list+important_cats
#######You must convert category variables into integers ###############
for important_cat in important_cats:
if str(train[important_cat].dtype) == 'category':
train[important_cat] = train[important_cat].astype(int)
######## Drop Missing value rows since XGB for some reason #########
######## can't handle missing values in early stopping rounds #######
train.dropna(axis=0,subset=preds+[target],inplace=True)
######## Dont move this train and y definition anywhere else ########
y = train[target]
print('############## F E A T U R E S E L E C T I O N ####################')
important_features = []
if modeltype == 'Regression':
objective = 'reg:squarederror'
model_xgb = XGBRegressor( n_estimators=100,subsample=subsample,objective=objective,
colsample_bytree=col_sub_sample,reg_alpha=0.5, reg_lambda=0.5,
seed=1,n_jobs=-1,random_state=1)
eval_metric = 'rmse'
else:
#### This is for Classifiers only
classes = np.unique(train[target].values)
if len(classes) == 2:
model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample,gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=100,
n_jobs=-1, nthread=None, objective='binary:logistic',
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
seed=1)
eval_metric = 'logloss'
else:
model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample, gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=100,
n_jobs=-1, nthread=None, objective='multi:softmax',
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
seed=1)
eval_metric = 'mlogloss'
#### This is where you start to Iterate on Finding Important Features ################
save_xgb = copy.deepcopy(model_xgb)
train_p = train[preds]
if train_p.shape[1] < 10:
iter_limit = 2
else:
iter_limit = int(train_p.shape[1]/5+0.5)
print('Current number of predictors = %d ' %(train_p.shape[1],))
print(' Finding Important Features using Boosted Trees algorithm...')
try:
for i in range(0,train_p.shape[1],iter_limit):
new_xgb = copy.deepcopy(save_xgb)
print(' using %d variables...' %(train_p.shape[1]-i))
if train_p.shape[1]-i < iter_limit:
X = train_p.iloc[:,i:]
if modeltype == 'Regression':
train_part = int((1-test_size)*X.shape[0])
X_train, X_cv, y_train, y_cv = X[:train_part],X[train_part:],y[:train_part],y[train_part:]
else:
X_train, X_cv, y_train, y_cv = train_test_split(X, y,
test_size=test_size, random_state=seed)
try:
eval_set = [(X_train,y_train),(X_cv,y_cv)]
model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,eval_set=eval_set,
eval_metric=eval_metric,verbose=False)
important_features += pd.Series(model_xgb.get_booster().get_score(
importance_type='gain')).sort_values(ascending=False)[:top_num].index.tolist()
except:
new_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,eval_set=eval_set,
eval_metric=eval_metric,verbose=False)
print('XGB has a bug in version xgboost 1.02 for feature importances. Try to install version 0.90 or 1.10 - continuing...')
important_features += pd.Series(new_xgb.get_booster().get_score(
importance_type='gain')).sort_values(ascending=False)[:top_num].index.tolist()
important_features = list(OrderedDict.fromkeys(important_features))
else:
X = train_p[list(train_p.columns.values)[i:train_p.shape[1]]]
#### Split here into train and test #####
if modeltype == 'Regression':
train_part = int((1-test_size)*X.shape[0])
X_train, X_cv, y_train, y_cv = X[:train_part],X[train_part:],y[:train_part],y[train_part:]
else:
X_train, X_cv, y_train, y_cv = train_test_split(X, y,
test_size=test_size, random_state=seed)
eval_set = [(X_train,y_train),(X_cv,y_cv)]
try:
model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,
eval_set=eval_set,eval_metric=eval_metric,verbose=False)
important_features += pd.Series(model_xgb.get_booster().get_score(
importance_type='gain')).sort_values(ascending=False)[:top_num].index.tolist()
except:
new_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,
eval_set=eval_set,eval_metric=eval_metric,verbose=False)
important_features += pd.Series(model_xgb.get_booster().get_score(
importance_type='gain')).sort_values(ascending=False)[:top_num].index.tolist()
important_features = list(OrderedDict.fromkeys(important_features))
except:
print('Finding top features using XGB is crashing. Continuing with all predictors...')
important_features = copy.deepcopy(preds)
return important_features, [], []
important_features = list(OrderedDict.fromkeys(important_features))
print('Found %d important features' %len(important_features))
#print(' Time taken (in seconds) = %0.0f' %(time.time()-start_time))
numvars = [x for x in numvars if x in important_features]
important_cats = [x for x in important_cats if x in important_features]
return important_features, numvars, important_cats
################################################################################
def basket_recall(label, pred):
"""
This tests the recall of a given basket of items in a label by the second basket, pred.
It compares the 2 baskets (arrays or lists) named as label and pred, and finds common items
between the two. Then it divides that length by the total number of items in the label basket
to come up with a basket recall score. This score may be useful in recommendation problems
where you are interested in finding how many items in a basket (labels) that your
predictions (pred) basket got correct. The order of the items in the baskets does not matter.
"""
if isinstance(label, list):
label = np.array(label)
if isinstance(pred, list):
pred = np.array(pred)
if len(label) > 1:
jacc_arr = []
for row1,row2,count in zip(label,pred, range(len(label))):
intersection = len(np.intersect1d(row1,row2))
union = len(row1)
jacc = float(intersection / union)
if count == 0:
jacc_arr = copy.deepcopy(jacc)
else:
jacc_arr = np.r_[jacc_arr,jacc]
return jacc_arr
else:
intersection = len(list(set(list1).intersection(set(list2))))
union = (len(list1) + len(list2)) - intersection
jacc_arr = float(intersection / union)
return jacc_arr
################################################################################
def jaccard_singlelabel(label, pred):
"""
This compares 2 baskets (could be lists or arrays): label and pred, and finds common items
between the two. Then it divides that number by either rows or columns to return %.
### Jaccard_Columnwise = this means you have multi-labels and you want it summed columnwise
### This tells you what is the average accuracy for each column in multi-label target
### It will return as many totals as the number of columns in your multi-label target.
### To get a percentage, you will have to divide it by the number of rows in the data set.
### This percentage gives you the % of rows in each label you got correctly=%Each_Label Accuracy
### This will give you as many percentages as there are labels in your multi-label target.
### Jaccard_Row-wise = this means you have combos but where order matters and you want it compared row-wise
### This tells you how many labels in each row did you get right. THat's accuracy by row.
### It will return as many totals as the number of rows in your data set.
### To get a percentage, you will have to divide it by the number of labels in the data set.
### This percentage gives you the % of labels in each row you got correctly=%Combined_Label_Accuracy
### This will give you a single percentage number for the whole data set
"""
if isinstance(label, list):
label = np.array(label)
if isinstance(pred, list):
pred = np.array(pred)
try:
### This is for Multi-Label Problems ##### Returns 2 results: one number and
### the second is an array with as many items as number of labels in target
jacc_each_label = np.sum(label==pred,axis=0)/label.shape[0]
return jacc_each_label
except:
return 0
################################################################################
def jaccard_multilabel(label, pred):
"""
This compares 2 baskets (could be lists or arrays): label and pred, and finds common items
between the two. Then it divides that number by either rows or columns to return %.
### Jaccard_Columnwise = this means you have multi-labels and you want it summed columnwise
### This tells you what is the average accuracy for each column in multi-label target
### It will return as many totals as the number of columns in your multi-label target.
### To get a percentage, you will have to divide it by the number of rows in the data set.
### This percentage gives you the % of rows in each label you got correctly=%Each_Label Accuracy
### This will give you as many percentages as there are labels in your multi-label target.
### Jaccard_Row-wise = this means you have combos but where order matters and you want it compared row-wise
### This tells you how many labels in each row did you get right. THat's accuracy by row.
### It will return as many totals as the number of rows in your data set.
### To get a percentage, you will have to divide it by the number of labels in the data set.
### This percentage gives you the % of labels in each row you got correctly=%Combined_Label_Accuracy
### This will give you a single percentage number for the whole data set
"""
if isinstance(label, list):
label = np.array(label)
if isinstance(pred, list):
pred = np.array(pred)
### This is for Multi-Label Problems ##### Returns 2 results: one number and
### the second is an array with as many items as number of labels in target
try:
jacc_data_set = np.sum(label==pred,axis=1).sum()/label.shape[1]
return jacc_data_set
except:
return 0
################################################################################
def plot_RS_params(cv_results, score, mname):
"""
####### This plots the GridSearchCV Results sent in ############
"""
df = pd.DataFrame(cv_results)
params = [x for x in list(df) if x.startswith('param_')]
traincols = ['mean_train_score' ]
testcols = ['mean_test_score' ]
cols = traincols+testcols
ncols = 2
noplots = len(params)
if noplots%ncols == 0:
rows = noplots/ncols
else:
rows = (noplots/ncols)+1
height_size = 5
width_size = 15
fig = plt.figure(figsize=(width_size,rows*height_size))
fig.suptitle('Training and Validation: Hyper Parameter Tuning for target=%s' %mname, fontsize=20,y=1.01)
#### If the values are negative, convert them to positive ############
if len(df.loc[df[cols[0]]<0]) > 0:
df[cols] = df[cols]*-1
for each_param, count in zip(params, range(noplots)):
plt.subplot(rows,ncols,count+1)
ax1 = plt.gca()
if df[each_param].dtype != object:
df[[each_param]+cols].groupby(each_param).mean().plot(kind='line',
title='%s for %s' %(each_param,mname),ax=ax1)
else:
try:
df[each_param] = pd.to_numeric(df[each_param])
df[[each_param]+cols].groupby(each_param).mean().plot(kind='line',
title='%s for %s' %(each_param,mname), ax=ax1)
except:
df[[each_param]+cols].groupby(each_param).mean().plot(kind='bar',stacked=False,
title='%s for %s' %(each_param,mname), ax=ax1)
#### This is to plot the test_mean_score against params to see how it increases
for each_param in params:
#### This is to find which parameters are non string and convert them to strings
if df[each_param].dtype!=object:
df[each_param] = df[each_param].astype(str)
try:
df['combined_parameters'] = df[params].apply(lambda x: '__'.join(x), axis=1 )
except:
df['combined_parameters'] = df[params].apply(lambda x: '__'.join(x.map(str)), axis=1 )
if len(params) == 1:
df['combined_parameters'] = copy.deepcopy(df[params])
else:
df[['combined_parameters']+cols].groupby('combined_parameters').mean().sort_values(
cols[1]).plot(figsize=(width_size,height_size),kind='line',subplots=False,
title='Combined Parameters: %s scores for %s' %(score,mname))
plt.xticks(rotation=45)
plt.show();
return df
################################################################################
def plot_xgb_metrics(model,eval_metric,eval_set,modeltype,model_label='',model_name=""):
height_size = 5
width_size = 10
if model_name.lower() == 'catboost':
results = model.get_evals_result()
else:
results = model.evals_result()
res_keys = list(results.keys())
eval_metric = list(results[res_keys[0]].keys())
if isinstance(eval_metric, list):
# plot log loss
eval_metric = eval_metric[0]
# plot metrics now
fig, ax = plt.subplots(figsize=(width_size, height_size))
epochs = len(results[res_keys[0]][eval_metric])
x_axis = range(0, epochs)
if model_name.lower() == 'catboost':
ax.plot(x_axis, results[res_keys[0]][eval_metric], label='%s' %res_keys[0])
else:
ax.plot(x_axis, results[res_keys[0]][eval_metric], label='%s' %res_keys[0])
epochs = len(results[res_keys[-1]][eval_metric])
x_axis = range(0, epochs)
ax.plot(x_axis, results[res_keys[-1]][eval_metric], label='%s' %res_keys[-1])
ax.legend()
plt.ylabel(eval_metric)
plt.title('%s Train and Validation Metrics across Epochs (Early Stopping in effect)' %model_label)
plt.show();
################################################################################
######### NEW And FAST WAY to CLASSIFY COLUMNS IN A DATA SET #######
################################################################################
def classify_columns(df_preds, verbose=0):
"""
Takes a dataframe containing only predictors to be classified into various types.
DO NOT SEND IN A TARGET COLUMN since it will try to include that into various columns.
Returns a data frame containing columns and the class it belongs to such as numeric,
categorical, date or id column, boolean, nlp, discrete_string and cols to delete...
####### Returns a dictionary with 10 kinds of vars like the following: # continuous_vars,int_vars
# cat_vars,factor_vars, bool_vars,discrete_string_vars,nlp_vars,date_vars,id_vars,cols_delete
"""
max_cols_to_print = 30
print('############## C L A S S I F Y I N G V A R I A B L E S ####################')
print('Classifying variables in data set...')
#### Cat_Limit defines the max number of categories a column can have to be called a categorical colum
cat_limit = 15
def add(a,b):
return a+b
train = df_preds[:]
sum_all_cols = dict()
orig_cols_total = train.shape[1]
#Types of columns
cols_delete = [col for col in list(train) if (len(train[col].value_counts()) == 1
) | (train[col].isnull().sum()/len(train) >= 0.90)]
train = train[left_subtract(list(train),cols_delete)]
var_df = pd.Series(dict(train.dtypes)).reset_index(drop=False).rename(
columns={0:'type_of_column'})
sum_all_cols['cols_delete'] = cols_delete
var_df['bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['bool','object']
and len(train[x['index']].value_counts()) == 2 else 0, axis=1)
string_bool_vars = list(var_df[(var_df['bool'] ==1)]['index'])
sum_all_cols['string_bool_vars'] = string_bool_vars
var_df['num_bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,
np.uint16, np.uint32, np.uint64,
'int8','int16','int32','int64',
'float16','float32','float64'] and len(
train[x['index']].value_counts()) == 2 else 0, axis=1)
num_bool_vars = list(var_df[(var_df['num_bool'] ==1)]['index'])
sum_all_cols['num_bool_vars'] = num_bool_vars
###### This is where we take all Object vars and split them into diff kinds ###
discrete_or_nlp = var_df.apply(lambda x: 1 if x['type_of_column'] in ['object'] and x[
'index'] not in string_bool_vars+cols_delete else 0,axis=1)
######### This is where we figure out whether a string var is nlp or discrete_string var ###
var_df['nlp_strings'] = 0
var_df['discrete_strings'] = 0
var_df['cat'] = 0
var_df['id_col'] = 0
discrete_or_nlp_vars = var_df.loc[discrete_or_nlp==1]['index'].values.tolist()
if len(var_df.loc[discrete_or_nlp==1]) != 0:
for col in discrete_or_nlp_vars:
#### first fill empty or missing vals since it will blowup ###
train[col] = train[col].fillna(' ')
if train[col].map(lambda x: len(x) if type(x)==str else 0).mean(
) >= 50 and len(train[col].value_counts()
) <= len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'nlp_strings'] = 1
elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()
) <= len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'discrete_strings'] = 1
elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()
) == len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
var_df.loc[var_df['index']==col,'cat'] = 1
nlp_vars = list(var_df[(var_df['nlp_strings'] ==1)]['index'])
sum_all_cols['nlp_vars'] = nlp_vars
discrete_string_vars = list(var_df[(var_df['discrete_strings'] ==1) ]['index'])
sum_all_cols['discrete_string_vars'] = discrete_string_vars
###### This happens only if a string column happens to be an ID column #######
#### DO NOT Add this to ID_VARS yet. It will be done later.. Dont change it easily...
#### Category DTYPE vars are very special = they can be left as is and not disturbed in Python. ###
var_df['dcat'] = var_df.apply(lambda x: 1 if str(x['type_of_column'])=='category' else 0,
axis=1)
factor_vars = list(var_df[(var_df['dcat'] ==1)]['index'])
sum_all_cols['factor_vars'] = factor_vars
########################################################################
date_or_id = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,
np.uint16, np.uint32, np.uint64,
'int8','int16',
'int32','int64'] and x[
'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,
axis=1)
######### This is where we figure out whether a numeric col is date or id variable ###
var_df['int'] = 0
var_df['date_time'] = 0
### if a particular column is date-time type, now set it as a date time variable ##
var_df['date_time'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['<M8[ns]','datetime64[ns]'] and x[
'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,
axis=1)
### this is where we save them as date time variables ###
if len(var_df.loc[date_or_id==1]) != 0:
for col in var_df.loc[date_or_id==1]['index'].values.tolist():
if len(train[col].value_counts()) == len(train):
if train[col].min() < 1900 or train[col].max() > 2050:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
try:
pd.to_datetime(train[col],infer_datetime_format=True)
var_df.loc[var_df['index']==col,'date_time'] = 1
except:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
if train[col].min() < 1900 or train[col].max() > 2050:
if col not in num_bool_vars:
var_df.loc[var_df['index']==col,'int'] = 1
else:
try:
pd.to_datetime(train[col],infer_datetime_format=True)
var_df.loc[var_df['index']==col,'date_time'] = 1
except:
if col not in num_bool_vars:
var_df.loc[var_df['index']==col,'int'] = 1
else:
pass
int_vars = list(var_df[(var_df['int'] ==1)]['index'])
date_vars = list(var_df[(var_df['date_time'] == 1)]['index'])
id_vars = list(var_df[(var_df['id_col'] == 1)]['index'])
sum_all_cols['int_vars'] = int_vars
copy_date_vars = copy.deepcopy(date_vars)
for date_var in copy_date_vars:
#### This test is to make sure sure date vars are actually date vars
try:
| pd.to_datetime(train[date_var],infer_datetime_format=True) | pandas.to_datetime |
import sys,os.path, copy,time,math
import pandas as pd
from os import path
import numpy as np
def check_source(source):
if not (path.exists(source)): #check file exists or not
print("No such file exists")
exit(0)
if not source.endswith('.csv'): #check format of input file
print("Format is not supported")
exit(0)
file1 = pd.read_csv(source)
col = file1.shape
if not col[1]>=3: #check no. of columns
print("Input file must contain three or more columns")
exit(0)
k = 0
for i in file1.columns:
k = k+1
for j in file1.index:
if k!=1:
val = isinstance(file1[i][j],int)
val1 = isinstance(file1[i][j],float)
if not val and not val1:
print(f'Value is not numeric in {k} column')
exit(0)
def check_weight(source, w):
file1 = pd.read_csv(source)
col = file1.shape
weight = []
l = w.split(',')
for i in l:
k = 0
for j in i:
if not j.isnumeric():
if k>=1 or j!='.':
print("Format of weight is not correct")
exit(0)
else:
k = k+1
weight.append(float(i))
if len(weight) != (col[1]-1):
print("No. of weights and no. of columns must be same")
exit(0)
return weight
def check_impact(source,im):
file1 = pd.read_csv(source)
col = file1.shape
impact = im.split(',')
for i in impact:
if i not in {'+','-'} :
print("Format of impact is not correct")
exit(0)
if len(impact) != (col[1]-1) :
print("No. of impacts and no. of columns must be same")
exit(0)
return impact
def normalized_matrix(source):
check_source(source)
df = pd.read_csv(source)
col = list(df.columns)
col.remove(col[0])
for i in col:
sum = 0
for j in df.index:
sum = sum+(df[i][j])*(df[i][j])
sum = math.sqrt(sum)
for j in df.index:
df.at[j,i] = (df[i][j])/sum
return df
def weight_normalized(source,we):
df = normalized_matrix(source)
w = check_weight(source,we)
col = list(df.columns)
col.remove(col[0])
k = 0
for i in col:
for j in df.index:
df.at[j,i] = w[k]*df[i][j]
k = k+1
return df
def ideal_best_worst(source,we,imp):
df = weight_normalized(source,we)
im = check_impact(source,imp)
col = list(df.columns)
col.remove(col[0])
best = []
worst = []
k = 0
for i in col:
if im[k] == '+':
best.append(max(df[i]))
worst.append(min(df[i]))
else:
best.append(min(df[i]))
worst.append(max(df[i]))
k = k+1
return (best,worst)
def euclidean_distance(source,we,imp):
df = weight_normalized(source,we)
col = list(df.columns)
col.remove(col[0])
best,worst = ideal_best_worst(source,we,imp)
p1 = []
p2 = []
for i in df.index:
sum1 = 0
sum2 = 0
k = 0
for j in col:
a = best[k]- df[j][i]
b = worst[k] - df[j][i]
sum1 = sum1 + a*a
sum2 = sum2 + b*b
k = k+1
sum1 = math.sqrt(sum1)
sum2 = math.sqrt(sum2)
p1.append(sum1)
p2.append(sum2)
return (p1,p2)
def topsis_score(source,we,imp):
p1,p2 = euclidean_distance(source,we,imp)
d = pd.read_csv(source)
n = len(p1)
p = []
for i in range(n):
sum = p1[i]+p2[i]
sum = p2[i]/sum
p.append(sum)
d['Topsis Score'] = p
p = | pd.Series(p) | pandas.Series |
import os, json, requests, sys
from pandas import read_excel, isnull, ExcelWriter, Series
from mpcontribs.io.core.recdict import RecursiveDict
from mpcontribs.io.core.utils import clean_value, nest_dict
from mpcontribs.io.archieml.mpfile import MPFile
from pymatgen.ext.matproj import MPRester
project = "dilute_solute_diffusion"
from pymongo import MongoClient
client = MongoClient("mongodb+srv://" + os.environ["MPCONTRIBS_MONGO_HOST"])
db = client["mpcontribs"]
print(db.contributions.count_documents({"project": project}))
z = json.load(open("z.json", "r"))
def run(mpfile, hosts=None, download=False):
mpr = MPRester()
fpath = f"{project}.xlsx"
if download or not os.path.exists(fpath):
figshare_id = 1546772
url = "https://api.figshare.com/v2/articles/{}".format(figshare_id)
print("get figshare article {}".format(figshare_id))
r = requests.get(url)
figshare = json.loads(r.content)
print("version =", figshare["version"]) # TODO set manually in "other"?
print("read excel from figshare into DataFrame")
df_dct = None
for d in figshare["files"]:
if "xlsx" in d["name"]:
# Dict of DataFrames is returned, with keys representing sheets
df_dct = read_excel(d["download_url"], sheet_name=None)
break
if df_dct is None:
print("no excel sheet found on figshare")
return
print("save excel to disk")
writer = ExcelWriter(fpath)
for sheet, df in df_dct.items():
df.to_excel(writer, sheet)
writer.save()
else:
df_dct = read_excel(fpath, sheet_name=None)
print(len(df_dct), "sheets loaded.")
print("looping hosts ...")
host_info = df_dct["Host Information"]
host_info.set_index(host_info.columns[0], inplace=True)
host_info.dropna(inplace=True)
for idx, host in enumerate(host_info):
if hosts is not None:
if isinstance(hosts, int) and idx + 1 > hosts:
break
elif isinstance(hosts, list) and not host in hosts:
continue
print("get mp-id for {}".format(host))
mpid = None
for doc in mpr.query(
criteria={"pretty_formula": host}, properties={"task_id": 1}
):
if "decomposes_to" not in doc["sbxd"][0]:
mpid = doc["task_id"]
break
if mpid is None:
print("mp-id for {} not found".format(host))
continue
print("add host info for {}".format(mpid))
hdata = host_info[host].to_dict(into=RecursiveDict)
for k in list(hdata.keys()):
v = hdata.pop(k)
ks = k.split()
if ks[0] not in hdata:
hdata[ks[0]] = RecursiveDict()
unit = ks[-1][1:-1] if ks[-1].startswith("[") else ""
subkey = "_".join(ks[1:-1] if unit else ks[1:]).split(",")[0]
if subkey == "lattice_constant":
unit = "Å"
try:
hdata[ks[0]][subkey] = clean_value(v, unit.replace("angstrom", "Å"))
except ValueError:
hdata[ks[0]][subkey] = v
hdata["formula"] = host
df = df_dct["{}-X".format(host)]
rows = list( | isnull(df) | pandas.isnull |
#!/usr/bin/env python
"""Dynamic Calculation, yikes"""
import cgi
import re
import os
import datetime
import pandas as pd
from pandas.io.sql import read_sql
from pyiem.util import get_dbconn, ssw
VARRE = re.compile(r"(AGR[0-9]{1,2})")
def get_df(equation):
"""Attempt to compute what was asked for"""
pgconn = get_dbconn("sustainablecorn")
varnames = VARRE.findall(equation)
df = read_sql(
"""
SELECT * from agronomic_data WHERE varname in %s
""",
pgconn,
params=(tuple(varnames),),
index_col=None,
)
df["value"] = pd.to_numeric(df["value"], errors="coerce")
df = pd.pivot_table(
df,
index=("uniqueid", "plotid", "year"),
values="value",
columns=("varname",),
aggfunc=lambda x: " ".join(str(v) for v in x),
)
df.eval("calc = %s" % (equation,), inplace=True)
df.sort_values(by="calc", inplace=True)
df.reset_index(inplace=True)
df = df[pd.notnull(df["calc"])]
return df
def main():
"""Go Main"""
form = cgi.FieldStorage()
equation = form.getfirst("equation", "AGR33 / AGR4").upper()
fmt = form.getfirst("fmt", "html")
df = get_df(equation)
if fmt == "excel":
ssw("Content-type: application/octet-stream\n")
ssw(
("Content-Disposition: attachment; filename=cscap_%s.xlsx\n\n")
% (datetime.datetime.now().strftime("%Y%m%d%H%M"),)
)
writer = | pd.ExcelWriter("/tmp/ss.xlsx") | pandas.ExcelWriter |
import pandas as pd
import time
def patient(rdb):
""" Returns list of patients """
patients = """SELECT "Name" FROM patient ORDER BY index"""
try:
patients = pd.read_sql(patients, rdb)
patients = patients["Name"].values.tolist()
except:
patients = ['Patient']
return patients
def label(rdb):
""" Returns list of parameter for linear and bar drop down """
sql = """SELECT type FROM name WHERE type IN ('Heart Rate','Heart Rate Variability SDNN', 'Resting Heart Rate',
'VO2 Max','Walking Heart Rate Average')"""
sql2 = """SELECT type FROM name WHERE type NOT IN ('Heart Rate','Heart Rate Variability SDNN',
'Resting Heart Rate','VO2 Max','Walking Heart Rate Average')"""
try:
df, df2 = pd.read_sql(sql, rdb), pd.read_sql(sql2, rdb)
label_linear, label_bar = df["type"].values.tolist(), df2["type"].values.tolist()
except:
label_linear, label_bar = [], []
return label_linear, label_bar
def month(rdb, patient):
""" Returns list of months in database for selected patient """
sql = """SELECT DISTINCT TO_CHAR("Date",'YYYY-MM') AS month
FROM applewatch_numeric
WHERE "Name"='{}'
AND type ='Resting Heart Rate'
ORDER BY month""".format(patient)
try:
df = pd.read_sql(sql, rdb)
months = df['month'].to_list()
except:
months = []
return months
def week(rdb, patient):
""" Returns list of weeks in database for selected patient """
sql = """SELECT DISTINCT TO_CHAR("Date", 'IYYY/IW') AS week
FROM applewatch_numeric
WHERE "Name"='{}'
AND type ='Resting Heart Rate'
ORDER BY week """.format(patient)
try:
df = pd.read_sql(sql, rdb)
weeks = df['week'].to_list()
except:
weeks = []
return weeks
def min_max_date(rdb, patient):
""" Returns min and max date for selected patient """
sql = """SELECT min_date,max_date FROM patient WHERE "Name"='{}'""".format(patient)
try:
df = pd.read_sql(sql, rdb)
min_date, max_date = df['min_date'].iloc[0].date(), df['max_date'].iloc[0].date()
except:
min_date, max_date = '', ''
return min_date, max_date
def age_sex(rdb, patient):
""" Returns age and gender for selected patient"""
sql = """SELECT "Age","Sex" from patient where "Name"='{}' """.format(patient)
try:
df = pd.read_sql(sql, rdb)
age, sex = df['Age'][0], df['Sex'][0]
except:
age, sex = '', ''
return age, sex
def classification_ecg(rdb, patient):
""" Returns ecg classification for patient information card """
sql = """SELECT "Classification",count(*) FROM ecg WHERE "Patient"='{}' GROUP BY "Classification" """.format(patient)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def number_of_days_more_6(rdb, patient):
""" Returns number of days the patient had the Apple Watch on their hand for more than 6 hours"""
sql = """SELECT count (*)
FROM (SELECT "Date"::date
FROM applewatch_categorical
WHERE "Name" = '{}'
AND "type" = 'Apple Stand Hour'
GROUP BY "Date"::date
HAVING count("Date"::date) > 6) days """.format(patient)
try:
df = pd.read_sql(sql, rdb)
df = df.iloc[0]['count']
except:
df = '0'
return df
def card(rdb, patient, group, date, value):
""" Returns DataFrame with resting, working, mean hear rate, step count, exercise time, activity for the cards """
if group == 'M':
to_char = """ TO_CHAR("Date",'YYYY-MM') """
group_by = "month"
elif group == 'W':
to_char = """ TO_CHAR("Date", 'IYYY/IW') """
group_by = "week"
elif group == 'DOW':
to_char = """ TRIM(TO_CHAR("Date", 'Day')) """
group_by = "DOW"
else:
to_char = """ "Date"::date """
group_by = "date"
value = date
sql = """SELECT {0} AS {3},type,
CASE
WHEN type in ('Active Energy Burned','Step Count','Apple Exercise Time') THEN SUM("Value")
WHEN type in ('Heart Rate','Walking Heart Rate Average','Resting Heart Rate') THEN AVG("Value")
END AS "Value"
FROM applewatch_numeric
WHERE "Name" = '{1}'
AND type in ('Active Energy Burned','Step Count','Apple Exercise Time','Heart Rate',
'Walking Heart Rate Average','Resting Heart Rate')
AND {0}='{2}'
GROUP BY {3},type""".format(to_char, patient, value, group_by)
try:
df = pd.read_sql(sql, rdb)
df["Value"] = df["Value"].round(2)
except:
df = pd.DataFrame()
return df
def table(rdb, patient, group, linear, bar):
""" Returns a table with the patient and parameters that were selected from drop downs """
if isinstance(linear, list):
linear = "'" + "','".join(linear) + "'"
else:
linear = "'" + linear + "'"
if group == 'M':
to_char = """ TO_CHAR("Date",'YYYY-MM')"""
group_by = "month"
elif group == 'W':
to_char = """ TO_CHAR("Date", 'IYYY/IW') """
group_by = "week"
elif group == 'DOW':
to_char = """ TRIM(TO_CHAR("Date",'Day')) """
group_by = ' "DOW" '
else:
to_char = """ "Date"::date """
group_by = "date"
sql = """SELECT {0} as {4},"type",
CASE WHEN type IN ('Heart Rate','Heart Rate Variability SDNN','Resting Heart Rate','VO2 Max',
'Walking Heart Rate Average') THEN AVG("Value") ELSE SUM("Value")
END AS "Value"
FROM applewatch_numeric
WHERE "Name" = '{1}'
AND "type" in ({2},'{3}')
GROUP BY {0},type
ORDER BY "type",{4} """.format(to_char, patient, linear, bar, group_by)
try:
df = pd.read_sql(sql, rdb)
if group == 'DOW':
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
df['DOW'] = pd.Categorical(df['DOW'], categories=cats, ordered=True)
df = df.sort_values('DOW')
group_by = "DOW"
df = df.pivot(index=group_by, columns='type', values='Value').reset_index()
except:
df = pd.DataFrame()
return df, group_by
def day_figure(rdb, patient, bar, date):
""" Returns DataFrame for day figure with heart rate and selected parameter and patient """
sql = """ SELECT "Date","type","Value"
FROM applewatch_numeric
WHERE "Name" = '{}'
AND "Date"::date='{}'
AND "type" in ('Heart Rate','{}')
ORDER BY "type","Date" """.format(patient, date, bar)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def trend_figure(rdb, patient, group, start_date, end_date):
""" Returns DataFrame for trend figure """
if group == 'M':
to_char = """ TO_CHAR("Date",'YYYY-MM')"""
group_by = "month"
elif group == 'W':
to_char = """ TO_CHAR("Date", 'IYYY/IW') """
group_by = "week"
elif group == 'DOW':
to_char = """TRIM(TO_CHAR("Date", 'Day')) """
group_by = """ "DOW" """
else:
to_char = """ "Date"::date """
group_by = "date"
""" TRIM(TO_CHAR("Date", 'Day')) in ()"""
sql = """SELECT {0} as {1},extract('hour' from "Date") as hour,AVG("Value") AS "Value"
FROM applewatch_numeric
WHERE "Name" = '{2}'
AND type='Heart Rate'
AND "Date" BETWEEN '{3}' AND '{4}'
GROUP BY {0},extract('hour' from "Date")
ORDER BY {1},hour """.format(to_char, group_by, patient, start_date, end_date)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
# Query data for ECG_analyse
def ecgs(rdb, patient):
""" Returns DataFrame for table_ecg"""
sql2 = """SELECT "Day","Date"::time AS Time, "Classification"
FROM ecg
WHERE "Patient"='{}'
ORDER BY "Day" """.format(patient)
try:
df = pd.read_sql(sql2, rdb)
except:
df = pd.DataFrame()
return df
def ecg_data(rdb, day, patient, time):
""" Returns DatFrame to plot ecg signal """
sql = """SELECT * FROM ECG where "Day"='{0}' and "Patient"='{1}' and "Date"::time='{2}' """.format(day, patient, time)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def table_hrv(rdb):
""" Returns DataFrame with all information about ecg ann calculate HRV feature for time and frequency domain """
sql = """ SELECT "Patient","Day","Date"::time as Time, "hrvOwn", "SDNN", "SENN", "SDSD", "pNN20", "pNN50", "lf",
"hf", "lf_hf_ratio","total_power", "vlf", "Classification" FROM ecg ORDER BY "Patient","Day" """
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def scatter_plot_ecg(rdb, x_axis, y_axis):
""" Returns DataFrame for scatter plot with patients ids/numbers and selected features """
sql = """ SELECT "Patient","{0}","{1}" FROM ecg """.format(x_axis, y_axis)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def box_plot_ecg(rdb, x_axis):
""" Returns DataFrame for box plot with patients ids/numbers and selected feature """
sql = """ SELECT "Patient","{}" FROM ecg """.format(x_axis)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
# Patient Workouts
def workout_activity_data(rdb, patient):
""" Returns the DataFrame for table and summary figure on the Workouts Tab. The table is filtered by selected
patient in drop down list """
sql = """SELECT type,duration,distance,"EnergyBurned","Start_Date"::date AS date,"Start_Date"::time AS "Start",
"End_Date"::time AS "End",TO_CHAR("Start_Date",'YYYY-MM') AS month,
TO_CHAR("Start_Date", 'IYYY/IW') as week,TO_CHAR("Start_Date", 'Day') as "DOW"
FROM workout
WHERE "Name"='{}'
ORDER BY type,"Start_Date" """.format(patient)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def workout_activity_pie_chart(rdb, patient, value, group, what):
""" Returns the DataFrame for pie plot on the Workouts Tab. The table is filtered and grouped by selected
patient,day/week/month in drop down list """
if group == 'M':
if value is not None: value = value["points"][0]["x"][:7]
to_char = """ TO_CHAR("Start_Date",'YYYY-MM')"""
group_by = "month"
elif group == 'W':
if value is not None: value = value["points"][0]["x"]
to_char = """ TO_CHAR("Start_Date", 'IYYY/IW') """
group_by = "week"
elif group == 'DOW':
if value is not None: value = value["points"][0]["x"].replace(" ", "")
to_char = """TRIM(TO_CHAR("Start_Date", 'Day')) """
group_by = """ "DOW" """
else:
if value is not None: value = str(value["points"][0]["x"])
to_char = """ "Start_Date"::date """
group_by = "date"
if value is None:
value = """SELECT {} AS {}
FROM workout
WHERE "Name"='{}'
LIMIT 1""".format(to_char, group, patient)
else:
value = "'"+value+"'"
sql = """SELECT type,"{0}",{1} as {2}
FROM workout
WHERE "Name"='{3}'
AND duration between 10 and 300
AND {1} in ({4}) """.format(what, to_char, group_by, patient, value)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def heart_rate(rdb, click, patient):
""" Returns DataFrames to plot workout figure in Workout tab"""
if click is None:
click = """SELECT "Start_Date":: date AS day
FROM workout
WHERE "Name"='{}'
LIMIT 1""".format(patient)
else:
click = "'" + str(click["points"][0]["x"]) + "'"
sql1 = """SELECT type,"Start_Date","End_Date"
FROM workout
WHERE "Name"='{}'
AND duration between 10 and 300
AND "Start_Date":: date in ({}) """.format(patient, click)
sql2 = """SELECT "Name","Date","Value"
FROM applewatch_numeric
WHERE "Name"='{}'
AND "Date":: date in ({})
AND type='Heart Rate'
order by "Date" """.format(patient, click)
try:
df1 = pd.read_sql(sql1, rdb)
df2 = pd.read_sql(sql2, rdb)
except:
df1, df2 = pd.DataFrame(), pd.DataFrame()
return df1, df2
# Comparison Tab
def activity_type(rdb):
""" Select types of workouts for drop down in Comparison tab"""
sql = """SELECT type FROM activity_type"""
try:
df = pd.read_sql(sql, rdb)
df = df['type'].to_list()
except:
df = ['empty']
return df
def plots_comparison(rdb, gr, linear, bar):
""" Returns DataFrame to update box plots, histogram, scatter plot in comparison tab depending on the drop downs """
sql = """SELECT p."{2}",an."Date"::date as date,an."type",
CASE WHEN an.type in ('Heart Rate','Heart Rate Variability SDNN','Resting Heart Rate','VO2 Max',
'Walking Heart Rate Average') THEN AVG("Value") ELSE sum("Value")
END as "Value"
FROM applewatch_numeric as an
LEFT JOIN patient as p
ON p."Name" = an."Name"
WHERE an.type in ('{0}','{1}')
GROUP BY p."{2}",(an."Date"::date),an.type""".format(linear, bar, gr)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def linear_plot(rdb, gr, linear, bar):
""" Returns DataFrame to update linear plot in comparison tab depending on the drop downs """
sql = """ SELECT p."{2}",date_trunc('week', an."Date") AS week,an."type",
CASE WHEN type in ('Heart Rate','Heart Rate Variability SDNN','Resting Heart Rate','VO2 Max',
'Walking Heart Rate Average') THEN AVG("Value") ELSE sum("Value")
END AS "Value"
FROM applewatch_numeric as an
LEFT JOIN patient as p
ON p."Name" = an."Name"
WHERE an.type in ('{0}','{1}')
GROUP BY p."{2}",date_trunc('week', an."Date"),an.type
ORDER BY week """.format(linear, bar, gr)
try:
df = pd.read_sql(sql, rdb)
if gr == 'Age':
df[gr] = df[gr].astype(str)
df_linear = df[df['type'] == linear]
df_bar = df[df['type'] == bar]
except:
df_linear, df_bar = pd.DataFrame(), pd.DataFrame()
return df_linear, df_bar
def workout_hr_comparison(rdb, gr, type):
""" Returns DataFrame to compare heart rate during workouts in comparison tab """
sql = """SELECT p."{0}",w."HR_average"
FROM workout as w
LEFT JOIN patient as p
ON p."Name" = w."Name"
WHERE w."duration" > 10 AND w."duration" < 300
AND "HR_average" !='0'
AND w.type = '{1}'
ORDER BY p."{0}",w."Start_Date" """.format(gr, type)
sql2 = """SELECT p."{0}","Start_Date"::date as date,AVG(w."HR_average") as "HR_average"
FROM workout as w
LEFT JOIN patient as p
ON p."Name" = w."Name"
WHERE w."duration" > 10 and w."duration" < 300
AND "HR_average" !='0'
AND w.type = '{1}'
GROUP BY p."{0}",date
ORDER BY p."{0}",date""".format(gr, type)
try:
df_box = | pd.read_sql(sql, rdb) | pandas.read_sql |
import sys
from typing import List
import pandas as pd
# Note: must mark "common" as "Sources Root" in PyCharm to have visibility
from common_paths import *
from utilities_cbc import read_excel_or_csv_path, debug_write_raw_text, circle_abbrev_from_path
from text_extractor import TextExtractorFactory
from input_files_context import InputFilesContext
from text_transform import pre_process_line, secondary_species_processing
from local_translation_context import LocalTranslationContext
from taxonomy import Taxonomy
# from nlp_context import NLPContext
from parameters import Parameters
from taxonomy_token_identify import TaxonomyTokenIdentify
from spacy.tokens import Span
from spacy.util import filter_spans
from spacy_extra import filter_to_possibles, \
write_visualization
from write_final_checklist import write_local_checklist_with_group, \
write_final_checklist_spreadsheet
from write_categorized_lines import write_categorized_lines_spreadsheet
from write_basic_spreadsheet import write_basic_spreadsheet
# 🧭 U+1F9ED Compass Emoji (use for ranged)
# 🎂 U+1F382 Birthday Cake (use for Adult/Immature)
sys.path.append('common')
sys.path.append('textextractor')
sys.path.append('taxonomy')
emoji_compass = '\U0001f9ed'
emoji_birthday_cake = '\U0001f382'
def load_rarities_text(rarities_path: Path) -> List[str]:
# Check for a rarities list
rare_species = []
if rarities_path.exists():
with open(rarities_path, 'r') as fp:
lines = fp.read()
rare_species = lines.split('\n')
return rare_species
def process_rarities(checklist: pd.DataFrame, rare_species: List[str]) -> pd.DataFrame:
# Mark rarities
if rare_species:
rare_idxs = checklist.index[checklist['CommonName'].isin(rare_species)]
if len(rare_idxs):
checklist.at[rare_idxs, 'Rare'] = 'X'
return checklist
def process_annotations(checklist: pd.DataFrame, annotations_path: Path) -> pd.DataFrame:
# The set operations below are because not all annotations files will have all columns
annotations = load_annotations(annotations_path)
if not annotations.empty:
# rare_mask = [xs == 'X' for xs in annotations['Rare'].values]
# rare_species = list(annotations[rare_mask].CommonName.values)
# if rare_species:
# rare_idxs = local_checklist.index[local_checklist['CommonName'].isin(rare_species)]
# if len(rare_idxs):
# local_checklist.at[rare_idxs, 'Rare'] = 'X'
emd_cols = {'Easy', 'Marginal', 'Difficult'} & set(annotations.columns)
if any([[xs == 'X' for xs in annotations[col].values] for col in
emd_cols]):
checklist['D'] = ''
annotations_effort = {}
for ix, row in annotations.iterrows():
annotations_effort[row['CommonName']] = row['Difficulty']
difficulty = [annotations_effort.get(cn, '') for cn in checklist.CommonName]
checklist['Difficulty'] = difficulty
# Add new annotation columns to local_checklist
adim_cols = {'Adult', 'Immature', 'W-morph',
'B-Morph', 'CountSpecial'} & set(annotations.columns)
for col in adim_cols:
if any([xs == 'X' for xs in annotations[col].values]):
checklist[col] = ''
checklist['Ad'] = ''
checklist['Im'] = ''
checklist['CountSpecial'] = ''
rare_adim_cols = {'Rare', 'Adult', 'Immature',
'W-morph', 'B-Morph', 'CountSpecial'} & set(annotations.columns)
for col in rare_adim_cols:
mask = [xs == 'X' for xs in annotations[col].values]
related_species = list(annotations[mask].CommonName.values)
if related_species:
species_idxs = checklist.index[
checklist['CommonName'].isin(related_species)]
if len(species_idxs):
checklist.at[species_idxs, col] = 'X'
# Overload the Difficulty field with the ranging field
if 'Ranging' in annotations.columns:
mask = [xs == 'X' for xs in annotations['Ranging'].values]
related_species = list(annotations[mask].CommonName.values)
if related_species:
species_idxs = checklist.index[
checklist['CommonName'].isin(related_species)]
if len(species_idxs):
checklist.at[species_idxs, 'D'] = emoji_compass
return checklist
def process_annotations_or_rarities(checklist: pd.DataFrame,
checklist_path: Path,
circle_prefix: str) -> pd.DataFrame:
"""
Look for Annotations or Rarities files and mark the 'Rare' column in checklist
with an 'X'
Annotations.xlsx must have these columns:
Rarities.xlsx (or CSV) requires 'CommonName' and 'Rare' columns
Rarities.txt is just a text list of rare species
:param circle_prefix:
:param checklist:
:param checklist_path: full path for checklist. Used to construct names for inputs
:return: checklist with 'Rare' column set to 'X' if species is rare
"""
# Process annotations. The XXXX-LocalAnnotations.xlsx file will be preferred over
# the rarities list if it exists
annotations_dir = checklist_path.parent
annotations_path = annotations_dir / f'{circle_prefix}Annotations.xlsx'
print(f'Annotations path: {annotations_path}')
if annotations_path.exists():
return process_annotations(checklist, annotations_path)
for ext in ['xlsx', 'csv', 'txt']:
rarities_path = annotations_dir / f'{circle_prefix}Rarities.{ext}'
if not rarities_path.exists():
continue
if ext == 'txt':
rare_species = load_rarities_text(rarities_path)
else:
rarities_df = read_excel_or_csv_path(rarities_path)
rare_species = list(rarities_df[rarities_df.Rare == 'X'].CommonName.values)
checklist = process_rarities(checklist, rare_species)
break
return checklist
def process_exceptions(candidate_names: List[str], checklist_path: Path,
circle_prefix: str) -> List[str]:
# checklist_path = inputs_parse_path / 'CAPA-checklist.xlsx' # only care about path and prefix
exceptions_dir = checklist_path.parent
exceptions_path = exceptions_dir / f'{circle_prefix}Exceptions.xlsx'
print(f'Exceptions path: {exceptions_path}')
if not exceptions_path.exists():
return candidate_names
print(f'Exceptions: {exceptions_path}')
exceptions_df = read_excel_or_csv_path(exceptions_path)
if exceptions_df.empty:
return candidate_names
mask_add = exceptions_df.Add == 'X'
mask_sub = exceptions_df.Subtract == 'X'
additions = set(exceptions_df[mask_add].CommonName.values)
subtractions = set(exceptions_df[mask_sub].CommonName.values)
addstr = ', '.join(additions)
subst = ', '.join(subtractions)
print(f'Additions: {addstr}\nSubtractions: {subst}')
local_names = list((set(candidate_names) | additions) - subtractions)
return local_names
def build_full_tally_sheet(double_translated,
fpath: Path,
taxonomy: Taxonomy,
parameters: Parameters,
circle_prefix: str):
candidate_names = [x for x, y in double_translated]
local_names = process_exceptions(candidate_names, fpath, circle_prefix)
# if issf etc in list, then base species must be also
issfs = taxonomy.filter_issf(local_names)
for cn in issfs:
base_species = taxonomy.report_as(cn)
if base_species:
local_names.append(base_species)
entries = []
for local_name in local_names:
# common_name, taxon_order, species_group, NACC_SORT_ORDER
record = taxonomy.find_local_name_row(local_name)
if record is not None:
# e.g. ('White-throated Sparrow', 31943, 'New World Sparrows', 1848.0)
entry = (record.comName, record.TAXON_ORDER, record.SPECIES_GROUP,
record.NACC_SORT_ORDER, record.ABA_SORT_ORDER, '', 0) # append 'Rare', 'Total'
entries.append(entry)
df = pd.DataFrame(entries, columns=['CommonName', 'TaxonOrder', 'Group',
'NACC_SORT_ORDER', 'ABA_SORT_ORDER', 'Rare', 'Total'])
# Re-order
cols = ['Group', 'CommonName', 'Rare', 'Total', 'TaxonOrder',
'NACC_SORT_ORDER', 'ABA_SORT_ORDER']
local_checklist = df[cols]
local_checklist.sort_values(by='TaxonOrder', inplace=True)
# local_checklist.shape
# double_translated may have duplicates
local_checklist = local_checklist[
~local_checklist.duplicated(subset=['CommonName'], keep='first')]
local_checklist = process_annotations_or_rarities(local_checklist, fpath, circle_prefix)
# Re-order columns
preferred_order = ['Group', 'CommonName', 'Rare', 'D', 'Total', 'Ad', 'Im',
'TaxonOrder', 'NACC_SORT_ORDER', 'ABA_SORT_ORDER', 'Difficulty',
'Adult', 'Immature', 'W-morph', 'B-Morph', 'CountSpecial']
newcols = [col for col in preferred_order if col in local_checklist.columns]
local_checklist = local_checklist[newcols]
# Write out full tally sheet
# circle_code = circle_prefix[0:4]
# double_path = outputs_path / f'{circle_code}-DoubleX.xlsx'
# write_local_checklist_with_group(local_checklist, double_path, parameters.parameters)
return local_checklist
def strip_off_scientific_names(text_list: List[str], taxonomy: Taxonomy) -> List[str]:
# The CAMP-2020 checklist has <Common Name> <Scientific Name>
# Assume all scientific names are two words and drop
stripped_text_list = []
for line in text_list:
line = line.strip()
# e.g. line = 'California Quail Callipepla californica'
words = line.split(' ')
if len(words) > 2:
sci_name = ' '.join(words[-2:]).lower()
row = taxonomy.find_scientific_name_row(sci_name)
if row is not None:
line = ' '.join(words[:-2]) #.lower()
stripped_text_list.append(line)
return stripped_text_list
def process_checklist(checklist_path: Path,
output_dir: Path,
taxonomy: Taxonomy,
local_translation_context: LocalTranslationContext,
parameters: Parameters,
circle_prefix: str
):
"""
- Extract text
"""
# Use circle_abbrev as a prefix to distinguish output for multiple checklists
# Extract text from file and do basic text preprocessing
text_extractor = TextExtractorFactory().create(checklist_path)
text = text_extractor.extract()
debug_write_raw_text(text, checklist_path, debug_path)
text_list = sorted(list(set(text.split('\n'))))
# skip tertiary_transformation() for now
text_list = [secondary_species_processing(pre_process_line(line)) for line in text_list]
# text_list = [tertiary_transformation(secondary_species_processing(pre_process_line(line))) \
# for line in text_list]
text_list = strip_off_scientific_names(text_list, taxonomy)
# print(text_list)
text_list = sorted(list(set(text_list)))
# Processing 1 checklist here
tti = TaxonomyTokenIdentify(taxonomy, cache_path)
# use text_list from above
text_list_lower = [x.lower() for x in text_list]
possibles = filter_to_possibles(tti, text_list_lower)
print(f'Possible species lines: {len(possibles)} (based on word intersections)')
# Double translate
# print('Doing double translation') # Can take a while
translated = []
for line in text_list_lower: # was: possibles
txline = local_translation_context.apply_translations(line.lower(), True)
translated.append(txline)
double_translated = []
for line, _ in translated:
txline2 = local_translation_context.apply_translations(line.lower(), True)
double_translated.append(txline2)
# Write Spacy visualization
write_visualization(list(set([x[0] for x in double_translated])), checklist_path, debug_path,
taxonomy,
tti)
# -------
local_checklist = build_full_tally_sheet(double_translated,
checklist_path, taxonomy,
parameters, circle_prefix)
cols_to_hide = ['Rare', 'Adult', 'Immature', 'W-morph', 'B-Morph', 'Difficulty', 'CountSpecial']
# The first checklist we write has a single column for group and is
# used as the template for the Service-ProcessEBird phase
# don't use circle_prefix here
circle_abbrev = circle_abbrev_from_path(checklist_path)
single_path = output_dir / f'{circle_abbrev}-Single.xlsx'
write_final_checklist_spreadsheet(local_checklist,
single_path,
parameters.parameters,
additional_sheets=None,
cols_to_hide=cols_to_hide,
cols_to_highlight=['Total'])
# Write out an empty annotations file if none exists
annotations_path = inputs_parse_path / f'{circle_prefix}Annotations.xlsx'
if not annotations_path.exists():
print(f'Creating empty annotations file: {annotations_path.as_posix()}')
annotations = local_checklist.copy()
for col in ['Rare', 'Adult', 'Immature', 'Easy', 'Marginal', 'Difficult']:
annotations[col] = ''
write_final_checklist_spreadsheet(annotations,
annotations_path,
parameters.parameters,
additional_sheets=None,
cols_to_hide=None,
cols_to_highlight=None)
exceptions_path = inputs_parse_path / f'{circle_prefix}Exceptions.xlsx'
if not exceptions_path.exists():
print(f'Creating empty exceptions file: {exceptions_path.as_posix()}')
empty_exceptions = pd.DataFrame(
{'CommonName': '', 'Add': '', 'Subtract': '', 'Comments': ''},
index=range(20)) # Adding rows to a table is a pain in Excel, give some room
write_basic_spreadsheet(empty_exceptions, exceptions_path,
column_widths={'CommonName': 30, 'Add': 11,
'Subtract': 11, 'Comments': 50},
columns_to_center=['Add', 'Subtract'])
double_path = output_dir / f'{circle_abbrev}-Double.xlsx'
write_local_checklist_with_group(local_checklist, double_path, parameters.parameters)
ground_truths_df = ground_truth_for_code(circle_abbrev)
if not ground_truths_df.empty:
_ = check_against_ground_truth(local_checklist, ground_truths_df)
categorized_lines = categorize_lines(circle_abbrev, text_list,
local_translation_context, tti)
write_categorized_lines_spreadsheet(categorized_lines,
debug_path / f'{circle_abbrev}-categorized_lines.xlsx',
col_widths=[40, 40, 11, 16],
col_align=['left', 'left', 'center', 'center'],
sheet_name='Categorized Lines',
)
return text_list, double_translated, local_checklist
# ------------------------------------------------------------------------------------------
def process_checklists(checklists_path: Path,
output_dir: Path,
taxonomy: Taxonomy,
local_translation_context: LocalTranslationContext,
parameters: Parameters,
circle_prefix: str # e.g. 'CACR-2020-'
):
# Return parameters useful when debugging single list
# parsable_filetypes = TextExtractorFactory().formats()
ifc = InputFilesContext(checklists_path, ['.xlsx', '.csv', '.pdf'])
checklist_paths = ifc.allowable_files(f'{circle_prefix}checklist')
print(f'Path: {checklists_path}')
# - Extract text from tally sheet (checklist)
# - Make LocalTranslationContext and TaxonomyTokenIdentify objects
# - Do a double translation (should be idempotent)
text_list = []
double_translated = []
local_checklist = pd.DataFrame()
if len(checklist_paths) == 0:
print(f'No valid checklists found in: {checklists_path}')
return None, None, None
for fpath in checklist_paths:
print(f'Name: {fpath.stem}')
text_list, double_translated, local_checklist = \
process_checklist(fpath, output_dir, taxonomy,
local_translation_context,
parameters,
circle_prefix)
print('--------------------------------------------------------')
return text_list, double_translated, local_checklist
# ------------------------------------------------------------------------------------------
def ground_truths():
ground_truths_in_path = base_path / 'ground_truths.xlsx'
ground_truths_in = read_excel_or_csv_path(ground_truths_in_path)
return ground_truths_in
def ground_truth_for_code(circle_code: str) -> pd.DataFrame:
# circle_code is e.g. 'CACR-1'
ground_truths_in = ground_truths()
try:
gt_cols = ['name', 'Category', circle_code]
mask = (ground_truths_in[circle_code] == 1)
ground_truths_subset = ground_truths_in[mask][gt_cols].reset_index(
drop=True)
except Exception as ee:
print(f'{circle_code} not found in ground truths: {ee}')
return | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from pkg_resources import resource_stream
from tqdm import tqdm
from io import BytesIO
from pytsp.util.distances import get_distance
DEFAULT_SEED: int = 42
DEFAULT_NUM_CITIES: int = 10
DEFAULT_ALLOW_REPEATING_CITIES: bool = False
DEFAULT_VERBOSE: bool = True
class DataGenerator(object):
MIN_NUM_CITIES: int = 5
def __init__(
self,
num_cities: int = DEFAULT_NUM_CITIES,
seed: int = DEFAULT_SEED,
allow_repeating_cities: bool = DEFAULT_ALLOW_REPEATING_CITIES,
verbose: bool = DEFAULT_VERBOSE,
):
self.all_cities = self.__get_all_cities()
self.__set_num_cities(num_cities)
self.generate_new_cities_selection(
num_cities, seed, allow_repeating_cities, verbose
)
@property
def num_cities(self):
return self._num_cities
def __set_num_cities(self, value: int):
if value is None:
raise ValueError("Number of cities can't be None")
if value < self.MIN_NUM_CITIES:
raise ValueError(
f"Number of cities can't be less than {self.MIN_NUM_CITIES}"
)
self._num_cities = value
@property
def selected_cities(self):
return self._selected_cities
@property
def distances(self):
return self._distances
def __get_all_cities(self):
starbucks_data = BytesIO(
resource_stream("pytsp", "data/starbucks_us_locations.csv").read()
)
columns = ["longitude", "latitude", "id", "address"]
cities = pd.read_csv(starbucks_data, names=columns, header=None)
cities.dropna(inplace=True)
print(cities.columns)
print(len(cities))
cities[["state", "city"]] = cities.id.str.split("-", expand=True)[
[1, 2]
]
cities["city"] = cities["city"].str[:-7]
cities["city"] = cities["city"].str.replace(r"\[.*", "", regex=True)
cities["state_city"] = cities["state"] + "-" + cities["city"]
cities["coordinates"] = list(
zip(cities["latitude"], cities["longitude"])
)
return cities
def __get_selected_cities(
self,
seed: int = DEFAULT_SEED,
allow_repeating_cities: bool = DEFAULT_ALLOW_REPEATING_CITIES,
):
exists_more_distinct_cities_than_selected = (
len(self.all_cities["state_city"].unique()) > self.num_cities
)
if (
not exists_more_distinct_cities_than_selected
and not allow_repeating_cities
):
raise ValueError("Available distinct cities are less than selected")
city_columns = self.all_cities.columns.tolist()
selected_cities = pd.DataFrame(columns=city_columns)
random_state = seed
for _ in range(self.num_cities):
random_city = self.all_cities.sample(random_state=random_state)
if not allow_repeating_cities:
while (
random_city["state_city"]
.isin(selected_cities["state_city"])
.any()
):
random_state += 1
random_city = self.all_cities.sample(
random_state=random_state
)
random_state += 1
selected_cities = | pd.concat([selected_cities, random_city]) | pandas.concat |
import pandas as pd
import numpy as np
import feather as f
import os, sys, re
import time, datetime
import os, sys, argparse
import cv2
id_pattern_string = '.*(BL|Sal|Met).*(B6|C3)-(WO?#?[0-9]+).*'
id_pattern_sub = '\\2-\\3-\\1'
def get_time_for_cover(filename, default='10:00:00'):
try:
with open(filename, 'r') as fp:
ret_val = [line for line in fp if 'cage cover' in line]
ret_val = re.sub('.*([0-9]{2}:[0-9]{2}:[0-9]{2}).*','\\1',ret_val[0])[:-1]
return ret_val
except:
return default
def export_animal(args):
# Testing pattern for any downstream matching
test_animal_id = re.sub(id_pattern_string,id_pattern_sub,args.animal_id)
# List files and folder for results
if args.data_folder is not None:
if os.path.isdir(args.data_folder):
data_folder = args.data_folder
animal_pattern = os.path.basename(args.animal_id)
else:
data_folder = os.path.dirname(args.data_folder)
animal_pattern = os.path.basename(args.data_folder)
data_files = os.listdir(data_folder)
data_files = [x for x in data_files if re.search(animal_pattern, x)]
file_df = pd.DataFrame({'chunk_idx': [int(re.sub('.*_([0-9]+)\.csv','\\1',x)) for x in data_files], 'filename': [data_folder + '/' + x for x in data_files]}).sort_values(by=['chunk_idx']).reset_index(drop=True)
else:
file_df = pd.DataFrame({'chunk_idx': [1], 'filename': [args.data_file]})
# Do the same for eeg calls
annotations_missing = False
if args.eeg_folder is not None:
if os.path.isdir(args.eeg_folder):
eeg_folder = args.eeg_folder
else:
eeg_folder = os.path.dirname(args.eeg_folder)
eeg_files = os.listdir(eeg_folder)
eeg_ids = [re.sub(id_pattern_string,id_pattern_sub,x) for x in eeg_files]
match_ids = np.where(eeg_ids == np.array(test_animal_id))[0]
if len(match_ids)==1:
eeg_file = eeg_folder + eeg_files[match_ids[0]]
else:
annotations_missing = True
elif args.eeg_file is not None:
eeg_file = arg.eeg_file
else:
annotations_missing = True
# Time sync file (alignment to eeg/emg annotations)
if args.timesyncfile is not None:
sync_frames = pd.read_csv(args.timesyncfile)
sync_tests = [re.sub(r'.*(BL|Sal|Met).*(B6|C3)-(WO?#[0-9]+).*',id_pattern_sub,x) for x in sync_frames['Video']]
match_ids = np.where(sync_tests == np.array(test_animal_id))[0]
if len(match_ids)==1:
sync_skip = sync_frames['TimeSyncFrame'][match_ids[0]]
else:
sync_skip = 0
# Timestamp file
if args.timestamp_folder is not None:
if os.path.isdir(args.timestamp_folder):
timestamp_folder = args.timestamp_folder
else:
timestamp_folder = os.path.dirname(args.timestamp_folder)
timestamp_files = os.listdir(timestamp_folder)
# Only look at _timestamps.txt
timestamp_files = [x for x in timestamp_files if re.search('_timestamps.txt', x)]
timestamp_ids = [re.sub(id_pattern_string,id_pattern_sub,x) for x in timestamp_files]
match_ids = np.where(timestamp_ids == np.array(test_animal_id))[0]
if len(match_ids)==1:
timestamp_file = timestamp_folder + timestamp_files[match_ids[0]]
else:
print('Timestamp file not linked correctly, found ' + str(len(match_ids)) + ' matching timestamp files. Use --timestamp_file to specify.')
exit(0)
else:
timestamp_file = args.timestamp_file
# Begin reading in the data
# Time information
timestamps = pd.read_csv(timestamp_file, header=None, names=['times'])
if args.fragmented_clip_length > 0:
# Assumes keyframe rate are RPi's default of 60 frames/keyframe
to_remove = np.round(args.fragmented_clip_length*np.arange(len(timestamps)/args.fragmented_clip_length)/60)*60
timestamps = timestamps.drop(to_remove).reset_index(drop=True)
# Import Annotation Data
# Insert defaults if annotations were missing
if annotations_missing:
try:
# Attempt to parse the "starting timestamps" file for the notes
eeg_record_date = datetime.datetime.strptime(pd.read_table(re.sub('timestamps.txt','StartingTimestamp.txt',timestamp_file), skiprows=3, nrows=1, header=None, sep=': ')[1][0], '%a %Y-%m-%d %H:%M:%S %Z')
# Align to 10am
start_time = datetime.datetime.strptime(datetime.datetime.strftime(eeg_record_date, '%Y-%m-%d ') + '10:00:00', '%Y-%m-%d %H:%M:%S')
except:
# Insert dummy data of Jan 1, 1970 10AM
eeg_record_date = '1/1/1970'
start_time = datetime.datetime(1970, 1, 1, 10, 0)
# Populate the data as best as possible
eeg_data = pd.DataFrame({'time_bin':[start_time + datetime.timedelta(seconds=int(i*10)) for i in np.arange(np.floor(timestamps['times'].values[-1]-timestamps['times'].values[0]))], 'Sleep Stage':'NA'})
else:
# StartingTimestamps is more reliable than eeg file...
try:
eeg_record_date = datetime.datetime.strptime(pd.read_table(re.sub('timestamps.txt','StartingTimestamp.txt',timestamp_file), skiprows=3, nrows=1, header=None, sep=': ')[1][0], '%a %Y-%m-%d %H:%M:%S %Z')
# Align to note in starting timestamp file (default to 10AM)
start_string = get_time_for_cover(re.sub('timestamps.txt','StartingTimestamp.txt',timestamp_file))
start_time = datetime.datetime.strptime(datetime.datetime.strftime(eeg_record_date, '%Y-%m-%d ') + start_string, '%Y-%m-%d %H:%M:%S')
found_start = True
# Fall back to eeg records
except:
found_start = False
if not found_start:
eeg_record_date = | pd.read_csv(eeg_file, header=None, sep='\t', skiprows=3, nrows=1) | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas.api.types import CategoricalDtype
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex, Categorical)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
# by column (axis=0)
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=['B', 'C'])
expected = frame.loc[[2, 1, 3]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
pytest.raises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis='columns')
expected = frame.reindex(columns=['B', 'A', 'C'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1,
ascending=[True, False])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with tm.assert_raises_regex(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_values_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=1, axis=1, inplace=True)
expected = frame.sort_values(by=1, axis=1)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
| assert_frame_equal(sorted_df, expected) | pandas.util.testing.assert_frame_equal |
"""
GUI code modified based on https://github.com/miili/StreamPick
For earthquake PKiKP coda quality evaluation and stack
"""
import os
import pickle
import pandas as pd
import numpy as np
# GUI import
import PyQt5
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import sys
import signal
import scipy
import gpar
from gpar.util import util
from itertools import cycle
#figure plot import
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.transforms import offset_copy
from matplotlib.widgets import RectangleSelector
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import shiftgrid
# from mpl_toolkits.axesgrid1 import make_axes_locatable
#obspy import
from obspy.taup import TauPyModel
import obspy
from obspy.core.trace import Trace
from obspy.core.stream import Stream
from obspy.core import read
from obspy.core import AttribDict
signal.signal(signal.SIGINT, signal.SIG_DFL)
# color = list(mcolors.cnames.values())
color = ['red', 'blue', 'green','yellow','cyan','magenta','purple']
#class for event first evaluation
class glanceEQ(QtWidgets.QMainWindow):
def __init__(self, array=None, parent=None, ap=None):
if ap is None:
self.qApp = QtWidgets.QApplication(sys.argv)
else:
self.qApp = ap
self.KeepGoing = False
if isinstance(array, str):
ar = util.loadArray(array)
elif isinstance(array, gpar.arrayProcess.Array):
ar = array
else:
msg = 'Define Array instance = gpar.arrayPropocess.Array() or a path to a pickle file'
raise ValueError(msg)
self.array = ar
self.eve_type = ['A','B','C','D']
self._shortcuts = {'eve_next': 'n',
'eve_prev': 'p',
'trim_apply': 'w',
'gain_up': 'u',
'gain_down': 'd',
'strip': 's',
'A':'a',
'B':'b',
'C':'c',
'D':'d'}
self._plt_drag = None
# init events in the array
self._events = ar.events #defines list self._events
self.savefile = None
self._initEqList()
self._stripDF = pd.DataFrame()
self._badDF = pd.DataFrame()
self._btype = 'beam'
self._method = 'all'
self.trinWin = [{'name':'N200-C200','noise':200.0,'coda':200.0,
'stime':400.0,'etime':1800,
'smooth':4.0,'model':'ak135'}]
self._current_win = None
self._current_strip = False
self._eventCycle = cycle(self._eqlist)
self._eventInfo(next(self._eventCycle))
QMainWindow.__init__(self)
self.setupUI()
def setupUI(self):
self.main_widget = QtWidgets.QWidget(self)
self._initMenu()
self._createStatusBar()
self._initPlots()
l = QVBoxLayout(self.main_widget)
l.addLayout(self.btnbar)
l.addLayout(self.btnbar2)
l.addWidget(self.canvas)
self.setCentralWidget(self.main_widget)
self.setGeometry(300, 300, 1200, 800)
self.setWindowTitle('Array Analysis: %s'%self.array.name)
self.show()
def _killLayout():
pass
def _initEqList(self):
self._eqlist = []
for _eve in self._events:
self._eqlist.append(_eve.ID)
self._eqlist.sort()
def _initPlots(self):
self.fig = Figure(facecolor='.86',dpi=100, frameon=True)
self.canvas = FigureCanvas(self.fig)
self.canvas.setFocusPolicy(PyQt5.QtCore.Qt.StrongFocus)
self._drawFig()
# connect the events
self.fig.canvas.mpl_connect('scroll_event', self._pltOnScroll)
self.fig.canvas.mpl_connect('motion_notify_event', self._pltOnDrag)
self.fig.canvas.mpl_connect('button_release_event', self._pltOnButtonRelease)
def _initMenu(self):
# Next and Prev Earthquake
nxt = QtWidgets.QPushButton('Next >>',
shortcut=self._shortcuts['eve_next'], parent=self.main_widget)
nxt.clicked.connect(self._pltNextEvent)
nxt.setToolTip('shortcut <b>n</d>')
nxt.setMaximumWidth(150)
prv = QPushButton('Prev >>',
shortcut=self._shortcuts['eve_prev'], parent=self.main_widget)
prv.clicked.connect(self._pltPrevEvent)
prv.setToolTip('shortcut <b>p</d>')
prv.setMaximumWidth(150)
# Earthquake drop-down
self.evecb = QComboBox(self)
for eve in self._eqlist:
self.evecb.addItem(eve)
self.evecb.activated.connect(self._pltEvent)
self.evecb.setMaximumWidth(1000)
self.evecb.setMinimumWidth(80)
# coda strip button
self.codabtn = QtWidgets.QPushButton('Strip',
shortcut=self._shortcuts['strip'],parent=self.main_widget)
self.codabtn.setToolTip('shortcut <b>s</b>')
self.codabtn.clicked.connect(self._appStrip)
self.codacb = QComboBox(self)
for med in ['all', 'coda','twoline']:
self.codacb.addItem(med)
self.codacb.activated.connect(self._selectMethod)
self.codacb.setMaximumWidth(100)
self.codacb.setMinimumWidth(80)
self.wincb = QComboBox(self)
self.wincb.activated.connect(self._changeStrip)
self._updateWindow()
# edit/delete coda selected window
winEdit = QtWidgets.QPushButton('Coda Window')
winEdit.resize(winEdit.sizeHint())
winEdit.clicked.connect(self._editTimeWindow)
winDelt = QtWidgets.QPushButton('Delete')
winDelt.resize(winDelt.sizeHint())
winDelt.clicked.connect(self._deleteWin)
# Coda level
_radbtn = []
for _o in self.eve_type:
_radbtn.append(QRadioButton(_o.upper(), shortcut=self._shortcuts[_o.upper()]))
_radbtn[-1].setToolTip('Level: '+_o)
self.levelGrp = QButtonGroup()
self.levelGrp.setExclusive(True)
levelbtn = QHBoxLayout()
for _i, _btn in enumerate(_radbtn):
self.levelGrp.addButton(_btn, _i)
levelbtn.addWidget(_btn)
# plot slide beam figure button
self.sbcb = QComboBox(self)
for btype in ['beam', 'slide', 'vespetrum','strip']:
self.sbcb.addItem(btype)
self.sbcb.activated.connect(self._updatePlot)
self.vepcb = QComboBox(self)
for scale in ['log10', 'log','sqrt','beam']:
self.vepcb.addItem(scale)
self.vepcb.activated.connect(self._updatePlot )
self.vepcb.setEnabled(False)
self.codacb.setMaximumWidth(100)
self.codacb.setMinimumWidth(80)
self.ampmin = QDoubleSpinBox(decimals=1, maximum=5, minimum=-2, singleStep=.5, value=1)
self.ampmax = QDoubleSpinBox(decimals=1, maximum=5, minimum=-2, singleStep=.5, value=3)
self.ampmin.valueChanged.connect(self._updatePlot)
self.ampmax.valueChanged.connect(self._updatePlot)
self.ampmin.setEnabled(False)
self.ampmax.setEnabled(False)
# self._initAmp()
self.sbcb.activated.connect(self._activeAmp)
self.ttbtn = QtWidgets.QPushButton('Phases', parent=self.main_widget)
self.ttbtn.setCheckable(True)
self.ttbtn.clicked.connect(self._updatePlot)
# Arrange buttons
vline = QFrame()
vline.setFrameStyle(QFrame.VLine | QFrame.Raised)
self.btnbar = QHBoxLayout()
self.btnbar.addWidget(prv)
self.btnbar.addWidget(nxt)
self.btnbar.addWidget(QLabel('Event'))
self.btnbar.addWidget(self.evecb)
##
self.btnbar.addWidget(vline)
self.btnbar.addWidget(self.codabtn)
self.btnbar.addWidget(self.codacb)
self.btnbar.addWidget(self.wincb)
self.btnbar.addWidget(winEdit)
self.btnbar.addWidget(winDelt)
self.btnbar.addStretch(1)
self.btnbar2 = QHBoxLayout()
self.btnbar2.addWidget(QLabel('Level: '))
self.btnbar2.addLayout(levelbtn)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(QLabel('TYPE'))
self.btnbar2.addWidget(self.sbcb)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(QLabel('Scale'))
self.btnbar2.addWidget(self.vepcb)
self.btnbar2.addWidget(QLabel('AMP'))
self.btnbar2.addWidget(self.ampmin)
self.btnbar2.addWidget(self.ampmax)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(self.ttbtn)
self.btnbar2.addStretch(1)
#Menubar
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save', self._saveFile)
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save as', self._saveFileFormat)
fileMenu.addSeparator()
fileMenu.addAction(QIcon().fromTheme('document-open'),
'Load array', self._openArray)
fileMenu.addAction(QtGui.QIcon().fromTheme('document-open'),
'Load Strip Pickle File', self._openFile)
fileMenu.addSeparator()
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save Plot', self._savePlot)
fileMenu.addSeparator()
quit = QAction(QIcon().fromTheme('application-exit')," &Exit", self)
fileMenu.addAction(quit)
fileMenu.triggered[QAction].connect(self.closeArray)
def _hardExist(self):
self.deleteLater()
def _activeAmp(self):
if self.sbcb.currentText() == 'vespetrum':
self.ampmin.setEnabled(True)
self.ampmax.setEnabled(True)
self.vepcb.setEnabled(True)
if self.vepcb.currentText() == 'beam':
self.ampmax.setMaximum(100000)
# self.ampmax.setValue(1000)
self.ampmax.setSingleStep(500)
# self.ampmin.setValue(10)
self.ampmin.setMaximum(100000)
self.ampmin.setSingleStep(500)
elif self.vepcb.currentText() == 'sqrt':
self.ampmax.setMaximum(300)
# self.ampmax.setValue(30)
self.ampmax.setSingleStep(5)
# self.ampmin.setValue(3)
self.ampmin.setMaximum(300)
self.ampmin.setSingleStep(5)
elif self.vepcb.currentText() == 'log':
self.ampmax.setMaximum(12)
# # self.ampmax.setValue(7)
self.ampmax.setSingleStep(1)
# # self.ampmin.setValue(2)
self.ampmin.setMaximum(12)
self.ampmin.setSingleStep(1)
elif self.vepcb.currentText() == 'log10':
self.ampmax.setSingleStep(0.5)
self.ampmin.setSingleStep(0.5)
self.ampmax.setMaximum(5)
self.ampmin.setMaximum(5)
else:
self.ampmin.setEnabled(False)
self.ampmax.setEnabled(False)
self.vepcb.setEnabled(False)
def _createStatusBar(self):
"""
Creates the status bar
"""
sb =QStatusBar()
sb.setFixedHeight(18)
self.setStatusBar(sb)
self.statusBar().showMessage('Ready')
def _selectMethod(self, index):
self._method = self.codacb.currentText()
self.sbcb.setCurrentIndex(3)
self._updatePlot()
def _changeStrip(self,index):
if index == len(self.trinWin):
return self._newTrim()
else:
return self._appStrip()
def _newTrim(self):
"""
Creat new strip window
"""
newWin = self.defWindow(self)
if newWin.exec_():
self.trinWin.append(newWin.getValues())
self._updateWindow()
self.wincb.setCurrentIndex(len(self.trinWin)-1)
self._appStrip()
def _editTimeWindow(self):
"""
Edit existing coda selection window
"""
_i = self.wincb.currentIndex()
this_window = self.trinWin[_i]
editWindow = self.defWindow(self, this_window)
if editWindow.exec_():
self.trinWin[_i] = editWindow.getValues()
self._updateWindow()
self.wincb.setCurrentIndex(_i)
self._appStrip()
def _deleteWin(self):
"""
Delete window
"""
pass
_i = self.wincb.currentIndex()
def _updateWindow(self):
self.wincb.clear()
self.wincb.setCurrentIndex(-1)
for _i, _f in enumerate(self.trinWin):
self.wincb.addItem('Noise %.2f sec - Coda %.2f sec' %(_f['noise'], _f['coda']))
self.wincb.addItem('Create new Window')
def _appStrip(self, button=True, draw=True):
"""
Apply coda strip
"""
_method = self.codacb.currentText()
_j = self.wincb.currentIndex()
self._eventInfo(self._current_id)
self._current_strip = True
spts = int(self.trinWin[_j]['smooth'] / self._current_delta )
codaStrip(self._current_event, method=_method, window=spts,
siglen=self.trinWin[_j]['coda'], noise=self.trinWin[_j]['noise'],beamphase=self.beamphase,
model=self.trinWin[_j]['model'], stime=self.trinWin[_j]['stime'], etime=self.trinWin[_j]['etime'],)
self._btype = 'strip'
self.sbcb.setCurrentIndex(3)
self._setCodaStrip()
self._updatePlot()
def _pltEvent(self):
"""
Plot event from DropDown Menu
"""
_i = self.evecb.currentIndex()
while next(self._eventCycle) != self._eqlist[_i]:
pass
self._eventInfo(self._eqlist[_i])
self._current_strip = False
_id = self._current_event.ID
if len(self._stripDF) != 0:
existDF = self._stripDF[self._stripDF.ID == _id]
else:
existDF = pd.DataFrame()
if len(existDF) != 0:
level = existDF.Level.iloc[0]
ind = self.eve_type.index(level)
self.levelGrp.button(ind).setChecked(True)
self._current_strip=True
else:
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == _id]
if len(_badDF) != 0:
self.levelGrp.button(3).setChecked(True)
self._current_strip = True
self._drawFig()
def _pltPrevEvent(self):
"""
Plot previous events
"""
_j = self.evecb.currentIndex()
for _i in range(len(self._eqlist) - 1):
prevEvent = next(self._eventCycle)
self._eventInfo(prevEvent)
self._current_strip = False
_id = self._current_event.ID
if len(self._stripDF) != 0:
existDF = self._stripDF[self._stripDF.ID == _id]
else:
existDF = pd.DataFrame()
if len(existDF) != 0:
level = existDF.Level.iloc[0]
ind = self.eve_type.index(level)
self.levelGrp.button(ind).setChecked(True)
self._current_strip = True
else:
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == _id]
if len(_badDF) != 0:
self.levelGrp.button(3).setChecked(True)
self._current_strip = True
if _j == 0:
_n = len(self.evecb) - 1
self.evecb.setCurrentIndex(_n)
else:
self.evecb.setCurrentIndex(_j-1)
if self._btype == 'strip':
self._btype = 'beam'
self.sbcb.setCurrentIndex(0)
self._drawFig()
def _pltNextEvent(self):
_id = self._current_event.ID
level = self.eve_type[self.levelGrp.checkedId()]
if level == 'D':
self._current_strip = True
self._setCodaStrip()
else:
# if len(self._stripDF) != 0:
# existDF = self._stripDF[(self._stripDF.ID == _id)]
# else:
# existDF = pd.DataFrame()
# if len(existDF) == 0:
if not self._current_strip:
choice = QMessageBox.question(self, 'Stripping?',
"Haven't stripping yet, want to do it?",
QMessageBox.Yes | QMessageBox.No)
if choice is QMessageBox.Yes:
self._current_strip = True
self._appStrip()
return
self._eventInfo(next(self._eventCycle))
self._current_strip = False
_id = self._current_event.ID
if len(self._stripDF) != 0:
existDF = self._stripDF[self._stripDF.ID == _id]
else:
existDF = pd.DataFrame()
if len(existDF) != 0:
level = existDF.Level.iloc[0]
ind = self.eve_type.index(level)
self.levelGrp.button(ind).setChecked(True)
self._current_strip = True
else:
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == _id]
if len(_badDF) != 0:
self.levelGrp.button(3).setChecked(True)
self._current_strip = True
_i = self.evecb.currentIndex()
if _i == len(self.evecb) - 1:
self.evecb.setCurrentIndex(0)
else:
self.evecb.setCurrentIndex(_i+1)
if self._btype == 'strip':
self._btype = 'beam'
self.sbcb.setCurrentIndex(0)
self._drawFig()
def _eventInfo(self, eqid):
"""
Copies the array process result from the current Earthquake object
"""
for eve in self._events:
if eve.ID == eqid:
event = eve
self._current_event = event
self.beamphase = event.beamphase
self._current_id = eqid
if not hasattr(event, 'beam'):
return
self._current_beam = event.beam
filts = {}
for tr in self._current_beam:
filts[tr.stats.station] = tr.stats.channel
self._current_filts = filts
self._current_ID = event.ID
self._current_dis = event.dis
self._current_p = event.rayp
self._current_bb = event.bb
self._current_bakAz = event.baz
self._current_delta = event.delta
if hasattr(event, 'slideSt'):
self._current_slide = event.slideSt
if hasattr(event, 'energy'):
self._current_energy = event.energy
self._current_time = event.slantTime
self._current_K = event.slantK
self._current_type = event.slantType
def _setCodaStrip(self):
if not self._current_strip:
return
event = self._current_event
_i = self.wincb.currentIndex()
win = self.trinWin[_i]
if len(self._stripDF) != 0:
existDF = self._stripDF[(self._stripDF.ID == self._current_event.ID) & (self._stripDF.winName == win['name'])]
else:
existDF = pd.DataFrame()
if len(self._badDF) !=0:
_badDF = self._badDF[self._badDF.ID == self._current_event.ID]
else:
_badDF = pd.DataFrame()
if len(existDF) !=0:
choice = QMessageBox.question(self, 'Replace stripping',
"Do you want to replace existed stripping?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
index = existDF.index
self._stripDF.drop(index,axis=0,inplace=True)
self._stripDF.reset_index(inplace=True, drop=True)
else:
return
if len(_badDF) != 0:
choice = QMessageBox.question(self, 'Bad Event',
"Want to replace it?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
index = _badDF.index
self._badDF.drop(index,axis=0,inplace=True)
self._badDF.reset_index(inplace=True, drop=True)
else:
return
level = self.eve_type[self.levelGrp.checkedId()]
ID = event.ID
lat = event.lat
lon = event.lon
dep = event.dep
mw = event.mw
dis = event.dis
bb = event.bb
bakAzi = event.baz
delta = event.delta
if level =='D':
newRow = {'ID': ID, 'lat':lat,
'lon':lon,'dep':dep,
'Mw':mw,'Del':dis,
'BB':bb,'bakAzi':bakAzi,'Level':'D'}
msg = ('%s is Bad Event'%self._current_ID)
gpar.log(__name__, msg, level='info', pri=True)
self._badDF = self._badDF.append(newRow, ignore_index=True)
else:
if self._method == 'all':
newRow = {'ID': ID, 'lat':lat,
'lon':lon,'dep':dep,
'Mw':mw,'Del':dis,
'BB':bb,'bakAzi':bakAzi,
'winName':win['name'], 'win':win,
'Level':level, 'delta': delta,
'codaResSt':event.codaResSt,
'codaSt':event.codaSt,
'crms':event.codaMod,
'twoResSt':event.twoResSt,
'twoSt':event.twoSt,
'trms':event.twoMod}
self._stripDF = self._stripDF.append(newRow, ignore_index=True)
elif self._method == 'coda':
newRow = {'ID': ID, 'lat':lat,
'lon':lon,'dep':dep,
'Mw':mw,'Del':dis,
'winName':win['name'], 'win':win,
'BB':bb,'bakAzi':bakAzi,
'Level':level, 'delta': delta,
'codaResSt':event.codaResSt,
'codaSt':event.codaSt,
'crms':event.codaMod,
}
self._stripDF = self._stripDF.append(newRow, ignore_index=True)
elif self._method == 'twoline':
newRow = {'ID': ID, 'lat':lat,
'lon':lon,'dep':dep,
'Mw':mw,'Del':dis,
'BB':bb,'bakAzi':bakAzi,
'Level':level, 'delta': delta,
'winName':win['name'], 'win':win,
'twoResSt':event.twoResSt,
'twoSt':event.twoSt,
'trms':event.twoMod}
self._stripDF = self._stripDF.append(newRow, ignore_index=True)
def _drawFig(self):
self.fig.clear()
a = u"\u00b0"
if self._btype == 'beam':
num_plots = len(self._current_beam)
for _i, tr in enumerate(self._current_beam):
ax = self.fig.add_subplot(num_plots, 1, _i+1)
if hasattr(tr.stats, 'channel'):
label = tr.stats.channel
else:
label=None
time = np.arange(tr.stats.npts) * tr.stats.delta + tr.stats.sac.b
ax.plot(time, tr.data, 'k', label=label)
if not hasattr(self._current_event, 'arrivals'):
self._current_event.getArrival()
arrival = self._current_event.arrivals[self.beamphase]['TT']# - self._current_event.time
ax.vlines(arrival, ax.get_ylim()[0],ax.get_ylim()[1],'r', label=self.beamphase)
if self.ttbtn.isChecked():
_arr = self._current_event.arrivals
# del _arr[self.beamphase]
for name, tt in _arr.items():
if name is self.beamphase:
continue
ax.vlines(tt['TT'], ax.get_ylim()[0],ax.get_ylim()[1],'b',label=name)
ax.legend()
if _i == 0:
ax.set_xlabel('Seconds')
self.fig.suptitle('%s - %s\nDep:%s Distance: %s%s'
%(self._current_event.ID, self._btype, self._current_event.dep, self._current_event.dis, a))
elif self._btype == 'slide':
self.fig.suptitle('%s - %s\nDep:%s Distance: %s%s'
%(self._current_event.ID, self._btype, self._current_event.dep, self._current_event.dis, a))
nfilts = len(self._current_slide.keys())
ax = self.fig.subplots(4, nfilts, sharex='col', sharey='row')
ax = ax.reshape(4,nfilts)
for ind, (name,st) in enumerate(self._current_slide.items()):
for _i, tr in enumerate(st):
if hasattr(tr.stats, 'channel'):
label = tr.stats.channel
else:
label=None
time = np.arange(tr.stats.npts) * tr.stats.delta + tr.stats.sac.b
ax[_i,ind].plot(time, tr.data, 'k', label=None)
ax[_i, ind].set_xlim([np.min(time), np.max(time)])
if label == 'Amplitude':
peak = np.max(tr.data) + 1
ax[_i,ind].set_ylim([-1, peak])
elif label == 'Slowness':
ax[_i,ind].set_ylim([0, 15])
rp = self._current_event.rayp
ax[_i, ind].hlines(rp, np.min(time), np.max(time), 'r', 'dashed')
elif label == 'Back Azimuth':
ax[_i, ind].set_ylim([0,360])
elif label == 'coherence':
ax[_i, ind].set_ylim([0,1])
if not hasattr(self._current_event, 'arrivals'):
self._current_event.getArrival()
arrival = self._current_event.arrivals[self.beamphase]['TT']# - self._current_event.time
ax[_i,ind].vlines(arrival, ax[_i,ind].get_ylim()[0],ax[_i,ind].get_ylim()[1],'r',label=self.beamphase)
if self.ttbtn.isChecked():
_arr = self._current_event.arrivals
# del _arr[self.beamphase]
for pname, tt in _arr.items():
if pname is self.beamphase:
continue
ax[_i,ind].vlines(tt['TT'], ax[_i,ind].get_ylim()[0],ax[_i,ind].get_ylim()[1],'b',label=pname)
ax[_i,ind].legend()
# ax[_i,ind].set_aspect(aspect=0.3)
if _i == 3:
ax[_i,ind].set_xlabel('Seconds')
if _i == 0:
ax[_i,ind].set_title(name)
if ind == 0:
ax[_i,ind].set_ylabel(label)
elif self._btype == 'vespetrum':
num = len(self._current_energy)
extent=[np.min(self._current_time),np.max(self._current_time),np.min(self._current_K),np.max(self._current_K)]
vmin = float(self.ampmin.cleanText())
vmax = float(self.ampmax.cleanText())
if not hasattr(self._current_event, 'arrivals'):
self._current_event.getArrival()
for ind, _row in self._current_energy.iterrows():
# abspow = _row.POWER
name = _row.FILT
if self.vepcb.currentText() == 'log10':
abspow = np.log10(np.abs(_row.POWER))
elif self.vepcb.currentText() == 'log':
abspow = np.log(np.abs(_row.POWER))
elif self.vepcb.currentText() == 'sqrt':
abspow = np.sqrt(np.abs(_row.POWER))
else:
abspow = np.abs(_row.POWER)
ax = self.fig.add_subplot(1, num, ind+1)
ax.imshow(abspow, extent=extent, aspect='auto', cmap='Reds', vmin=vmin, vmax=vmax, origin='lower')
arrival = self._current_event.arrivals[self.beamphase]['TT']
ax.vlines(arrival, ax.get_ylim()[0],ax.get_ylim()[1],'k',label=self.beamphase)
rp = self._current_event.rayp
ax.hlines(rp, ax.get_xlim()[0],ax.get_xlim()[1], 'b')
ax.hlines(-rp, ax.get_xlim()[0],ax.get_xlim()[1], 'b')
if self.ttbtn.isChecked():
_arr = self._current_event.arrivals
# del _arr[self.beamphase]
for name, tt in _arr.items():
if name is self.beamphase:
continue
ax.vlines(tt['TT'], ax.get_ylim()[0],ax.get_ylim()[1],'b',label=name)
ax.legend()
ax.set_xlabel('Seconds')
if ind == 0:
ax.set_ylabel(self._current_type)
ax.set_title(name)
if self._current_type == 'slowness':
title = '%s - %s\nSlant Stack at a Backazimuth of %.1f %sN\nDep:%s Distance: %s%s' \
%(self._btype, self._current_ID, self._current_event.baz,a,
self._current_event.dep, self._current_event.dis, a)
elif self._current_type == 'theta':
title = '%s - %s\nSlant Stack at a slowness of %.2f s/deg\nDep:%s Distance: %s%s' \
%(self._btype, self._current_ID, self._current_event.rayp,
self._current_event.dep, self._current_event.dis, a)
self.fig.suptitle(title)
elif self._btype == 'strip':
_i = self.wincb.currentIndex()
win = self.trinWin[_i]
if len(self._stripDF) != 0:
existDF = self._stripDF[(self._stripDF.ID == self._current_event.ID) & (self._stripDF.winName == win['name'])]
else:
existDF = pd.DataFrame()
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == self._current_event.ID]
else:
_badDF = pd.DataFrame()
if len(existDF) == 0 and len(_badDF) == 0:
choice = QMessageBox.question(self, 'Stripping?',
"Haven't stripping yet, want to do it?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
self._appStrip()
else:
self._btype = 'beam'
self.sbcb.setCurrentIndex(0)
self._updatePlot()
elif len(_badDF) != 0:
choice = QMessageBox.question(self, 'Bad event!',
"Want to reevalua it?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
index = _badDF.index
self._badDF.drop(index,axis=0,inplace=True)
self._badDF.reset_index(inplace=True, drop=True)
self.sbcb.setCurrentIndex(0)
self._updatePlot()
else:
self.sbcb.setCurrentIndex(0)
self._updatePlot()
elif len(existDF) != 0:
trinwin = existDF.win.iloc[0]
stime = trinwin['stime']
etime = trinwin['etime']
delta = self._current_beam[0].stats.delta
# npts = int((etime - stime)/delta) + 1
npts = int((etime - stime)/delta)
# time = np.linspace(stime, etime, npts)
time = stime + np.arange(npts) * delta
sind = int(stime / delta)
# eind = int(etime / delta)
if self._method == 'all':
codamode = existDF.crms.iloc[0]
twomode = existDF.trms.iloc[0]
nfilter = len(codamode)
codaSt = existDF.codaSt.iloc[0]
twoSt = existDF.twoSt.iloc[0]
cRes = existDF.codaResSt.iloc[0]
tRes = existDF.twoResSt.iloc[0]
timeR = np.arange(cRes[0].stats.npts)*cRes[0].stats.delta - trinwin['noise']
data_time = np.arange(twoSt[0].stats.npts) * delta + (twoSt[0].stats.starttime - self._current_beam[0].stats.starttime)
ax = self.fig.subplots(2, nfilter)
if nfilter == 1:
ax = ax.reshape(2, nfilter)
for ind in range(nfilter):
data = np.abs(scipy.signal.hilbert(self._current_beam[ind].data))[sind:sind+npts]
ax[0,ind].plot(time,np.log10(data),'k', label='beam')
data_coda = codaSt[ind].data
time_coda = stime + np.arange(len(data_coda)) * delta
ax[0,ind].plot(time_coda,np.log10(data_coda),'r', label='coda')
data_two = twoSt[ind].data
ax[0,ind].plot(data_time, data_two,'b', label='twoline')
ax[0,ind].set_xlim([stime, etime])
ax[0,ind].set_ylim([-1, 5])
ax[0,ind].set_xlabel('Seconds')
ax[0,ind].legend()
label_c = "Coda: Mean RMS = %s"%(codamode['RMS'].iloc[ind])
label_t = "Twoline: Mean RMS = %s"%(twomode['RMS'].iloc[ind])
ax[1,ind].plot(timeR,cRes[ind].data, 'r', label=label_c)
ax[1,ind].plot(timeR, tRes[ind].data, 'b',label=label_t)
ax[1,ind].legend()
ax[1,ind].set_xlabel('Seconds')
ax[1,ind].set_xlim([-trinwin['noise']/2, trinwin['noise']/2+trinwin['coda']])
ax[0,ind].set_title('Filter: %s'%twomode['FILT'].iloc[ind])
if ind is 0:
ax[0,ind].set_ylabel('log10(Amp)')
ax[1,ind].set_ylabel('Amp')
elif self._method == 'coda':
codamode = existDF.crms.iloc[0]
nfilter = len(codamode)
codaSt = existDF.codaSt.iloc[0]
cRes = existDF.codaResSt.iloc[0]
timeR = np.arange(cRes[0].stats.npts)*cRes[0].stats.delta - trinwin['noise']
ax = self.fig.subplots(2, nfilter)
for ind in range(nfilter):
data = np.abs(scipy.signal.hilbert(self._current_beam[ind].data))[sind:sind+npts]
ax[0,ind].plot(time,np.log10(data),'k', label='beam')
data_coda = codaSt[ind].data
time_coda = stime + np.arange(len(data_coda)) * delta
ax[0,ind].plot(time_coda,np.log10(data_coda),'r', label='coda')
ax[0,ind].set_xlim([stime, etime])
ax[0,ind].set_ylim([-1, 5])
ax[0,ind].set_xlabel('Seconds')
ax[0,ind].legend()
label_c = "Coda: Mean RMS = %s"%(codamode['RMS'].iloc[ind])
ax[1,ind].plot(timeR,cRes[ind].data, 'r', label=label_c)
ax[1,ind].legend()
ax[1,ind].set_xlabel('Seconds')
ax[1,ind].set_xlim([-trinwin['noise']/2, trinwin['noise']/2+trinwin['coda']])
ax[0,ind].set_title('Filter: %s'%codamode['FILT'].iloc[ind])
if ind is 0:
ax[0,ind].set_ylabel('log10(Amp)')
ax[1,ind].set_ylabel('Amp')
elif self._method == 'twoline':
twomode = existDF.trms.iloc[0]
nfilter = len(twomode)
twoSt = existDF.twoSt.iloc[0]
tRes = existDF.twoResSt.iloc[0]
timeR = np.arange(tRes[0].stats.npts)*tRes[0].stats.delta - trinwin['noise']
data_time = np.arange(twoSt[0].stats.npts) * delta + (twoSt[0].stats.starttime - self._current_beam[0].stats.starttime)
ax = self.fig.subplots(2, nfilter)
for ind in range(nfilter):
data = np.abs(scipy.signal.hilbert(self._current_beam[ind].data))[sind:sind+npts]
ax[0,ind].plot(time,np.log10(data),'k', label='beam')
data_two = twoSt[ind].data
ax[0,ind].plot(data_time, data_two,'b', label='twoline')
ax[0,ind].set_xlim([stime, etime])
ax[0,ind].set_ylim([-1, 5])
ax[0,ind].set_xlabel('Seconds')
ax[0,ind].legend()
label_t = "Twoline: Mean RMS = %s"%(twomode['RMS'].iloc[ind])
ax[1,ind].plot(timeR, tRes[ind].data, 'b',label=label_t)
ax[1,ind].legend()
ax[1,ind].set_xlabel('Seconds')
ax[1,ind].set_xlim([-trinwin['noise']/2, trinwin['noise']/2+trinwin['coda']])
ax[0,ind].set_title('Filter: %s'%twomode['FILT'].iloc[ind])
if ind is 0:
ax[0,ind].set_ylabel('log10(Amp)')
ax[1,ind].set_ylabel('Amp')
self.fig.suptitle('Coda Strip for %s using %s method in win %s\nDep:%s Distance: %s%s'
%(self._current_event.ID, self._method, trinwin['name'],
self._current_event.dep, self._current_event.dis, a))
self._canvasDraw()
#def _plotTT(self):
# if self.ttbtn.isChecked() is False:
def _updatePlot(self):
self._activeAmp()
self._btype = self.sbcb.currentText()
self._drawFig()
def _canvasDraw(self):
"""
Redraws the canvas and re-set mouse focus
"""
# if isinstance(st, obspy.core.stream.Stream):
# delta = st[0].stats.delta
# elif isinstance(st, obspy.core.trace.Trace):
# delta = st.stats.delta
for _i, _ax in enumerate(self.fig.get_axes()):
_ax.set_xticklabels(_ax.get_xticks())
self.fig.canvas.draw()
self.canvas.setFocus()
def _pltOnScroll(self, event):
"""
Scrolls/Redraws the plot along x axis
"""
if event.inaxes is None:
return
if event.key == 'control':
axes = [event.inaxes]
else:
axes = self.fig.get_axes()
for _ax in axes:
left = _ax.get_xlim()[0]
right = _ax.get_xlim()[1]
extent_x = right - left
dxzoom = .2 * extent_x
aspect_left = (event.xdata - _ax.get_xlim()[0]) / extent_x
aspect_right = (_ax.get_xlim()[1] - event.xdata) / extent_x
up = _ax.get_ylim()[1]
down = _ax.get_ylim()[0]
extent_y = up - down
dyzoom = 0.5 * extent_y
aspect_down = (0 - _ax.get_ylim()[0]) / extent_y
aspect_up = _ax.get_ylim()[1] / extent_y
if event.button == 'up':
left += dxzoom * aspect_left
right -= dxzoom * aspect_right
down += dyzoom * aspect_down
up -= dyzoom * aspect_up
elif event.button == 'down':
left -= dxzoom * aspect_left
right += dxzoom * aspect_right
down -= dyzoom * aspect_down
up += dyzoom * aspect_up
else:
return
_ax.set_xlim([left, right])
_ax.set_ylim([down, up])
self._canvasDraw()
def _pltOnDrag(self, event):
"""
Drags/redraws the plot upon drag
"""
if event.inaxes is None:
return
if event.key == 'control':
axes = [event.inaxes]
else:
axes = self.fig.get_axes()
if event.button == 1:
if self._plt_drag is None:
self._plt_drag = event.xdata
return
for _ax in axes:
_ax.set_xlim([_ax.get_xlim()[0] +
(self._plt_drag - event.xdata), _ax.get_xlim()[1] + (self._plt_drag - event.xdata)])
else:
return
self._canvasDraw()
def _pltOnButtonRelease(self, event):
"""
On Button Release Reset drag variable
"""
self._plt_drag = None
# def _pltOnButtonPress(self, event):
# """
# This function is using for zoom in relative phase region
# """
def _saveFile(self):
if self.savefile is None:
return self._saveFileFormat()
savefile = str(self.savefile)
if os.path.splitext(savefile)[1].lower() == '.pkl':
self._savePickle(savefile)
elif os.path.splitext(savefile)[1].lower() == '.csv':
self._saveCSV(savefile)
def _saveFileFormat(self):
files_types = "Pickle (*.pkl);; CSV (*.csv)"
self.savefile,_ = QFileDialog.getSaveFileName(self,
'Save as', os.getcwd(), files_types)
self.savefile = str(self.savefile)
if os.path.splitext(self.savefile)[1].lower() == '.pkl':
self._savePickle(self.savefile)
elif os.path.splitext(self.savefile)[1].lower() == '.csv':
self._saveCSV(self.savefile)
def _savePickle(self, filename):
self._stripDF.to_pickle(filename)
name = os.path.splitext(filename)
badname = name[0]+'.D'+name[1]
if len(self._badDF) != 0:
self._badDF.to_pickle(badname)
def _saveCSV(self, filename):
_stripDF = self._stripDF
_stripDF.drop(['codaSt','twoSt','twoResSt','codaResSt'])
_stripDF.to_csv(filename,index=False,sep=',')
if len(self._badDF) != 0:
_badDF = self._badDF
name = os.path.splitext(filename)
badname = name[0] +'.D' +name[1]
_badDF.to_csv(badname, index=False, sep=',')
def _openFile(self):
filename,_ = QFileDialog.getOpenFileName(self,'Load Pickle File',
os.getcwd(), 'Pickle Format (*.pkl)', '20')
if filename:
filename = str(filename)
self._stripDF = pd.read_pickle(filename)
name = os.path.splitext(filename)
badname = name[0]+'.D'+name[1]
if os.path.exists(badname):
self._badDF = pd.read_pickle(badname)
self._pltEvent()
self.savefile = str(filename)
def _openArray(self):
filename,_ = QFileDialog.getOpenFileName(self, 'Load array',
os.getcwd(), 'Pickle Format (*.pkl)', '20')
if filename:
filename = str(filename)
ar = util.loadArray(filename)
self._refreshArray(ar)
def _refreshArray(self, ar):
self.array = ar
self._plt_drag = None
# init events in the array
self._events = ar.events #defines list self._events
self.savefile = ar.name+'.strip.pkl'
self._initEqList()
self._stripDF = | pd.DataFrame() | pandas.DataFrame |
from Bio import SeqIO
import pandas as pd
import numpy as np
import math
import re
import os
def invert_new(pdDataFrame, No_genes, pd_column_index, gene_description):
pdDataFrame_dict = pdDataFrame.to_dict()
new_pdDataFrame_dict = {}
for i in No_genes.index:
new_pdDataFrame_dict[i] = list(pdDataFrame_dict[i].values())[:No_genes[i]]
complement = re.compile(r'strand_(.*)?\|\|')
pdDataFrame_dict_inverted = {}
for i in pd_column_index.columns:
for j in pd_column_index.index:
if 1 <= pd_column_index.at[j,i] < 2:
if complement.search(gene_description.at[j,i]).group(1) == '-1':
pdDataFrame_dict_inverted[i] = list(new_pdDataFrame_dict[i])[::-1]
else:
pdDataFrame_dict_inverted[i] = list(new_pdDataFrame_dict[i])
else:
continue
return pd.DataFrame.from_dict(pdDataFrame_dict_inverted, orient='index').T
def make_target_centered(list_AMR, temp):
AMR = list_AMR[0].copy()
for i in range(len(list_AMR))[::-1]:
AMR.update(list_AMR[i][list_AMR[i].astype(int) == list_AMR[i].astype(int).max().max()])
AMR.replace({0.01:np.nan}, inplace=True)
No_genes = (~(AMR.isnull())).sum()
gene_dict = {}
description = {}
gene_name = re.compile(r'\|\|\s(.*)?')
for i in No_genes.index:
each_gene = []
each_description = []
for j in range(No_genes[i]):
record = SeqIO.read(temp + '/each_fasta/' + i +'/' + str(j) + '.fasta','fasta')
each_description.append(record.description)
each_gene.append(gene_name.search(record.description).group(1))
gene_dict[i] = each_gene
description[i] = each_description
gene = pd.DataFrame.from_dict(gene_dict, orient='index').T
gene_description = pd.DataFrame.from_dict(description, orient='index').T
AMR_inverted = invert_new(AMR, No_genes, AMR, gene_description)
target_len = pd.DataFrame(columns=['complete sequence','position','len','seq'])
AMR_inverted_int = AMR_inverted.fillna(0).astype(int)
for i in AMR_inverted_int.T.index:
position = []
length = []
sequence = []
for j in AMR_inverted_int[i][AMR_inverted_int[i]==1].index:
record = SeqIO.read(temp + '/each_fasta/' + i +'/' + str(j) + '.fasta','fasta')
position.append(j)
length.append(len(record.seq))
sequence.append(str(record.seq))
target_len.loc[i,'position'] = position
target_len.loc[i,'len'] = length
target_len.loc[i,'seq'] = sequence
for i in No_genes.index:
record = SeqIO.read(temp + '/fasta/'+ i + '.fasta','fasta')
if re.compile(r'complete sequence').search(record.description) or re.compile(r'complete genome').search(record.description):
if not re.compile(r'cds').search(record.description):
target_len.loc[i,'complete sequence'] = 'yes'
# targetを先頭にする
first = {}
for i in AMR_inverted.columns:
position = target_len.loc[i,'position'][0]
upper = list(AMR_inverted.T.loc[i,position:].dropna())
lower = list(AMR_inverted.T.loc[i,:position-1].dropna())
first[i] = upper + lower
target_first = pd.DataFrame.from_dict(first,orient='index')
gene_inverted = invert_new(gene, No_genes, AMR, gene_description)
# geneの種類についてtargetを先頭にする
first_gene = {}
for i in AMR_inverted.columns:
position = target_len.loc[i,'position'][0]
upper = list(gene_inverted.T.loc[i,position:].dropna())
lower = list(gene_inverted.T.loc[i,:position-1].dropna())
first_gene[i] = upper + lower
gene_target_first = pd.DataFrame.from_dict(first_gene,orient='index')
# 最大のレーン決める
mid = pd.DataFrame(columns=['mid'])
for i in target_first.index:
mid.loc[i,'mid'] = math.ceil((len(target_first.loc[i,:]) - target_first.loc[i,:].isnull().sum())/2)
maximum = mid.max()
# とりあえずplasmidを半分にして、targetをだいたい中央に持ってくる
center = {}
for i in target_first.index:
Mid = int(mid.loc[i,'mid'])
center[i] = [None] * (int(maximum[0])-Mid) + list(target_first.loc[i,Mid+1:].dropna()) + list(target_first.loc[i,:Mid])
modified_AMR = pd.DataFrame.from_dict(center, orient='index').T
# とりあえずplasmidを半分にして、targetをだいたい中央に持ってくる
gene_center = {}
for i in gene_target_first.index:
Mid = int(mid.loc[i,'mid'])
gene_center[i] = [None] * (int(maximum[0])-Mid) + list(gene_target_first.loc[i,Mid+1:].dropna()) + list(gene_target_first.loc[i,:Mid])
modified_gene = | pd.DataFrame.from_dict(gene_center, orient='index') | pandas.DataFrame.from_dict |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 9 16:22:47 2020
@author: mrizwan
"""
import pandas as pd
# Initialize dataframes
df1 = pd.read_csv('one.csv')
print(df1)
'''
year name id education
0 2010 andy 101 bachelor
1 2012 peter 102 master
2 2009 mark 103 school
'''
df2 = pd.read_csv('two.csv')
print(df2)
'''
id name country age city status
0 101 andy usa 31 nyc single
1 102 peter uk 29 london single
2 103 mark aus 33 sydney married
3 104 riz ind 34 blore married
'''
# Merge two dfs based on it key id
df = pd.merge(df1,df2, on=['id', 'name'])
print(df)
'''
year name id education country age city status
0 2010 andy 101 bachelor usa 31 nyc single
1 2012 peter 102 master uk 29 london single
2 2009 mark 103 school aus 33 sydney married
'''
# Merge based on inner method
# Merges common element of data frame
df = | pd.merge(df1,df2, how='inner', on='id') | pandas.merge |
#!/usr/bin/python3
"""run_filter.py
Run a flight data set through a filter and output a few simple plots
Author: <NAME>, University of Minnesota
"""
import argparse
import math
from matplotlib import pyplot as plt
import matplotlib.transforms
#import mpld3
import numpy as np
import os
import pandas as pd
from tqdm import tqdm
from aurauas_flightdata import flight_loader, flight_interp
parser = argparse.ArgumentParser(description='nav filter')
parser.add_argument('--flight', required=True, help='flight data log')
parser.add_argument('--wind-time', type=float, help='force a wind re-estimate with this time factor.')
args = parser.parse_args()
r2d = 180.0 / math.pi
d2r = math.pi / 180.0
m2nm = 0.0005399568034557235 # meters to nautical miles
mps2kt = 1.94384 # m/s to kts
ft2m = 0.3048
m2ft = 1.0 / ft2m
path = args.flight
data, flight_format = flight_loader.load(path)
print("imu records:", len(data['imu']))
imu_dt = (data['imu'][-1]['time'] - data['imu'][0]['time']) \
/ float(len(data['imu']))
print("imu dt: %.3f" % imu_dt)
print("gps records:", len(data['gps']))
if 'air' in data:
print("airdata records:", len(data['air']))
if len(data['imu']) == 0 and len(data['gps']) == 0:
print("not enough data loaded to continue.")
quit()
# make data frames for easier plotting
df0_imu = pd.DataFrame(data['imu'])
df0_imu.set_index('time', inplace=True, drop=False)
df0_gps = | pd.DataFrame(data['gps']) | pandas.DataFrame |
from __future__ import absolute_import, unicode_literals
from celery import shared_task
from celery_progress.backend import ProgressRecorder
from datetime import datetime
import pytz
from django.utils import timezone as django_timezone
import os
import glob
import pickle
import numpy as np
import pandas as pd
import logging
import json
from scipy import stats
from pygest import algorithms
from pygest.convenience import bids_val, extract_seed, build_descriptor
from pygest.erminej import run_gene_ontology
import pygest as ge
from .models import PushResult, ResultSummary, GroupedResultSummary
from .plots import plot_all_train_vs_test, plot_performance_over_thresholds, plot_overlap
from .plots import plot_fig_2, plot_fig_3, plot_fig_4
from .plots import describe_overlap, describe_mantel
from .genes import describe_genes, describe_ontologies
from .decorators import print_duration
class NullHandler(logging.Handler):
def emit(self, record):
pass
null_handler = NullHandler()
def tz_aware_file_mtime(path):
""" Return New_York time, with timezone, from file's last modified timestamp. """
return pytz.timezone("America/New_York").localize(
datetime.fromtimestamp(os.path.getmtime(path))
)
def glob_str_from_keys(
pygest_data="/data",
sub="*", hem="*", samp="*", prob="*", parby="*", splby="*", batch="train", split_pad="00*",
tgt="*", algo="*", shuf="*", comp="*", mask="*", norm="*", adj="*"
):
""" Use BIDS keys and values to generate a globbable string
"""
return str(os.path.join(
pygest_data,
"derivatives",
"sub-{}_hem-{}_samp-{}_prob-{}".format(sub, hem, samp, prob),
"parby-{}_splby-{}_batch-{}{}".format(parby, splby, batch, split_pad),
"tgt-{}_algo-{}_shuf-{}".format(tgt, algo, shuf),
"sub-{}_comp-{}_mask-{}_norm-{}_adj-{}*.tsv".format(sub, comp, mask, norm, adj),
))
def interpret_descriptor(descriptor):
""" Parse the plot descriptor into parts """
rdict = {
'descriptor': descriptor,
'comp': comp_from_signature(descriptor[:4]),
'parby': "glasser" if descriptor[3].lower() == "g" else "wellid",
'splby': "glasser" if descriptor[4].lower() == "g" else "wellid",
'mask': descriptor[5:7],
'algo': {"o": "once", "s": "smrt", "l": "leon", "_": "unkn", "-": "unkn"}[descriptor[7]],
'norm': 'srs' if ((len(descriptor) > 8) and (descriptor[8] == "s")) else "none",
'xval': descriptor[9] if len(descriptor) > 9 else '0',
'phase': 'train',
'split_pad': '*',
'opposite_phase': 'test',
}
try:
rdict['threshold'] = int(descriptor[8:])
except ValueError:
# This catches "peak" or "", both should be fine as None
rdict['threshold'] = None
# The xval, or cross-validation sample split range indicator,
# is '2' for splits in the 200's and '4' for splits in the 400s.
# Splits in the 200's are split-halves; 400's are split-quarters.
split_min = 0
split_max = 999
if rdict['xval'] == '1':
split_min = 100
split_max = 199
rdict["split_pad"] = "001*"
if rdict['xval'] == '2':
split_min = 200
split_max = 299
rdict["split_pad"] = "002*"
elif rdict['xval'] == '4':
split_min = 400
split_max = 499
rdict["split_pad"] = "004*"
elif rdict['xval'] in ['_', '-', '0', ]:
split_max = 0
rdict["phase"] = "whole"
rdict["split_pad"] = ""
rdict['query_set'] = PushResult.objects.filter(
samp="glasser", prob="fornito", split__gte=split_min, split__lte=split_max,
algo=rdict['algo'], comp=rdict['comp'], parby=rdict['parby'], splby=rdict['splby'],
mask=rdict['mask'], norm=rdict['norm'],
batch__startswith=rdict['phase']
)
rdict['n'] = (rdict['query_set']).count()
# Files are named with mask-none rather than mask-00, so tweak that key
rdict['glob_mask'] = "none" if rdict['mask'] == "00" else rdict['mask']
rdict['glob_pattern'] = os.path.join(
"derivatives", "sub-all_hem-A_samp-glasser_prob-fornito",
"parby-{parby}_splby-{splby}_batch-{phase}{split_pad}",
"tgt-max_algo-{algo}_shuf-*",
"sub-all_comp-{comp}_mask-{glob_mask}_norm-{norm}_adj-none*.tsv"
).format(**rdict)
rdict['glob_dict'] = {
"sub": "all", "hem": "A", "samp": "glasser", "prob": "fornito",
"parby": rdict["parby"], "splby": rdict["splby"],
"batch": rdict["phase"] + rdict["split_pad"],
"tgt": "max", "algo": rdict["algo"], "shuf": "*",
"comp": rdict["comp"], "mask": rdict["glob_mask"], "norm": rdict["norm"], "adj": "none",
}
return rdict
def gather_results(pattern=None, glob_dict=None, data_root="/data", pr=None):
""" Quickly find available files, and queue any heavy processing elsewhere.
:param pattern: glob pattern to restrict files searched for
:param glob_dict: glob dict to restrict files searched for
:param data_root: default /data, base path to all of the results
:param pr: progress_recorder to report intermediate status.
"""
if pr:
pr.set_progress(0, 100, "Looking for files")
# Get a list of files to parse.
if pattern is None and glob_dict is None:
globbable = str(os.path.join(data_root, "derivatives", "*", "*", "*", "*.tsv"))
elif isinstance(glob_dict, dict):
globbable = glob_str_from_keys(pygest_data=data_root, **glob_dict)
elif isinstance(pattern, str):
globbable = pattern
else:
print("File globbing can run via 'gather_results()' by setting 'pattern' to a path")
print("or 'glob_dict' to a BIDS dict. Something else was supplied.")
print(" 'pattern' is '{}' and glob_dict is '{}'".format(type(pattern), type(glob_dict)))
globbable = "NONSENSE_WITHOUT_A_MATCH"
print("Starting to glob files ('{}')".format(globbable))
before_glob = datetime.now()
files = glob.glob(globbable)
after_glob = datetime.now()
print("File glob complete; discovered {:,} results in {:,.1f} seconds".format(
len(files), (after_glob - before_glob).total_seconds()
))
if pr:
pr.set_progress(0, len(files), "Checking filesystem against database")
dupe_count = 0
new_count = 0
# import random
# for i, path in enumerate(random.sample(files, 500)):
for i, path in enumerate(files):
if PushResult.objects.filter(tsv_path=path).exists():
dupe_count += 1
else:
new_count += 1
r = PushResult()
r.fill_from_tsv_file(tsv_path=path, data_root=data_root)
r.save()
if pr:
pr.set_progress(i, len(files), "Summarizing new files")
print("Processed {:,} files; found {:,} new results and {:,} were already in the database (now {:,}).".format(
len(files), new_count, dupe_count, PushResult.objects.count(),
))
# print(PushResult.objects.values('descriptor').annotate(count=Count('descriptor')))
if PushResult.objects.count() > 0:
print("Saving a summary, dated {}.".format(str(django_timezone.now())))
print("There are {} PushResults in the database.".format(PushResult.objects.count()))
s = ResultSummary.current()
print("Summary: ", s.to_json())
s.save()
unique_groups = list(PushResult.objects.values("comp", "parby", "resample", "norm", "mask", "algo").distinct())
for ug in unique_groups:
results_in_group = PushResult.objects.filter(
comp=ug["comp"], parby=ug["parby"], resample=ug["resample"], mask=ug["mask"], algo=ug["algo"],
)
# TODO: Add StatusCounts model (for each GroupedResultSummary)
if ug.get("resample", "") == "split-half":
split = 299
elif ug.get("resample", "") == "split-quarter":
split = 499
elif ug.get("resample", "") == "whole":
split = 0
else:
split = 0
g = GroupedResultSummary(
summary_date=django_timezone.now(),
sourcedata=build_descriptor(
ug.get("comp", ""), ug.get("parby", ""), ug.get("mask", ""), # splby and parby interchangeable here
ug.get("norm", ""), split, algo=ug.get("algo", "smrt"), level="long",
),
descriptor=build_descriptor(
ug.get("comp", ""), ug.get("parby", ""), ug.get("mask", ""), # splby and parby interchangeable here
ug.get("norm", ""), split, algo=ug.get("algo", "smrt"), level="short",
),
comp=ug['comp'],
parby=ug['parby'],
mask=ug['mask'],
algo=ug["algo"],
resample=ug["resample"],
num_reals=results_in_group.filter(shuf="none").count(),
num_agnos=results_in_group.filter(shuf="agno").count(),
num_dists=results_in_group.filter(shuf="dist").count(),
num_be04s=results_in_group.filter(shuf="be04").count(),
summary=s,
)
g.save()
else:
print("No PushResults, not saving a summary.")
@shared_task(bind=True)
def gather_results_as_task(self, pattern=None, data_root="/data"):
""" Celery wrapper for gather_results() """
progress_recorder = ProgressRecorder(self)
gather_results(pattern=pattern, data_root=data_root, pr=progress_recorder)
@shared_task(bind=True)
def clear_all_jobs(self):
""" Delete all results from the database, not the filesystem, and create a new summary indicating no records.
:param self: available through "bind=True", allows a reference to the celery task this function becomes a part of.
"""
progress_recorder = ProgressRecorder(self)
progress_recorder.set_progress(0, 100, "Deleting file records (not files)")
PushResult.objects.all().delete()
progress_recorder.set_progress(50, 100, "Updating summaries")
s = ResultSummary.empty(timestamp=True)
print("Summary: ", s.to_json())
s.save()
progress_recorder.set_progress(100, 100, "Cleared")
def clear_some_jobs(descriptor=None):
""" Delete all results from the database, not the filesystem, and create a new summary indicating no records.
:param descriptor: id string to specify which group's cache data will be cleared.
"""
rdict = interpret_descriptor(descriptor)
rdict['query_set'].delete()
s = ResultSummary.current()
print("Summary: ", s.to_json())
s.save()
def comp_from_signature(signature, filename=False):
""" Convert signature to comp string
:param signature: The 7-character string indicating which group of results to use.
:param filename: Set filename=True to get the comparator filename rather than its BIDS string representation.
:return:
"""
comp_map = {
'hcpg': 'hcpniftismoothconnparbyglassersim',
'hcpw': 'hcpniftismoothconnsim',
'nkig': 'indiglasserconnsim',
'nkiw': 'indiconnsim',
'f__g': 'fearglassersim',
'f__w': 'fearconnsim',
'n__g': 'neutralglassersim',
'n__w': 'neutralconnsim',
'fn_g': 'fearneutralglassersim',
'fn_w': 'fearneutralconnsim',
'px_w': 'glasserwellidsproximity',
'px_g': 'glasserparcelsproximity',
'pxlw': 'glasserwellidslogproximity',
'pxlg': 'glasserparcelslogproximity',
'hcpniftismoothconnparbyglassersim': 'hcp_niftismooth_conn_parby-glasser_sim.df',
'hcpniftismoothconnsim': 'hcp_niftismooth_conn_sim.df',
'indiglasserconnsim': 'indi-glasser-conn_sim.df',
'indiconnsim': 'indi-connectivity_sim.df',
'fearglasserconnsim': 'fear_glasser_sim.df',
'fearconnsim': 'fear_conn_sim.df',
'neutralglassersim': 'neutral_glasser_sim.df',
'neutralconnsim': 'neutral_conn_sim.df',
'fearneutralglassersim': 'fear-neutral_glasser_sim.df',
'fearneutralconnsim': 'fear-neutral_conn_sim.df',
'glasserwellidsproximity': 'glasser-wellids-proximity',
'glasserparcelsproximity': 'glasser-parcels-proximity',
'glasserwellidslogproximity': 'glasser-wellids-log-proximity',
'glasserparcelslogproximity': 'glasser-parcels-log-proximity',
}
if filename:
return comp_map[comp_map[signature.lower()]]
return comp_map[signature.lower()]
def test_score(tsv_file, base_path='/data', own_expr=False, mask='none', probe_significance_threshold=None):
""" Use tsv_file to figure out its complement test expression dataframe.
Use the trained probe list to index the testing expression dataset.
and return the new Mantel correlation """
# Figure out where to get the test set's expression data
batch = 'none'
if (own_expr & ('batch-test' in tsv_file)) | ((not own_expr) & ('batch-train' in tsv_file)):
batch = bids_val("batch", tsv_file).replace('train', 'test')
elif (own_expr & ('batch-train' in tsv_file)) | ((not own_expr) & ('batch-test' in tsv_file)):
batch = bids_val("batch", tsv_file).replace('test', 'train')
else:
# For 'whole' optimizations, there is no test set.
return 0.0
# Figure out where to get data, based on which score we want.
expr_file = os.path.join(
base_path, "splits", "sub-all_hem-A_samp-glasser_prob-fornito", "batch-{}".format(batch),
"parcelby-{}_splitby-{}.{}.df".format(
bids_val('parby', tsv_file), bids_val('splby', tsv_file),
"raw" if bids_val('norm', tsv_file) == "none" else bids_val('norm', tsv_file)
)
)
# If comp_from_signature is given a comp BIDS string, it will return the filename of the comp file.
comp_file = os.path.join(base_path, "conn", comp_from_signature(bids_val('comp', tsv_file)))
# Get the actual data
if os.path.exists(tsv_file) & os.path.exists(expr_file) & os.path.exists(comp_file):
scoring_probes = algorithms.run_results(tsv_file, top=probe_significance_threshold)['top_probes']
with open(expr_file, 'rb') as f:
expr = pickle.load(f)
with open(comp_file, 'rb') as f:
comp = pickle.load(f)
else:
if not os.path.exists(tsv_file):
print("ERR: NOT A TSV FILE: {}".format(tsv_file))
if not os.path.exists(expr_file):
print("ERR: NOT A EXP FILE: {}".format(expr_file))
if not os.path.exists(comp_file):
print("ERR: NOT A CMP FILE: {}".format(comp_file))
return 0.0
""" Only correlate the top genes, as specified by probe_significance_threshold. """
# And filter them both down to relevant data
# This list MUST be in the same order as the comp columns due to 0's in upper triangle
# print("{}, {},\n\t{} vs\n\t{} using\n\t{} probes.".format(
# "self" if own_expr else "othr", scoring_phase, expr_file, comp_file, tsv_file
# ))
overlapping_samples = [col for col in comp.columns if col in expr.columns]
print(" tsv: {}".format(tsv_file))
print(" expr: {}".format(expr_file))
print(" comp: {}".format(comp_file))
print(" test score from top {:,} probes, and {:,} samples".format(len(scoring_probes), len(overlapping_samples)))
expr = expr.loc[scoring_probes, overlapping_samples]
comp = comp.loc[overlapping_samples, overlapping_samples]
expr_mat = np.corrcoef(expr, rowvar=False)
comp_mat = comp.values
expr_vec = expr_mat[np.tril_indices(n=expr_mat.shape[0], k=-1)]
comp_vec = comp_mat[np.tril_indices(n=comp_mat.shape[0], k=-1)]
if mask != "none":
data = ge.Data(base_path, external_logger=null_handler)
v_mask = algorithms.one_mask(expr, mask, bids_val('parby', tsv_file), data)
expr_vec = expr_vec[v_mask]
comp_vec = comp_vec[v_mask]
# return algorithms.correlate(expr, comp_mat)
return np.corrcoef(expr_vec, comp_vec)[0, 1]
def train_vs_test_overlap(tsv_file, probe_significance_threshold=None):
""" Determine the percentage of overlap between the provided tsv_file and
its complementary train/test tsv_file. """
if 'test' in tsv_file:
comp_file = tsv_file.replace('test', 'train')
elif 'train' in tsv_file:
comp_file = tsv_file.replace('train', 'test')
else:
print("Non train/test file: '{}'".format(tsv_file))
return 0.0
# Sometimes, a comparision will be requested between a train file and its nonexistent test counterpart.
# In that situation, it's best to just return 0.0 rather than add all the logic to determine test completion.
if os.path.isfile(tsv_file) and os.path.isfile(comp_file):
return algorithms.pct_similarity(
[tsv_file, comp_file], map_probes_to_genes_first=False, top=probe_significance_threshold
)
# raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), comp_file)
else:
return 0.00
def results_as_dict(tsv_file, base_path, probe_sig_threshold=None, use_cache=True):
""" Return a key-value description of a single result.
This is the workhorse of all of these plots. All calculations from these runs come from this function.
"""
# These calculations can be expensive when run a lot. Load a cached copy if possible.
analyzed_path = tsv_file.replace(".tsv", ".top-{}.v4.json".format(
"peak" if probe_sig_threshold is None else "{:04}".format(probe_sig_threshold)
))
if use_cache and os.path.isfile(analyzed_path):
saved_dict = json.load(open(analyzed_path, 'r'))
return saved_dict
mask = bids_val('mask', tsv_file)
norm = bids_val('norm', tsv_file)
shuf = bids_val('shuf', tsv_file)
seed = extract_seed(tsv_file, "_seed-")
result_dict = algorithms.run_results(tsv_file, probe_sig_threshold)
result_dict.update({
# Characteristics available in the path, if necessary
'path': tsv_file,
'phase': 'train' if 'batch-train' in tsv_file else 'test',
'algo': bids_val('algo', tsv_file),
'splby': 'glasser' if 'splby-glasser' in tsv_file else 'wellid',
'parby': 'glasser' if 'parby-glasser' in tsv_file else 'wellid',
'mask': mask,
'norm': norm,
'shuf': shuf,
# Neither path nor calculation, the threshold we use for calculations
'threshold': "peak" if probe_sig_threshold is None else "{:04}".format(probe_sig_threshold),
# Calculations on the data within the tsv file - think about what we want to maximize, beyond Mantel.
# These use data from this file, its original train and test expression data, and conn-sim data to calculate
# the following.
'train_score': test_score(
tsv_file, base_path, own_expr=True, mask='none', probe_significance_threshold=probe_sig_threshold),
'test_score': test_score(
tsv_file, base_path, own_expr=False, mask='none', probe_significance_threshold=probe_sig_threshold),
'masked_train_score': test_score(
tsv_file, base_path, own_expr=True, mask=mask, probe_significance_threshold=probe_sig_threshold),
'masked_test_score': test_score(
tsv_file, base_path, own_expr=False, mask=mask, probe_significance_threshold=probe_sig_threshold),
'train_vs_test_overlap': train_vs_test_overlap(tsv_file, probe_significance_threshold=probe_sig_threshold),
'split': extract_seed(tsv_file, "batch-train"),
'seed': seed,
'real_tsv_from_shuffle': tsv_file.replace("shuf-" + shuf, "shuf-none").replace("_seed-{:05d}".format(seed), ""),
})
# Cache results to prevent repeated recalculation of the same thing.
json.dump(result_dict, open(analyzed_path, 'w'))
return result_dict
def calc_ttests(row, df):
""" for a given dataframe row, if it's a real result, return its t-test t-value vs all shuffles in df. """
if row.shuf == 'none':
return stats.ttest_1samp(
df[(df['threshold'] == row.threshold)]['train_score'],
row.train_score,
)[0]
else:
return 0.0
def calc_real_v_shuffle_overlaps(row, df):
""" for a given dataframe row, if it's a real result, return its overlap pctage vs all shuffles in df. """
if row.shuf == 'none':
overlaps = []
for shuffled_tsv in df[(df['threshold'] == row.threshold)]['path']:
top_threshold = row.threshold
if top_threshold == "peak" or top_threshold == 0:
top_threshold = None
overlaps.append(
algorithms.pct_similarity([row.path, shuffled_tsv], map_probes_to_genes_first=True, top=top_threshold)
)
return np.mean(overlaps)
else:
return 0.0
def calc_total_overlap(row, df):
""" This doesn't really work, yet, as experimentation is being done for what it should mean. """
if row.shu == 'none':
overlaps = []
for shuffled_tsv in df[(df['threshold'] == row.threshold)]['path']:
overlaps.append(
algorithms.pct_similarity([row.path, shuffled_tsv], map_probes_to_genes_first=True, top=row.threshold)
)
return np.mean(overlaps)
else:
return 0.0
@print_duration
def update_is_necessary(descriptor, data_root="/data"):
""" Figure out if there are new files to include, necessitating recalculation and cache updates, or not.
:param descriptor: An abbreviation indicating which items we are calculating
:param str data_root: The PYGEST_DATA base path where all PyGEST data files can be found
"""
# Find all results in our database that match our descriptor
rdict = interpret_descriptor(descriptor)
# Find all files in the filesystem that match our descriptor
glob_pattern = os.path.join(data_root, rdict['glob_pattern'])
print("Starting to glob files ('{}')".format(glob_pattern))
files = glob.glob(glob_pattern)
print("For {}, found {} database entries and {} result files downstream of {}.".format(
descriptor, len(rdict['query_set']), len(files), data_root,
))
print("For descriptor {}, {:,} records in database -vs- {:,} files".format(
descriptor, len(rdict['query_set']), len(files)
))
return len(rdict['query_set']) < len(files), rdict
@print_duration
def calculate_individual_stats(
descriptor, progress_recorder, progress_from=0, progress_to=99, data_root="/data", use_cache=True
):
""" Determine the list of results necessary, and build or load a dataframe around them. """
do_update, rdict = update_is_necessary(descriptor)
print("Update necessary? - {}".format(do_update))
if do_update:
# Delete relevant database records, then rebuild them with newly discovered files.
print(" clearing jobs (in calculate_individual_stats, because db out of date)")
clear_some_jobs(descriptor)
print(" clearing macros (in calculate_individual_stats, because db out of date)")
clear_macro_cache(descriptor, data_root=data_root)
print(" gathering results (in calculate_individual_stats, because db out of date)")
gather_results(pattern=rdict['glob_pattern'], data_root=data_root)
rdict = interpret_descriptor(descriptor)
progress_recorder.set_progress(progress_from, progress_to, "Step 1/3<br />Finding results")
print("Found {:,} results in database ({} {} {} {} {} {} {} {}) @{}".format(
rdict['n'], "glasser", "fornito",
rdict['algo'], rdict['comp'], rdict['parby'], rdict['splby'], rdict['mask'], rdict['norm'],
'peak' if rdict['threshold'] is None else rdict['threshold'],
))
cache_file = os.path.join(data_root, "plots", "cache", "{}_summary_individual.df".format(rdict['descriptor']))
if use_cache and os.path.isfile(cache_file):
""" Load results from a cached file, if possible"""
print("Loading individual data from cache, {}".format(cache_file))
df = pickle.load(open(cache_file, "rb"))
else:
""" Calculate results for individual tsv files. """
relevant_results = []
for i, path in enumerate([p[0] for p in rdict['query_set'].values_list('tsv_path')]):
print("Calculating stats for #{}. {}".format(i, path))
if os.path.isfile(path):
relevant_results.append(
results_as_dict(path, base_path=data_root, probe_sig_threshold=rdict['threshold'])
)
run_gene_ontology(path, base_path=data_root)
else:
print("ERR: DOES NOT EXIST: {}".format(path))
complete_portion = ((i + 1) / rdict['n']) * (progress_to - progress_from)
progress_recorder.set_progress(
progress_from + complete_portion, 100,
"Step 1/3<br />Processing {:,}/{:,} results".format(i, rdict['n'])
)
df = | pd.DataFrame(relevant_results) | pandas.DataFrame |
# 極性辞書を用いた文章分散表現を獲得
import pandas as pd
import numpy as np
import json
from gensim.models import Word2Vec, TfidfModel, KeyedVectors
from gensim.corpora import Dictionary
from tqdm import tqdm
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
def createVector(df, model):
print("create Vector")
# Sentiment Dictionary を用いる
f = open("../data/sentiment-dictionary.json", "r")
dic = json.load(f)
for key, value in dic.items():
if value == 0 or value == 1:
dic[key] = 1
else:
dic[key] = -1
num_features = model.vector_size
sdv = np.zeros((len(df), num_features*3))
for idx, (text, label, _) in tqdm(df.iterrows()):
neu = np.zeros((1, num_features))
pos = np.zeros((1, num_features))
neg = np.zeros((1, num_features))
neu_num, pos_num, neg_num = 0,0,0
for word in text.split(","):
if word in dic:
if dic[word] == 1:
if word in model.wv.vocab:
pos_num += 1
pos = pos + np.array(model.wv[word])
else:
if word in model.wv.vocab:
neg_num += 1
neg = neg + np.array(model.wv[word])
else:
if word in model.wv.vocab:
neu_num += 1
neu = neu + np.array(model.wv[word])
neu_num = max(neu_num, 1)
pos_num = max(pos_num,1)
neg_num = max(neg_num, 1)
sdv[idx] = np.hstack((neu/neu_num, pos/pos_num, neg/neg_num))
return sdv
if __name__ == "__main__":
PATH = "../data/corpus-wakati-juman.tsv"
df = pd.read_table(PATH, index_col=0)
df = df[~ | pd.isnull(df["text"]) | pandas.isnull |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import datetime as dt
import numpy as np
import time
from sklearn.feature_extraction.text import CountVectorizer
word_vectorizer = CountVectorizer(ngram_range=(1,2), analyzer='word')
import numpy as np
from nltk import ngrams
import nltk
from nltk.util import ngrams
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
import sys
sys.path.insert(0, r"c:\users\ishaa\anaconda3\envs\tf_gpu\lib\site-packages")
import numpy as np
from keras.datasets import imdb
from keras.preprocessing.text import Tokenizer
from keras import models
from keras import layers
# from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from keras.optimizers import SGD
from keras import Sequential
# Set random seed
np.random.seed(21)
# In[2]:
#df=pd.read_csv(r'C:\Users\ishaa\Documents\Thesis\01 Data\BPI16\12674816\BPI2016_Clicks_Logged_In.csv', sep=';', encoding='latin-1', keep_default_na=False)
df=pd.read_csv('./rawdata/BPI2016_Clicks_Logged_In.csv', sep=';', encoding='latin-1', keep_default_na=False)
# In[3]:
df['time'] = | pd.to_datetime(df['TIMESTAMP']) | pandas.to_datetime |
import pandas as pd
import numpy as np
def btk_data_decoy_old():
df = pd.read_csv('btk_active_decoy/BTK_2810_old.csv')
df_decoy = pd.read_csv('btk_active_decoy/btk_finddecoy.csv')
df_decoy = pd.DataFrame(df_decoy['smile'])
df_decoy['label'] = 0
df_active = df[df['target2']<300]
df_active['target2'] = 1
df_ic_decoy = df[df['target2']>9000]
df_ic_decoy['target2'] = 0
del df_active['target1'],df_ic_decoy['target1']
df_active.columns = df_ic_decoy.columns = df_decoy.columns = ['smiles','label']
df_all = pd.concat([df_active,df_ic_decoy])
df_all = pd.concat([df_all,df_decoy])
df_all.to_csv('btk_active_decoy/btk_2810_add_decoy_old.csv',index=None)
def btk_data_cut_decoy():
df = pd.read_csv('btk_active_decoy/BTK_2810_old.csv')
df_decoy = pd.read_csv('btk_active_decoy/btk_finddecoy.csv')
df_cut_decoy = pd.read_csv('btk_active_decoy/similarity_active_decoy.csv')
df_cut_decoy = df_cut_decoy.head(1139)#1139是根据正样本1393个,乘以10比例13930,原先数据decoy总量为15069,15069-13930=1139
df_decoy = pd.DataFrame(df_decoy['smile'])
df_decoy['label'] = 0
df_active = df[df['target2']<300]
df_active['target2'] = 1
df_ic_decoy = df[df['target2']>9000]
df_ic_decoy['target2'] = 0
del df_active['target1'],df_ic_decoy['target1']
df_active.columns = df_ic_decoy.columns = df_decoy.columns = ['smiles','label']
df_all = pd.concat([df_active,df_ic_decoy])
df_all = pd.concat([df_all,df_decoy])
df_all_filter = df_all[~ df_all['smiles'].isin(df_cut_decoy['train_smiles'])]
df_all_filter.to_csv('btk_active_decoy/btk_2810_cut_decoy.csv',index=None)
def btk_data_decoy():
df = pd.read_csv('btk_active_decoy/BTK_2810.csv')
df_decoy = pd.read_csv('btk_active_decoy/btk_finddecoy.csv')
df_decoy = df_decoy.sample(frac=0.5, random_state=123)
df_decoy = pd.DataFrame(df_decoy['smile'])
df_decoy['label'] = 0
df_active = df[df['target2']<300]
df_active['target2'] = 1
df_ic_decoy = df[df['target2']>9000]
df_ic_decoy['target2'] = 0
del df_active['target1'],df_ic_decoy['target1']
df_active.columns = df_ic_decoy.columns = df_decoy.columns = ['smiles','label']
df_all = | pd.concat([df_active,df_ic_decoy]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 27 15:01:04 2019
@author: lealp
"""
import pandas as pd
import glob
from shapely.geometry import Point
import geopandas as gpd
import numpy as np
import os
import xarray as xr
from unittest import TestCase
from time_space_reductions.match_ups_over_centroids import get_match_up_over_centroids
from time_space_reductions.match_ups_over_polygons import get_zonal_match_up
from time_space_reductions.match_ups_over_points.test_matchup import (test_temporal_and_spatial_matchUps,
test_only_spatial_matchUps)
def make_fake_data(N=200):
# creating example GeoDataframe for match-ups in EPSG 4326
xx = np.random.randint(low=-60, high=-33, size=N)*1.105
yy = np.random.randint(low=-4, high=20, size=N)*1.105
df = | pd.DataFrame({'lon':xx, 'lat':yy}) | pandas.DataFrame |
import argparse
import sys
from os import path, makedirs, scandir
from random import shuffle
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
from classifier.FaceClassifier import FaceClassifier
models = [
"random_forest",
"knn",
"svm",
"mlp",
"dtree",
"all"
]
input_folder = path.join(path.abspath(path.curdir), "data", "input")
people_folders = None
number_imgs_list = []
embeddings = []
embeddings_ids = []
embeddings_df = None
people_df = None
vector_size = None
face_classifier = FaceClassifier()
def download(file_path, url):
import requests
import math
r = requests.get(url, stream=True)
total_size = int(r.headers.get('content-length'))
block_size = 1024
with open(file_path, 'wb') as f:
for data in tqdm(r.iter_content(block_size), total=math.ceil(total_size // block_size), desc="Download",
unit='B', unit_scale=True, unit_divisor=1024):
f.write(data)
def down_img_db():
import shutil
import tarfile
input_parent_dir = path.dirname(input_folder)
temp_folder = path.join(path.curdir, "data", "temp")
makedirs(input_parent_dir, exist_ok=True)
makedirs(temp_folder, exist_ok=True)
if path.exists(input_folder):
shutil.move(input_folder, input_folder + "_bkp")
tgz_path = path.join(temp_folder, "lfw.tgz")
download(tgz_path, "http://vis-www.cs.umass.edu/lfw/lfw.tgz")
if not path.exists(tgz_path):
print("Problema no download")
sys.exit()
print("Extraindo arquivo para {}, isso pode levar um tempo".format(temp_folder))
tar = tarfile.open(tgz_path, "r:gz")
tar.extractall(temp_folder)
print("Movendo arquivos extraidos para a pasta de entrada")
shutil.move(path.join(temp_folder, "lfw"), input_folder)
return True
def embeddings_to_df():
global embeddings_df, embeddings, embeddings_ids, vector_size
index = | pd.MultiIndex.from_tuples(embeddings_ids, names=["Name", "Image_Number"]) | pandas.MultiIndex.from_tuples |
import argparse
import csv
import gzip
import re
import numpy as np
import zarr
from scipy import sparse
from zarr import Blosc
import pandas as pd
import logging
COMPRESSOR = Blosc(cname="lz4", clevel=5, shuffle=Blosc.SHUFFLE, blocksize=0)
# the number of rows in a chunk for expression counts
CHUNK_ROW_SIZE = 10000
CHUNK_COL_SIZE = 10000
logging.basicConfig(level=logging.INFO)
def init_zarr(sample_id, path, file_format, schema_version):
"""Initializes the zarr output.
Args:
sample_id (str): sample or cell id
path (str): path to the zarr output
file_format (str): zarr file format [DirectoryStore, ZipStore]
schema_version (str): version string of this output to allow for parsing of future changes
Returns:
root (zarr.hierarchy.Group): initialized zarr group
"""
store = None
if file_format == "DirectoryStore":
store = zarr.DirectoryStore(path)
if file_format == "ZipStore":
store = zarr.ZipStore(path, mode="w")
# create the root group
root = zarr.group(store, overwrite=True)
# root.attrs['README'] = "The schema adopted in this zarr store may undergo changes in the future"
root.attrs["sample_id"] = sample_id
root.attrs["optimus_output_schema_version"] = schema_version
# Create the expression_matrix group
# root.create_group("expression_matrix", overwrite=True);
return root
def add_gene_metrics(data_group, input_path, gene_ids, verbose=False):
"""Converts the gene metrics from the Optimus pipeline to zarr file
Args:
data_group (zarr.hierarchy.Group): datagroup object for the zarr
input_path (str): file containing gene metrics name and values
gene_ids (list): list of gene ids
verbose (bool): whether to output verbose messages for debugging purposes
"""
# read the gene metrics names and values
if input_path.endswith(".gz"):
with gzip.open(input_path, "rt") as f:
gene_metrics = [row for row in csv.reader(f)]
else:
with open(input_path, "r") as f:
gene_metrics = [row for row in csv.reader(f)]
# metric names we use [1:] to remove the empty string
if len(gene_metrics[0][1:]):
data_group.create_dataset(
"gene_metadata_numeric_name",
shape=(len(gene_metrics[0][1:]),),
compressor=COMPRESSOR,
dtype="<U80",
chunks=(len(gene_metrics[0][1:]),),
data=list(gene_metrics[0][1:]),
)
else:
logging.info(
'Not adding "gene_metadata_numeric_name" to zarr output: must have at least one metric'
)
if verbose:
logging.info("# gene numeric metadata", len(gene_metrics[0][1:]))
# Gene metric values, the row and column sizes
gene_ids_location = {
gene_id: index for index, gene_id in enumerate(gene_ids)
}
# ignore the first line with the metric names in text
ncols = 0
gene_id_to_metric_values = {}
for row in gene_metrics:
# only consider genes that are in the count matrix
if not row[0] in gene_ids_location:
continue
row_values = []
for value_string in row[1:]:
# some of the standard deviation values do not exist for one reads matches
try:
value = np.float32(value_string)
except ValueError:
value = np.nan
row_values.append(value)
gene_id_to_metric_values[row[0]] = row_values
# note that all of these lengths are assumed to be equal and this check is already done in the pipeline
ncols = len(row_values)
# now insert the metrics of the cells that are in count matrix, i.e., the global variable "cell_ids"
gene_metric_values = []
for gene_id in gene_ids:
if gene_id in gene_id_to_metric_values:
gene_metric_values.append(gene_id_to_metric_values[gene_id])
else:
# if no metrics for a cell present in the count matrix then fill them with np.nans
gene_metric_values.append([np.nan] * ncols)
nrows = len(gene_ids)
if verbose:
logging.info("# of genes: {}".format(nrows))
logging.info("# of gene metadate metrics: {}".format(ncols))
# now insert the dataset that has the numeric values for the qc metrics for the genes
if nrows and ncols:
data_group.create_dataset(
"gene_metadata_numeric",
shape=(nrows, ncols),
compressor=COMPRESSOR,
dtype=np.float32,
chunks=(nrows, ncols),
data=gene_metric_values,
)
else:
logging.info(
'Not adding "gene_metadata_numeric" to zarr output: either the #genes or # cell ids is 0'
)
def add_cell_metrics(
data_group, metrics_file, cell_ids, emptydrops_file, verbose=False,
):
"""Converts cell metrics from the Optimus pipeline to zarr file
Args:
data_group (zarr.hierarchy.Group): datagroup object for the zarr
input_path (str): file containing gene metrics name and values
cell_ids (list): list of cell ids
verbose (bool): whether to output verbose messages for debugging purposes
emptydrops_path (str): emptydrops csv file
"""
# Read the csv input files
metrics_df = pd.read_csv(metrics_file, dtype=str)
emptydrops_df = pd.read_csv(emptydrops_file, dtype=str)
# Check that input is valid
if metrics_df.shape[0] == 0 or metrics_df.shape[1] == 0:
logging.error("Cell metrics table is not valid")
raise ValueError()
if emptydrops_df.shape[0] == 0 or emptydrops_df.shape[1] == 0:
logging.error("EmptyDrops table is not valid")
raise ValueError()
# Rename cell columns for both datasets to cell_id
emptydrops_df = emptydrops_df.rename(columns={"CellId": "cell_id"})
metrics_df = metrics_df.rename(columns={"Unnamed: 0": "cell_id"})
# Drop first row that contains non-cell information from metrics file, this contains aggregate information
metrics_df = metrics_df.iloc[1:]
# Prefix emptydrops column names (except the key cell_id)
colnames = list(emptydrops_df.columns)
newcolnames = ["emptydrops_" + s for s in colnames]
namemap = dict(zip(colnames, newcolnames))
# Do not map the cell_id as it will be used for the merge
del namemap["cell_id"]
emptydrops_df = emptydrops_df.rename(columns=namemap)
# Confirm that the emptydrops table is a subset of the cell metadata table, fail if not
if not emptydrops_df.cell_id.isin(metrics_df.cell_id).all():
logging.error(
"Not all emptydrops cells can be found in the metrics table."
)
raise Exception(
"Not all emptydrops cells can be found in the metrics table."
)
# Merge the two tables
merged_df = metrics_df.merge(emptydrops_df, on="cell_id", how="outer")
# Order the cells by merging with cell_ids
cellorder_df = | pd.DataFrame(data={"cell_id": cell_ids}) | pandas.DataFrame |
#!/usr/bin/env python
#++++++++++++++++++++++++++++++++++++++++
# LAPART 1 Train ++++++++++++++++++++++++
# +++++++++++++++++++++++++++++++++++++++
# Copyright C. 2017, <NAME> ++++++
#++++++++++++++++++++++++++++++++++++++++
import time
import math
import numpy as np
import pandas as pd
from .art import ART
def norm(data,ma,mi):
tnorm = np.ones((len(data),len(data[0])))
for i in range(len(data)):
for j in range(len(data[0])):
tnorm[i,j] = (data[i,j]-mi[j])/(ma[j] - mi[j])
return tnorm
def dnorm(data,ma,mi):
dnorm = np.ones((len(data),len(data[0])))
for i in range(len(data)):
for j in range(len(data[0])):
dnorm[i,j] = (data[i,j]*(ma[j]-mi[j]))+mi[j]
return dnorm
class train:
def __init__(self,xA,xB,rhoA,rhoB,beta,alpha,nep,TA,TB,L,memory_folder,update_templates,normalize_data):
''' Update Existing Templates '''
self.update = update_templates
self.folder = memory_folder
''' LAPART Parameters '''
self.rhoA = rhoA
self.rhoB = rhoB
self.beta = beta
self.alpha = alpha
self.nep = nep
if normalize_data:
maxminA = pd.read_csv('%s/maxminA.csv'%self.folder).values #as_matrix()
maxminB = pd.read_csv('%s/maxminB.csv'%self.folder).values #as_matrix()
self.maxA,self.minA = maxminA[:,1:2],maxminA[:,2:3]
self.maxB,self.minB = maxminB[:,1:2],maxminB[:,2:3]
''' Normalize Input Data '''
self.xAn = norm(xA,self.maxA,self.minA)
self.xBn = norm(xB,self.maxB,self.minB)
else:
self.xAn = xA
self.xBn = xB
''' Complement Code Data '''
self.IA = np.transpose(np.hstack([self.xAn, 1-self.xAn]))
self.IB = np.transpose(np.hstack([self.xBn, 1-self.xBn]))
self.nAB = len(self.IA[0])
if self.update:
self.ncA_old = len(TA)
self.ncB_old = len(TB)
self.TA = TA.T
self.TB = TB.T
#self.L = np.append(np.append(L,np.zeros((len(L),112)),1),np.zeros((8,len(np.append(L,np.zeros((len(L),112)),1)[0]))),0)
# Append X Direction
xd = np.zeros((len(L),64))
L1 = np.concatenate((L,xd),axis=1)
# Append Y Direction
yd = np.zeros((64,len(L1[0])))
self.L = np.concatenate((L1,yd),axis=0)
else:
self.TA = np.ones((len(self.IA),1))
self.TB = np.ones((len(self.IB),1))
self.L = np.zeros((len(self.IA[0]),len(self.IB[0])))
self.minA = np.ones((len(xA[0])*2,1))
self.chAm = np.zeros((len(xA)*10,1))
self.mA = np.zeros((len(xA)*10,1))
self.minB = np.ones((len(xB[0])*2,1))
self.chBm = np.zeros((len(xB)*10,1))
self.mB = np.zeros((len(xB)*10,1))
def lrBfailed(self,IB,TB,L,cmax,j,ch,nc):
if self.mB[ch] >= self.rhoB:
'''Update B-Side Category & Update L '''
TB = self.UpdateTemplate(IB, TB, cmax, j, ch)
L[self.ncA-1, ch] = 1
else:
'''Create new B-Side Category & Update L '''
self.ncB += 1
TB = self.CreateTemplate(IB,TB,nc,j)
L[self.ncA-1, self.ncB-1] = 1
return L, TB
def UpdateTemplate(self,I,T,cmax,j,ch):
"""
Update A and B Templates
:param I: Input
:param T: Template
:param cmax: Maximum choice template
"""
p = np.hstack([np.array([I[:,j]]).T,T[:,cmax]])
T[:,cmax] = np.array([p.min(axis=1)]).T
return T
def CreateTemplate(self,I,T,nc,j):
"""
Create New A and B Templates
:param I: Input
:param T: Template
:param nc: Number of A or B templates
"""
T = np.append(T,np.array([I[:,j]]).T,1)
return T
def lapart_train(self,xA,xB):
"""
Parameters
----------
xA : matrix
xB " maxtrix
"""
if self.update == False:
''' Set first template as first input '''
self.TA[:,0] = self.IA[:,0]
self.TB[:,0] = self.IB[:,0]
self.L[0,0] = 1
self.ncA, self.ncB = 1,1
else:
self.ncA, self.ncB = self.ncA_old, self.ncB_old
for ep in range(self.nep):
for j in range(self.nAB):
cmaxA, chA = ART(self.IA,self.TA,self.mA,self.chAm,self.ncA,self.minA,self.rhoA,self.beta,j)
if cmaxA == -1:
'''
++++++++++++ CASE 1 ++++++++++++++++++++++++++++
A-Side => faild vigalance and creates new node
B-Side => perform as a normal fuzzy ART
'''
self.ncA += 1
self.TA = self.CreateTemplate(self.IA,self.TA,self.ncA,j)
cmaxB, chB = ART(self.IB,self.TB,self.mB,self.chBm,self.ncB,self.minB,self.rhoB,self.beta,j)
if cmaxB == -1:
self.ncB += 1
self.TB = self.CreateTemplate(self.IB,self.TB,self.ncB,j)
self.L[self.ncA-1,self.ncB-1] = 1
else:
self.TB = self.UpdateTemplate(self.IB,self.TB,cmaxB,j,chB)
self.L[self.ncA-1,chB] = 1
else:
'''
++++++++++++ CASE 2 ++++++++++++++++++++++++++++
A-Side Resonates
B-Side must consider primed class B template
and at the same time reads its input IB
Present B-Side input and Prime B-Side
Prime = B-Side must consider template associated with A-Side Template
'''
cmaxB, chB = ART(self.IB,self.TB,self.mB,self.chBm,self.ncB,self.minB,self.rhoB,self.beta,j)
if cmaxB == -1:
'''
B-Side Failed
Try Other A and B -Side Templates
'''
lr = 1
lrcount = 1
while lr == 1:
self.chAm[chA] = 0
chA = self.chAm.argmax()
if self.mA[chA] >= self.rhoA:
'''
A-Side Passed Vigalance
'''
for li in range(self.ncB):
if self.L[chA,li] == 1:
chB = li
if self.mB[chB] >= self.rhoB:
'''
B-Side Passed Vigalance
Update A and B Side
'''
cmaxA,cmaxB = chA,chB
self.TA = self.UpdateTemplate(self.IA,self.TA,cmaxA,j,chA)
self.TB = self.UpdateTemplate(self.IB,self.TB,cmaxB,j,chB)
lr = 0
else:
if lrcount == self.ncA:
'''
No Match
Create new A-Side Category
'''
self.ncA += 1
self.TA = self.CreateTemplate(self.IA,self.TA,self.ncA,j)
cmaxB, chB = art.ART(self.IB,self.TB,self.mB,self.chBm,self.ncB,self.minB,self.rhoB,self.beta,j)
self.L,self.TB = self.lrBfailed(self.IB,self.TB,self.L,cmaxB,j,chB,self.ncB)
lr = 0
else:
lr = 0
lrcount += 1
else:
'''
Next A-Side chA did not pass
Create new A-Side Template
'''
self.ncA += 1
self.TA = self.CreateTemplate(self.IA,self.TA,self.ncA,j)
cmaxB, chB = art.ART(self.IB,self.TB,self.mB,self.chBm,self.ncB,self.minB,self.rhoB,self.beta,j)
if cmaxB == -1:
self.ncB += 1
self.TB = self.CreateTemplate(self.IB,self.TB,self.ncB,j)
self.L[self.ncA-1,self.ncB-1] = 1
else:
self.TB = self.UpdateTemplate(self.IB,self.TB,cmaxB,j,chB)
self.L[self.ncA-1,chB] = 1
lr = 0
else:
'''
A and B Side Resonates
Update A and B Templates
'''
self.TA = self.UpdateTemplate(self.IA,self.TA,cmaxA,j,chA)
self.TB = self.UpdateTemplate(self.IB,self.TB,cmaxB,j,chB)
L = self.L[:self.ncA,:self.ncB]
TA = np.transpose(self.TA[:,:self.ncA])
TB = np.transpose(self.TB[:,:self.ncB])
return TA,TB,L
def lapArt_train(xA,xB,rhoA=0.9,rhoB=0.9,beta=0.000001,alpha=1.0,nep=1,memory_folder='',update_templates=True,normalize_data=True):
"""
Train LAPART Algorithm
:param xA: A-Side Input Matrix (float)
:param xB: B-Side Input Matrix (float)
:param rhoA: A-Side free parameter (float)
:param rhoB: B-Side free parameter (float)
:param beta: Learning rate free parameter (float)
:param alpha: Choice Parameter (float)
:param nep: Number of epochs (integer)
:param memory_folder: Folder to store memory (string)
:param update_templates: Command to update or create new templates (boolean)
:return TA: A-Side template matrix (float)
:return TB: B-Side template matrix (float)
:return L: Associator matrix (float)
:return elapsed_time: Seconds to complete training (float)
"""
start_time = time.time()
if update_templates:
TA,TB,L = pd.read_csv('%s/TA.csv'%memory_folder).values,pd.read_csv('%s/TB.csv'%memory_folder).values,pd.read_csv('%s/L.csv'%memory_folder).values
TA,TB,L = TA[:,1:],TB[:,1:],L[:,1:]
else:
TA,TB,L = [],[],[]
ann = train(xA,xB,rhoA,rhoB,beta,alpha,nep,TA,TB,L,memory_folder,update_templates,normalize_data)
TA,TB,L = ann.lapart_train(xA,xB)
TA,TB,L = pd.DataFrame(TA), | pd.DataFrame(TB) | pandas.DataFrame |
from fpdf import FPDF
from tkinter import *
from tkinter import ttk
import os
from tkinter import messagebox
import tkinter
from tkscrolledframe import ScrolledFrame
import pandas as pd
from datetime import date, datetime
import glob
from reportlab.pdfgen import canvas
from PyPDF2 import PdfFileWriter, PdfFileReader
import win32com.client as win32
from openpyxl.reader.excel import load_workbook
import pickle
from PIL import Image, ImageTk
class Analise:
def __init__(self, janela):
self.janela = janela
titulo = ' '
self.janela.title(110 * titulo + 'Consultas')
self.janela.geometry('800x500+350+100')
self.janela.resizable(width=False, height=False)
j = 0
r = 0
for i in range(800):
c = str(222222 + r)
Frame(self.janela, width=10, height=500, bg='#' + c).place(x=j, y=0)
j += 10
r += 1
self.frame1 = Frame(self.janela, width=700, height=400, bd=7, bg='white', relief=RIDGE)
self.frame1.place(x=50, y=50)
lblini = Label(self.frame1, text='ANÁLISE TRIBUTÁRIA', font=('arial', 26, 'bold'), bd=0, bg='white'). \
place(x=155, y=30)
lblini = Label(self.frame1, text='Selecione a opção:', font=('arial', 14, 'bold'), bd=0, bg='white').\
place(x=235, y=120)
btn1 = Button(self.frame1, text='Análises \nPendentes', font=('arial', 16, 'bold'), bg='#FF5733',
fg='white', bd=4, width=12, justify=CENTER, command=self.pendentes).place(x=30, y=180)
btn2 = Button(self.frame1, text='Nova \nAnálise', font=('arial', 16, 'bold'), width=12, bg='#FF5733', bd=4,
fg='white', command= lambda:[self.tela_principal(), self.tela_servicos(),
self.servicos.withdraw(), self.tela_materiais(), self.materiais.withdraw(),
self.tela_observacoes(), self.observacoes.withdraw(), self.tela_contratos(),
self.contratos.withdraw(), self.janela.withdraw()]).place(x=250, y=180)
btn3 = Button(self.frame1, text='Carregar \nAnálise', font=('arial', 16, 'bold'), bg='#FF5733',
fg='white', bd=4, width=12, justify=CENTER, command=lambda:[self.carregar_analise(),
self.tela_principal(), self.principal.withdraw(), self.tela_servicos(), self.servicos.withdraw(), self.tela_materiais(),
self.materiais.withdraw(), self.tela_observacoes(), self.observacoes.withdraw(), self.tela_contratos(),
self.contratos.withdraw()]).place(x=470, y=180)
def pendentes(self):
self.pendente = Toplevel()
titulo = ' '
self.pendente.title(100 * titulo + 'Consultas')
self.pendente.geometry('800x500+350+100')
self.pendente.resizable(width=False, height=False)
estilo = ttk.Style()
estilo.theme_use('default')
estilo.configure('Treeview', background='#D3D3D3', foreground='black', rowheight=25,
fieldbackground='#D3D3D3')
estilo.map('Treeview', background=[('selected', '#347083')])
# Treeview frame
tree_frame = Frame(self.pendente)
tree_frame.pack(pady=100)
# Barra rolagem
tree_scroll = Scrollbar(tree_frame)
tree_scroll.pack(side=RIGHT, fill=Y)
# Criar Treeview
nf_tree = ttk.Treeview(tree_frame, yscrollcommand=tree_scroll.set, selectmode='extended')
nf_tree.pack(side=LEFT)
# Configurar Barra Rolagem
tree_scroll.config(command=nf_tree.yview)
# Definir colunas
colunas2 = ['Análise', 'Data']
nf_tree['columns'] = colunas2
# formatar colunas
nf_tree.column('Análise', width=350)
nf_tree.column('Data', width=100)
# formatar títulos
nf_tree.heading('Análise', text='Análise', anchor=W)
nf_tree.heading('Data', text='Data', anchor=W)
nf_tree['show'] = 'headings'
self.lista = []
# lista = [['DV-004-2021', '29/09/2021'], ['IN-006-2021', '30/09/2021']]
self.pasta1 = os.listdir('G:\GECOT\Análise Contábil_Tributária_Licitações\\2021\\1Pendentes')
self.pasta = []
for item in self.pasta1:
if item.endswith('.pdf') is True and item != 'watermark.pdf':
self.pasta.append(item)
for i, n in enumerate(self.pasta):
mod = os.path.getctime('G:\GECOT\Análise Contábil_Tributária_Licitações\\2021\\1Pendentes')
mod = datetime.fromtimestamp(mod)
data = date.strftime(mod, '%d/%m/%Y')
self.lista.append([])
self.lista[i].append(n)
self.lista[i].append(data)
# print(self.lista)
# inserir dados do banco no treeview
def inserir_tree(lista):
nf_tree.delete(*nf_tree.get_children())
contagem = 0
for row in lista: # loop para inserir cores diferentes nas linhas
if contagem % 2 == 0:
nf_tree.insert(parent='', index='end', text='', iid=contagem,
values=(row[0], row[1]), tags=('evenrow',))
else:
nf_tree.insert(parent='', index='end', text='', iid=contagem,
values=(row[0], row[1]), tags=('oddrow',))
contagem += 1
py = 115
self.list_check = []
for lin in self.lista:
self.check1 = IntVar()
lbl1 = Checkbutton(self.pendente, var=self.check1, onvalue=1, offvalue=0)
lbl1.place(y=py, x=138)
py += 26
self.list_check.append(self.check1)
# Create the watermark from an image
c = canvas.Canvas('G:\GECOT\Análise Contábil_Tributária_Licitações\\2021\\1Pendentes\\watermark.pdf')
# Draw the image at x, y. I positioned the x,y to be where i like here
c.drawImage('G:\GECOT\Análise Contábil_Tributária_Licitações\\2021\\1Pendentes\\Paulo.png', 440, 30, 100, 60, mask='auto')
c.save()
# Get the watermark file you just created
watermark = PdfFileReader(open("G:\GECOT\Análise Contábil_Tributária_Licitações\\2021\\1Pendentes\\watermark.pdf", "rb"))
# Get our files ready
def assinatura():
caminho = 'G:\GECOT\Análise Contábil_Tributária_Licitações\\2021\\1Pendentes\\'
self.salvos = []
for n, arquivo in enumerate(self.pasta):
if self.list_check[n].get() == 1:
dir_atual = 'G:\GECOT\Análise Contábil_Tributária_Licitações\\2021\\1Pendentes\\'
os.chdir(dir_atual)
# if n == 0:
# # Create the watermark from an image
# c = canvas.Canvas('watermark.pdf')
# # Draw the image at x, y. I positioned the x,y to be where i like here
# c.drawImage('Paulo.png', 440, 30, 100, 60, mask='auto')
# c.save()
# # Get the watermark file you just created
# watermark = PdfFileReader(open("watermark.pdf", "rb"))
# # Get our files ready
# output_file = PdfFileWriter()
self.output_file = PdfFileWriter()
with open(caminho + arquivo, "rb") as f:
input_file = PdfFileReader(f, "rb")
# Number of pages in input document
page_count = input_file.getNumPages()
# Go through all the input file pages to add a watermark to them
for page_number in range(page_count):
input_page = input_file.getPage(page_number)
if page_number == page_count - 1:
input_page.mergePage(watermark.getPage(0))
self.output_file.addPage(input_page)
# dir = os.getcwd()
path = 'G:\GECOT\Análise Contábil_Tributária_Licitações\\2021'
os.chdir(path)
file = glob.glob(str(arquivo[21:32]) + '*')
file = ''.join(file)
try:
os.chdir(file)
except:
os.chdir(path)
# finally, write "output" to document-output.pdf
with open('Análise Tributária - ' + str(arquivo[21:]), "wb") as outputStream:
self.output_file.write(outputStream)
os.chdir(caminho)
os.remove(arquivo)
self.salvos.append(n)
troca = 0
for i in self.salvos:
self.pasta.pop(i-troca)
self.lista.pop(i-troca)
troca += 1
outlook = win32.Dispatch('outlook.application')
# criar um email
email = outlook.CreateItem(0)
# configurar as informações do seu e-mail
email.To = "<EMAIL>"
email.Subject = "E-mail automático Análise Tributária"
email.HTMLBody = f"""
<p>Análise(s) Tributária(s) assinada(s) com sucesso.</p>
"""
email.Send()
tkinter.messagebox.showinfo('', 'Análise(s) assinada(s) com sucesso!')
# self.pendente.update()
self.pendente.lift()
self.list_check.clear()
inserir_tree(self.lista)
def recusa():
resultado = tkinter.messagebox.askquestion('', 'Deseja Justificar?')
if resultado == 'yes':
self.justifica = Toplevel()
self.justifica.geometry('500x300+500+200')
Label(self.justifica, text='Justificativa:', font=('arial', 14, 'bold')).grid(row=0, column=0, padx=35,
pady=10, sticky=W)
self.texto_just = Text(self.justifica, width=50, height=10, wrap=WORD)
self.texto_just.grid(row=1, column=0, padx=40)
def email():
outlook = win32.Dispatch('outlook.application')
# criar um email
email = outlook.CreateItem(0)
# configurar as informações do seu e-mail
email.To = "<EMAIL>"
email.Subject = "E-mail automático Análise Tributária"
email.HTMLBody = f"""
<p>Análise Tributária foi recusada.</p>
"""
email.Send()
tkinter.messagebox.showinfo('', 'Análise(s) assinada(s) com sucesso!')
self.pendente.lift()
envio = Button(self.justifica, text='Enviar', font=('arial', 12, 'bold'), bd=2).grid(row=2, column=0, pady=20)
else:
tkinter.messagebox.showinfo('', 'Recusado com Sucesso!')
assinar = Button(self.pendente, text='Assinar', font=('arial', 14, 'bold'), width=10, bd=3, command=assinatura).\
place(x=250, y=400)
recusar = Button(self.pendente, text='Recusar', font=('arial', 14, 'bold'), width=10, bd=3, command=recusa). \
place(x=420, y=400)
def NotasInfo2(ev):
# fn_id.delete(0, END)
verinfo2 = nf_tree.focus()
dados2 = nf_tree.item(verinfo2)
row = dados2['values']
print(row)
# entr_atual.delete(0, END)
# entr_atual.insert(0, row[3])
# fn_id.insert(0, row[0])
os.startfile('\\\GBD_VT1NTAQA\Data2\GECOT\Análise Contábil_Tributária_Licitações\\2021\\1Pendentes' + '\\' + row[0])
# adicionar a tela
nf_tree.tag_configure('oddrow', background='white')
nf_tree.tag_configure('evenrow', background='lightblue')
inserir_tree(self.lista)
nf_tree.bind('<Double-Button>', NotasInfo2)
def carregar_analise(self):
self.carregar = Toplevel()
titulo = ' '
self.carregar.title(160 * titulo + 'Análise Tributária')
self.carregar.geometry('1100x680+200+20')
self.carregar.resizable(width=False, height=False)
estilo = ttk.Style()
estilo.theme_use('default')
estilo.configure('Treeview', background='#D3D3D3', foreground='black', rowheight=25,
fieldbackground='#D3D3D3')
estilo.map('Treeview', background=[('selected', '#347083')])
# Treeview frame
tree_frame1 = Frame(self.carregar)
tree_frame1.pack(pady=100)
# Barra rolagem
tree_scroll1 = Scrollbar(tree_frame1)
tree_scroll1.pack(side=RIGHT, fill=Y)
# Criar Treeview
nf_tree1 = ttk.Treeview(tree_frame1, yscrollcommand=tree_scroll1.set, selectmode='extended')
nf_tree1.pack(side=LEFT)
# Configurar Barra Rolagem
tree_scroll1.config(command=nf_tree1.yview)
# Definir colunas
colunas2 = ['Análise', 'Data']
nf_tree1['columns'] = colunas2
# formatar colunas
nf_tree1.column('Análise', width=200)
nf_tree1.column('Data', width=200)
# formatar títulos
estilo.configure("Treeview.Heading", font=('arial', 11), background='DodgerBlue3', foreground='white')
nf_tree1.heading('Análise', text='Análise', anchor=W)
nf_tree1.heading('Data', text='Data', anchor=W)
nf_tree1['show'] = 'headings'
lista = []
temp_list = []
with open('G:\GECOT\Análise Contábil_Tributária_Licitações\\2021\\1Pendentes\\Base.txt', "rb") as carga:
while True:
try:
temp_list.append(pickle.load(carga))
except EOFError:
break
for n, item in enumerate(temp_list):
lista.append([])
lista[n].append(item[2])
lista[n].append(item[0])
# inserir dados do banco no treeview
def inserir_tree(lista):
nf_tree1.delete(*nf_tree1.get_children())
contagem = 0
lista.sort(key=lambda lista: datetime.strptime(lista[1], '%d/%m/%Y, %H:%M:%S'), reverse=True)
print(lista)
for row in lista: # loop para inserir cores diferentes nas linhas
if contagem % 2 == 0:
nf_tree1.insert(parent='', index='end', text='', iid=contagem,
values=(row[0], row[1]), tags=('evenrow',))
else:
nf_tree1.insert(parent='', index='end', text='', iid=contagem,
values=(row[0], row[1]), tags=('oddrow',))
contagem += 1
def NotasInfo2(ev):
# fn_id.delete(0, END)
verinfo3 = nf_tree1.focus()
dados3 = nf_tree1.item(verinfo3)
temp_list.sort(key=lambda lista: datetime.strptime(lista[0], '%d/%m/%Y, %H:%M:%S'), reverse=True)
self.gere.delete(0, END)
self.proc.delete(0, END)
self.req.delete(0, END)
self.orcam.delete(0, END)
self.objcust.delete(0, END)
self.tipoa.deselect()
self.tipob.deselect()
self.tipoc.deselect()
self.objeto.delete(1.0, END)
self.valor.delete(0, END)
self.complem.delete(1.0, END)
self.linha_mat.delete(0, END)
self.cod_serv.delete(0, END)
self.iva.delete(0, END)
self.linha_serv.delete(0, END)
self.obs.delete(1.0, END)
self.obs_serv.delete(1.0, END)
self.obs1.delete(1.0, END)
self.obs2.delete(1.0, END)
[i.delete(1.0, END) for i in self.infos]
# [i.deselect() for i in self.var_check]
self.gere.insert(0, temp_list[int(verinfo3)][1])
self.proc.insert(0, temp_list[int(verinfo3)][2])
self.req.insert(0, temp_list[int(verinfo3)][3])
self.orcam.insert(0, temp_list[int(verinfo3)][4])
self.objcust.insert(0, temp_list[int(verinfo3)][5])
self.tipo1.set(int(temp_list[int(verinfo3)][6]))
self.tipo2.set(int(temp_list[int(verinfo3)][7]))
self.tipo3.set(int(temp_list[int(verinfo3)][8]))
self.objeto.insert(1.0, temp_list[int(verinfo3)][9].strip())
self.valor.insert(0, temp_list[int(verinfo3)][10])
self.complem.insert(1.0, temp_list[int(verinfo3)][11].strip())
for r, val in enumerate(temp_list[int(verinfo3)][12][1:]):
print(val)
for b, value in enumerate(val):
print(value)
self.lista_mat[b][r].delete(0, END)
self.lista_mat[b][r].insert(0, value)
self.linha_mat.insert(0, temp_list[int(verinfo3)][13])
self.serv.insert(1.0, temp_list[int(verinfo3)][14].strip())
self.iva.insert(0, temp_list[int(verinfo3)][15])
cont = 0
for r, val in enumerate(temp_list[int(verinfo3)][16][1:]):
for b, value in enumerate(val):
if cont == 0:
self.lista[b+1][r].delete(0, END)
self.lista[b+1][r].insert(0, value)
elif cont == 1:
self.lista[b-1][r].delete(0, END)
self.lista[b-1][r].insert(0, value)
else:
self.lista[b][r].delete(0, END)
self.lista[b][r].insert(0, value)
cont += 1
cont = 0
self.linha_serv.insert(0, temp_list[int(verinfo3)][17])
self.obs.insert(1.0, temp_list[int(verinfo3)][18].strip().strip())
self.obs_serv.insert(1.0, temp_list[int(verinfo3)][19].strip())
self.obs1.insert(1.0, temp_list[int(verinfo3)][20].strip())
self.obs2.insert(1.0, temp_list[int(verinfo3)][21].strip())
for n, i in enumerate(temp_list[int(verinfo3)][22]):
self.infos[n].insert(1.0, i.strip())
print(i)
for n, i in enumerate(temp_list[int(verinfo3)][23]):
if i == 1:
self.lista_check[n].set(1)
self.carregar.destroy()
self.principal.deiconify()
# adicionar a tela
nf_tree1.tag_configure('oddrow', background='white')
nf_tree1.tag_configure('evenrow', background='lightblue')
inserir_tree(lista)
nf_tree1.bind('<Double-Button>', NotasInfo2)
def tela_principal(self):
self.principal = Toplevel()
titulo = ' '
self.principal.title(160 * titulo + 'Análise Tributária')
self.principal.geometry('1100x680+200+20')
self.principal.resizable(width=False, height=False)
self.mainframe = Frame(self.principal, width=1200, height=680, relief=RIDGE, bg='DeepSkyBlue3')
self.mainframe.place(x=0, y=0)
info_frame = Frame(self.mainframe, width=1080, height=260, relief=RIDGE, bd=7)
info_frame.place(x=10, y=30)
info_frame2 = Frame(self.mainframe, width=1080, height=300, relief=RIDGE, bd=7)
info_frame2.place(x=10, y=290)
info_frame3 = Frame(self.mainframe, width=1080, height=70, relief=RIDGE, bd=5)
info_frame3.place(x=10, y=590)
self.fonte = ('arial', 12, 'bold')
self.ent_fonte = ('arial', 12)
self.rotulos = ['Gerência Contratante: ', 'Nº Processo GECBS: ', 'Requisição de Compras: ', 'Código Material/Serviço:',
'Consta no Orçamento?', 'Sim', 'Não', '1. Classificação Contábil:',
'1. Objeto de Custos:', 'Serviço', 'Material ', 'Serviço com Fornecimento de Material']
Label(info_frame, text='Gerência Contratante: ', font=self.fonte, bd=0).place(x=20, y=50)
Label(info_frame, text='Nº Processo GECBS: ', font=self.fonte, bd=0).place(x=500, y=50)
Label(info_frame, text='Requisição de Compras: ', font=self.fonte, bd=0).place(x=20, y=90)
Label(info_frame, text='Consta no Orçamento?', font=self.fonte, bd=0).place(x=500, y=90)
Label(info_frame, text='Objeto de Custos:', font=self.fonte, bd=0).place(x=20, y=130)
Label(info_frame, text='Tipo de Análise: ', font=self.fonte, bd=0).place(x=500, y=130)
Label(info_frame2, text='Objeto da Contratação: ', font=self.fonte, bd=0).place(x=20, y=20)
Label(info_frame2, text='Valor estimado: ', font=self.fonte, bd=0).place(x=550, y=20)
Label(info_frame2, text='Informações Complementares: ', font=self.fonte, bd=0).place(x=20, y=175)
self.gere = Entry(info_frame, width=20, bd=4, font=self.ent_fonte)
self.gere.place(x=230, y=48)
self.proc = Entry(info_frame, width=20, bd=4, font=self.ent_fonte)
self.proc.place(x=700, y=50)
self.req = Entry(info_frame, width=20, bd=4, font=self.ent_fonte)
self.req.place(x=230, y=88)
self.orcam = ttk.Combobox(info_frame, font=self.fonte, width=12)
self.orcam['values'] = ('Não', 'Sim')
self.orcam.current(1)
self.orcam.place(x=700, y=88)
self.objcust = Entry(info_frame, width=20, bd=4, font=self.ent_fonte)
self.objcust.place(x=230, y=128)
self.tipo1 = IntVar()
self.tipo2 = IntVar()
self.tipo3 = IntVar()
self.tipoa = Checkbutton(info_frame, var=self.tipo1, onvalue=1, offvalue=0, text='Serviço', font=('arial', 12))
self.tipoa.place(x=690, y=125)
self.tipob = Checkbutton(info_frame, var=self.tipo2, onvalue=1, offvalue=0, text='Material', font=('arial', 12))
self.tipob.place(x=690, y=155)
self.tipoc = Checkbutton(info_frame, var=self.tipo3, onvalue=1, offvalue=0, text='Serviço com Fornc. Material',
font=('arial', 12))
self.tipoc.place(x=690, y=185)
self.objeto = Text(info_frame2, width=50, height=5, bd=4, font='arial')
self.objeto.place(x=20, y=55)
self.valor = Entry(info_frame2, width=20, bd=4, font=self.ent_fonte)
self.valor.place(x=550, y=55)
self.complem = Text(info_frame2, width=50, height=3, bd=4, font='arial')
self.complem.place(x=20, y=200)
self.chama_mat = Button(info_frame3, font=('arial', 14, 'bold'), text='Materiais', bd=3, width=20,
command=lambda :[self.principal.withdraw(), self.materiais.deiconify()])
self.chama_mat.place(x=20, y=2)
self.chama_serv = Button(info_frame3, font=('arial', 14, 'bold'), text='Serviços', bd=3, width=20,
command=lambda :[self.principal.withdraw(), self.servicos.deiconify()])
self.chama_serv.place(x=270, y=2)
self.chama_obs2 = Button(info_frame3, font=('arial', 14, 'bold'), text='Observações', bd=3, width=20,
command=lambda :[self.principal.withdraw(),self.observacoes.deiconify()]).place(x=520, y=2)
self.chama_contr = Button(info_frame3, font=('arial', 14, 'bold'), text='Clausulas', bd=3, width=20,
command=lambda :[self.principal.withdraw(), self.contratos.deiconify()]).place(x=770, y=2)
def tela_servicos(self):
self.servicos = Toplevel()
titulo = ' '
self.servicos.title(160 * titulo + 'Serviços')
self.servicos.geometry('1100x690+200+20')
self.servicos.config(bg='DeepSkyBlue2')
self.serv_frame = Frame(self.servicos, width=1080, height=600, relief=RIDGE, bd=7)
self.serv_frame.place(x=10, y=10)
self.serv_frame1 = Frame(self.servicos, width=1080, height=70, relief=RIDGE, bd=7)
self.serv_frame1.place(x=10, y=610)
# frame_1 = Frame(serv_frame, height=500, width=1190, bd=5).place(x=0, y=0)
Label(self.serv_frame, text='Codigo Serviço', font=self.fonte, bd=0).place(x=20, y=30)
Label(self.serv_frame, text='Codigo IVA', font=self.fonte, bd=0).place(x=20, y=190)
self.cod_serv = Entry(self.serv_frame, width=7, font=self.fonte, bd=3)
self.cod_serv.place(x=150, y=30)
self.serv = Text(self.serv_frame, width=70, height=7, bd=4, font='arial', wrap=WORD)
self.serv.place(x=240, y=30)
self.iva = Entry(self.serv_frame, width=10, bd=4, font=self.fonte)
self.iva.place(x=150, y=190)
Label(self.serv_frame, text='Quebra', font=('arial', 10, 'bold'), bd=0).place(x=990, y=505)
self.linha_serv = Entry(self.serv_frame, font=('arial', 10, 'bold'), bd=2, width=3)
self.linha_serv.place(x=1000, y=530)
self.linha_serv.insert(0, 0)
def busca_servico(ev):
self.path = 'G:\GECOT\Análise Contábil_Tributária_Licitações\\2021\\1Pendentes\\'
data_serv = pd.read_excel(self.path + 'material.xlsx', sheet_name='116', dtype=str)
data_serv = | pd.DataFrame(data_serv) | pandas.DataFrame |
import abc
import logging
from time import perf_counter
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from sklearn.linear_model import SGDClassifier
from . import optimizer, settings
from .datasets import Dataset
from .l2s_sampling import l2s_sampling
from .sketch import Sketch
logger = logging.getLogger(settings.LOGGER_NAME)
_rng = np.random.default_rng()
class BaseExperiment(abc.ABC):
def __init__(
self,
num_runs,
min_size,
max_size,
step_size,
dataset: Dataset,
results_filename,
):
self.num_runs = num_runs
self.min_size = min_size
self.max_size = max_size
self.step_size = step_size
self.dataset = dataset
self.results_filename = results_filename
@abc.abstractmethod
def get_reduced_matrix_and_weights(self, config):
pass
def get_config_grid(self):
"""
Returns a list of configurations that are used to run the experiments.
"""
grid = []
for size in np.arange(
start=self.min_size,
stop=self.max_size + self.step_size,
step=self.step_size,
):
for run in range(1, self.num_runs + 1):
grid.append({"run": run, "size": size})
return grid
def optimize(self, reduced_matrix, weights):
return optimizer.optimize(Z=reduced_matrix, w=weights).x
def run(self, parallel=False, n_jobs=4):
Z = self.dataset.get_Z()
beta_opt = self.dataset.get_beta_opt()
objective_function = optimizer.get_objective_function(Z)
f_opt = objective_function(beta_opt)
logger.info("Running experiments...")
def job_function(cur_config):
logger.info(f"Current experimental config: {cur_config}")
start_time = perf_counter()
reduced_matrix, weights = self.get_reduced_matrix_and_weights(cur_config)
sampling_time = perf_counter() - start_time
cur_beta_opt = self.optimize(reduced_matrix, weights)
total_time = perf_counter() - start_time
cur_ratio = objective_function(cur_beta_opt) / f_opt
return {
**cur_config,
"ratio": cur_ratio,
"sampling_time_s": sampling_time,
"total_time_s": total_time,
}
if parallel:
results = Parallel(n_jobs=n_jobs)(
delayed(job_function)(cur_config)
for cur_config in self.get_config_grid()
)
else:
results = [
job_function(cur_config) for cur_config in self.get_config_grid()
]
logger.info(f"Writing results to {self.results_filename}")
df = | pd.DataFrame(results) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Test extraction of features across (shifted) windows."""
__author__ = ["danbartl"]
import numpy as np
import pandas as pd
import pytest
from sktime.datasets import load_airline, load_longley
from sktime.datatypes import get_examples
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.transformations.series.summarize import WindowSummarizer
def check_eval(test_input, expected):
"""Test which columns are returned for different arguments.
For a detailed description what these arguments do,
and how theyinteract see docstring of DateTimeFeatures.
"""
if test_input is not None:
assert len(test_input) == len(expected)
assert all([a == b for a, b in zip(test_input, expected)])
else:
assert expected is None
# Load data that will be the basis of tests
y = load_airline()
y_pd = get_examples(mtype="pd.DataFrame", as_scitype="Series")[0]
y_series = get_examples(mtype="pd.Series", as_scitype="Series")[0]
y_multi = get_examples(mtype="pd-multiindex", as_scitype="Panel")[0]
# y Train will be univariate data set
y_train, y_test = temporal_train_test_split(y)
# Create Panel sample data
mi = pd.MultiIndex.from_product([[0], y.index], names=["instances", "timepoints"])
y_group1 = pd.DataFrame(y.values, index=mi, columns=["y"])
mi = pd.MultiIndex.from_product([[1], y.index], names=["instances", "timepoints"])
y_group2 = pd.DataFrame(y.values, index=mi, columns=["y"])
y_grouped = | pd.concat([y_group1, y_group2]) | pandas.concat |
import os
import pandas as pd
import numpy as np
import re
import logging
DATA_PATH = os.getenv('DATA_PATH')
if DATA_PATH is None:
raise ValueError("DATA_PATH needs to be set")
def changeTrade(eba, rightba, wrongba, start=None, end=None, tol=1):
logger = logging.getLogger("clean")
ind = [True]*len(eba.df.index)
if start is not None:
ind &= eba.df.index > start
if end is not None:
ind &= eba.df.index < end
ind_diff = ((
(eba.df.loc[:, eba.KEY["ID"] % (rightba, wrongba)] + eba.df.loc[
:, eba.KEY["ID"] % (wrongba, rightba)]).abs() > tol)
| eba.df.loc[:, eba.KEY["ID"] % (wrongba, rightba)].isna())
ind_diff &= ind
eba.df.loc[ind_diff, eba.KEY["ID"] % (wrongba, rightba)] = (
-eba.df.loc[ind_diff, eba.KEY["ID"] % (rightba, wrongba)])
nchange = sum(ind_diff)
if nchange > 0:
logger.debug("Picking %s over %s for %d pts" % (
rightba, wrongba, nchange))
return eba
def fillNAs(eba, col, pad_limit=2, limit=3):
logger = logging.getLogger("clean")
ind_na = eba.df.loc[:, col].isna()
nchange = ind_na.sum()
if nchange > 0:
logger.debug("%s: %d NA values to deal with" % (
col, nchange))
# first try pad for 2 hours
eba.df.loc[:, col] = eba.df.loc[:, col].fillna(
method='pad', limit=pad_limit)
ind_na = eba.df.loc[:, col].isna()
nchange = ind_na.sum()
if nchange > 0:
logger.debug("%s: replacing %d NA values with next/prev week" % (
col, nchange))
if nchange > 50:
logger.warning("%s: replacing %d NA values with next/prev week" % (
col, nchange))
for ts in eba.df.index[ind_na]:
try:
eba.df.loc[ts, col] = eba.df.loc[
ts-pd.Timedelta("%dH" % (7*24)), col]
except KeyError:
eba.df.loc[ts, col] = eba.df.loc[
ts+pd.Timedelta("%dH" % (7*24)), col]
# If we didn't manage to get the right value, look forward
cnt = 0
while np.isnan(eba.df.loc[ts, col]):
cnt += 1
if cnt > limit:
logger.error("Tried to look %d times ahead for %s" %
(limit, str(ts)))
raise ValueError("Can't fill this NaN")
eba.df.loc[ts, col] = eba.df.loc[
ts+pd.Timedelta("%dH" % (cnt*7*24)), col]
return eba
def removeOutliers(eba, col, start=None, end=None, thresh_u=None,
thresh_l=None, remove=True, limit=4):
logger = logging.getLogger("clean")
if start is None:
start = pd.to_datetime("2016-01-01")
if end is None:
end = pd.to_datetime("2017-01-02")
if (thresh_u is None) and (thresh_l is None):
mu = eba.df.loc[start:end, col].mean()
sigma = eba.df.loc[start:end, col].std()
ind_out = np.abs(eba.df.loc[:, col]-mu) > (3*sigma)
else:
if thresh_l is None:
thresh_l = -np.inf
if thresh_u is None:
thresh_u = +np.inf
ind_out = (eba.df.loc[:, col] < thresh_l)
ind_out |= (eba.df.loc[:, col] > thresh_u)
ind_out &= (eba.df.index > start) & (eba.df.index < end)
nchange = sum(ind_out)
logger.debug("%s: %d outliers out of [%.2g, %.2g]" % (
col, nchange, thresh_l, thresh_u))
if nchange > 10:
logger.warning("%s: %d outliers out of [%.2g, %.2g]" % (
col, nchange, thresh_l, thresh_u))
if remove:
eba.df.loc[ind_out, col] = np.nan
return eba
def applyFixes3(eba, log_level=logging.INFO):
logger = logging.getLogger("clean")
log_level_old = logger.level
logger.setLevel(log_level)
# special changes
logger.debug("\tSpecial changes")
eba = removeOutliers(eba, "EBA.NSB-FPC.ID.H", thresh_u=-5,
start=pd.to_datetime("2016-02-12"),
end=pd.to_datetime("2016-02-14"))
eba = removeOutliers(eba, "EBA.NSB-FPC.ID.H", thresh_u=-5,
start=pd.to_datetime("2016-08-01"),
end=pd.to_datetime("2016-08-15"))
eba = removeOutliers(eba, "EBA.NSB-FPL.ID.H", thresh_u=-5.,
start=pd.to_datetime("2016-08-01"),
end=pd.to_datetime("2016-08-15"))
eba = removeOutliers(eba, "EBA.NSB-FPC.ID.H", thresh_u=-5,
start=pd.to_datetime("2016-10-07"),
end=pd.to_datetime("2016-10-08 03:00"))
eba = removeOutliers(eba, "EBA.NSB-FPL.ID.H", thresh_u=-5.,
start=pd.to_datetime("2016-10-07"),
end=pd.to_datetime("2016-10-08 03:00"))
for ba, ba2 in [("IID", "CISO"), ("PJM", "CPLW"), ("PJM", "DUK"),
("PJM", "TVA"),
("FPL", "SOCO"), ("SC", "SOCO"), ("SEPA", "SOCO"),
("CPLW", "TVA"), ("DUK", "TVA"),
("FMPP", "FPL"), ("FPC", "FPL"), ("JEA", "FPL"),
("SEC", "FPL"),
("CPLW", "DUK"), ("YAD", "DUK"), ("SEPA", "DUK"),
("DOPD", "BPAT"), ("LDWP", "BPAT"),
("FMPP", "FPC"), ("SEC", "FPC"),
("LDWP", "PACE"),
("LDWP", "NEVP"),
("SEPA", "SC"),
("FMPP", "TEC"),
("SEC", "JEA"),
("NSB", "FPC"), ("NSB", "FPL")]:
eba = fillNAs(eba, eba.KEY["ID"] % (ba, ba2))
eba = changeTrade(eba, ba, ba2, tol=0.)
for field in ["D", "NG"]:
eba = removeOutliers(eba, eba.get_cols(
r="FPC", field=field)[0], thresh_l=200.)
eba = removeOutliers(eba, eba.get_cols(
r="TVA", field=field)[0], thresh_l=3000.)
eba = removeOutliers(eba, eba.get_cols(r="PSCO", field=field)[
0], thresh_l=2000., thresh_u=10000.)
eba = removeOutliers(eba, eba.get_cols(
r="PACE", field=field)[0], thresh_u=10000.)
eba = removeOutliers(
eba, eba.get_cols(r="SRP", field=field)[0], thresh_l=1000.,
thresh_u=5000., start=pd.to_datetime("2016-12-01"),
end=pd.to_datetime("2016-12-31"))
eba = removeOutliers(
eba, eba.get_cols(r="SRP", field=field)[0], thresh_u=4900.,
start=pd.to_datetime("2016-01-01"),
end=pd.to_datetime("2016-05-01"))
eba = removeOutliers(eba, eba.get_cols(
r="LDWP", field=field)[0], thresh_l=100.)
eba = removeOutliers(
eba, eba.get_cols(r="IPCO", field=field)[0], thresh_l=800.,
start=pd.to_datetime("2016-08-01"),
end=pd.to_datetime("2016-08-05"))
eba = removeOutliers(eba, eba.get_cols(
r="EPE", field=field)[0], thresh_l=100.)
eba = removeOutliers(eba, eba.get_cols(r="GVL", field=field)[
0], thresh_l=50., thresh_u=500.)
eba = removeOutliers(
eba, eba.get_cols(r="SCL", field="D")[0], thresh_l=500.,
start=pd.to_datetime("2016-12-01"), end=pd.to_datetime("2016-12-31"))
# WACM outliers
eba = removeOutliers(eba, eba.get_cols(
r="WACM", field="NG")[0], thresh_l=2500.)
eba = removeOutliers(eba, eba.get_cols(
r="WACM", field="D")[0], thresh_l=2000.)
eba = removeOutliers(
eba, eba.get_cols(r="WACM", field="D")[0], thresh_u=3000.,
start=pd.to_datetime("2016-05-01"), end=pd.to_datetime("2016-05-31"))
eba = removeOutliers(
eba, eba.get_cols(r="WACM", field="NG")[0], thresh_l=3500.,
start=pd.to_datetime("2016-01-01"), end=pd.to_datetime("2016-01-31"))
eba = removeOutliers(
eba, eba.get_cols(r="WACM", field="D")[0], thresh_u=4000.,
start=pd.to_datetime("2016-01-01"), end=pd.to_datetime("2016-01-31"))
for field in ["D", "NG", "TI"]:
eba = fillNAs(eba, eba.get_cols(r="WACM", field=field)[0])
# WALC outliers
for field in ["D", "NG"]:
eba = removeOutliers(
eba, eba.get_cols(r="WALC", field=field)[0], thresh_u=2000.,
start=pd.to_datetime("2016-01-01"),
end=pd.to_datetime("2016-03-15"))
eba = removeOutliers(eba, "EBA.WALC-LDWP.ID.H", thresh_l=100.)
eba = fillNAs(eba, eba.KEY["ID"] % ("WALC", "LDWP"))
eba = changeTrade(eba, "WALC", "LDWP", tol=0.)
eba = removeOutliers(
eba, eba.get_cols(r="WALC", field="D")[0], thresh_l=700.,
start=pd.to_datetime("2016-02-17"), end=pd.to_datetime("2016-02-19"))
eba = removeOutliers(
eba, eba.get_cols(r="WALC", field="D")[0], thresh_l=200.,
start=pd.to_datetime("2016-01-01"), end=pd.to_datetime("2016-05-01"))
eba = removeOutliers(
eba, eba.get_cols(r="WALC", field="D")[0], thresh_l=700.,
start=pd.to_datetime("2016-03-01"), end=pd.to_datetime("2016-03-08"))
eba = removeOutliers(
eba, eba.get_cols(r="TPWR", field="D")[0], thresh_l=300.,
start=pd.to_datetime("2016-10-15"), end= | pd.to_datetime("2016-10-17") | pandas.to_datetime |
import pandas as pd
import warnings
import os
import sys
import codecs
import torch
from modules import BertNerData as NerData
from modules import NerLearner
from modules.models.bert_models import BertBiLSTMAttnNMT
from modules.data.bert_data import get_bert_data_loader_for_predict
from modules.train.train import validate_step
from modules.utils.plot_metrics import get_bert_span_report, bert_preds_to_ys, bert_preds_to_y, \
write_true_and_pred_to_conll, flat_classification_report
from pathlib import Path
sys.path.append("../")
warnings.filterwarnings("ignore")
data_path = "/media/liah/DATA/ner_data_other/norne/"
# data_path = "/media/liah/DATA/ner_data_other/conll03/"
train_path = data_path + "train.txt" # "onto.train.ner"
dev_path = data_path + "valid.txt" # "onto.development.ner"
test_path = data_path + "test.txt" # "onto.test.ner"
result_conll_path = Path('/media/liah/DATA/log/company_tagging_no/bert_norne.conll')
def read_data(input_file, tkn_field_idx=0, label_field_idx=-1, delim='\t'):
"""Reads a BIO data."""
with codecs.open(input_file, "r", encoding="utf-8") as f:
lines = []
words = []
labels = []
for line in f:
content = line.strip()
if content.startswith("-DOCSTART-"):
# words.append('')
continue
elif len(content) == 0 and not len(words):
continue
elif len(content) == 0:
# empty line, means a sentence is finished
l = ' '.join([label for label in labels if len(label) > 0])
w = ' '.join([word for word in words if len(word) > 0])
if len(l) != 0 and len(w) != 0:
assert len(labels) == len(words)
lines.append([l, w])
words = []
labels = []
continue
word = line.rstrip().split(delim)[tkn_field_idx]
label = line.rstrip().split(delim)[label_field_idx]
words.append(word)
labels.append(label.replace("-", "_"))
return lines
delim = '\t'
train_f = read_data(train_path, tkn_field_idx=1, delim=delim)
dev_f = read_data(dev_path, tkn_field_idx=1, delim=delim)
test_f = read_data(test_path, tkn_field_idx=1, delim=delim)
train_df = pd.DataFrame(train_f, columns=["0", "1"])
train_df.to_csv(data_path + "train.csv", index=False)
valid_df = | pd.DataFrame(dev_f, columns=["0", "1"]) | pandas.DataFrame |
# Created by rahman at 15:39 2020-03-09 using PyCharm
import os
import pandas as pd
from scipy.spatial.distance import cosine, euclidean, correlation, chebyshev,braycurtis, canberra, cityblock, sqeuclidean
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import scipy.sparse
from sklearn.metrics import roc_auc_score
def make_dfwords(city, DATAPATH):
"""
joins all captions_results for each user
:param city:
:param DATAPATH:
:return:
"""
caption = pd.read_csv(DATAPATH + city + ".target_caption")
caption['caption'] = caption['caption'].str.lower()
#collect all captions for each user
grouped = caption.groupby('uid')
dataarr = []
for uid, group in grouped:
caplist = group.caption
capstr = ' '.join(map(str, caplist))
dataarr.append([uid, capstr])
# print len(capstr)
df = pd.DataFrame(data=dataarr, columns=['uid', 'words'])
df.to_csv(DATAPATH + "words.csv", index=False)
# df=pd.read_csv(DATAPATH + "words.csv")
print ("joined all captions_results for each user", df.shape)
return df
def get_TFIDF_filtered(sublinear,th, df, DATAPATH, npzBOWfile):
"""
Convert a collection of words to a matrix of TF-IDF features.
:param sublinear: boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
:param th: When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific stop words).
:param df: df containiing all words per user
:param DATAPATH:
:param npzBOWfile: file containing sparse TFIDF matrix
:return:
"""
vectorizer = TfidfVectorizer(max_df=th, sublinear_tf=sublinear, min_df=2) # CountVectorizer(min_df=2)
tfidf_matrix=vectorizer.fit_transform(df.words)
scipy.sparse.save_npz(DATAPATH + npzBOWfile, tfidf_matrix)
print ("created ", npzBOWfile)
print ("tfidf_matrix.shape", tfidf_matrix.shape)
# get idf for each word
wordids = dict(zip(vectorizer.get_feature_names(), vectorizer.idf_))
wordidDF = pd.DataFrame(wordids.items(), columns=['word', 'tf_idf'])
wordidDF.to_csv(DATAPATH +str(sublinear)+ str(th)+"_tfids.csv")
print ("created" +str(sublinear)+ str(th)+"_tfids.csv")
print ("number of words len(vectorizer.idf_)", len(vectorizer.idf_))
assert(df.shape[0] == tfidf_matrix.shape[0])
return tfidf_matrix
def make_features(dataFile, df, TFIDF_matrix, pairs, DATAPATH):
"""
:param dataFile: output features in this file
:param df: df of users and words
:param TFIDF_matrix: tfidf matrix of each user
:param pairs:
:param DATAPATH:
:return:
"""
print ("pairs.shape", pairs.shape, pairs.columns)
if os.path.exists(DATAPATH + dataFile):
print ("datafile exists, removing", dataFile)
os.remove(DATAPATH + dataFile)
count=0
with open(DATAPATH + dataFile, "wb") as f:
for item in ['u1', 'u2', 'label',\
'cosine', 'euclidean', 'correlation', 'chebyshev', \
'braycurtis', 'canberra', 'cityblock', 'sqeuclidean']:
f.write( ","+ item )
f.write("\n")
for i in range(len(pairs)):
if not (i % 500):
print (i , "out of ", len(pairs))
label = pairs.loc[i, 'label']
try:
u1, u2 = pairs.loc[i, 'u1'], pairs.loc[i, 'u2']
# retrieve the index of the user from the df containing words
pos1ind, pos2ind = df[df.uid == u1].index, df[df.uid == u2].index
pos1arr, pos2arr = pos1ind.get_values(), pos2ind.get_values()
pos1, pos2 = pos1arr[0], pos2arr[0] # these 2 are still
# and use the index to get the correct row from the tfidf matrix
u1_vector, u2_vector = TFIDF_matrix[pos1, :].toarray(), TFIDF_matrix[pos2, :].toarray()
i_feature = pd.DataFrame([[u1, u2, label, \
cosine(u1_vector, u2_vector), \
euclidean(u1_vector, u2_vector), \
correlation(u1_vector, u2_vector), \
chebyshev(u1_vector, u2_vector), \
braycurtis(u1_vector, u2_vector), \
canberra(u1_vector, u2_vector), \
cityblock(u1_vector, u2_vector), \
sqeuclidean(u1_vector, u2_vector)]])
i_feature.to_csv(DATAPATH + dataFile, index=False, header=None, mode='a')
# print "feature created"
except Exception as e:
print ("EXCEPTION!", u1, u2, e.message)
count+=1
print (count , " pairs not found out of ", len(pairs))
def score_all_aucs(dataFile, classifiers, DATAPATH):
"""
:param dataFile:
:param classifiers:
:param DATAPATH:
:return:
"""
dataset = pd.read_csv(DATAPATH+dataFile, error_bad_lines=False)#names = ['u1', 'u2', 'label', \
#'cosine', 'euclidean', 'correlation', 'chebyshev', \
# 'braycurtis', 'canberra', 'cityblock', 'sqeuclidean'],
print ("before dropna", dataset.shape)
dataset.drop(dataset[dataset.label!=0][dataset.label!=1].index, inplace=True)
for col in dataset.columns:
dataset[col] = | pd.to_numeric(dataset[col]) | pandas.to_numeric |
import os
import pickle
import random
import time
import numpy as np
import pandas as pd
from sklearn.neighbors import KDTree
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
import struct
import config as cfg
import gputransform
BASE_DIR = "/media/mav-lab/1T/Data/Datasets/NCLT/NCLT/"
base_path = "/media/mav-lab/1T/Data/Datasets/NCLT/NCLT/"
runs_folder = "/media/mav-lab/1T/Data/Datasets/NCLT/NCLT/"
pointcloud_fols = "/velodyne_data/velodyne_sync/"
all_folders = sorted(os.listdir(os.path.join(base_path, runs_folder)))
folders = []
velo_file = []
# All runs are used for training (both full and partial)
index_list = range(len(all_folders)-1)
print("Number of runs: " + str(len(index_list)))
for index in index_list:
if index == 0:
folders.append(all_folders[index])
print(folders)
p = [-50.0, 150.0, -250.0, 150.0]
# check if the location is in the test set
def check_in_test_set(northing, easting, points):
in_test_set = False
if(points[0] < northing and northing < points[1] and points[2] < easting and easting < points[3]):
in_test_set = True
return in_test_set
# check if it's a new place in test set
def check_submap_test(northing, easting, prev_northing, prev_easting):
is_submap = False
euclidean = np.abs(np.sqrt((prev_northing-northing)**2 + (prev_easting-easting)**2))
if(euclidean < cfg.SUBMAP_INTERVAL_TEST + 0.5 and euclidean >= cfg.SUBMAP_INTERVAL_TEST):
is_submap = True
return is_submap
# check if it's a new place in train set
def check_submap_train(northing, easting, prev_northing, prev_easting):
is_submap = False
euclidean = np.abs(np.sqrt((prev_northing-northing)**2 + (prev_easting-easting)**2))
if(euclidean < cfg.SUBMAP_INTERVAL_TRAIN + 1.0 and euclidean >= cfg.SUBMAP_INTERVAL_TRAIN):
is_submap = True
return is_submap
# find closest place timestamp with index returned
def find_closest_timestamp(A, target):
# A must be sorted
idx = A.searchsorted(target)
idx = np.clip(idx, 1, len(A)-1)
left = A[idx-1]
right = A[idx]
idx -= target - left < right - target
return idx
# nclt pointcloud utils
def convert(x_s, y_s, z_s):
scaling = 0.005 # 5 mm
offset = -100.0
x = x_s * scaling + offset
y = y_s * scaling + offset
z = z_s * scaling + offset
return x, y, z
# load lidar file in nclt dataset
def load_lidar_file_nclt(file_path):
n_vec = 4
f_bin = open(file_path,'rb')
hits = []
while True:
x_str = f_bin.read(2)
if x_str == b"": # eof
break
x = struct.unpack('<H', x_str)[0]
y = struct.unpack('<H', f_bin.read(2))[0]
z = struct.unpack('<H', f_bin.read(2))[0]
i = struct.unpack('B', f_bin.read(1))[0]
l = struct.unpack('B', f_bin.read(1))[0]
x, y, z = convert(x, y, z)
s = "%5.3f, %5.3f, %5.3f, %d, %d" % (x, y, z, i, l)
# filter and normalize the point cloud to -1 ~ 1
if np.abs(x) < 70. and z > -20. and z < -2. and np.abs(y) < 70. and not(np.abs(x) < 5. and np.abs(y) < 5.):
hits += [[x/70., y/70., z/20.]]
f_bin.close()
hits = np.asarray(hits)
hits[:, 2] = -hits[:, 2]
return hits
# load pointcloud and process it using CUDA accelerate
def load_pc_file(filename):
# returns Nx3 matrix
# scale the original pointcloud
pc = load_lidar_file_nclt(os.path.join("/media/mav-lab/1T/Data/Datasets/NCLT/NCLT/", filename))
pc[:,0] = pc[:,0] / np.max(pc[:,0] + 1e-15) - 0.0001
pc[:,1] = pc[:,1] / np.max(pc[:,1] + 1e-15) - 0.0001
pc[:,2] = pc[:,2] / np.max(pc[:,2] + 1e-15) - 0.0001
# !Debug
# x = pc[...,0]
# y = pc[...,1]
# z = pc[...,2]
# fig2 = plt.figure()
# ax2 = Axes3D(fig2)
# ax2.scatter(x, y, z)
# plt.show()
size = pc.shape[0]
pc_img = np.zeros([cfg.num_height * cfg.num_ring * cfg.num_sector])
pc = pc.transpose().flatten().astype(np.float32)
transer = gputransform.GPUTransformer(pc, size, cfg.max_length, cfg.max_height, cfg.num_ring, cfg.num_sector, cfg.num_height, 1)
transer.transform()
point_t = transer.retreive()
point_t = point_t.reshape(-1, 3)
point_t = point_t[...,2]
pc_img = point_t.reshape(cfg.num_height, cfg.num_ring, cfg.num_sector)
pc = np.sum(pc_img, axis=0)
# plt.imshow(pc)
# plt.show()
return pc_img
# construct query dict for training
def construct_query_dict(df_centroids, filename, pickle_flag):
tree = KDTree(df_centroids[['northing','easting']])
# get neighbors pair
ind_nn = tree.query_radius(df_centroids[['northing','easting']],r=1.5)
# get far away pairs
ind_r = tree.query_radius(df_centroids[['northing','easting']], r=2)
queries = {}
print("ind_nn",len(ind_nn))
print("ind_r",len(ind_r))
for i in range(len(ind_nn)):
print("index",i,' / ',len(ind_nn))
# get query info
query = df_centroids.iloc[i]["file"]
# get yaw info of this query
query_yaw = df_centroids.iloc[i]["yaw"]
# get positive filename and shuffle
positives = np.setdiff1d(ind_nn[i],[i]).tolist()
random.shuffle(positives)
# positives = positives[0:2]
# get negative filename and shuffle
negatives = np.setdiff1d(
df_centroids.index.values.tolist(),ind_r[i]).tolist()
random.shuffle(negatives)
# negatives = negatives[0:50]
# add all info to query dict
queries[i] = {"query":query, "heading":query_yaw,
"positives":positives,"negatives":negatives}
# dump all queries into pickle file for training
if pickle_flag:
with open(filename, 'wb') as handle:
pickle.dump(queries, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("Done ", filename)
# Initialize pandas DataFrame
df_train = pd.DataFrame(columns=['file','northing','easting','yaw'])
df_test = | pd.DataFrame(columns=['file','northing','easting','yaw']) | pandas.DataFrame |
import copy
import numpy as np
import pandas as pd
import time
import datetime
from itertools import product
from copy import deepcopy
import os
import sys
import inspect
from collections import namedtuple, defaultdict
from tabulate import tabulate
from pprint import pprint, pformat
import traceback
import argparse
import clify
from dps import cfg, init
from dps.utils import (
gen_seed, Config, ExperimentStore, edit_text, NumpySeed, AttrDict, get_default_config, redirect_stream
)
from dps.train import training_loop
from dps.parallel import Job, ReadOnlyJob
from dps.train import FrozenTrainingLoopData
from dps.hyper.parallel_session import submit_job, ParallelSession
class HyperSearch(object):
""" Interface to a directory storing a hyper-parameter search.
Approximately a `frozen`, read-only handle for a directoy created by ParallelSession.
"""
def __init__(self, path):
self.path = path
job_path = os.path.join(path, 'results.zip')
if not os.path.exists(job_path):
job_path = os.path.join(path, 'orig.zip')
assert os.path.exists(job_path)
self.job = ReadOnlyJob(job_path)
@property
def objects(self):
return self.job.objects
def dist_keys(self):
""" The keys that were searched over. """
distributions = self.objects.load_object('metadata', 'distributions')
if isinstance(distributions, list):
keys = set()
for d in distributions:
keys |= set(d.keys())
keys = list(keys)
else:
distributions = Config(distributions)
keys = list(distributions.keys())
keys.append('idx')
return sorted(set(keys))
def dist(self):
return self.objects.load_object('metadata', 'distributions')
def sampled_configs(self):
pass
@property
def experiment_paths(self):
experiments_dir = os.path.join(self.path, 'experiments')
exp_dirs = os.listdir(experiments_dir)
return [os.path.join(experiments_dir, ed) for ed in exp_dirs]
def extract_stage_data(self, fields=None, bare=False):
""" Extract stage-by-stage data about the training runs.
Parameters
----------
bare: boolean
If True, only returns the data. Otherwise, additionally returns the stage-by-stage config and meta-data.
Returns
-------
A nested data structure containing the requested data.
{param-setting-key: {(repeat, seed): (df, sc, md)
where:
df is a pandas DataFrame
sc is a list giving the config for each stage
md is a dictionary storing metadata
"""
stage_data = defaultdict(dict)
if isinstance(fields, str):
fields = fields.split()
config_keys = self.dist_keys()
KeyTuple = namedtuple(self.__class__.__name__ + "Key", config_keys)
for exp_path in self.experiment_paths:
try:
exp_data = FrozenTrainingLoopData(exp_path)
md = {}
md['host'] = exp_data.host
for k in config_keys:
md[k] = exp_data.get_config_value(k)
sc = []
records = []
for stage in exp_data.history:
record = stage.copy()
stage_config = record['stage_config'].copy()
sc.append(stage_config)
del record['stage_config']
record = AttrDict(record).flatten()
if 'best_path' in record:
del record['best_path']
if 'final_path' in record:
del record['final_path']
# Fix and filter keys
_record = {}
for k, v in record.items():
if k.startswith("best_"):
k = k[5:]
if (fields and k in fields) or not fields:
_record[k] = v
records.append(_record)
key = KeyTuple(*(exp_data.get_config_value(k) for k in config_keys))
repeat = exp_data.get_config_value("repeat")
seed = exp_data.get_config_value("seed")
if bare:
stage_data[key][(repeat, seed)] = pd.DataFrame.from_records(records)
else:
stage_data[key][(repeat, seed)] = ( | pd.DataFrame.from_records(records) | pandas.DataFrame.from_records |
import pandas as pd
import numpy as np
import os
import sys
sys.path.append('../../code/scripts')
from dataset_chunking_fxns import add_stratified_kfold_splits, add_stratified_test_splits
# open the harvard/mit data
file_dir = '../../data'
file_dir = os.path.join(file_dir, 'mooc')
file_path = os.path.join(file_dir,'harvard_mit.csv')
all_df = pd.read_csv(file_path, header=None, na_values='NaN', low_memory=False)
features = ["course_id", "userid_DI", "registered", "viewed", "explored", "certified", "final_cc_cname_DI",
"LoE_DI", "YoB", "gender", "grade", "start_time_DI", "last_event_DI", "nevents", "ndays_act",
"nplay_video", "nchapters", "nforum_posts", "roles", "incomplete_flag"]
all_df.columns = features
all_df = all_df.iloc[1:]
all_df.reset_index(drop=True)
# only keep the following features
print("Dropping nans based on the following features: ")
features_drop_na = ['gender', 'LoE_DI', 'final_cc_cname_DI', 'YoB', 'ndays_act', 'nplay_video', 'nforum_posts',
'nevents', 'course_id', 'certified']
for f in features_drop_na:
print(f)
print()
subset_all_df = all_df[features_drop_na]
print('original number of points:',len(subset_all_df))
subset_all_df = subset_all_df.dropna()
subset_all_df = subset_all_df.reset_index(drop=True)
print()
print('One hot encoding course id, education level, and country...')
print()
# change gender labels from 'M' or 'F' to binary
gender_to_binary = {'m':0, 'f':1}
subset_all_df['gender'] = subset_all_df['gender'].apply(func=(lambda g: gender_to_binary[g]))
# one hot encode course id
one_hot_encoding_course_id = pd.get_dummies(subset_all_df['course_id'])
course_id_keys = list(one_hot_encoding_course_id.keys())
subset_all_df = subset_all_df.join(one_hot_encoding_course_id)
# # one hot encode education level
# one_hot_encoding_edu = pd.get_dummies(subset_all_df['LoE_DI'])
# subset_all_df = subset_all_df.join(one_hot_encoding_edu)
# subset_all_df = subset_all_df.drop(['LoE_DI'], axis=1)
# change education level to be binary on whether the individual has completed any post-secondary education
def edu_to_binary(label):
if label == 'Master\'s':
return 1
elif label == 'Secondary':
return 0
elif label == 'Less than Secondary':
return 0
elif label == 'Bachelor\'s':
return 1
elif label == 'Doctorate':
return 1
else:
print('Unexpected input:', label)
# one hot encode eduation label
one_hot_encoding_edu = | pd.get_dummies(subset_all_df['LoE_DI']) | pandas.get_dummies |
import os
import numpy
import pandas as pd
import scipy.stats as st
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs')
def summary_cost(int_details,ctrl_m,ctrl_f,trt_m,trt_f, text):
int_dwc = 1 / (1 + discount_rate) ** numpy.array(range(time_horizon))
int_c = numpy.array([[prog_cost] * time_horizon for i in range(1)])
int_cost = numpy.sum(numpy.dot(int_c, int_dwc))
female_pop = 188340000
male_pop = 196604000
pop = female_pop + male_pop
f_prop = female_pop / pop
m_prop = male_pop / pop
samples = ctrl_m.shape[0]
cs = 0
nq = 0
ic = [0.00 for i in range(samples)]
q_gained = [0.00 for i in range(samples)]
q_inc_percent = [0.00 for i in range(samples)]
htn_cost = [0.00 for i in range(samples)]
cvd_cost = [0.00 for i in range(samples)]
net_cost = [0.00 for i in range(samples)]
exp_inc_per = [0.00 for i in range(samples)]
for i in range(samples):
q_gained[i] = (((ctrl_m.loc[i, "Average DALYs"] - trt_m.loc[i, "Average DALYs"])* m_prop) + ((ctrl_f.loc[i, "Average DALYs"] - trt_f.loc[i, "Average DALYs"])* f_prop))
q_inc_percent[i] = q_gained[i] * 100/((ctrl_m.loc[i, "Average DALYs"] * m_prop) + (ctrl_f.loc[i, "Average DALYs"] *f_prop))
htn_cost[i] = int_cost + ((trt_m.loc[i, "Average HTN Cost"] - ctrl_m.loc[i, "Average HTN Cost"]) * m_prop) + ((trt_f.loc[i, "Average HTN Cost"] - ctrl_f.loc[i, "Average HTN Cost"]) * f_prop)
cvd_cost[i] = ((trt_m.loc[i, "Average CVD Cost"] - ctrl_m.loc[i, "Average CVD Cost"] + trt_m.loc[i, "Average Chronic Cost"] - ctrl_m.loc[i, "Average Chronic Cost"]) * m_prop) + ((trt_f.loc[i, "Average CVD Cost"] - ctrl_f.loc[i, "Average CVD Cost"] + trt_f.loc[i, "Average Chronic Cost"] - ctrl_f.loc[i, "Average Chronic Cost"]) * f_prop)
exp_inc_per[i] = (((trt_m.loc[i, "Average Cost"] - ctrl_m.loc[i, "Average Cost"] + int_cost) * m_prop) + ((trt_f.loc[i, "Average Cost"] - ctrl_f.loc[i, "Average Cost"] + int_cost) * f_prop)) * 100 / ((ctrl_m.loc[i, "Average Cost"] * m_prop ) + (ctrl_f.loc[i, "Average Cost"] * f_prop))
net_cost[i] = htn_cost[i] + cvd_cost[i]
ic[i] = net_cost[i] / q_gained[i]
if net_cost[i] < 0:
cs = cs + 1
if q_gained[i] < 0:
nq = nq + 1
budget_impact = numpy.mean(net_cost) * pop / time_horizon
htn_percap = numpy.mean(htn_cost) / time_horizon
cvd_percap = numpy.mean(cvd_cost) / time_horizon
htn_annual = numpy.mean(htn_cost) * pop / time_horizon
cvd_annual = numpy.mean(cvd_cost) * pop / time_horizon
cost_inc = numpy.mean(exp_inc_per)
ICER = numpy.mean(ic)
QALY = numpy.mean(q_inc_percent)
HTN = numpy.mean(htn_cost)
CVD = numpy.mean(cvd_cost)
icer_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(ic), scale=st.sem(ic))
qaly_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(q_inc_percent), scale=st.sem(q_inc_percent))
htn = st.t.interval(0.95, samples - 1, loc=numpy.mean(htn_cost), scale=st.sem(htn_cost))
cvd = st.t.interval(0.95, samples - 1, loc=numpy.mean(cvd_cost), scale=st.sem(cvd_cost))
cost_inc_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(exp_inc_per), scale=st.sem(exp_inc_per))
if budget_impact < 0:
m_icer = 'Cost Saving'
s_icer = 'CS'
else:
m_icer = numpy.mean(net_cost) / numpy.mean(q_gained)
s_icer = str(numpy.round(m_icer,1))
m_daly = str(numpy.round(QALY,3)) + "\n(" + str(numpy.round(qaly_95[0],3)) + " to " + str(numpy.round(qaly_95[1],3)) + ")"
m_htn = str(numpy.round(HTN,2)) + "\n(" + str(numpy.round(htn[0],2)) + " to " + str(numpy.round(htn[1],2)) + ")"
m_cvd = str(numpy.round(CVD,2)) + "\n(" + str(numpy.round(cvd[0],2)) + " to " + str(numpy.round(cvd[1],2)) + ")"
m_costinc = str(numpy.round(cost_inc, 2)) + "\n(" + str(numpy.round(cost_inc_95[0], 2)) + " to " + str(numpy.round(cost_inc_95[1], 2)) + ")"
m_budget = str(numpy.round(budget_impact,0)/1000)
err_cost = 1.96 * st.sem(exp_inc_per)
err_daly = 1.96 * st.sem(q_inc_percent)
str_icer = text + " (" + s_icer + ")"
detailed = [int_details[2], int_details[0], int_details[1], int_details[3], int_details[4], ICER, icer_95[0],icer_95[1], QALY, qaly_95[0], qaly_95[1], htn[0], htn[1], cvd[0], cvd[1], budget_impact, htn_annual, cvd_annual, htn_percap, cvd_percap, cs, nq]
manuscript = [int_details[2], int_details[0], int_details[1], int_details[3], int_details[4], m_icer, m_daly, m_costinc, m_htn, m_cvd, m_budget, cs]
plot = [text, str_icer, cost_inc, QALY, err_cost, err_daly]
return detailed, manuscript, plot
summary_output = []
appendix_output = []
plot_output = []
'''Analysis 0: Baseline'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'Base Case')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 1: Doubled Medication Cost'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PSAFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6, 2, 0, 20]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8, 2, 0, 20]
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'2X Medication Cost')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 2: Increased Programmatic Cost'''
time_horizon = 20
prog_cost = 0.13*4
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'4X Programmatic Cost')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 3: 20% reduction in baseline CVD risk'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PSAFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6, 1, 0.2, 20]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8, 1, 0.2, 20]
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'Reduced Baseline Risk')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 4: NPCDCS Medication Protocol'''
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 0, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_m = pd.read_csv(file_name_m)
treatment_f = | pd.read_csv(file_name_f) | pandas.read_csv |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
DataWig HPO
Implements hyperparameter optimisation for datawig
"""
import os
import time
from datetime import datetime
import pandas as pd
from pandas.api.types import is_numeric_dtype
from sklearn.metrics import mean_squared_error, f1_score, recall_score
from datawig.utils import random_cartesian_product
from .column_encoders import BowEncoder, CategoricalEncoder, NumericalEncoder, TfIdfEncoder
from .mxnet_input_symbols import BowFeaturizer, NumericalFeaturizer, EmbeddingFeaturizer
from .utils import logger, get_context, random_split, flatten_dict
class _HPO:
"""
Implements systematic hyperparameter optimisation for datawig
Example usage:
imputer = SimpleImputer(input_columns, output_column)
hps = dict( ... ) # specify hyperparameter choices
hpo = HPO(impter, hps)
results = hpo.tune
"""
def __init__(self):
"""
Init method also defines default hyperparameter choices, global and for each input column type.
"""
self.hps = None
self.results = pd.DataFrame()
self.output_path = None
def __preprocess_hps(self,
train_df: pd.DataFrame,
simple_imputer,
num_evals) -> pd.DataFrame:
"""
Generates list of all possible combinations of hyperparameter from the nested hp dictionary.
Requires the data to check whether the relevant columns are present and have the appropriate type.
:param train_df: training data as dataframe
:param simple_imputer: Parent instance of SimpleImputer
:param num_evals is the maximum number of hpo configurations to consider.
:return: Data frame where each row is a hyperparameter configuration and each column is a parameter.
Column names have the form colum:parameter, e.g. title:max_tokens or global:learning rate.
"""
default_hps = dict()
# Define default hyperparameter choices for each column type (string, categorical, numeric)
default_hps['global'] = {}
default_hps['global']['learning_rate'] = [4e-3]
default_hps['global']['weight_decay'] = [0]
default_hps['global']['num_epochs'] = [100]
default_hps['global']['patience'] = [5]
default_hps['global']['batch_size'] = [16]
default_hps['global']['final_fc_hidden_units'] = [[]]
default_hps['string'] = {}
default_hps['string']['ngram_range'] = {}
default_hps['string']['max_tokens'] = [] # [2 ** exp for exp in [12, 15, 18]]
default_hps['string']['tokens'] = [] # [['chars'], ['words']]
default_hps['string']['ngram_range']['words'] = [(1, 3)]
default_hps['string']['ngram_range']['chars'] = [(1, 5)]
default_hps['categorical'] = {}
default_hps['categorical']['max_tokens'] = [2 ** 12]
default_hps['categorical']['embed_dim'] = [10]
default_hps['numeric'] = {}
default_hps['numeric']['normalize'] = [True]
default_hps['numeric']['numeric_latent_dim'] = [10]
default_hps['numeric']['numeric_hidden_layers'] = [1]
# create empty dict if global hps not passed
if 'global' not in self.hps.keys():
self.hps['global'] = {}
# merge data type default parameters with the ones in self.hps
# giving precedence over the parameters specified in self.hps
for data_type in ['string', 'categorical', 'numeric']:
for parameter_key, values in default_hps[data_type].items():
if parameter_key in self.hps[data_type]:
default_hps[data_type][parameter_key] = self.hps[data_type][parameter_key]
# add type to column dictionaries if it was not specified, does not support categorical types
for column_name in simple_imputer.input_columns:
if column_name not in self.hps.keys():
self.hps[column_name] = {}
if 'type' not in self.hps[column_name].keys():
if | is_numeric_dtype(train_df[column_name]) | pandas.api.types.is_numeric_dtype |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 9 14:13:23 2021
@author: willrichardson
"""
# This script calculates other relevant stats and quantities of interest for each half-hour period
# includes 30-minute quantites and spectral quantites in each period
# leaving RMSDs as local variables; pass them into this script
#%% import libraries
import sys
sys.path.insert(0, '/Users/willrichardson/opt/anaconda3/lib/python3.8/site-packages')
import os
import glob
import pandas as pd
import numpy as np
from funcs import sort_ByDate_DMY, read_conc
from funcs import anc_unit_conv, Fco2_name, sigma_m
#%% get current working directory, relevant environment variables
args = sys.argv
RMSDq_slope = np.float64(args[1])
RMSDc_slope = np.float64(args[2])
base_path = os.getcwd()
site_yr = os.environ['site_yr']
run_ID = os.environ['run_ID']
fn_LB = os.environ['LB']
fn_UB = os.environ['UB']
DT = np.float64(os.environ['DT'])
nj = int(np.rint(np.log2((30*60)/DT)))
zm = np.float64(os.environ['Zm'])
N_wvlt = 2**nj
# make output file name
output_fname = base_path + '/ref_data/Output/%s/%s/%s_Interm_RefDf_allHH_PostProcStats.csv'%(site_yr, run_ID, site_yr)
#%% load reference dfs, outputs from partitioning program, other ancillary info
df_ref = pd.read_csv(base_path + '/ref_data/Output/%s/%s_Interm_RefDf_allHH.csv' %(site_yr, site_yr),
index_col='Timestamp', infer_datetime_format=True, parse_dates=True)
RLM_df = pd.read_csv(base_path + '/ref_data/Output/%s/%s_RLMfitRMSDs_fnLB_%s_fnUB_%s.csv' %(site_yr, site_yr, fn_LB, fn_UB),
index_col=0, infer_datetime_format=True, parse_dates=True)
zc_df = pd.read_csv(os.environ['zc_file'], sep='\t', index_col=0, names=['Timestamp', 'zc'], parse_dates=True, infer_datetime_format=True)
# partitioning program outputs
## load and sort partitioned fluxes and random error estimates
flux = glob.glob(base_path + '/flux/%s/flux*.txt'%site_yr)
rerr = glob.glob(base_path + '/flux/%s/rerror*.txt'%site_yr)
flux = sort_ByDate_DMY(flux); rerr = sort_ByDate_DMY(rerr)
raw_files, part_df = read_conc(flux, rerr)
## partitioning program yields fluxes in mmol m-2 s-1; convert to umol m-2 s-1
part_df = part_df*1000
#%% join into a unified dataframe (use 'inner' join so as to only keep rows where raw data made it all the way to the partitioning stage)
df_master = df_ref.join([part_df, RLM_df], how='inner')
#%% 30 minute stats
# add raw filenames as a column in the data frame
df_master['raw_file'] = raw_files
# fraction of total CH4 flux that is ebullition
df_master['frac_eb_q'] = df_master['CH4_eb_q']/df_master['CH4_tot']
df_master['frac_eb_c'] = df_master['CH4_eb_c']/df_master['CH4_tot']
df_master['frac_eb_T'] = df_master['CH4_eb_T']/df_master['CH4_tot']
# diffusive fluxes
df_master['CH4_diff_q'] = df_master['CH4_tot'] - df_master['CH4_eb_q']
df_master['CH4_diff_c'] = df_master['CH4_tot'] - df_master['CH4_eb_c']
df_master['CH4_diff_T'] = df_master['CH4_tot'] - df_master['CH4_eb_T']
# Some unit sonversions if desired
if anc_unit_conv == True:
from funcs import air_temp_K, T_dew, P_atm, VP, VP_sat, VPD_name
df_master['T_air_C'] = df_master[air_temp_K] - 273.15; df_master[T_dew] = df_master[T_dew] - 273.15 #[K to C]
df_master[VP] = df_master[VP]/1000; df_master[VP_sat] = df_master[VP_sat]/1000 #[Pa to kPa]
df_master[VPD_name] = df_master[VPD_name]/1000; df_master['P'] = df_master[P_atm]/1000 # [Pa to kPa]
df_master.drop([P_atm, air_temp_K], axis=1, inplace=True)
# co2 flux magnitude (for reference scalar thresholding)
df_master['co2_flux_mag'] = np.absolute(df_master[Fco2_name])
# normalized random error stats
df_master['Ebq_rerr_FebNorm'] = df_master['CH4_ebq_err']/df_master['CH4_eb_q']
df_master['Ebq_rerr_FtotNorm'] = df_master['CH4_ebq_err']/df_master['CH4_tot']
df_master['Diffq_rerr_FdiffNorm'] = df_master['CH4_diffq_err']/df_master['CH4_diff_q']
df_master['Diffq_rerr_FtotNorm'] = df_master['CH4_diffq_err']/df_master['CH4_tot']
df_master['Ebc_rerr_FebNorm'] = df_master['CH4_ebc_err']/df_master['CH4_eb_c']
df_master['Ebc_rerr_FtotNorm'] = df_master['CH4_ebc_err']/df_master['CH4_tot']
df_master['Diffc_rerr_FdiffNorm'] = df_master['CH4_diffc_err']/df_master['CH4_diff_c']
df_master['Diffc_rerr_FtotNorm'] = df_master['CH4_diffc_err']/df_master['CH4_tot']
#%% function for spectral stats on each period
def SpectralStats(tstamp):
# convert timestamp to format of file naming convention
day = tstamp.strftime('%Y%m%d'); datetime = tstamp.strftime('%Y%m%d_%H%M')
# load data; first row is coarse-grained mean, skip it
wvlt_df = pd.read_table(base_path + '/wvlet/%s/'%site_yr + day + '/' + 'wvlet-' + datetime + '.dat', sep='\s+',
names=['Index_wvlt', 'u', 'w', 'T', 'q', 'c', 'm'], delim_whitespace=False,
skiprows=1)
# get canopy height
zc = np.float64(zc_df.loc[tstamp.floor('D')])
# calculate displacement height as 0.66*canopy height
d = 0.67*zc #[m]
# calculate frequency for filtering
u = np.mean(wvlt_df['u'])
wvlt_df['j'] = (N_wvlt/wvlt_df['Index_wvlt'])*0.05
wvlt_df['fn'] = (zm - d)/(wvlt_df['j']*u)
# filter out low frequency components for partitioning
wvlt_df_filt = wvlt_df[wvlt_df['fn'] > np.float64(fn_LB)]
# add ebullition flags so that stats on diffusive/ebullitive fluxes can be calculated
wvlt_df_filt.loc[:, 'thresh_q_upp'] = df_master.loc[tstamp, 'slope_mq']*wvlt_df_filt.loc[:, 'q'] + (3*(RMSDq_slope*df_master.loc[tstamp, sigma_m]))
wvlt_df_filt.loc[:, 'thresh_q_low'] = df_master.loc[tstamp, 'slope_mq']*wvlt_df_filt.loc[:, 'q'] - (3*(RMSDq_slope*df_master.loc[tstamp, sigma_m]))
wvlt_df_filt.loc[:, 'thresh_c_upp'] = df_master.loc[tstamp, 'slope_mc']*wvlt_df_filt.loc[:, 'c'] + (3*(RMSDc_slope*df_master.loc[tstamp, sigma_m]))
wvlt_df_filt.loc[:, 'thresh_c_low'] = df_master.loc[tstamp, 'slope_mc']*wvlt_df_filt.loc[:, 'c'] - (3*(RMSDc_slope*df_master.loc[tstamp, sigma_m]))
wvlt_df_filt.loc[:, 'Ebq'] = (wvlt_df_filt.loc[:, 'm'] > wvlt_df_filt.loc[:, 'thresh_q_upp']) | (wvlt_df_filt.loc[:, 'm'] < wvlt_df_filt.loc[:, 'thresh_q_low'])
wvlt_df_filt.loc[:, 'Ebc'] = (wvlt_df_filt.loc[:, 'm'] > wvlt_df_filt.loc[:, 'thresh_c_upp']) | (wvlt_df_filt.loc[:, 'm'] < wvlt_df_filt.loc[:, 'thresh_c_low'])
# do spectral stats
## result objects
### master output for funtion
out = pd.DataFrame(index=[tstamp])
### frames for scale-wise stats
#### frequency values
fn_res = pd.DataFrame(index=[tstamp])
#### variances
mvar_res = pd.DataFrame(index=[tstamp]); qvar_res = pd.DataFrame(index=[tstamp])
cvar_res = pd.DataFrame(index=[tstamp]); Tvar_res = pd.DataFrame(index=[tstamp]); wvar_res = pd.DataFrame(index=[tstamp])
#### ebullitive covariances
Ebq_cov_res = pd.DataFrame(index=[tstamp]); Ebc_cov_res = pd.DataFrame(index=[tstamp])
#### covariances
wmcov_res = | pd.DataFrame(index=[tstamp]) | pandas.DataFrame |
import tsfel
import numpy as np
import pandas as pd
from tsfresh import extract_features
from tsfresh import select_features
from tsfresh.utilities.dataframe_functions import impute
import pickle
import numpy, scipy.io
acc_data = np.loadtxt(open("../original_data/acc_data.csv", "rb"), delimiter=",", skiprows=1)
gyro_data = np.loadtxt(open("../original_data/gyro_data.csv", "rb"), delimiter=",", skiprows=1)
bt_data = np.loadtxt(open("../original_data/bt_data.csv", "rb"), delimiter=",", skiprows=1)
data_acc_test = pd.DataFrame(acc_data[:,0:3], columns=["acc_x", "acc_y", "acc_z"])
data_gyro_test = | pd.DataFrame(gyro_data[:,0:3], columns=["gyro_x", "gyro_y", "gyro_z"]) | pandas.DataFrame |
#!/usr/bin/env python3
"""
This script generates the diagrams for micro benchmark experiments.
In particular, it generates Figures 4, 5, and 6 in the paper, using the
measurements obtained through the vldb2020_microbenchmarks.sh script.
"""
import argparse
import os
import sys
import matplotlib as mpl
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
_pathMorphStore = "MorphStore"
sys.path.append(os.path.join(_pathMorphStore, "Benchmarks", "tools", "mal2x"))
import mal2morphstore.processingstyles as pss
import utils
# *****************************************************************************
# Utility functions
# *****************************************************************************
# -----------------------------------------------------------------------------
# Loading measurements
# -----------------------------------------------------------------------------
def loadMeaFigure4():
"""Loads the measurements for Figure 4 (experiment on operator classes)."""
# Utility function.
def getInputSize(inDataFmt):
countValues = 512 * 1024 * 1024
if inDataFmt == "uncompr_f":
bytes = countValues * 8
elif inDataFmt.startswith("static_vbp_f<vbp_l<4, "):
bytes = countValues * 4 / 8
elif inDataFmt.startswith("static_vbp_f<vbp_l<3, "):
bytes = countValues * 3 / 8
else:
raise RuntimeError()
return bytes / 1024 ** 3
# Load the measurements from the individual repetitions.
dfs = []
for repIdx in range(1, countReps + 1):
df = pd.read_csv(
os.path.join(pathArtifacts, "example_{}.csv".format(repIdx)),
sep="\t",
skiprows=2
).query("vector_extension != 'ps_scalar'")
dfs.append(df)
# Combine the repetitions.
dfMea = | pd.concat(dfs) | pandas.concat |
import warnings
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Double,
Integer,
NaturalLanguage,
)
from rayml.pipelines.components import PerColumnImputer
from rayml.utils.woodwork_utils import infer_feature_types
@pytest.fixture
def non_numeric_df():
X = pd.DataFrame(
[
["a", "a", "a", "a"],
["b", "b", "b", "b"],
["a", "a", "a", "a"],
[np.nan, np.nan, np.nan, np.nan],
]
)
X.columns = ["A", "B", "C", "D"]
return X
def test_invalid_parameters():
with pytest.raises(ValueError):
strategies = ("impute_strategy", "mean")
PerColumnImputer(impute_strategies=strategies)
with pytest.raises(ValueError):
strategies = ["mean"]
PerColumnImputer(impute_strategies=strategies)
def test_all_strategies():
X = pd.DataFrame(
{
"A": pd.Series([2, 4, 6, np.nan]),
"B": pd.Series([4, 6, 4, np.nan]),
"C": pd.Series([6, 8, 8, np.nan]),
"D": pd.Series(["a", "a", "b", np.nan]),
}
)
X.ww.init(logical_types={"D": "categorical"})
X_expected = pd.DataFrame(
{
"A": pd.Series([2, 4, 6, 4]),
"B": pd.Series([4, 6, 4, 4]),
"C": pd.Series([6, 8, 8, 100]),
"D": pd.Series(["a", "a", "b", "a"], dtype="category"),
}
)
strategies = {
"A": {"impute_strategy": "mean"},
"B": {"impute_strategy": "median"},
"C": {"impute_strategy": "constant", "fill_value": 100},
"D": {"impute_strategy": "most_frequent"},
}
transformer = PerColumnImputer(impute_strategies=strategies)
X_t = transformer.fit_transform(X)
assert_frame_equal(X_expected, X_t, check_dtype=False)
def test_fit_transform():
X = pd.DataFrame([[2], [4], [6], [np.nan]])
X_expected = pd.DataFrame([[2], [4], [6], [4]])
X.columns = ["A"]
X_expected.columns = ["A"]
strategies = {"A": {"impute_strategy": "median"}}
transformer = PerColumnImputer(impute_strategies=strategies)
transformer.fit(X)
X_t = transformer.transform(X)
transformer = PerColumnImputer(impute_strategies=strategies)
X_fit_transform = transformer.fit_transform(X)
assert_frame_equal(X_t, X_fit_transform)
def test_non_numeric_errors(non_numeric_df):
# test col with all strings
X = non_numeric_df
X.ww.init(
logical_types={
"A": "categorical",
"B": "categorical",
"C": "categorical",
"D": "categorical",
}
)
# mean with all strings
strategies = {"A": {"impute_strategy": "mean"}}
with pytest.raises(
ValueError, match="Cannot use mean strategy with non-numeric data"
):
transformer = PerColumnImputer(impute_strategies=strategies)
transformer.fit_transform(X)
with pytest.raises(
ValueError, match="Cannot use mean strategy with non-numeric data"
):
transformer = PerColumnImputer(impute_strategies=strategies)
transformer.fit(X)
# median with all strings
strategies = {"B": {"impute_strategy": "median"}}
with pytest.raises(
ValueError, match="Cannot use median strategy with non-numeric data"
):
transformer = PerColumnImputer(impute_strategies=strategies)
transformer.fit_transform(X)
with pytest.raises(
ValueError, match="Cannot use median strategy with non-numeric data"
):
transformer = PerColumnImputer(impute_strategies=strategies)
transformer.fit(X)
def test_non_numeric_valid(non_numeric_df):
X = non_numeric_df.copy()
X.ww.init(
logical_types={
"A": "categorical",
"B": "categorical",
"C": "categorical",
"D": "categorical",
}
)
# most frequent with all strings
strategies = {
"A": {"impute_strategy": "most_frequent"},
"B": {"impute_strategy": "most_frequent"},
"C": {"impute_strategy": "most_frequent"},
"D": {"impute_strategy": "most_frequent"},
}
transformer = PerColumnImputer(impute_strategies=strategies)
X_expected = pd.DataFrame(
{
"A": pd.Series(["a", "b", "a", "a"], dtype="category"),
"B": pd.Series(["a", "b", "a", "a"], dtype="category"),
"C": pd.Series(["a", "b", "a", "a"], dtype="category"),
"D": pd.Series(["a", "b", "a", "a"], dtype="category"),
}
)
X_t = transformer.fit_transform(X)
assert_frame_equal(X_expected, X_t)
X = non_numeric_df.copy()
# constant with all strings
strategies = {
"B": {"impute_strategy": "most_frequent"},
"C": {"impute_strategy": "most_frequent"},
"D": {"impute_strategy": "constant", "fill_value": 100},
}
transformer = PerColumnImputer(impute_strategies=strategies)
X.ww.init(
logical_types={
"A": "categorical",
"B": "categorical",
"C": "categorical",
"D": "categorical",
}
)
X_expected = pd.DataFrame(
{
"A": pd.Series(["a", "b", "a", np.nan], dtype="category"),
"B": pd.Series(["a", "b", "a", "a"], dtype="category"),
"C": pd.Series(["a", "b", "a", "a"], dtype="category"),
"D": pd.Series(["a", "b", "a", 100], dtype="category"),
}
)
X_t = transformer.fit_transform(X)
assert_frame_equal(X_expected, X_t)
def test_datetime_does_not_error(fraud_100):
X, y = fraud_100
pci = PerColumnImputer(
impute_strategies={"country": {"impute_strategy": "most_frequent"}}
)
pci.fit(X, y)
assert pci._is_fitted
def test_fit_transform_drop_all_nan_columns():
X = pd.DataFrame(
{
"all_nan": [np.nan, np.nan, np.nan],
"some_nan": [np.nan, 1, 0],
"another_col": [0, 1, 2],
}
)
X.ww.init(logical_types={"all_nan": "Double"})
strategies = {
"all_nan": {"impute_strategy": "most_frequent"},
"some_nan": {"impute_strategy": "most_frequent"},
"another_col": {"impute_strategy": "most_frequent"},
}
transformer = PerColumnImputer(impute_strategies=strategies)
X_expected_arr = | pd.DataFrame({"some_nan": [0, 1, 0], "another_col": [0, 1, 2]}) | pandas.DataFrame |
"""
Test indicators.py functions for common indicators to be extracted from an OHLC dataset
Author: <NAME>
"""
import unittest
import indicators
import pandas as pd
class TestIndicators(unittest.TestCase):
def test_checkGreenCandle(self):
candleGreen = {"Open": 1.2, "Close": 1.5}
candleRed = {"Open": 3.4, "Close": 2}
self.assertEqual(indicators.checkGreenCandle(candleGreen),True)
self.assertEqual(indicators.checkGreenCandle(candleRed),False)
def test_checkEngulfingCandleOverPeriod(self):
candleSet = []
candleSet.append({"Open": 1, "Close": 2})
candleSet.append({"Open": 3, "Close": 0.5})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.checkEngulfingCandleOverPeriod(candleSet), [0,-1])
candleSet = []
candleSet.append({"Open": 5, "Close": 4})
candleSet.append({"Open": 3, "Close": 6})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.checkEngulfingCandleOverPeriod(candleSet), [0,1])
def test_sumGainsAndLossesOverPeriod(self):
candleSet = []
candleSet.append({"Open": 1, "Close": 2})
candleSet.append({"Open": 2, "Close": 1})
candleSet = pd.DataFrame(candleSet)
gainsTotal,lossesTotal = indicators.sumGainsAndLossesOverPeriod(candleSet)
self.assertEqual(gainsTotal,1)
self.assertEqual(lossesTotal,1)
candleSet = []
candleSet.append({"Open": 1, "Close": 2})
candleSet.append({"Open": 2, "Close": 3})
candleSet = pd.DataFrame(candleSet)
gainsTotal,lossesTotal = indicators.sumGainsAndLossesOverPeriod(candleSet)
self.assertEqual(gainsTotal,2)
self.assertEqual(lossesTotal,0)
candleSet = []
candleSet.append({"Open": 3, "Close": 2})
candleSet.append({"Open": 2, "Close": 1})
candleSet = pd.DataFrame(candleSet)
gainsTotal,lossesTotal = indicators.sumGainsAndLossesOverPeriod(candleSet)
self.assertEqual(gainsTotal,0)
self.assertEqual(lossesTotal,2)
"""
def test_computeRSI(self):
candleSet = []
for i in range (1,29):
candleSet.append({"Open": i, "Close": i+1})
candleSet = pd.DataFrame(candleSet)
"""
def test_computeSMAsOverPeriod(self):
candleSet = []
for i in range(250):
candleSet.append({"Open": 5, "Close": 5})
candleSet = pd.DataFrame(candleSet)
solution10,solution20,solution50,solution100,solution150,solution200 = [],[],[],[],[],[]
for i in range(250):
if i>10:
solution10.append(0)
else:
solution10.append(None)
if i>20:
solution20.append(0)
else:
solution20.append(None)
if i>50:
solution50.append(0)
else:
solution50.append(None)
if i>100:
solution100.append(0)
else:
solution100.append(None)
if i>150:
solution150.append(0)
else:
solution150.append(None)
if i>200:
solution200.append(0)
else:
solution200.append(None)
sMA10,sMA20,sMA50,sMA100,sMA150,sMA200 = indicators.computeSMAsOverPeriod(candleSet)
self.assertEqual(sMA10,solution10)
self.assertEqual(sMA20,solution20)
self.assertEqual(sMA50,solution50)
self.assertEqual(sMA100,solution100)
self.assertEqual(sMA150,solution150)
self.assertEqual(sMA200,solution200)
candleSet = []
for i in range(125):
candleSet.append({"Open": 5, "Close": 5})
candleSet.append({"Open": 10, "Close": 10})
candleSet = pd.DataFrame(candleSet)
solution10,solution20,solution50,solution100,solution150,solution200 = [],[],[],[],[],[]
for i in range(125):
if i==0:
solution10.append(None)
elif i*2-1>10:
solution10.append(0.25)
solution10.append(0.5)
else:
solution10.append(None)
solution10.append(None)
if i==0:
solution20.append(None)
elif i*2-1>20:
solution20.append(0.25)
solution20.append(0.5)
else:
solution20.append(None)
solution20.append(None)
if i==0:
solution50.append(None)
elif i*2-1>50:
solution50.append(0.25)
solution50.append(0.5)
else:
solution50.append(None)
solution50.append(None)
if i==0:
solution100.append(None)
elif i*2-1>100:
solution100.append(0.25)
solution100.append(0.5)
else:
solution100.append(None)
solution100.append(None)
if i==0:
solution150.append(None)
elif i*2-1>150:
solution150.append(0.25)
solution150.append(0.5)
else:
solution150.append(None)
solution150.append(None)
if i==0:
solution200.append(None)
elif i*2-1>200:
solution200.append(0.25)
solution200.append(0.5)
else:
solution200.append(None)
solution200.append(None)
solution10.append(0.25)
solution20.append(0.25)
solution50.append(0.25)
solution100.append(0.25)
solution150.append(0.25)
solution200.append(0.25)
sMA10,sMA20,sMA50,sMA100,sMA150,sMA200 = indicators.computeSMAsOverPeriod(candleSet)
self.assertEqual(sMA10,solution10)
self.assertEqual(sMA20,solution20)
self.assertEqual(sMA50,solution50)
self.assertEqual(sMA100,solution100)
self.assertEqual(sMA150,solution150)
self.assertEqual(sMA200,solution200)
def test_computeAverageTrueRange(self):
candleSet = []
for i in range (1,29):
candleSet.append({"Low": i, "High": i+1,"Close": i+1})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.computeAverageTrueRange(candleSet),1)
candleSet = []
for i in range (1,29):
candleSet.append({"Low": i, "High": i+5,"Close": i+5})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.computeAverageTrueRange(candleSet),5)
candleSet = []
for i in range (1,29):
candleSet.append({"Low": 1, "High": 1,"Close": 1})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.computeAverageTrueRange(candleSet),0)
def test_checkIncreaseTomorrow(self):
candleSet = []
candleSet.append({"Open": 1, "Close": 1})
candleSet.append({"Open": 1, "Close": 2})
candleSet = | pd.DataFrame(candleSet) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pickle
import nltk
from nltk.stem.porter import *
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
import nltk
import string
from nltk.corpus import stopwords
import enchant
from nltk.tokenize import RegexpTokenizer
# Load data
dfC = pd.read_pickle("../data/lyrics_clean.pickle")
# Init assisting data
stemmer = PorterStemmer()
stopwordsSet = stopwords.words("english")
d = enchant.Dict("en_US")
def mapAndDict(song):
tokenizer = RegexpTokenizer(r'[a-zA-Z]+')
tokens = tokenizer.tokenize(song.lower())
countDict = defaultdict(int)
stems = map(lambda x: stemmer.stem(x),[t for t in tokens if (not t in stopwordsSet)])
#unusual = list([t for t in tokens if (not d.check(t))])
#print(unusual)
for stem in stems:
countDict[stem] += 1
return countDict
countDicts = map(lambda s: mapAndDict(s), dfC["lyrics"])
fullFrame = pd.DataFrame(list(countDicts)).fillna(0)
fullFrame.to_pickle("../data/fullFrame.pickle")
sortedCounts = fullFrame.sum(axis=0).sort_values()
top2kWords = sortedCounts[-2000:].keys()
topBagofWords = fullFrame[top2kWords]
topBagofWords.to_pickle("../data/topBagofWords10k.pickle")
# def unusual_words(text):
# text_vocab = set(w.lower() for w in text if w.isalpha())
# english_vocab = set(w.lower() for w in nltk.corpus.words.words())
# unusual = text_vocab - english_vocab
# return sorted(unusual)
for k in sampleFrame.keys():
print(k)
for k in sampleFrame.keys():
print(k)
wordSet = set()
dfC['stemmed'] = | pd.Series("", index=dfC.index) | pandas.Series |
import os
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.neural_network import MLPRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.metrics import mean_squared_log_error
from sklearn.model_selection import GridSearchCV, cross_val_score
print(os.listdir("data"))
train_data = | pd.read_csv('data/train.csv') | pandas.read_csv |
import os
import sys
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, NullFormatter, ScalarFormatter)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = "Build haplotypes and make scatter plot for vizualization")
db_p = parser.add_argument_group('Sample name parameters')
db_p.add_argument('-gm', '--gmm_file', required=True, help='Tab separated file with IBS and non_IBS categorized by GMM model')
db_p.add_argument('-db', '--db_file', required=True, help='Tab separated file with variations genetared by IBSpy output')
db_p.add_argument('-rf', '--refId', required=True, help='Name of the genome reference used')
db_p.add_argument('-qr', '--qryId', required=True, help='Name of the query sample')
db_p.add_argument('-chr', '--chrNme', required=True, help='Chromosome name to be plotted')
db_p.add_argument('-cl', '--chr_length_file', required=True, help='Reference chromosome lenghts file')
hap_block_p = parser.add_argument_group('Haplotype blocks parameters')
hap_block_p.add_argument('-w', '--windSize', required=True, help='Windows size to count variations within')
hap_block_p.add_argument('-vf', '--varfltr', required=True, help='Filter variations above this threshold to compute GMM model')
hap_block_p.add_argument('-st', '--StitchVarNum', required=True, help='Stitching haplotypes: number of non-IBS "outliers" that must a appear consecutively in a windows to be called non-IBS')
out_files = parser.add_argument_group('Output files')
out_files.add_argument('-o','--out_img_file', help='Output scatter plot and bars with haplotypes in ".jpg" format ')
args = parser.parse_args()
gmm_file = args.gmm_file
db_file = args.db_file
refId = args.refId
qryId = args.qryId
chrNme = args.chrNme
chr_length_file = args.chr_length_file
windSize = int(args.windSize)
varfltr = int(args.varfltr)
StitchVarNum = int(args.StitchVarNum)
out_img_file = args.out_img_file
#### GMM input file ####
'''
It is a temporary file where IBS and non-IBS were computed (to 1 as IBS, and 0 non-IBS)
using the number of variations found in a windows of a size employing the GMM model.
'''
inFile = | pd.read_csv(gmm_file, delimiter='\t') | pandas.read_csv |
'''
Boston house prices dataset
'''
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
from sklearn import linear_model
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error,r2_score
from sklearn.preprocessing import PolynomialFeatures
boston = load_boston()
data = | pd.DataFrame(boston.data,columns=boston.feature_names) | pandas.DataFrame |
"""
Tests for SARIMAX models
Author: <NAME>
License: Simplified-BSD
"""
import os
import warnings
from statsmodels.compat.platform import PLATFORM_WIN
import numpy as np
import pandas as pd
import pytest
from statsmodels.tsa.statespace import sarimax, tools
from .results import results_sarimax
from statsmodels.tools import add_constant
from statsmodels.tools.tools import Bunch
from numpy.testing import (
assert_, assert_equal, assert_almost_equal, assert_raises, assert_allclose
)
current_path = os.path.dirname(os.path.abspath(__file__))
realgdp_path = os.path.join('results', 'results_realgdpar_stata.csv')
realgdp_results = pd.read_csv(current_path + os.sep + realgdp_path)
coverage_path = os.path.join('results', 'results_sarimax_coverage.csv')
coverage_results = pd.read_csv(os.path.join(current_path, coverage_path))
class TestSARIMAXStatsmodels(object):
"""
Test ARIMA model using SARIMAX class against statsmodels ARIMA class
Notes
-----
Standard errors are quite good for the OPG case.
"""
@classmethod
def setup_class(cls):
cls.true = results_sarimax.wpi1_stationary
endog = cls.true['data']
# Old results from statsmodels.arima.ARIMA taken before it was removed
# to let test continue to run. On old statsmodels, can run
# result_a = arima.ARIMA(endog, order=(1, 1, 1)).fit(disp=-1)
result_a = Bunch()
result_a.llf = -135.3513139733829
result_a.aic = 278.7026279467658
result_a.bic = 289.9513653682555
result_a.hqic = 283.27183681851653
result_a.params = np.array([0.74982449, 0.87421135, -0.41202195])
result_a.bse = np.array([0.29207409, 0.06377779, 0.12208469])
cls.result_a = result_a
cls.model_b = sarimax.SARIMAX(endog, order=(1, 1, 1), trend='c',
simple_differencing=True,
hamilton_representation=True)
cls.result_b = cls.model_b.fit(disp=-1)
def test_loglike(self):
assert_allclose(self.result_b.llf, self.result_a.llf)
def test_aic(self):
assert_allclose(self.result_b.aic, self.result_a.aic)
def test_bic(self):
assert_allclose(self.result_b.bic, self.result_a.bic)
def test_hqic(self):
assert_allclose(self.result_b.hqic, self.result_a.hqic)
def test_mle(self):
# ARIMA estimates the mean of the process, whereas SARIMAX estimates
# the intercept. Convert the mean to intercept to compare
params_a = self.result_a.params.copy()
params_a[0] = (1 - params_a[1]) * params_a[0]
assert_allclose(self.result_b.params[:-1], params_a, atol=5e-5)
def test_bse(self):
# Test the complex step approximated BSE values
cpa = self.result_b._cov_params_approx(approx_complex_step=True)
bse = cpa.diagonal()**0.5
assert_allclose(bse[1:-1], self.result_a.bse[1:], atol=1e-5)
def test_t_test(self):
import statsmodels.tools._testing as smt
# to trigger failure, un-comment the following:
# self.result_b._cache['pvalues'] += 1
smt.check_ttest_tvalues(self.result_b)
smt.check_ftest_pvalues(self.result_b)
class TestRealGDPARStata(object):
"""
Includes tests of filtered states and standardized forecast errors.
Notes
-----
Could also test the usual things like standard errors, etc. but those are
well-tested elsewhere.
"""
@classmethod
def setup_class(cls):
dlgdp = np.log(realgdp_results['value']).diff()[1:].values
cls.model = sarimax.SARIMAX(dlgdp, order=(12, 0, 0), trend='n',
hamilton_representation=True)
# Estimated by Stata
params = [
.40725515, .18782621, -.01514009, -.01027267, -.03642297,
.11576416, .02573029, -.00766572, .13506498, .08649569, .06942822,
-.10685783, .00007999607
]
cls.results = cls.model.filter(params)
def test_filtered_state(self):
for i in range(12):
assert_allclose(
realgdp_results.iloc[1:]['u%d' % (i+1)],
self.results.filter_results.filtered_state[i],
atol=1e-6
)
def test_standardized_forecasts_error(self):
assert_allclose(
realgdp_results.iloc[1:]['rstd'],
self.results.filter_results.standardized_forecasts_error[0],
atol=1e-3
)
class SARIMAXStataTests(object):
def test_loglike(self):
assert_almost_equal(
self.result.llf,
self.true['loglike'], 4
)
def test_aic(self):
assert_almost_equal(
self.result.aic,
self.true['aic'], 3
)
def test_bic(self):
assert_almost_equal(
self.result.bic,
self.true['bic'], 3
)
def test_hqic(self):
hqic = (
-2*self.result.llf +
2*np.log(np.log(self.result.nobs_effective)) *
self.result.params.shape[0]
)
assert_almost_equal(
self.result.hqic,
hqic, 3
)
def test_standardized_forecasts_error(self):
cython_sfe = self.result.standardized_forecasts_error
self.result._standardized_forecasts_error = None
python_sfe = self.result.standardized_forecasts_error
assert_allclose(cython_sfe, python_sfe)
class ARIMA(SARIMAXStataTests):
"""
ARIMA model
Stata arima documentation, Example 1
"""
@classmethod
def setup_class(cls, true, *args, **kwargs):
cls.true = true
endog = true['data']
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
cls.model = sarimax.SARIMAX(endog, order=(1, 1, 1), trend='c',
*args, **kwargs)
# Stata estimates the mean of the process, whereas SARIMAX estimates
# the intercept of the process. Get the intercept.
intercept = (1 - true['params_ar'][0]) * true['params_mean'][0]
params = np.r_[intercept, true['params_ar'], true['params_ma'],
true['params_variance']]
cls.result = cls.model.filter(params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
class TestARIMAStationary(ARIMA):
"""
Notes
-----
Standard errors are very good for the OPG and complex step approximation
cases.
"""
@classmethod
def setup_class(cls):
super(TestARIMAStationary, cls).setup_class(
results_sarimax.wpi1_stationary
)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[1], self.true['se_ar_opg'], atol=1e-7)
assert_allclose(self.result.bse[2], self.true['se_ma_opg'], atol=1e-7)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-7)
assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-7)
# The below tests pass irregularly; they give a sense of the precision
# available with finite differencing
# finite difference, non-centered
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-2)
# assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-1)
# # finite difference, centered
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-3)
# assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-3)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(oim_bse[1], self.true['se_ar_oim'], atol=1e-3)
assert_allclose(oim_bse[2], self.true['se_ma_oim'], atol=1e-2)
def test_bse_robust(self):
robust_oim_bse = self.result.cov_params_robust_oim.diagonal()**0.5
cpra = self.result.cov_params_robust_approx
robust_approx_bse = cpra.diagonal()**0.5
true_robust_bse = np.r_[
self.true['se_ar_robust'], self.true['se_ma_robust']
]
assert_allclose(robust_oim_bse[1:3], true_robust_bse, atol=1e-2)
assert_allclose(robust_approx_bse[1:3], true_robust_bse, atol=1e-3)
class TestARIMADiffuse(ARIMA):
"""
Notes
-----
Standard errors are very good for the OPG and quite good for the complex
step approximation cases.
"""
@classmethod
def setup_class(cls, **kwargs):
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = (
results_sarimax.wpi1_diffuse['initial_variance']
)
super(TestARIMADiffuse, cls).setup_class(results_sarimax.wpi1_diffuse,
**kwargs)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[1], self.true['se_ar_opg'], atol=1e-7)
assert_allclose(self.result.bse[2], self.true['se_ma_opg'], atol=1e-7)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-4)
assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-4)
# The below tests do not pass
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered : failure
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-4)
# assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-4)
# # finite difference, centered : failure
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-4)
# assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-4)
def test_bse_oim(self):
# OIM covariance type
bse = self.result._cov_params_oim().diagonal()**0.5
assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-2)
assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-1)
class AdditiveSeasonal(SARIMAXStataTests):
"""
ARIMA model with additive seasonal effects
Stata arima documentation, Example 2
"""
@classmethod
def setup_class(cls, true, *args, **kwargs):
cls.true = true
endog = np.log(true['data'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
cls.model = sarimax.SARIMAX(
endog, order=(1, 1, (1, 0, 0, 1)), trend='c', *args, **kwargs
)
# Stata estimates the mean of the process, whereas SARIMAX estimates
# the intercept of the process. Get the intercept.
intercept = (1 - true['params_ar'][0]) * true['params_mean'][0]
params = np.r_[intercept, true['params_ar'], true['params_ma'],
true['params_variance']]
cls.result = cls.model.filter(params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
class TestAdditiveSeasonal(AdditiveSeasonal):
"""
Notes
-----
Standard errors are very good for the OPG and quite good for the complex
step approximation cases.
"""
@classmethod
def setup_class(cls):
super(TestAdditiveSeasonal, cls).setup_class(
results_sarimax.wpi1_seasonal
)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[1], self.true['se_ar_opg'], atol=1e-6)
assert_allclose(self.result.bse[2:4], self.true['se_ma_opg'],
atol=1e-5)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-4)
assert_allclose(bse[2:4], self.true['se_ma_oim'], atol=1e-4)
# The below tests pass irregularly; they give a sense of the precision
# available with finite differencing
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-2)
# assert_allclose(bse[2:4], self.true['se_ma_oim'], atol=1e-2)
# # finite difference, centered
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-3)
# assert_allclose(bse[2:4], self.true['se_ma_oim'], atol=1e-3)
def test_bse_oim(self):
# OIM covariance type
bse = self.result._cov_params_oim().diagonal()**0.5
assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-2)
assert_allclose(bse[2:4], self.true['se_ma_oim'], atol=1e-1)
class Airline(SARIMAXStataTests):
"""
Multiplicative SARIMA model: "Airline" model
Stata arima documentation, Example 3
"""
@classmethod
def setup_class(cls, true, *args, **kwargs):
cls.true = true
endog = np.log(true['data'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
cls.model = sarimax.SARIMAX(
endog, order=(0, 1, 1), seasonal_order=(0, 1, 1, 12),
trend='n', *args, **kwargs
)
params = np.r_[true['params_ma'], true['params_seasonal_ma'],
true['params_variance']]
cls.result = cls.model.filter(params)
def test_mle(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-4
)
class TestAirlineHamilton(Airline):
"""
Notes
-----
Standard errors are very good for the OPG and complex step approximation
cases.
"""
@classmethod
def setup_class(cls):
super(TestAirlineHamilton, cls).setup_class(
results_sarimax.air2_stationary
)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[0], self.true['se_ma_opg'], atol=1e-6)
assert_allclose(self.result.bse[1], self.true['se_seasonal_ma_opg'],
atol=1e-6)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-6)
assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], atol=1e-6)
# The below tests pass irregularly; they give a sense of the precision
# available with finite differencing
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-2)
# assert_allclose(bse[1], self.true['se_seasonal_ma_oim'],
# atol=1e-2)
# # finite difference, centered
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-4)
# assert_allclose(bse[1], self.true['se_seasonal_ma_oim'],
# atol=1e-4)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(oim_bse[0], self.true['se_ma_oim'], atol=1e-1)
assert_allclose(oim_bse[1], self.true['se_seasonal_ma_oim'], atol=1e-1)
class TestAirlineHarvey(Airline):
"""
Notes
-----
Standard errors are very good for the OPG and complex step approximation
cases.
"""
@classmethod
def setup_class(cls):
super(TestAirlineHarvey, cls).setup_class(
results_sarimax.air2_stationary, hamilton_representation=False
)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[0], self.true['se_ma_opg'], atol=1e-6)
assert_allclose(self.result.bse[1], self.true['se_seasonal_ma_opg'],
atol=1e-6)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-6)
assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], atol=1e-6)
# The below tests pass irregularly; they give a sense of the precision
# available with finite differencing
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-2)
# assert_allclose(bse[1], self.true['se_seasonal_ma_oim'],
# atol=1e-2)
# # finite difference, centered
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-4)
# assert_allclose(bse[1], self.true['se_seasonal_ma_oim'],
# atol=1e-4)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(oim_bse[0], self.true['se_ma_oim'], atol=1e-1)
assert_allclose(oim_bse[1], self.true['se_seasonal_ma_oim'], atol=1e-1)
class TestAirlineStateDifferencing(Airline):
"""
Notes
-----
Standard errors are very good for the OPG and quite good for the complex
step approximation cases.
"""
@classmethod
def setup_class(cls):
super(TestAirlineStateDifferencing, cls).setup_class(
results_sarimax.air2_stationary, simple_differencing=False,
hamilton_representation=False
)
def test_bic(self):
# Due to diffuse component of the state (which technically changes the
# BIC calculation - see Durbin and Koopman section 7.4), this is the
# best we can do for BIC
assert_almost_equal(
self.result.bic,
self.true['bic'], 0
)
def test_mle(self):
result = self.model.fit(method='nm', maxiter=1000, disp=0)
assert_allclose(
result.params, self.result.params,
atol=1e-3)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[0], self.true['se_ma_opg'], atol=1e-6)
assert_allclose(self.result.bse[1], self.true['se_seasonal_ma_opg'],
atol=1e-6)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-4)
assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], atol=1e-4)
# The below tests do not pass
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered : failure with NaNs
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-2)
# assert_allclose(bse[1], self.true['se_seasonal_ma_oim'],
# atol=1e-2)
# # finite difference, centered : failure with NaNs
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-4)
# assert_allclose(bse[1], self.true['se_seasonal_ma_oim'],
# atol=1e-4)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(oim_bse[0], self.true['se_ma_oim'], atol=1e-1)
assert_allclose(oim_bse[1], self.true['se_seasonal_ma_oim'], atol=1e-1)
class Friedman(SARIMAXStataTests):
"""
ARMAX model: Friedman quantity theory of money
Stata arima documentation, Example 4
"""
@classmethod
def setup_class(cls, true, exog=None, *args, **kwargs):
cls.true = true
endog = np.r_[true['data']['consump']]
if exog is None:
exog = add_constant(true['data']['m2'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
cls.model = sarimax.SARIMAX(
endog, exog=exog, order=(1, 0, 1), *args, **kwargs
)
params = np.r_[true['params_exog'], true['params_ar'],
true['params_ma'], true['params_variance']]
cls.result = cls.model.filter(params)
class TestFriedmanMLERegression(Friedman):
"""
Notes
-----
Standard errors are very good for the OPG and complex step approximation
cases.
"""
@classmethod
def setup_class(cls):
super(TestFriedmanMLERegression, cls).setup_class(
results_sarimax.friedman2_mle
)
def test_mle(self):
result = self.model.fit(disp=-1)
# Use ratio to make atol more meaningful parameter scale differs
ratio = result.params / self.result.params
assert_allclose(ratio, np.ones(5), atol=1e-2, rtol=1e-3)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[0:2], self.true['se_exog_opg'],
atol=1e-4)
assert_allclose(self.result.bse[2], self.true['se_ar_opg'], atol=1e-6)
assert_allclose(self.result.bse[3], self.true['se_ma_opg'], atol=1e-6)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[0:2], self.true['se_exog_oim'], atol=1e-4)
assert_allclose(bse[2], self.true['se_ar_oim'], atol=1e-6)
assert_allclose(bse[3], self.true['se_ma_oim'], atol=1e-6)
# The below tests pass irregularly; they give a sense of the precision
# available with finite differencing
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[0], self.true['se_exog_oim'][0], rtol=1)
# assert_allclose(bse[1], self.true['se_exog_oim'][1], atol=1e-2)
# assert_allclose(bse[2], self.true['se_ar_oim'], atol=1e-2)
# assert_allclose(bse[3], self.true['se_ma_oim'], atol=1e-2)
# # finite difference, centered
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[0], self.true['se_exog_oim'][0], rtol=1)
# assert_allclose(bse[1], self.true['se_exog_oim'][1], atol=1e-2)
# assert_allclose(bse[2], self.true['se_ar_oim'], atol=1e-2)
# assert_allclose(bse[3], self.true['se_ma_oim'], atol=1e-2)
def test_bse_oim(self):
# OIM covariance type
bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(bse[0], self.true['se_exog_oim'][0], rtol=1)
assert_allclose(bse[1], self.true['se_exog_oim'][1], atol=1e-2)
assert_allclose(bse[2], self.true['se_ar_oim'], atol=1e-2)
assert_allclose(bse[3], self.true['se_ma_oim'], atol=1e-2)
class TestFriedmanStateRegression(Friedman):
"""
Notes
-----
MLE is not very close and standard errors are not very close for any set of
parameters.
This is likely because we're comparing against the model where the
regression coefficients are also estimated by MLE. So this test should be
considered just a very basic "sanity" test.
"""
@classmethod
def setup_class(cls):
# Remove the regression coefficients from the parameters, since they
# will be estimated as part of the state vector
true = dict(results_sarimax.friedman2_mle)
exog = add_constant(true['data']['m2']) / 10.
true['mle_params_exog'] = true['params_exog'][:]
true['mle_se_exog'] = true['se_exog_opg'][:]
true['params_exog'] = []
true['se_exog'] = []
super(TestFriedmanStateRegression, cls).setup_class(
true, exog=exog, mle_regression=False
)
cls.true_params = np.r_[true['params_exog'], true['params_ar'],
true['params_ma'], true['params_variance']]
cls.result = cls.model.filter(cls.true_params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-1, rtol=2e-1
)
def test_regression_parameters(self):
# The regression effects are integrated into the state vector as
# the last two states (thus the index [-2:]). The filtered
# estimates of the state vector produced by the Kalman filter and
# stored in `filtered_state` for these state elements give the
# recursive least squares estimates of the regression coefficients
# at each time period. To get the estimates conditional on the
# entire dataset, use the filtered states from the last time
# period (thus the index [-1]).
assert_almost_equal(
self.result.filter_results.filtered_state[-2:, -1] / 10.,
self.true['mle_params_exog'], 1
)
# Loglikelihood (and so aic, bic) is slightly different when states are
# integrated into the state vector
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[0], self.true['se_ar_opg'], atol=1e-2)
assert_allclose(self.result.bse[1], self.true['se_ma_opg'], atol=1e-2)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[0], self.true['se_ar_oim'], atol=1e-1)
assert_allclose(bse[1], self.true['se_ma_oim'], atol=1e-1)
# The below tests do not pass
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered :
# # failure (catastrophic cancellation)
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ar_oim'], atol=1e-3)
# assert_allclose(bse[1], self.true['se_ma_oim'], atol=1e-2)
# # finite difference, centered : failure (nan)
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ar_oim'], atol=1e-3)
# assert_allclose(bse[1], self.true['se_ma_oim'], atol=1e-3)
def test_bse_oim(self):
# OIM covariance type
bse = self.result._cov_params_oim().diagonal()**0.5
assert_allclose(bse[0], self.true['se_ar_oim'], atol=1e-1)
assert_allclose(bse[1], self.true['se_ma_oim'], atol=1e-1)
class TestFriedmanPredict(Friedman):
"""
ARMAX model: Friedman quantity theory of money, prediction
Stata arima postestimation documentation, Example 1 - Dynamic forecasts
This follows the given Stata example, although it is not truly forecasting
because it compares using the actual data (which is available in the
example but just not used in the parameter MLE estimation) against dynamic
prediction of that data. Here `test_predict` matches the first case, and
`test_dynamic_predict` matches the second.
"""
@classmethod
def setup_class(cls):
super(TestFriedmanPredict, cls).setup_class(
results_sarimax.friedman2_predict
)
# loglike, aic, bic are not the point of this test (they could pass, but we
# would have to modify the data so that they were calculated to
# exclude the last 15 observations)
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_predict(self):
assert_almost_equal(
self.result.predict(),
self.true['predict'], 3
)
def test_dynamic_predict(self):
dynamic = len(self.true['data']['consump'])-15-1
assert_almost_equal(
self.result.predict(dynamic=dynamic),
self.true['dynamic_predict'], 3
)
class TestFriedmanForecast(Friedman):
"""
ARMAX model: Friedman quantity theory of money, forecasts
Variation on:
Stata arima postestimation documentation, Example 1 - Dynamic forecasts
This is a variation of the Stata example, in which the endogenous data is
actually made to be missing so that the predict command must forecast.
As another unit test, we also compare against the case in State when
predict is used against missing data (so forecasting) with the dynamic
option also included. Note, however, that forecasting in State space models
amounts to running the Kalman filter against missing datapoints, so it is
not clear whether "dynamic" forecasting (where instead of missing
datapoints for lags, we plug in previous forecasted endog values) is
meaningful.
"""
@classmethod
def setup_class(cls):
true = dict(results_sarimax.friedman2_predict)
true['forecast_data'] = {
'consump': true['data']['consump'][-15:],
'm2': true['data']['m2'][-15:]
}
true['data'] = {
'consump': true['data']['consump'][:-15],
'm2': true['data']['m2'][:-15]
}
super(TestFriedmanForecast, cls).setup_class(true)
cls.result = cls.model.filter(cls.result.params)
# loglike, aic, bic are not the point of this test (they could pass, but we
# would have to modify the data so that they were calculated to
# exclude the last 15 observations)
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_forecast(self):
end = len(self.true['data']['consump'])+15-1
exog = add_constant(self.true['forecast_data']['m2'])
assert_almost_equal(
self.result.predict(end=end, exog=exog),
self.true['forecast'], 3
)
def test_dynamic_forecast(self):
end = len(self.true['data']['consump'])+15-1
dynamic = len(self.true['data']['consump'])-1
exog = add_constant(self.true['forecast_data']['m2'])
assert_almost_equal(
self.result.predict(end=end, dynamic=dynamic, exog=exog),
self.true['dynamic_forecast'], 3
)
class SARIMAXCoverageTest(object):
@classmethod
def setup_class(cls, i, decimal=4, endog=None, *args, **kwargs):
# Dataset
if endog is None:
endog = results_sarimax.wpi1_data
# Loglikelihood, parameters
cls.true_loglike = coverage_results.loc[i]['llf']
cls.true_params = np.array([
float(x) for x in coverage_results.loc[i]['parameters'].split(',')]
)
# Stata reports the standard deviation; make it the variance
cls.true_params[-1] = cls.true_params[-1]**2
# Test parameters
cls.decimal = decimal
# Compare using the Hamilton representation and simple differencing
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
cls.model = sarimax.SARIMAX(endog, *args, **kwargs)
def test_loglike(self):
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.llf,
self.true_loglike,
atol=0.7 * 10**(-self.decimal)
)
def test_start_params(self):
# just a quick test that start_params is not throwing an exception
# (other than related to invertibility)
stat = self.model.enforce_stationarity
inv = self.model.enforce_invertibility
self.model.enforce_stationarity = False
self.model.enforce_invertibility = False
self.model.start_params
self.model.enforce_stationarity = stat
self.model.enforce_invertibility = inv
def test_transform_untransform(self):
model = self.model
stat, inv = model.enforce_stationarity, model.enforce_invertibility
true_constrained = self.true_params
# Sometimes the parameters given by Stata are not stationary and / or
# invertible, so we need to skip those transformations for those
# parameter sets
model.update(self.true_params)
par = model.polynomial_ar
psar = model.polynomial_seasonal_ar
contracted_psar = psar[psar.nonzero()]
model.enforce_stationarity = (
(model.k_ar == 0 or tools.is_invertible(np.r_[1, -par[1:]])) and
(len(contracted_psar) <= 1 or
tools.is_invertible(np.r_[1, -contracted_psar[1:]]))
)
pma = model.polynomial_ma
psma = model.polynomial_seasonal_ma
contracted_psma = psma[psma.nonzero()]
model.enforce_invertibility = (
(model.k_ma == 0 or tools.is_invertible(np.r_[1, pma[1:]])) and
(len(contracted_psma) <= 1 or
tools.is_invertible(np.r_[1, contracted_psma[1:]]))
)
unconstrained = model.untransform_params(true_constrained)
constrained = model.transform_params(unconstrained)
assert_almost_equal(constrained, true_constrained, 4)
model.enforce_stationarity = stat
model.enforce_invertibility = inv
def test_results(self):
self.result = self.model.filter(self.true_params)
# Just make sure that no exceptions are thrown during summary
self.result.summary()
# Make sure no expections are thrown calculating any of the
# covariance matrix types
self.result.cov_params_default
self.result.cov_params_approx
self.result.cov_params_oim
self.result.cov_params_opg
self.result.cov_params_robust_oim
self.result.cov_params_robust_approx
@pytest.mark.matplotlib
def test_plot_diagnostics(self, close_figures):
# Make sure that no exceptions are thrown during plot_diagnostics
self.result = self.model.filter(self.true_params)
self.result.plot_diagnostics()
def test_predict(self):
result = self.model.filter(self.true_params)
# Test predict does not throw exceptions, and produces the right shaped
# output
predict = result.predict()
assert_equal(predict.shape, (self.model.nobs,))
predict = result.predict(start=10, end=20)
assert_equal(predict.shape, (11,))
predict = result.predict(start=10, end=20, dynamic=10)
assert_equal(predict.shape, (11,))
# Test forecasts
if self.model.k_exog == 0:
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10)
assert_equal(predict.shape, (11,))
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10)
forecast = result.forecast()
assert_equal(forecast.shape, (1,))
forecast = result.forecast(10)
assert_equal(forecast.shape, (10,))
else:
k_exog = self.model.k_exog
exog = np.r_[[0]*k_exog*11].reshape(11, k_exog)
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10,
exog=exog)
assert_equal(predict.shape, (11,))
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10,
exog=exog)
exog = np.r_[[0]*k_exog].reshape(1, k_exog)
forecast = result.forecast(exog=exog)
assert_equal(forecast.shape, (1,))
def test_init_keys_replicate(self):
mod1 = self.model
kwargs = self.model._get_init_kwds()
endog = mod1.data.orig_endog
exog = mod1.data.orig_exog
model2 = sarimax.SARIMAX(endog, exog, **kwargs)
res1 = self.model.filter(self.true_params)
res2 = model2.filter(self.true_params)
rtol = 1e-6 if PLATFORM_WIN else 1e-13
assert_allclose(res2.llf, res1.llf, rtol=rtol)
class Test_ar(SARIMAXCoverageTest):
# // AR: (p, 0, 0) x (0, 0, 0, 0)
# arima wpi, arima(3, 0, 0) noconstant vce(oim)
# save_results 1
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
super(Test_ar, cls).setup_class(0, *args, **kwargs)
class Test_ar_as_polynomial(SARIMAXCoverageTest):
# // AR: (p, 0, 0) x (0, 0, 0, 0)
# arima wpi, arima(3, 0, 0) noconstant vce(oim)
# save_results 1
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = ([1, 1, 1], 0, 0)
super(Test_ar_as_polynomial, cls).setup_class(0, *args, **kwargs)
class Test_ar_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(3, 0, 0) noconstant vce(oim)
# save_results 2
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
kwargs['trend'] = 'c'
super(Test_ar_trend_c, cls).setup_class(1, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[0] = (1 - tps[1:4].sum()) * tps[0]
class Test_ar_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(3, 0, 0) noconstant vce(oim)
# save_results 3
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
kwargs['trend'] = 'ct'
super(Test_ar_trend_ct, cls).setup_class(2, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_ar_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1, 0, 0, 1]
# arima wpi c t3, arima(3, 0, 0) noconstant vce(oim)
# save_results 4
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
kwargs['trend'] = [1, 0, 0, 1]
super(Test_ar_trend_polynomial, cls).setup_class(3, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_ar_diff(SARIMAXCoverageTest):
# // AR and I(d): (p, d, 0) x (0, 0, 0, 0)
# arima wpi, arima(3, 2, 0) noconstant vce(oim)
# save_results 5
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 2, 0)
super(Test_ar_diff, cls).setup_class(4, *args, **kwargs)
class Test_ar_seasonal_diff(SARIMAXCoverageTest):
# // AR and I(D): (p, 0, 0) x (0, D, 0, s)
# arima wpi, arima(3, 0, 0) sarima(0, 2, 0, 4) noconstant vce(oim)
# save_results 6
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
kwargs['seasonal_order'] = (0, 2, 0, 4)
super(Test_ar_seasonal_diff, cls).setup_class(5, *args, **kwargs)
class Test_ar_diffuse(SARIMAXCoverageTest):
# // AR and diffuse initialization
# arima wpi, arima(3, 0, 0) noconstant vce(oim) diffuse
# save_results 7
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_ar_diffuse, cls).setup_class(6, *args, **kwargs)
class Test_ar_no_enforce(SARIMAXCoverageTest):
# // AR: (p, 0, 0) x (0, 0, 0, 0)
# arima wpi, arima(3, 0, 0) noconstant vce(oim)
# save_results 1
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
kwargs['enforce_stationarity'] = False
kwargs['enforce_invertibility'] = False
kwargs['initial_variance'] = 1e9
kwargs['loglikelihood_burn'] = 0
super(Test_ar_no_enforce, cls).setup_class(6, *args, **kwargs)
# Reset loglikelihood burn, which gets automatically set to the number
# of states if enforce_stationarity = False
cls.model.ssm.loglikelihood_burn = 0
def test_init_keys_replicate(self):
mod1 = self.model
kwargs = self.model._get_init_kwds()
endog = mod1.data.orig_endog
exog = mod1.data.orig_exog
model2 = sarimax.SARIMAX(endog, exog, **kwargs)
# Fixes needed for edge case model
model2.ssm.initialization = mod1.ssm.initialization
res1 = self.model.filter(self.true_params)
res2 = model2.filter(self.true_params)
rtol = 1e-6 if PLATFORM_WIN else 1e-13
assert_allclose(res2.llf, res1.llf, rtol=rtol)
class Test_ar_exogenous(SARIMAXCoverageTest):
# // ARX
# arima wpi x, arima(3, 0, 0) noconstant vce(oim)
# save_results 8
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_ar_exogenous, cls).setup_class(7, *args, **kwargs)
class Test_ar_exogenous_in_state(SARIMAXCoverageTest):
# // ARX
# arima wpi x, arima(3, 0, 0) noconstant vce(oim)
# save_results 8
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['mle_regression'] = False
super(Test_ar_exogenous_in_state, cls).setup_class(7, *args, **kwargs)
cls.true_regression_coefficient = cls.true_params[0]
cls.true_params = cls.true_params[1:]
def test_loglike(self):
# Regression in the state vector gives a different loglikelihood, so
# just check that it's approximately the same
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.llf,
self.true_loglike,
atol=2
)
def test_regression_coefficient(self):
# Test that the regression coefficient (estimated as the last filtered
# state estimate for the regression state) is the same as the Stata
# MLE state
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.filter_results.filtered_state[3][-1],
self.true_regression_coefficient,
self.decimal
)
class Test_ma(SARIMAXCoverageTest):
# // MA: (0, 0, q) x (0, 0, 0, 0)
# arima wpi, arima(0, 0, 3) noconstant vce(oim)
# save_results 9
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
super(Test_ma, cls).setup_class(8, *args, **kwargs)
class Test_ma_as_polynomial(SARIMAXCoverageTest):
# // MA: (0, 0, q) x (0, 0, 0, 0)
# arima wpi, arima(0, 0, 3) noconstant vce(oim)
# save_results 9
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, [1, 1, 1])
super(Test_ma_as_polynomial, cls).setup_class(8, *args, **kwargs)
class Test_ma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(0, 0, 3) noconstant vce(oim)
# save_results 10
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
kwargs['trend'] = 'c'
super(Test_ma_trend_c, cls).setup_class(9, *args, **kwargs)
class Test_ma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(0, 0, 3) noconstant vce(oim)
# save_results 11
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
kwargs['trend'] = 'ct'
super(Test_ma_trend_ct, cls).setup_class(10, *args, **kwargs)
class Test_ma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1, 0, 0, 1]
# arima wpi c t3, arima(0, 0, 3) noconstant vce(oim)
# save_results 12
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
kwargs['trend'] = [1, 0, 0, 1]
super(Test_ma_trend_polynomial, cls).setup_class(11, *args, **kwargs)
class Test_ma_diff(SARIMAXCoverageTest):
# // MA and I(d): (0, d, q) x (0, 0, 0, 0)
# arima wpi, arima(0, 2, 3) noconstant vce(oim)
# save_results 13
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 2, 3)
super(Test_ma_diff, cls).setup_class(12, *args, **kwargs)
class Test_ma_seasonal_diff(SARIMAXCoverageTest):
# // MA and I(D): (p, 0, 0) x (0, D, 0, s)
# arima wpi, arima(0, 0, 3) sarima(0, 2, 0, 4) noconstant vce(oim)
# save_results 14
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
kwargs['seasonal_order'] = (0, 2, 0, 4)
super(Test_ma_seasonal_diff, cls).setup_class(13, *args, **kwargs)
class Test_ma_diffuse(SARIMAXCoverageTest):
# // MA and diffuse initialization
# arima wpi, arima(0, 0, 3) noconstant vce(oim) diffuse
# save_results 15
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_ma_diffuse, cls).setup_class(14, *args, **kwargs)
class Test_ma_exogenous(SARIMAXCoverageTest):
# // MAX
# arima wpi x, arima(0, 0, 3) noconstant vce(oim)
# save_results 16
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_ma_exogenous, cls).setup_class(15, *args, **kwargs)
class Test_arma(SARIMAXCoverageTest):
# // ARMA: (p, 0, q) x (0, 0, 0, 0)
# arima wpi, arima(3, 0, 3) noconstant vce(oim)
# save_results 17
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 3)
super(Test_arma, cls).setup_class(16, *args, **kwargs)
class Test_arma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(3, 0, 2) noconstant vce(oim)
# save_results 18
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 2)
kwargs['trend'] = 'c'
super(Test_arma_trend_c, cls).setup_class(17, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:1] = (1 - tps[1:4].sum()) * tps[:1]
class Test_arma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(3, 0, 2) noconstant vce(oim)
# save_results 19
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 2)
kwargs['trend'] = 'ct'
super(Test_arma_trend_ct, cls).setup_class(18, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_arma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1, 0, 0, 1]
# arima wpi c t3, arima(3, 0, 2) noconstant vce(oim)
# save_results 20
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 2)
kwargs['trend'] = [1, 0, 0, 1]
super(Test_arma_trend_polynomial, cls).setup_class(19, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_arma_diff(SARIMAXCoverageTest):
# // ARMA and I(d): (p, d, q) x (0, 0, 0, 0)
# arima wpi, arima(3, 2, 2) noconstant vce(oim)
# save_results 21
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 2, 2)
super(Test_arma_diff, cls).setup_class(20, *args, **kwargs)
class Test_arma_seasonal_diff(SARIMAXCoverageTest):
# // ARMA and I(D): (p, 0, q) x (0, D, 0, s)
# arima wpi, arima(3, 0, 2) sarima(0, 2, 0, 4) noconstant vce(oim)
# save_results 22
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 2)
kwargs['seasonal_order'] = (0, 2, 0, 4)
super(Test_arma_seasonal_diff, cls).setup_class(21, *args, **kwargs)
class Test_arma_diff_seasonal_diff(SARIMAXCoverageTest):
# // ARMA and I(d) and I(D): (p, d, q) x (0, D, 0, s)
# arima wpi, arima(3, 2, 2) sarima(0, 2, 0, 4) noconstant vce(oim)
# save_results 23
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 2, 2)
kwargs['seasonal_order'] = (0, 2, 0, 4)
super(Test_arma_diff_seasonal_diff, cls).setup_class(
22, *args, **kwargs)
class Test_arma_diffuse(SARIMAXCoverageTest):
# // ARMA and diffuse initialization
# arima wpi, arima(3, 0, 2) noconstant vce(oim) diffuse
# save_results 24
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 2)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_arma_diffuse, cls).setup_class(23, *args, **kwargs)
class Test_arma_exogenous(SARIMAXCoverageTest):
# // ARMAX
# arima wpi x, arima(3, 0, 2) noconstant vce(oim)
# save_results 25
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 2)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_arma_exogenous, cls).setup_class(24, *args, **kwargs)
class Test_seasonal_ar(SARIMAXCoverageTest):
# // SAR: (0, 0, 0) x (P, 0, 0, s)
# arima wpi, sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 26
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
super(Test_seasonal_ar, cls).setup_class(25, *args, **kwargs)
class Test_seasonal_ar_as_polynomial(SARIMAXCoverageTest):
# // SAR: (0, 0, 0) x (P, 0, 0, s)
# arima wpi, sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 26
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = ([1, 1, 1], 0, 0, 4)
super(Test_seasonal_ar_as_polynomial, cls).setup_class(
25, *args, **kwargs)
class Test_seasonal_ar_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 27
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
kwargs['trend'] = 'c'
super(Test_seasonal_ar_trend_c, cls).setup_class(26, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:1] = (1 - tps[1:4].sum()) * tps[:1]
class Test_seasonal_ar_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 28
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
kwargs['trend'] = 'ct'
super(Test_seasonal_ar_trend_ct, cls).setup_class(27, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_seasonal_ar_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1, 0, 0, 1]
# arima wpi c t3, sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 29
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
kwargs['trend'] = [1, 0, 0, 1]
super(Test_seasonal_ar_trend_polynomial, cls).setup_class(
28, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_seasonal_ar_diff(SARIMAXCoverageTest):
# // SAR and I(d): (0, d, 0) x (P, 0, 0, s)
# arima wpi, arima(0, 2, 0) sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 30
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 2, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
super(Test_seasonal_ar_diff, cls).setup_class(29, *args, **kwargs)
class Test_seasonal_ar_seasonal_diff(SARIMAXCoverageTest):
# // SAR and I(D): (0, 0, 0) x (P, D, 0, s)
# arima wpi, sarima(3, 2, 0, 4) noconstant vce(oim)
# save_results 31
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 2, 0, 4)
super(Test_seasonal_ar_seasonal_diff, cls).setup_class(
30, *args, **kwargs)
class Test_seasonal_ar_diffuse(SARIMAXCoverageTest):
# // SAR and diffuse initialization
# arima wpi, sarima(3, 0, 0, 4) noconstant vce(oim) diffuse
# save_results 32
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_seasonal_ar_diffuse, cls).setup_class(31, *args, **kwargs)
class Test_seasonal_ar_exogenous(SARIMAXCoverageTest):
# // SARX
# arima wpi x, sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 33
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_ar_exogenous, cls).setup_class(32, *args, **kwargs)
class Test_seasonal_ma(SARIMAXCoverageTest):
# // SMA
# arima wpi, sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 34
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
super(Test_seasonal_ma, cls).setup_class(33, *args, **kwargs)
class Test_seasonal_ma_as_polynomial(SARIMAXCoverageTest):
# // SMA
# arima wpi, sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 34
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, [1, 1, 1], 4)
super(Test_seasonal_ma_as_polynomial, cls).setup_class(
33, *args, **kwargs)
class Test_seasonal_ma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 35
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
kwargs['trend'] = 'c'
kwargs['decimal'] = 3
super(Test_seasonal_ma_trend_c, cls).setup_class(34, *args, **kwargs)
class Test_seasonal_ma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 36
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
kwargs['trend'] = 'ct'
super(Test_seasonal_ma_trend_ct, cls).setup_class(35, *args, **kwargs)
class Test_seasonal_ma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1, 0, 0, 1]
# arima wpi c t3, sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 37
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
kwargs['trend'] = [1, 0, 0, 1]
kwargs['decimal'] = 3
super(Test_seasonal_ma_trend_polynomial, cls).setup_class(
36, *args, **kwargs)
class Test_seasonal_ma_diff(SARIMAXCoverageTest):
# // SMA and I(d): (0, d, 0) x (0, 0, Q, s)
# arima wpi, arima(0, 2, 0) sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 38
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 2, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
super(Test_seasonal_ma_diff, cls).setup_class(37, *args, **kwargs)
class Test_seasonal_ma_seasonal_diff(SARIMAXCoverageTest):
# // SMA and I(D): (0, 0, 0) x (0, D, Q, s)
# arima wpi, sarima(0, 2, 3, 4) noconstant vce(oim)
# save_results 39
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 2, 3, 4)
super(Test_seasonal_ma_seasonal_diff, cls).setup_class(
38, *args, **kwargs)
class Test_seasonal_ma_diffuse(SARIMAXCoverageTest):
# // SMA and diffuse initialization
# arima wpi, sarima(0, 0, 3, 4) noconstant vce(oim) diffuse
# save_results 40
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_seasonal_ma_diffuse, cls).setup_class(39, *args, **kwargs)
class Test_seasonal_ma_exogenous(SARIMAXCoverageTest):
# // SMAX
# arima wpi x, sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 41
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_ma_exogenous, cls).setup_class(40, *args, **kwargs)
class Test_seasonal_arma(SARIMAXCoverageTest):
# // SARMA: (0, 0, 0) x (P, 0, Q, s)
# arima wpi, sarima(3, 0, 2, 4) noconstant vce(oim)
# save_results 42
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
super(Test_seasonal_arma, cls).setup_class(41, *args, **kwargs)
class Test_seasonal_arma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(3, 0, 2, 4) noconstant vce(oim)
# save_results 43
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
kwargs['trend'] = 'c'
super(Test_seasonal_arma_trend_c, cls).setup_class(42, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:1] = (1 - tps[1:4].sum()) * tps[:1]
class Test_seasonal_arma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(3, 0, 2, 4) noconstant vce(oim)
# save_results 44
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
kwargs['trend'] = 'ct'
super(Test_seasonal_arma_trend_ct, cls).setup_class(
43, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_seasonal_arma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1, 0, 0, 1]
# arima wpi c t3, sarima(3, 0, 2, 4) noconstant vce(oim)
# save_results 45
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
kwargs['trend'] = [1, 0, 0, 1]
kwargs['decimal'] = 3
super(Test_seasonal_arma_trend_polynomial, cls).setup_class(
44, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
def test_results(self):
self.result = self.model.filter(self.true_params)
# Just make sure that no exceptions are thrown during summary
self.result.summary()
# Make sure no expections are thrown calculating any of the
# covariance matrix types
self.result.cov_params_default
# Known failure due to the complex step inducing non-stationary
# parameters, causing a failure in the solve_discrete_lyapunov call
# self.result.cov_params_approx
self.result.cov_params_oim
self.result.cov_params_opg
class Test_seasonal_arma_diff(SARIMAXCoverageTest):
# // SARMA and I(d): (0, d, 0) x (P, 0, Q, s)
# arima wpi, arima(0, 2, 0) sarima(3, 0, 2, 4) noconstant vce(oim)
# save_results 46
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 2, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
super(Test_seasonal_arma_diff, cls).setup_class(45, *args, **kwargs)
class Test_seasonal_arma_seasonal_diff(SARIMAXCoverageTest):
# // SARMA and I(D): (0, 0, 0) x (P, D, Q, s)
# arima wpi, sarima(3, 2, 2, 4) noconstant vce(oim)
# save_results 47
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 2, 2, 4)
super(Test_seasonal_arma_seasonal_diff, cls).setup_class(
46, *args, **kwargs)
class Test_seasonal_arma_diff_seasonal_diff(SARIMAXCoverageTest):
# // SARMA and I(d) and I(D): (0, d, 0) x (P, D, Q, s)
# arima wpi, arima(0, 2, 0) sarima(3, 2, 2, 4) noconstant vce(oim)
# save_results 48
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 2, 0)
kwargs['seasonal_order'] = (3, 2, 2, 4)
super(Test_seasonal_arma_diff_seasonal_diff, cls).setup_class(
47, *args, **kwargs)
def test_results(self):
self.result = self.model.filter(self.true_params)
# Just make sure that no exceptions are thrown during summary
self.result.summary()
# Make sure no expections are thrown calculating any of the
# covariance matrix types
self.result.cov_params_default
# Known failure due to the complex step inducing non-stationary
# parameters, causing a failure in the solve_discrete_lyapunov call
# self.result.cov_params_approx
self.result.cov_params_oim
self.result.cov_params_opg
class Test_seasonal_arma_diffuse(SARIMAXCoverageTest):
# // SARMA and diffuse initialization
# arima wpi, sarima(3, 0, 2, 4) noconstant vce(oim) diffuse
# save_results 49
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
kwargs['decimal'] = 3
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_seasonal_arma_diffuse, cls).setup_class(48, *args, **kwargs)
class Test_seasonal_arma_exogenous(SARIMAXCoverageTest):
# // SARMAX
# arima wpi x, sarima(3, 0, 2, 4) noconstant vce(oim)
# save_results 50
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_arma_exogenous, cls).setup_class(
49, *args, **kwargs)
class Test_sarimax_exogenous(SARIMAXCoverageTest):
# // SARIMAX and exogenous
# arima wpi x, arima(3, 2, 2) sarima(3, 2, 2, 4) noconstant vce(oim)
# save_results 51
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 2, 2)
kwargs['seasonal_order'] = (3, 2, 2, 4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_sarimax_exogenous, cls).setup_class(50, *args, **kwargs)
def test_results_params(self):
result = self.model.filter(self.true_params)
assert_allclose(self.true_params[1:4], result.arparams)
assert_allclose(self.true_params[4:6], result.maparams)
assert_allclose(self.true_params[6:9], result.seasonalarparams)
assert_allclose(self.true_params[9:11], result.seasonalmaparams)
class Test_sarimax_exogenous_not_hamilton(SARIMAXCoverageTest):
# // SARIMAX and exogenous
# arima wpi x, arima(3, 2, 2) sarima(3, 2, 2, 4) noconstant vce(oim)
# save_results 51
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 2, 2)
kwargs['seasonal_order'] = (3, 2, 2, 4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['hamilton_representation'] = False
kwargs['simple_differencing'] = False
super(Test_sarimax_exogenous_not_hamilton, cls).setup_class(
50, *args, **kwargs)
class Test_sarimax_exogenous_diffuse(SARIMAXCoverageTest):
# // SARIMAX and exogenous diffuse
# arima wpi x, arima(3, 2, 2) sarima(3, 2, 2, 4) noconstant vce(oim)
# diffuse
# save_results 52
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 2, 2)
kwargs['seasonal_order'] = (3, 2, 2, 4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['decimal'] = 2
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_sarimax_exogenous_diffuse, cls).setup_class(
51, *args, **kwargs)
class Test_arma_exog_trend_polynomial_missing(SARIMAXCoverageTest):
# // ARMA and exogenous and trend polynomial and missing
# gen wpi2 = wpi
# replace wpi2 = . in 10/19
# arima wpi2 x c t3, arima(3, 0, 2) noconstant vce(oim)
# save_results 53
@classmethod
def setup_class(cls, *args, **kwargs):
endog = np.r_[results_sarimax.wpi1_data]
# Note we're using the non-missing exog data
kwargs['exog'] = ((endog - np.floor(endog))**2)[1:]
endog[9:19] = np.nan
endog = endog[1:] - endog[:-1]
endog[9] = np.nan
kwargs['order'] = (3, 0, 2)
kwargs['trend'] = [0, 0, 0, 1]
kwargs['decimal'] = 1
super(Test_arma_exog_trend_polynomial_missing, cls).setup_class(
52, endog=endog, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[0] = (1 - tps[2:5].sum()) * tps[0]
# Miscellaneous coverage tests
def test_simple_time_varying():
# This tests time-varying parameters regression when in fact the parameters
# are not time-varying, and in fact the regression fit is perfect
endog = np.arange(100)*1.0
exog = 2*endog
mod = sarimax.SARIMAX(
endog,
exog=exog,
order=(0, 0, 0),
time_varying_regression=True,
mle_regression=False)
# Ignore the warning that MLE does not converge
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(disp=-1)
# Test that the estimated variances of the errors are essentially zero
# 5 digits necessary to accommodate 32-bit numpy/scipy with OpenBLAS 0.2.18
assert_almost_equal(res.params, [0, 0], 5)
# Test that the time-varying coefficients are all 0.5 (except the first
# one)
assert_almost_equal(res.filter_results.filtered_state[0][1:], [0.5]*99, 9)
def test_invalid_time_varying():
assert_raises(
ValueError,
sarimax.SARIMAX,
endog=[1, 2, 3],
mle_regression=True,
time_varying_regression=True)
def test_manual_stationary_initialization():
endog = results_sarimax.wpi1_data
# Create the first model to compare against
mod1 = sarimax.SARIMAX(endog, order=(3, 0, 0))
res1 = mod1.filter([0.5, 0.2, 0.1, 1])
# Create a second model with "known" initialization
mod2 = sarimax.SARIMAX(endog, order=(3, 0, 0))
mod2.ssm.initialize_known(res1.filter_results.initial_state,
res1.filter_results.initial_state_cov)
res2 = mod2.filter([0.5, 0.2, 0.1, 1])
# Create a third model with "known" initialization, but specified in kwargs
mod3 = sarimax.SARIMAX(
endog, order=(3, 0, 0),
initialization='known',
initial_state=res1.filter_results.initial_state,
initial_state_cov=res1.filter_results.initial_state_cov)
res3 = mod3.filter([0.5, 0.2, 0.1, 1])
# Create the forth model with stationary initialization specified in kwargs
mod4 = sarimax.SARIMAX(endog, order=(3, 0, 0), initialization='stationary')
res4 = mod4.filter([0.5, 0.2, 0.1, 1])
# Just test a couple of things to make sure the results are the same
assert_almost_equal(res1.llf, res2.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res2.filter_results.filtered_state)
assert_almost_equal(res1.llf, res3.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res3.filter_results.filtered_state)
assert_almost_equal(res1.llf, res4.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res4.filter_results.filtered_state)
def test_manual_approximate_diffuse_initialization():
endog = results_sarimax.wpi1_data
# Create the first model to compare against
mod1 = sarimax.SARIMAX(endog, order=(3, 0, 0))
mod1.ssm.initialize_approximate_diffuse(1e9)
res1 = mod1.filter([0.5, 0.2, 0.1, 1])
# Create a second model with "known" initialization
mod2 = sarimax.SARIMAX(endog, order=(3, 0, 0))
mod2.ssm.initialize_known(res1.filter_results.initial_state,
res1.filter_results.initial_state_cov)
res2 = mod2.filter([0.5, 0.2, 0.1, 1])
# Create a third model with "known" initialization, but specified in kwargs
mod3 = sarimax.SARIMAX(
endog, order=(3, 0, 0),
initialization='known',
initial_state=res1.filter_results.initial_state,
initial_state_cov=res1.filter_results.initial_state_cov)
res3 = mod3.filter([0.5, 0.2, 0.1, 1])
# Create the forth model with approximate diffuse initialization specified
# in kwargs
mod4 = sarimax.SARIMAX(endog, order=(3, 0, 0),
initialization='approximate_diffuse',
initial_variance=1e9)
res4 = mod4.filter([0.5, 0.2, 0.1, 1])
# Just test a couple of things to make sure the results are the same
assert_almost_equal(res1.llf, res2.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res2.filter_results.filtered_state)
assert_almost_equal(res1.llf, res3.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res3.filter_results.filtered_state)
assert_almost_equal(res1.llf, res4.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res4.filter_results.filtered_state)
def test_results():
endog = results_sarimax.wpi1_data
mod = sarimax.SARIMAX(endog, order=(1, 0, 1))
res = mod.filter([0.5, -0.5, 1], cov_type='oim')
assert_almost_equal(res.arroots, 2.)
assert_almost_equal(res.maroots, 2.)
assert_almost_equal(res.arfreq, np.arctan2(0, 2) / (2*np.pi))
assert_almost_equal(res.mafreq, np.arctan2(0, 2) / (2*np.pi))
assert_almost_equal(res.arparams, [0.5])
assert_almost_equal(res.maparams, [-0.5])
def test_misc_exog():
# Tests for missing data
nobs = 20
k_endog = 1
np.random.seed(1208)
endog = np.random.normal(size=(nobs, k_endog))
endog[:4, 0] = np.nan
exog1 = np.random.normal(size=(nobs, 1))
exog2 = np.random.normal(size=(nobs, 2))
index = | pd.date_range('1970-01-01', freq='QS', periods=nobs) | pandas.date_range |
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
# Functions that yield mismaching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([True, False, False, True])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_int_no_index1(self):
'''Verifies Series.dropna() implementation for integer series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
n = 11
S1 = pd.Series(np.arange(n, dtype=np.int64))
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('numba.errors.TypingError - fix needed\n'
'Failed in hpat mode pipeline'
'(step: convert to distributed)\n'
'Invalid use of Function(<built-in function len>)'
'with argument(s) of type(s): (none)\n')
def test_series_rename1(self):
def test_impl(A):
return A.rename('B')
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A), test_impl(df.A))
def test_series_sum_default(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1., 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_sum_nan(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
# all NA case should produce 0
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Old style Series.sum() does not support parameters")
def test_series_sum_skipna_false(self):
def test_impl(S):
return S.sum(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(np.isnan(hpat_func(S)), np.isnan(test_impl(S)))
@unittest.skipIf(not hpat.config.config_pipeline_hpat_default,
"Series.sum() operator + is not implemented yet for Numba")
def test_series_sum2(self):
def test_impl(S):
return (S + S).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_prod(self):
def test_impl(S, skipna):
return S.prod(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
S = pd.Series(data)
for skipna_var in [True, False]:
actual = hpat_func(S, skipna=skipna_var)
expected = test_impl(S, skipna=skipna_var)
if np.isnan(actual) or np.isnan(expected):
# con not compare Nan != Nan directly
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_prod_skipna_default(self):
def test_impl(S):
return S.prod()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2, 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_count1(self):
def test_impl(S):
return S.count()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(['aa', 'bb', np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_mean(self):
def test_impl(S):
return S.mean()
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
with self.subTest(data=data):
S = pd.Series(data)
actual = hpat_func(S)
expected = test_impl(S)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.mean() any parameters unsupported")
def test_series_mean_skipna(self):
def test_impl(S, skipna):
return S.mean(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for skipna in [True, False]:
for data in data_samples:
S = pd.Series(data)
actual = hpat_func(S, skipna)
expected = test_impl(S, skipna)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_var1(self):
def test_impl(S):
return S.var()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_min(self):
def test_impl(S):
return S.min()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.min() any parameters unsupported")
def test_series_min_param(self):
def test_impl(S, param_skipna):
return S.min(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_max(self):
def test_impl(S):
return S.max()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.max() any parameters unsupported")
def test_series_max_param(self):
def test_impl(S, param_skipna):
return S.max(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_value_counts(self):
def test_impl(S):
return S.value_counts()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['AA', 'BB', 'C', 'AA', 'C', 'AA'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_dist_input1(self):
'''Verify distribution of a Series without index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_dist_input2(self):
'''Verify distribution of a Series with integer index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), 1 + np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip("Passed if run single")
def test_series_dist_input3(self):
'''Verify distribution of a Series with string index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), ['abc{}'.format(id) for id in range(n)])
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_tuple_input1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
s_tup = (S, 1, S2)
self.assertEqual(hpat_func(s_tup), test_impl(s_tup))
@unittest.skip("pending handling of build_tuple in dist pass")
def test_series_tuple_input_dist1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(locals={'s_tup:input': 'distributed'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
start, end = get_start_end(n)
s_tup = (S, 1, S2)
h_s_tup = (S[start:end], 1, S2[start:end])
self.assertEqual(hpat_func(h_s_tup), test_impl(s_tup))
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_concat1(self):
def test_impl(S1, S2):
return pd.concat([S1, S2]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6., 7.])
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_map1(self):
def test_impl(S):
return S.map(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_global1(self):
def test_impl(S):
return S.map(lambda a: a + GLOBAL_VAL)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup1(self):
def test_impl(S):
return S.map(lambda a: (a, 2 * a))
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup_map1(self):
def test_impl(S):
A = S.map(lambda a: (a, 2 * a))
return A.map(lambda a: a[1])
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_combine(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_float3264(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([np.float64(1), np.float64(2),
np.float64(3), np.float64(4), np.float64(5)])
S2 = pd.Series([np.float32(1), np.float32(2),
np.float32(3), np.float32(4), np.float32(5)])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_assert1(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3])
S2 = pd.Series([6., 21., 3., 5.])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_assert2(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6., 21., 3., 5.])
S2 = pd.Series([1, 2, 3])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_integer(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 16)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 3, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_different_types(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6.1, 21.2, 3.3, 5.4, 6.7])
S2 = pd.Series([1, 2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_integer_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 17, -5, 4])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_apply1(self):
def test_impl(S):
return S.apply(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_abs1(self):
def test_impl(S):
return S.abs()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, -2., 3., 0.5E-01, 0xFF, 0o7, 0b101])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_cov1(self):
def test_impl(S1, S2):
return S1.cov(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_corr1(self):
def test_impl(S1, S2):
return S1.corr(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_str_len1(self):
def test_impl(S):
return S.str.len()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'abc', 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_str2str(self):
str2str_methods = ('capitalize', 'lower', 'lstrip', 'rstrip',
'strip', 'swapcase', 'title', 'upper')
for method in str2str_methods:
func_text = "def test_impl(S):\n"
func_text += " return S.str.{}()\n".format(method)
test_impl = _make_func_from_text(func_text)
hpat_func = hpat.jit(test_impl)
S = pd.Series([' \tbbCD\t ', 'ABC', ' mCDm\t', 'abc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_append1(self):
def test_impl(S, other):
return S.append(other).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
# Test single series
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_append2(self):
def test_impl(S1, S2, S3):
return S1.append([S2, S3]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
S3 = pd.Series([1.0])
# Test series tuple
np.testing.assert_array_equal(hpat_func(S1, S2, S3),
test_impl(S1, S2, S3))
def test_series_isin_list1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = [1, 2, 5, 7, 8]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = [1., 2., 5., 7., 8.]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'q', 'w', 'c', 'd', 'e', 'r'])
values = ['a', 'q', 'c', 'd', 'e']
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = {1, 2, 5, 7, 8}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = {1., 2., 5., 7., 8.}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
@unittest.skip('TODO: requires hashable unicode strings in Numba')
def test_series_isin_set3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'c', 'd', 'e'] * 2)
values = {'b', 'c', 'e'}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3., np.inf])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull1(self):
def test_impl(S):
return S.isnull()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull_full(self):
def test_impl(series):
return series.isnull()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_numeric + [test_global_input_data_unicode_kind4]:
series = pd.Series(data * 3)
ref_result = test_impl(series)
jit_result = hpat_func(series)
pd.testing.assert_series_equal(ref_result, jit_result)
def test_series_notna1(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_notna_noidx_float(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_int(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_num(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_notna_noidx_str(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_str_notna(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', None, 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_str_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', None, 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different')
def test_series_dt_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_nlargest1(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_default1(self):
def test_impl(S):
return S.nlargest()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_nan1(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, np.nan, 3.0, 2.0, np.nan, 4.0])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func().values, test_impl().values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nlargest_index_str(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([73, 21, 10005, 5, 1], index=['a', 'b', 'c', 'd', 'e'])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nlargest_index_int(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([73, 21, 10005, 5, 1], index=[2, 3, 4, 5, 6])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest1(self):
def test_impl(S):
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_default1(self):
def test_impl(S):
return S.nsmallest()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_nan1(self):
def test_impl(S):
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, np.nan, 3.0, 2.0, np.nan, 4.0])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func().values, test_impl().values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nsmallest_index_str(self):
def test_impl(S):
return S.nsmallest(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([41, 32, 33, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nsmallest_index_int(self):
def test_impl(S):
return S.nsmallest(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([41, 32, 33, 4, 5], index=[1, 2, 3, 4, 5])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_head1(self):
def test_impl(S):
return S.head(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_default1(self):
'''Verifies default head method for non-distributed pass of Series with no index'''
def test_impl(S):
return S.head()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_index1(self):
'''Verifies head method for Series with integer index created inside jitted function'''
def test_impl():
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
return S.head(3)
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_head_index2(self):
'''Verifies head method for Series with string index created inside jitted function'''
def test_impl():
S = pd.Series([6, 9, 2, 3, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
return S.head(3)
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_head_index3(self):
'''Verifies head method for non-distributed pass of Series with integer index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip("Passed if run single")
def test_series_head_index4(self):
'''Verifies head method for non-distributed pass of Series with string index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([6, 9, 2, 4, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_parallel1(self):
'''Verifies head method for distributed Series with string data and no index'''
def test_impl(S):
return S.head(7)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
# need to test different lenghts, as head's size is fixed and implementation
# depends on relation of size of the data per processor to output data size
for n in range(1, 5):
S = pd.Series(['a', 'ab', 'abc', 'c', 'f', 'hh', ''] * n)
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
def test_series_head_index_parallel1(self):
'''Verifies head method for distributed Series with integer index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip("Passed if run single")
def test_series_head_index_parallel2(self):
'''Verifies head method for distributed Series with string index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
def test_series_head_noidx_float(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_head_noidx_int(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_head_noidx_num(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Old implementation not work with n negative and data str")
def test_series_head_noidx_str(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Broke another three tests")
def test_series_head_idx(self):
def test_impl(S):
return S.head()
def test_impl_param(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
['as', 'b', 'abb', 'sss', 'ytr65', '', 'qw', 'a', 'b'],
[6, 6, 2, 1, 3, np.inf, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
pd.testing.assert_series_equal(result, result_ref)
hpat_func_param1 = hpat.jit(test_impl_param)
for param1 in [1, 3, 7]:
result_param1_ref = test_impl_param(S, param1)
result_param1 = hpat_func_param1(S, param1)
pd.testing.assert_series_equal(result_param1, result_param1_ref)
def test_series_median1(self):
'''Verifies median implementation for float and integer series of random data'''
def test_impl(S):
return S.median()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(np.random.ranf(m))
self.assertEqual(hpat_func(S), test_impl(S))
# odd size
m = 101
S = pd.Series(np.random.randint(-30, 30, m))
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(np.random.ranf(m))
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
"BUG: old-style median implementation doesn't filter NaNs")
def test_series_median_skipna_default1(self):
'''Verifies median implementation with default skipna=True argument on a series with NA values'''
def test_impl(S):
return S.median()
hpat_func = hpat.jit(test_impl)
S = | pd.Series([2., 3., 5., np.nan, 5., 6., 7.]) | pandas.Series |
import pandas as pd
import numpy as np
from anndata import AnnData
import gc
data_folder = '../data/Kinker_Kim/'
def read_data(return_barcodes=False):
X_combined = pd.read_pickle(
'%s/combined_count.pkl'%(data_folder),
compression='gzip'
)
X_combined.index.names = ['barcode', 'sample'] + X_combined.index.names[2:]
combined_annot_df = X_combined.reset_index()[X_combined.index.names]
gene_names = np.array(X_combined.columns).astype(str)
X_source = X_combined.loc[(slice(None), slice(None), slice(None), 'CELL_LINE')]
X_target = X_combined.loc[(slice(None), slice(None), slice(None), 'TUMOR')]
if X_source.index.nlevels == 4:
X_source.index = X_source.index.droplevel(3)
if X_target.index.nlevels == 4:
X_target.index = X_target.index.droplevel(3)
assert X_source.shape[1] == X_combined.shape[1]
assert X_target.shape[1] == X_combined.shape[1]
assert X_source.shape[0] + X_target.shape[0] == X_combined.shape[0]
X_source = AnnData(
X_source.values,
obs=pd.DataFrame(np.array([np.array(e) for e in X_source.index]),
columns=['UMI', 'sample', 'pool']),
var=pd.DataFrame(X_source.columns)
)
X_target = AnnData(
X_target.values,
obs=pd.DataFrame(np.array([np.array(e) for e in X_target.index]),
columns=['UMI', 'sample', 'pool']),
var= | pd.DataFrame(X_target.columns) | pandas.DataFrame |
import glob
import datetime
import os
import pandas as pd
import numpy as np
import re
from tkinter import filedialog
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
# pyinstaller --onefile --noconsole --icon GetCSV.ico Arca_GetCSVConverter_2-0-0.py
#for MMW 18-6 spreadsheets
probCol = False
#infer desktop
desktopPath = os.path.expanduser("~/Desktop/")
filelist=['']
probRecords = []
probColls = []
#filename = r'arms_modsonly_May9.csv'
col_names = ["IslandoraContentModel","BCRDHSimpleObjectPID",'imageLink','filename','directory','childKey','title', 'alternativeTitle', 'creator1', 'creator2','creator3']
col_names += ['corporateCreator1','corporateCreator2','contributor1','contributor2','corporateContributor1','publisher_original','publisher_location']
col_names += ['dateCreated','description','extent','topicalSubject1','topicalSubject2','topicalSubject3','topicalSubject4','topicalSubject5']
col_names += ['geographicSubject1','coordinates','personalSubject1','personalSubject2','corporateSubject1','corporateSubject2', 'dateIssued_start']
col_names += ['dateIssued_end','dateRange', 'frequency','genre','genreAuthority','type','internetMediaType','language1','language2','notes']
col_names += ['accessIdentifier','localIdentifier','ISBN','classification','URI']
col_names += ['source','rights','creativeCommons_URI','rightsStatement_URI','relatedItem_title','relatedItem_PID','recordCreationDate','recordOrigin']
pattern1 = r'^[A-Z][a-z]{2}-\d{2}$' #%b-%Y date (e.g. Jun-17)
pattern2 = r'^\d{2}-\d{2}-[1-2]\d{3}$'
contentModels = {
r"info:fedora/islandora:sp_large_image_cmodel": "Large Image",
r"info:fedora/islandora:sp_basic_image": "Basic Image",
r"info:fedora/islandora:bookCModel": "Book",
r"info:fedora/islandora:newspaperIssueCModel":"Newspaper - issue",
r"info:fedora/islandora:newspaperPageCModel":"Newspaper",
r"info:fedora/islandora:sp_PDF":"PDF",
r"info:fedora/islandora:sp-audioCModel":"Audio",
r"info:fedora/islandora:sp_videoCModel":"Video",
r"info:fedora/islandora:sp_compoundCModel":"Compound",
r"info:fedora/ir:citationCModel":"Citation"
}
def browse_button():
# Allow user to select a directory and store it in global var
# called folder_path1
lbl1['text'] = ""
csvname = filedialog.askopenfilename(initialdir = desktopPath,title = "Select file",filetypes = (("csv files","*.csv"),("all files","*.*")))
if ".csv" not in csvname:
lbl1['text'] = "**Please choose a file with a .csv extension!"
else:
filelist[0] = csvname
lbl1['text'] = csvname
def splitMultiHdgs(hdgs):
if pd.notna(hdgs):
hdgs = hdgs.replace("\\,",";")
hdgs = hdgs.split(",")
newhdgs = []
for hdg in hdgs:
newhdg = hdg.replace(";", ",")
newhdgs.append(newhdg)
return newhdgs
else:
return None
def getMultiVals(item, string, df, pd):
hdgs = df.filter(like=string).columns
for hdg in hdgs:
vals = df.at[item.Index,hdg]
if pd.notna(vals):
vals = splitMultiHdgs(vals)
return vals
return None
def convert_date(dt_str, letter_date):
"""
Converts an invalid formatted date into a proper date for ARCA Mods
Correct format: Y-m-d
Fixes:
Incorrect format: m-d-Y
Incorrect format (letter date): m-d e.g. Jun-17
:param dt_str: the date string
:param letter_date: whether the string is a letter date. Letter date is something like Jun-17
:return: the correctly formatted date
"""
if letter_date:
rev_date = datetime.datetime.strptime(dt_str, '%b-%y').strftime('%Y-%m') # convert date to yymm string format
rev_date_pts = rev_date.split("-")
year_num = int(rev_date_pts[0])
if year_num > 1999:
year_num = year_num - 100
year_str = str(year_num)
rev_date_pts[0] = year_str
revised = "-".join(rev_date_pts)
else:
revised = datetime.datetime.strptime(dt_str, '%d-%m-%Y').strftime(
'%Y-%m-%d') # convert date to YY-mm string format
return revised
def sortValues(lst):
for item in lst:
if pd.isna(item):
lst.remove(item)
lst = set(lst)
lst = list(lst)
return lst
def dropNullCols(df):
nullcols = []
for col in df.columns:
notNull = df[col].notna().sum()
if notNull < 1:
nullcols.append(col)
return nullcols
def convert():
probCol = False
df2 = pd.DataFrame(columns = col_names)
df2.append(pd.Series(), ignore_index=True)
f=filelist[0]
# if not os.path.exists(savePath): #if folder does not exist
# os.makedirs(savePath)
try:
df = pd.read_csv(f,dtype = "string", encoding = 'utf_7')
except UnicodeDecodeError:
df = pd.read_csv(f,dtype = "string", encoding = 'utf_8')
nullcols = dropNullCols(df)
df.drop(nullcols, axis=1, inplace=True)
i = 1
for item in df.itertuples():
#PID
df2.at[i, 'BCRDHSimpleObjectPID'] = item.PID
if 'mods_subject_name_personal_namePart_ms' in df.columns:
pNames = item.mods_subject_name_personal_namePart_ms
#ContentModel
cModel = item.RELS_EXT_hasModel_uri_s
df2.at[i,"IslandoraContentModel"] =contentModels[cModel]
#Local Identifier
if 'mods_identifier_local_ms' in df.columns:
localID = item.mods_identifier_local_ms
if pd.notna(localID) and localID != "None":
df2.at[i,'localIdentifier'] = localID
#Access Identifer
if 'mods_identifier_access_ms' in df.columns:
accessID = item.mods_identifier_access_ms
if pd.notna(accessID):
df2.at[i,'accessIdentifier'] = accessID
#Image Link
# Link to Image
PIDparts = item.PID.split(":")
repo = PIDparts[0] #repository code
num = PIDparts[1] #auto-generated accession number
imageLink = "https://bcrdh.ca/islandora/object/" + repo + "%3A" + num
df2.at[i, 'imageLink'] = imageLink
#Title
if 'mods_titleInfo_title_ms' in df.columns:
title = item.mods_titleInfo_title_ms
if pd.notna(title):
df2.at[i,'title'] = title.replace("\,",",")
#Alternative Title
if "mods_titleInfo_alternative_title_ms" in df.columns:
altTitle = item.mods_titleInfo_alternative_title_ms
if pd.notna(altTitle):
df2.at[i, 'alternativeTitle'] = altTitle.replace("\,",",")
#Date
if "mods_originInfo_dateIssued_ms" in df.columns:
dt = item.mods_originInfo_dateIssued_ms
if pd.notna(dt):
if (re.match(pattern1, dt)): #letter date, i.e. Jun-17
dt = convert_date(dt, True)
elif (re.match(pattern2, dt)): #reverse date
dt = convert_date(dt, False)
df2.at[i,'dateCreated'] = dt
#Date Issued Start
if 'mods_originInfo_encoding_w3cdtf_keyDate_yes_point_start_dateIssued_ms' in df.columns:
startDt = item.mods_originInfo_encoding_w3cdtf_keyDate_yes_point_start_dateIssued_ms
if | pd.notna(startDt) | pandas.notna |
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# -*- coding: utf-8 -*-
# Copyright StateOfTheArt.quant.
#
# * Commercial Usage: please contact <EMAIL>
# * Non-Commercial Usage:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scipy.stats import rankdata
import torch
import numpy as np
import pandas as pd
from featurizer.functions.algebra_statistic import weighted_average, weighted_std, downside_std, upside_std
import pdb
# https://stackoverflow.com/questions/14313510/how-to-calculate-moving-average-using-numpy
def rolling_sum(tensor, window=1, dim=0):
ret = torch.cumsum(tensor, dim=dim)
ret[window:] = ret[window:] - ret[:-window]
ret[:window-1]= float("nan")
return ret
def rolling_sum_(tensor, window=1, dim=0):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).sum()
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
def rolling_sum3d(tensor, window=1, dim=1):
ret = torch.cumsum(tensor, dim=dim)
ret[:,window:] = ret[:,window:] - ret[:,:-window]
ret[:,:window-1]= float("nan")
return ret
def rolling_mean(tensor, window=1):
#to-do fixme
#ret = torch.cumsum(tensor, dim=0)
#ret[window:] = ret[window:] - ret[:-window]
#ret[:window-1]= float("nan")
#output = ret/window
return rolling_mean_(tensor=tensor, window=window)
def rolling_mean_(tensor, window=1):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).mean()
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
def rolling_weighted_mean(tensor, window=1, halflife=90):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).apply(lambda x: weighted_average(x,halflife=halflife))
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
# https://stackoverflow.com/questions/30399534/shift-elements-in-a-numpy-array
def shift(tensor, window=1):
if window == 0:
return tensor
e = torch.empty_like(tensor, dtype=tensor.dtype, device=tensor.device)
if window > 0:
e[:window] = float("nan")
e[window:] = tensor[:-window]
else:
e[window:] = float("nan")
e[:window] = tensor[-window:]
return e
def diff(tensor, period=1):
shiftd_tensor = shift(tensor, window=period)
diff = tensor - shiftd_tensor
return diff
def pct_change(tensor, period=1):
shiftd_tensor = shift(tensor, window=period)
diff = tensor - shiftd_tensor
output = diff.div(shiftd_tensor)
return output
#https://stackoverflow.com/questions/54564253/how-to-calculate-the-cumulative-product-of-a-rolling-window-in-pandas
def rolling_prod(tensor, window):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).apply(np.prod)
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
def rolling_var(tensor, window):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).var()
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
def rolling_std(tensor, window):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).std()
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
def rolling_std_dof_0(tensor, window): # dof: degree of freedom
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).std(ddof=0)
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
def rolling_weighted_std(tensor, window, halflife=90):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).apply(lambda x: weighted_std(x, halflife=halflife))
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
def rolling_downside_std(tensor, tensor_benchmark, window):
diff_ts = tensor - tensor_benchmark
diff_np = diff_ts.cpu().detach().numpy()
diff_df = | pd.DataFrame(diff_np) | pandas.DataFrame |
"""
Turn text files to dataframes for calculations
"""
import pandas as pd
import numpy as np
from scipy import stats
import scipy.stats
from scipy.stats import ttest_ind
from scipy.stats import ttest_ind_from_stats
dfmutated = pd.read_csv('/home/ec2-user/environment/Mutated.txt', delimiter = "\t")
dfnonmutated = pd.read_csv('/home/ec2-user/environment/Nonmutated.txt', delimiter = "\t")
dfcombined = pd.concat([dfmutated, dfnonmutated], axis=1)
#isolate dataframes for comparison from 1 sample
#replace all 0 values with 1 to prevent errors from using the logarithm function
#sample/group 1
dfreads1 = dfmutated.read_count
dfreads1 = dfreads1.replace(0,1)
dfrpm1 = dfmutated.reads_per_million_miRNA_mapped
dfrpm1 = dfrpm1.replace(0,1)
#sample/group 2
dfreads2 = dfnonmutated.read_count
dfreads2 = dfreads2.replace(0,1)
dfrpm2 = dfnonmutated.reads_per_million_miRNA_mapped
dfrpm2 = dfrpm2.replace(0,1)
#compare group 1 (treatment) to group 2 (control)
#create new columns for the difference in expression between samples
dfcombined["read_count_difference"] = dfreads1.subtract(dfreads2)
dfcombined["reads_per_million_difference"] = dfrpm1.subtract(dfrpm2)
#create new columns for log 2-fold change between sample/group 1 and 2
dfcombined['log2fc_reads'] = np.log2(dfreads1) - np.log2(dfreads2)
dfcombined['log2fc_rpm'] = np.log2(dfrpm1) - np.log2(dfrpm2)
#T-test to calulate p-values
#set sample sizes for groups 1 and 2
n1 = 3
n2 = 3
df_mreads = pd.merge(dfreads1, dfreads2, left_index=True, right_index=True)
df_mrpm = | pd.merge(dfrpm1, dfrpm2, left_index=True, right_index=True) | pandas.merge |
import os
import csv
import pandas as pd
from datetime import datetime
def run_Census(args):
# Get tables and keys
age_dist = pd.read_csv("../US_Census/raw/data/cbg_b01.csv")
cbg_fips_codes = pd.read_csv("../US_Census/raw/metadata/cbg_fips_codes.csv")
cbg_key = pd.read_csv("../US_Census/raw/metadata/cbg_field_descriptions.csv")
# Process raw
drop_cols = []
for col in age_dist.columns:
if ("1001" not in col) and ("census_block_group" != col):
drop_cols.append(col)
age_dist = age_dist.drop(columns=drop_cols)
# Splitting Error from Estimate
error_data = age_dist
error_cols =[]
estimate_cols = []
for col in age_dist.columns:
if "census_block_group" != col:
if "m" in col:
error_cols.append(col)
else:
estimate_cols.append(col)
error_data = error_data.drop(columns=estimate_cols)
age_dist = age_dist.drop(columns=error_cols)
# Renaming Columns
age_dist = age_dist.rename(columns=dict(zip(cbg_key["table_id"],cbg_key["field_full_name"] )))
error_data = error_data.rename(columns=dict(zip(cbg_key["table_id"],cbg_key["field_full_name"] )))
# Assigning State to age_dist
for index, row in age_dist.iterrows():
state_code = 0
if len(str(row["census_block_group"])) == 11:
state_code = int(str(row["census_block_group"])[0])
elif len(str(row["census_block_group"])) == 12:
state_code = int(str(row["census_block_group"])[0:2])
else:
raise Exception("FIPS unexpected size")
age_dist.at[index, "census_block_group"] = state_code
# Aggregating data by state
state_codes = age_dist["census_block_group"].drop_duplicates().tolist()
agg_data = []
for code in state_codes:
state_data = {}
for col in age_dist.columns:
if col == "census_block_group":
continue
else:
sum_val = age_dist.loc[age_dist["census_block_group"] == code, col].sum()
new_col = (col.replace("SEX BY AGE: ", "")).replace(": Total population -- (Estimate)", "")
state_data[new_col] = sum_val
state_data["state_fips"] = code
agg_data.append(state_data)
agg_df = | pd.DataFrame(agg_data) | pandas.DataFrame |
"""
using output ingest parquets update <dataset_id>_tables.parquet with context_from_text column
"""
import functools
import os
import glob
import dask
from dask.distributed import progress
import pandas as pd
from tqdm import tqdm
from typing import Tuple
import re
import logging
logging.basicConfig(format='%(levelname)s :: %(filename) :: %(funcName)s :: %(asctime)s :: %(message)s',
level=logging.ERROR)
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
def context_enrichment(file_path: str, dataset_id: str, pp_threshold: float, d_threshold: float,
spans: int, client: dask.distributed.Client, use_qa_table_enrichment: bool = False):
"""
add rows to dataset_id_tables.parquet
:param file_path: a directory full of parquets (ingest output) to process
:param dataset_id: ingest process dataset_id
:param pp_threshold: float cut off for postprocess_score to process as table caption
:param d_threshold: float cut off for detect_score to process as table caption
:param spans: number of words each side of label to pull in as context for each table label in content text
if None will use regex to pull out full stop to full stop span around the table label
:param client: dask distributed client to pass jobs to
:param use_qa_table_enrichment: if qa process should run in contexts
"""
dataset_id_df_path = ''
tables_df_path = ''
for pq in glob.glob(os.path.join(file_path, '*.parquet')):
if os.path.basename(pq) == dataset_id + '.parquet':
dataset_id_df_path = pq
if os.path.basename(pq) == dataset_id + '_tables.parquet':
tables_df_path = pq
logger.info(f'getting all pdfs with tables: {tables_df_path}')
table_file_name = os.path.basename(tables_df_path).split('.')[0]
tables_df = | pd.read_parquet(tables_df_path) | pandas.read_parquet |
"""
SparseArray data structure
"""
from __future__ import division
import numbers
import operator
import re
from typing import Any, Callable, Union
import warnings
import numpy as np
from pandas._libs import index as libindex, lib
import pandas._libs.sparse as splib
from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
from pandas._libs.tslibs import NaT
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import (
astype_nansafe, construct_1d_arraylike_from_scalar, find_common_type,
infer_dtype_from_scalar, maybe_convert_platform)
from pandas.core.dtypes.common import (
is_array_like, is_bool_dtype, is_datetime64_any_dtype, is_dtype_equal,
is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype,
pandas_dtype)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.generic import (
ABCIndexClass, ABCSeries, ABCSparseSeries)
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
import pandas.io.formats.printing as printing
# ----------------------------------------------------------------------------
# Dtype
@register_extension_dtype
class SparseDtype(ExtensionDtype):
"""
Dtype for data stored in :class:`SparseArray`.
This dtype implements the pandas ExtensionDtype interface.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
The dtype of the underlying array storing the non-fill value values.
fill_value : scalar, optional
The scalar value not stored in the SparseArray. By default, this
depends on `dtype`.
=========== ==========
dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool ``False``
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The default value may be overridden by specifying a `fill_value`.
"""
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
# Without is_na_fill_value in the comparison, those would be equal since
# hash(nan) is (sometimes?) 0.
_metadata = ('_dtype', '_fill_value', '_is_na_fill_value')
def __init__(self, dtype=np.float64, fill_value=None):
# type: (Union[str, np.dtype, 'ExtensionDtype', type], Any) -> None
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.dtypes.common import (
pandas_dtype, is_string_dtype, is_scalar
)
if isinstance(dtype, type(self)):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
dtype = pandas_dtype(dtype)
if is_string_dtype(dtype):
dtype = np.dtype('object')
if fill_value is None:
fill_value = na_value_for_dtype(dtype)
if not is_scalar(fill_value):
raise ValueError("fill_value must be a scalar. Got {} "
"instead".format(fill_value))
self._dtype = dtype
self._fill_value = fill_value
def __hash__(self):
# Python3 doesn't inherit __hash__ when a base class overrides
# __eq__, so we explicitly do it here.
return super(SparseDtype, self).__hash__()
def __eq__(self, other):
# We have to override __eq__ to handle NA values in _metadata.
# The base class does simple == checks, which fail for NA.
if isinstance(other, compat.string_types):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
subtype = self.subtype == other.subtype
if self._is_na_fill_value:
# this case is complicated by two things:
# SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
# SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
# i.e. we want to treat any floating-point NaN as equal, but
# not a floating-point NaN and a datetime NaT.
fill_value = (
other._is_na_fill_value and
isinstance(self.fill_value, type(other.fill_value)) or
isinstance(other.fill_value, type(self.fill_value))
)
else:
fill_value = self.fill_value == other.fill_value
return subtype and fill_value
return False
@property
def fill_value(self):
"""
The fill value of the array.
Converting the SparseArray to a dense ndarray will fill the
array with this value.
.. warning::
It's possible to end up with a SparseArray that has ``fill_value``
values in ``sp_values``. This can occur, for example, when setting
``SparseArray.fill_value`` directly.
"""
return self._fill_value
@property
def _is_na_fill_value(self):
from pandas.core.dtypes.missing import isna
return isna(self.fill_value)
@property
def _is_numeric(self):
from pandas.core.dtypes.common import is_object_dtype
return not is_object_dtype(self.subtype)
@property
def _is_boolean(self):
from pandas.core.dtypes.common import is_bool_dtype
return is_bool_dtype(self.subtype)
@property
def kind(self):
"""
The sparse kind. Either 'integer', or 'block'.
"""
return self.subtype.kind
@property
def type(self):
return self.subtype.type
@property
def subtype(self):
return self._dtype
@property
def name(self):
return 'Sparse[{}, {}]'.format(self.subtype.name, self.fill_value)
def __repr__(self):
return self.name
@classmethod
def construct_array_type(cls):
return SparseArray
@classmethod
def construct_from_string(cls, string):
"""
Construct a SparseDtype from a string form.
Parameters
----------
string : str
Can take the following forms.
string dtype
================ ============================
'int' SparseDtype[np.int64, 0]
'Sparse' SparseDtype[np.float64, nan]
'Sparse[int]' SparseDtype[np.int64, 0]
'Sparse[int, 0]' SparseDtype[np.int64, 0]
================ ============================
It is not possible to specify non-default fill values
with a string. An argument like ``'Sparse[int, 1]'``
will raise a ``TypeError`` because the default fill value
for integers is 0.
Returns
-------
SparseDtype
"""
msg = "Could not construct SparseDtype from '{}'".format(string)
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
result = SparseDtype(sub_type)
except Exception:
raise TypeError(msg)
else:
msg = ("Could not construct SparseDtype from '{}'.\n\nIt "
"looks like the fill_value in the string is not "
"the default for the dtype. Non-default fill_values "
"are not supported. Use the 'SparseDtype()' "
"constructor instead.")
if has_fill_value and str(result) != string:
raise TypeError(msg.format(string))
return result
else:
raise TypeError(msg)
@staticmethod
def _parse_subtype(dtype):
"""
Parse a string to get the subtype
Parameters
----------
dtype : str
A string like
* Sparse[subtype]
* Sparse[subtype, fill_value]
Returns
-------
subtype : str
Raises
------
ValueError
When the subtype cannot be extracted.
"""
xpr = re.compile(
r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$"
)
m = xpr.match(dtype)
has_fill_value = False
if m:
subtype = m.groupdict()['subtype']
has_fill_value = m.groupdict()['fill_value'] or has_fill_value
elif dtype == "Sparse":
subtype = 'float64'
else:
raise ValueError("Cannot parse {}".format(dtype))
return subtype, has_fill_value
@classmethod
def is_dtype(cls, dtype):
dtype = getattr(dtype, 'dtype', dtype)
if (isinstance(dtype, compat.string_types) and
dtype.startswith("Sparse")):
sub_type, _ = cls._parse_subtype(dtype)
dtype = np.dtype(sub_type)
elif isinstance(dtype, cls):
return True
return isinstance(dtype, np.dtype) or dtype == 'Sparse'
def update_dtype(self, dtype):
"""
Convert the SparseDtype to a new dtype.
This takes care of converting the ``fill_value``.
Parameters
----------
dtype : Union[str, numpy.dtype, SparseDtype]
The new dtype to use.
* For a SparseDtype, it is simply returned
* For a NumPy dtype (or str), the current fill value
is converted to the new dtype, and a SparseDtype
with `dtype` and the new fill value is returned.
Returns
-------
SparseDtype
A new SparseDtype with the corret `dtype` and fill value
for that `dtype`.
Raises
------
ValueError
When the current fill value cannot be converted to the
new `dtype` (e.g. trying to convert ``np.nan`` to an
integer dtype).
Examples
--------
>>> SparseDtype(int, 0).update_dtype(float)
Sparse[float64, 0.0]
>>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
Sparse[float64, nan]
"""
cls = type(self)
dtype = pandas_dtype(dtype)
if not isinstance(dtype, cls):
fill_value = astype_nansafe(np.array(self.fill_value),
dtype).item()
dtype = cls(dtype, fill_value=fill_value)
return dtype
@property
def _subtype_with_str(self):
"""
Whether the SparseDtype's subtype should be considered ``str``.
Typically, pandas will store string data in an object-dtype array.
When converting values to a dtype, e.g. in ``.astype``, we need to
be more specific, we need the actual underlying type.
Returns
-------
>>> SparseDtype(int, 1)._subtype_with_str
dtype('int64')
>>> SparseDtype(object, 1)._subtype_with_str
dtype('O')
>>> dtype = SparseDtype(str, '')
>>> dtype.subtype
dtype('O')
>>> dtype._subtype_with_str
str
"""
if isinstance(self.fill_value, compat.string_types):
return type(self.fill_value)
return self.subtype
# ----------------------------------------------------------------------------
# Array
_sparray_doc_kwargs = dict(klass='SparseArray')
def _get_fill(arr):
# type: (SparseArray) -> np.ndarray
"""
Create a 0-dim ndarray containing the fill value
Parameters
----------
arr : SparseArray
Returns
-------
fill_value : ndarray
0-dim ndarray with just the fill value.
Notes
-----
coerce fill_value to arr dtype if possible
int64 SparseArray can have NaN as fill_value if there is no missing
"""
try:
return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name):
# type: (SparseArray, SparseArray, Callable, str) -> Any
"""
Perform a binary operation between two arrays.
Parameters
----------
left : Union[SparseArray, ndarray]
right : Union[SparseArray, ndarray]
op : Callable
The binary operation to perform
name str
Name of the callable.
Returns
-------
SparseArray
"""
if name.startswith('__'):
# For lookups in _libs.sparse we need non-dunder op name
name = name[2:-2]
# dtype used to find corresponding sparse method
ltype = left.dtype.subtype
rtype = right.dtype.subtype
if not is_dtype_equal(ltype, rtype):
subtype = find_common_type([ltype, rtype])
ltype = SparseDtype(subtype, left.fill_value)
rtype = SparseDtype(subtype, right.fill_value)
# TODO(GH-23092): pass copy=False. Need to fix astype_nansafe
left = left.astype(ltype)
right = right.astype(rtype)
dtype = ltype.subtype
else:
dtype = ltype
# dtype the result must have
result_dtype = None
if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
with np.errstate(all='ignore'):
result = op(left.get_values(), right.get_values())
fill = op(_get_fill(left), _get_fill(right))
if left.sp_index.ngaps == 0:
index = left.sp_index
else:
index = right.sp_index
elif left.sp_index.equals(right.sp_index):
with np.errstate(all='ignore'):
result = op(left.sp_values, right.sp_values)
fill = op(_get_fill(left), _get_fill(right))
index = left.sp_index
else:
if name[0] == 'r':
left, right = right, left
name = name[1:]
if name in ('and', 'or') and dtype == 'bool':
opname = 'sparse_{name}_uint8'.format(name=name)
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
right_sp_values = right.sp_values.view(np.uint8)
result_dtype = np.bool
else:
opname = 'sparse_{name}_{dtype}'.format(name=name, dtype=dtype)
left_sp_values = left.sp_values
right_sp_values = right.sp_values
sparse_op = getattr(splib, opname)
with np.errstate(all='ignore'):
result, index, fill = sparse_op(
left_sp_values, left.sp_index, left.fill_value,
right_sp_values, right.sp_index, right.fill_value)
if result_dtype is None:
result_dtype = result.dtype
return _wrap_result(name, result, index, fill, dtype=result_dtype)
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
"""
wrap op result to have correct dtype
"""
if name.startswith('__'):
# e.g. __eq__ --> eq
name = name[2:-2]
if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
dtype = np.bool
fill_value = lib.item_from_zerodim(fill_value)
if is_bool_dtype(dtype):
# fill_value may be np.bool_
fill_value = bool(fill_value)
return SparseArray(data,
sparse_index=sparse_index,
fill_value=fill_value,
dtype=dtype)
class SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin):
"""
An ExtensionArray for storing sparse data.
.. versionchanged:: 0.24.0
Implements the ExtensionArray interface.
Parameters
----------
data : array-like
A dense array of values to store in the SparseArray. This may contain
`fill_value`.
sparse_index : SparseIndex, optional
index : Index
fill_value : scalar, optional
Elements in `data` that are `fill_value` are not stored in the
SparseArray. For memory savings, this should be the most common value
in `data`. By default, `fill_value` depends on the dtype of `data`:
=========== ==========
data.dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool False
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The fill value is potentiall specified in three ways. In order of
precedence, these are
1. The `fill_value` argument
2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is
a ``SparseDtype``
3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`
is not a ``SparseDtype`` and `data` is a ``SparseArray``.
kind : {'integer', 'block'}, default 'integer'
The type of storage for sparse locations.
* 'block': Stores a `block` and `block_length` for each
contiguous *span* of sparse values. This is best when
sparse data tends to be clumped together, with large
regsions of ``fill-value`` values between sparse values.
* 'integer': uses an integer to store the location of
each sparse value.
dtype : np.dtype or SparseDtype, optional
The dtype to use for the SparseArray. For numpy dtypes, this
determines the dtype of ``self.sp_values``. For SparseDtype,
this determines ``self.sp_values`` and ``self.fill_value``.
copy : bool, default False
Whether to explicitly copy the incoming `data` array.
"""
__array_priority__ = 15
_pandas_ftype = 'sparse'
_subtyp = 'sparse_array' # register ABCSparseArray
def __init__(self, data, sparse_index=None, index=None, fill_value=None,
kind='integer', dtype=None, copy=False):
from pandas.core.internals import SingleBlockManager
if isinstance(data, SingleBlockManager):
data = data.internal_values()
if fill_value is None and isinstance(dtype, SparseDtype):
fill_value = dtype.fill_value
if isinstance(data, (type(self), ABCSparseSeries)):
# disable normal inference on dtype, sparse_index, & fill_value
if sparse_index is None:
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
if dtype is None:
dtype = data.dtype
# TODO: make kind=None, and use data.kind?
data = data.sp_values
# Handle use-provided dtype
if isinstance(dtype, compat.string_types):
# Two options: dtype='int', regular numpy dtype
# or dtype='Sparse[int]', a sparse dtype
try:
dtype = SparseDtype.construct_from_string(dtype)
except TypeError:
dtype = pandas_dtype(dtype)
if isinstance(dtype, SparseDtype):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
if index is not None and not is_scalar(data):
raise Exception("must only pass scalars with an index ")
if is_scalar(data):
if index is not None:
if data is None:
data = np.nan
if index is not None:
npoints = len(index)
elif sparse_index is None:
npoints = 1
else:
npoints = sparse_index.length
dtype = infer_dtype_from_scalar(data)[0]
data = construct_1d_arraylike_from_scalar(
data, npoints, dtype
)
if dtype is not None:
dtype = pandas_dtype(dtype)
# TODO: disentangle the fill_value dtype inference from
# dtype inference
if data is None:
# XXX: What should the empty dtype be? Object or float?
data = np.array([], dtype=dtype)
if not is_array_like(data):
try:
# probably shared code in sanitize_series
from pandas.core.internals.construction import sanitize_array
data = sanitize_array(data, index=None)
except ValueError:
# NumPy may raise a ValueError on data like [1, []]
# we retry with object dtype here.
if dtype is None:
dtype = object
data = np.atleast_1d(np.asarray(data, dtype=dtype))
else:
raise
if copy:
# TODO: avoid double copy when dtype forces cast.
data = data.copy()
if fill_value is None:
fill_value_dtype = data.dtype if dtype is None else dtype
if fill_value_dtype is None:
fill_value = np.nan
else:
fill_value = na_value_for_dtype(fill_value_dtype)
if isinstance(data, type(self)) and sparse_index is None:
sparse_index = data._sparse_index
sparse_values = np.asarray(data.sp_values, dtype=dtype)
elif sparse_index is None:
sparse_values, sparse_index, fill_value = make_sparse(
data, kind=kind, fill_value=fill_value, dtype=dtype
)
else:
sparse_values = np.asarray(data, dtype=dtype)
if len(sparse_values) != sparse_index.npoints:
raise AssertionError("Non array-like type {type} must "
"have the same length as the index"
.format(type=type(sparse_values)))
self._sparse_index = sparse_index
self._sparse_values = sparse_values
self._dtype = SparseDtype(sparse_values.dtype, fill_value)
@classmethod
def _simple_new(cls, sparse_array, sparse_index, dtype):
# type: (np.ndarray, SparseIndex, SparseDtype) -> 'SparseArray'
new = cls([])
new._sparse_index = sparse_index
new._sparse_values = sparse_array
new._dtype = dtype
return new
def __array__(self, dtype=None, copy=True):
fill_value = self.fill_value
if self.sp_index.ngaps == 0:
# Compat for na dtype and int values.
return self.sp_values
if dtype is None:
# Can NumPy represent this type?
# If not, `np.result_type` will raise. We catch that
# and return object.
if is_datetime64_any_dtype(self.sp_values.dtype):
# However, we *do* special-case the common case of
# a datetime64 with pandas NaT.
if fill_value is NaT:
# Can't put pd.NaT in a datetime64[ns]
fill_value = np.datetime64('NaT')
try:
dtype = np.result_type(self.sp_values.dtype, type(fill_value))
except TypeError:
dtype = object
out = np.full(self.shape, fill_value, dtype=dtype)
out[self.sp_index.to_int_index().indices] = self.sp_values
return out
def __setitem__(self, key, value):
# I suppose we could allow setting of non-fill_value elements.
# TODO(SparseArray.__setitem__): remove special cases in
# ExtensionBlock.where
msg = "SparseArray does not support item assignment via setitem"
raise TypeError(msg)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls(scalars, dtype=dtype)
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
# ------------------------------------------------------------------------
# Data
# ------------------------------------------------------------------------
@property
def sp_index(self):
"""
The SparseIndex containing the location of non- ``fill_value`` points.
"""
return self._sparse_index
@property
def sp_values(self):
"""
An ndarray containing the non- ``fill_value`` values.
Examples
--------
>>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0)
>>> s.sp_values
array([1, 2])
"""
return self._sparse_values
@property
def dtype(self):
return self._dtype
@property
def fill_value(self):
"""
Elements in `data` that are `fill_value` are not stored.
For memory savings, this should be the most common value in the array.
"""
return self.dtype.fill_value
@fill_value.setter
def fill_value(self, value):
self._dtype = SparseDtype(self.dtype.subtype, value)
@property
def kind(self):
"""
The kind of sparse index for this array. One of {'integer', 'block'}.
"""
if isinstance(self.sp_index, IntIndex):
return 'integer'
else:
return 'block'
@property
def _valid_sp_values(self):
sp_vals = self.sp_values
mask = notna(sp_vals)
return sp_vals[mask]
def __len__(self):
return self.sp_index.length
@property
def _null_fill_value(self):
return self._dtype._is_na_fill_value
def _fill_value_matches(self, fill_value):
if self._null_fill_value:
return isna(fill_value)
else:
return self.fill_value == fill_value
@property
def nbytes(self):
return self.sp_values.nbytes + self.sp_index.nbytes
@property
def density(self):
"""
The percent of non- ``fill_value`` points, as decimal.
Examples
--------
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.density
0.6
"""
r = float(self.sp_index.npoints) / float(self.sp_index.length)
return r
@property
def npoints(self):
"""
The number of non- ``fill_value`` points.
Examples
--------
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.npoints
3
"""
return self.sp_index.npoints
@property
def values(self):
"""
Dense values
"""
return self.to_dense()
def isna(self):
from pandas import isna
# If null fill value, we want SparseDtype[bool, true]
# to preserve the same memory usage.
dtype = SparseDtype(bool, self._null_fill_value)
return type(self)._simple_new(isna(self.sp_values),
self.sp_index, dtype)
def fillna(self, value=None, method=None, limit=None):
"""
Fill missing values with `value`.
Parameters
----------
value : scalar, optional
method : str, optional
.. warning::
Using 'method' will result in high memory use,
as all `fill_value` methods will be converted to
an in-memory ndarray
limit : int, optional
Returns
-------
SparseArray
Notes
-----
When `value` is specified, the result's ``fill_value`` depends on
``self.fill_value``. The goal is to maintain low-memory use.
If ``self.fill_value`` is NA, the result dtype will be
``SparseDtype(self.dtype, fill_value=value)``. This will preserve
amount of memory used before and after filling.
When ``self.fill_value`` is not NA, the result dtype will be
``self.dtype``. Again, this preserves the amount of memory used.
"""
if ((method is None and value is None) or
(method is not None and value is not None)):
raise ValueError("Must specify one of 'method' or 'value'.")
elif method is not None:
msg = "fillna with 'method' requires high memory usage."
warnings.warn(msg, PerformanceWarning)
filled = interpolate_2d(np.asarray(self), method=method,
limit=limit)
return type(self)(filled, fill_value=self.fill_value)
else:
new_values = np.where(isna(self.sp_values), value, self.sp_values)
if self._null_fill_value:
# This is essentially just updating the dtype.
new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
else:
new_dtype = self.dtype
return self._simple_new(new_values, self._sparse_index, new_dtype)
def shift(self, periods=1, fill_value=None):
if not len(self) or periods == 0:
return self.copy()
if isna(fill_value):
fill_value = self.dtype.na_value
subtype = np.result_type(fill_value, self.dtype.subtype)
if subtype != self.dtype.subtype:
# just coerce up front
arr = self.astype(SparseDtype(subtype, self.fill_value))
else:
arr = self
empty = self._from_sequence(
[fill_value] * min(abs(periods), len(self)),
dtype=arr.dtype
)
if periods > 0:
a = empty
b = arr[:-periods]
else:
a = arr[abs(periods):]
b = empty
return arr._concat_same_type([a, b])
def _first_fill_value_loc(self):
"""
Get the location of the first missing value.
Returns
-------
int
"""
if len(self) == 0 or self.sp_index.npoints == len(self):
return -1
indices = self.sp_index.to_int_index().indices
if not len(indices) or indices[0] > 0:
return 0
diff = indices[1:] - indices[:-1]
return np.searchsorted(diff, 2) + 1
def unique(self):
uniques = list(algos.unique(self.sp_values))
fill_loc = self._first_fill_value_loc()
if fill_loc >= 0:
uniques.insert(fill_loc, self.fill_value)
return type(self)._from_sequence(uniques, dtype=self.dtype)
def _values_for_factorize(self):
# Still override this for hash_pandas_object
return np.asarray(self), self.fill_value
def factorize(self, na_sentinel=-1):
# Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]
# The sparsity on this is backwards from what Sparse would want. Want
# ExtensionArray.factorize -> Tuple[EA, EA]
# Given that we have to return a dense array of labels, why bother
# implementing an efficient factorize?
labels, uniques = algos.factorize(np.asarray(self),
na_sentinel=na_sentinel)
uniques = SparseArray(uniques, dtype=self.dtype)
return labels, uniques
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of unique values.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN, even if NaN is in sp_values.
Returns
-------
counts : Series
"""
from pandas import Index, Series
keys, counts = algos._value_counts_arraylike(self.sp_values,
dropna=dropna)
fcounts = self.sp_index.ngaps
if fcounts > 0:
if self._null_fill_value and dropna:
pass
else:
if self._null_fill_value:
mask = isna(keys)
else:
mask = keys == self.fill_value
if mask.any():
counts[mask] += fcounts
else:
keys = np.insert(keys, 0, self.fill_value)
counts = np.insert(counts, 0, fcounts)
if not isinstance(keys, ABCIndexClass):
keys = Index(keys)
result = Series(counts, index=keys)
return result
# --------
# Indexing
# --------
def __getitem__(self, key):
if isinstance(key, tuple):
if len(key) > 1:
raise IndexError("too many indices for array.")
key = key[0]
if is_integer(key):
return self._get_val_at(key)
elif isinstance(key, tuple):
data_slice = self.values[key]
elif isinstance(key, slice):
# special case to preserve dtypes
if key == slice(None):
return self.copy()
# TODO: this logic is surely elsewhere
# TODO: this could be more efficient
indices = np.arange(len(self), dtype=np.int32)[key]
return self.take(indices)
else:
# TODO: I think we can avoid densifying when masking a
# boolean SparseArray with another. Need to look at the
# key's fill_value for True / False, and then do an intersection
# on the indicies of the sp_values.
if isinstance(key, SparseArray):
if is_bool_dtype(key):
key = key.to_dense()
else:
key = np.asarray(key)
if com.is_bool_indexer(key) and len(self) == len(key):
return self.take(np.arange(len(key), dtype=np.int32)[key])
elif hasattr(key, '__len__'):
return self.take(key)
else:
raise ValueError("Cannot slice with '{}'".format(key))
return type(self)(data_slice, kind=self.kind)
def _get_val_at(self, loc):
n = len(self)
if loc < 0:
loc += n
if loc >= n or loc < 0:
raise IndexError('Out of bounds access')
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
return self.fill_value
else:
return libindex.get_value_at(self.sp_values, sp_loc)
def take(self, indices, allow_fill=False, fill_value=None):
if is_scalar(indices):
raise ValueError("'indices' must be an array, not a "
"scalar '{}'.".format(indices))
indices = np.asarray(indices, dtype=np.int32)
if indices.size == 0:
result = []
kwargs = {'dtype': self.dtype}
elif allow_fill:
result = self._take_with_fill(indices, fill_value=fill_value)
kwargs = {}
else:
result = self._take_without_fill(indices)
kwargs = {'dtype': self.dtype}
return type(self)(result, fill_value=self.fill_value, kind=self.kind,
**kwargs)
def _take_with_fill(self, indices, fill_value=None):
if fill_value is None:
fill_value = self.dtype.na_value
if indices.min() < -1:
raise ValueError("Invalid value in 'indices'. Must be between -1 "
"and the length of the array.")
if indices.max() >= len(self):
raise IndexError("out of bounds value in 'indices'.")
if len(self) == 0:
# Empty... Allow taking only if all empty
if (indices == -1).all():
dtype = np.result_type(self.sp_values, type(fill_value))
taken = np.empty_like(indices, dtype=dtype)
taken.fill(fill_value)
return taken
else:
raise IndexError('cannot do a non-empty take from an empty '
'axes.')
sp_indexer = self.sp_index.lookup_array(indices)
if self.sp_index.npoints == 0:
# Avoid taking from the empty self.sp_values
taken = np.full(sp_indexer.shape, fill_value=fill_value,
dtype=np.result_type(type(fill_value)))
else:
taken = self.sp_values.take(sp_indexer)
# sp_indexer may be -1 for two reasons
# 1.) we took for an index of -1 (new)
# 2.) we took a value that was self.fill_value (old)
new_fill_indices = indices == -1
old_fill_indices = (sp_indexer == -1) & ~new_fill_indices
# Fill in two steps.
# Old fill values
# New fill values
# potentially coercing to a new dtype at each stage.
m0 = sp_indexer[old_fill_indices] < 0
m1 = sp_indexer[new_fill_indices] < 0
result_type = taken.dtype
if m0.any():
result_type = np.result_type(result_type,
type(self.fill_value))
taken = taken.astype(result_type)
taken[old_fill_indices] = self.fill_value
if m1.any():
result_type = np.result_type(result_type, type(fill_value))
taken = taken.astype(result_type)
taken[new_fill_indices] = fill_value
return taken
def _take_without_fill(self, indices):
to_shift = indices < 0
indices = indices.copy()
n = len(self)
if (indices.max() >= n) or (indices.min() < -n):
if n == 0:
raise IndexError("cannot do a non-empty take from an "
"empty axes.")
else:
raise IndexError("out of bounds value in 'indices'.")
if to_shift.any():
indices[to_shift] += n
if self.sp_index.npoints == 0:
# edge case in take...
# I think just return
out = np.full(indices.shape, self.fill_value,
dtype=np.result_type(type(self.fill_value)))
arr, sp_index, fill_value = make_sparse(out,
fill_value=self.fill_value)
return type(self)(arr, sparse_index=sp_index,
fill_value=fill_value)
sp_indexer = self.sp_index.lookup_array(indices)
taken = self.sp_values.take(sp_indexer)
fillable = (sp_indexer < 0)
if fillable.any():
# TODO: may need to coerce array to fill value
result_type = np.result_type(taken, type(self.fill_value))
taken = taken.astype(result_type)
taken[fillable] = self.fill_value
return taken
def searchsorted(self, v, side="left", sorter=None):
msg = "searchsorted requires high memory usage."
warnings.warn(msg, PerformanceWarning, stacklevel=2)
if not is_scalar(v):
v = np.asarray(v)
v = np.asarray(v)
return np.asarray(self, dtype=self.dtype.subtype).searchsorted(
v, side, sorter
)
def copy(self, deep=False):
if deep:
values = self.sp_values.copy()
else:
values = self.sp_values
return self._simple_new(values, self.sp_index, self.dtype)
@classmethod
def _concat_same_type(cls, to_concat):
fill_values = [x.fill_value for x in to_concat]
fill_value = fill_values[0]
# np.nan isn't a singleton, so we may end up with multiple
# NaNs here, so we ignore tha all NA case too.
if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
warnings.warn("Concatenating sparse arrays with multiple fill "
"values: '{}'. Picking the first and "
"converting the rest.".format(fill_values),
PerformanceWarning,
stacklevel=6)
keep = to_concat[0]
to_concat2 = [keep]
for arr in to_concat[1:]:
to_concat2.append(cls(np.asarray(arr), fill_value=fill_value))
to_concat = to_concat2
values = []
length = 0
if to_concat:
sp_kind = to_concat[0].kind
else:
sp_kind = 'integer'
if sp_kind == 'integer':
indices = []
for arr in to_concat:
idx = arr.sp_index.to_int_index().indices.copy()
idx += length # TODO: wraparound
length += arr.sp_index.length
values.append(arr.sp_values)
indices.append(idx)
data = np.concatenate(values)
indices = np.concatenate(indices)
sp_index = IntIndex(length, indices)
else:
# when concatentating block indices, we don't claim that you'll
# get an identical index as concating the values and then
# creating a new index. We don't want to spend the time trying
# to merge blocks across arrays in `to_concat`, so the resulting
# BlockIndex may have more blocs.
blengths = []
blocs = []
for arr in to_concat:
idx = arr.sp_index.to_block_index()
values.append(arr.sp_values)
blocs.append(idx.blocs.copy() + length)
blengths.append(idx.blengths)
length += arr.sp_index.length
data = np.concatenate(values)
blocs = np.concatenate(blocs)
blengths = np.concatenate(blengths)
sp_index = BlockIndex(length, blocs, blengths)
return cls(data, sparse_index=sp_index, fill_value=fill_value)
def astype(self, dtype=None, copy=True):
"""
Change the dtype of a SparseArray.
The output will always be a SparseArray. To convert to a dense
ndarray with a certain dtype, use :meth:`numpy.asarray`.
Parameters
----------
dtype : np.dtype or ExtensionDtype
For SparseDtype, this changes the dtype of
``self.sp_values`` and the ``self.fill_value``.
For other dtypes, this only changes the dtype of
``self.sp_values``.
copy : bool, default True
Whether to ensure a copy is made, even if not necessary.
Returns
-------
SparseArray
Examples
--------
>>> arr = SparseArray([0, 0, 1, 2])
>>> arr
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
>>> arr.astype(np.dtype('int32'))
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
Using a NumPy dtype with a different kind (e.g. float) will coerce
just ``self.sp_values``.
>>> arr.astype(np.dtype('float64'))
... # doctest: +NORMALIZE_WHITESPACE
[0, 0, 1.0, 2.0]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
Use a SparseDtype if you wish to be change the fill value as well.
>>> arr.astype(SparseDtype("float64", fill_value=np.nan))
... # doctest: +NORMALIZE_WHITESPACE
[nan, nan, 1.0, 2.0]
Fill: nan
IntIndex
Indices: array([2, 3], dtype=int32)
"""
dtype = self.dtype.update_dtype(dtype)
subtype = dtype._subtype_with_str
sp_values = astype_nansafe(self.sp_values,
subtype,
copy=copy)
if sp_values is self.sp_values and copy:
sp_values = sp_values.copy()
return self._simple_new(sp_values,
self.sp_index,
dtype)
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Parameters
----------
mapper : dict, Series, callable
The correspondence from old values to new.
Returns
-------
SparseArray
The output array will have the same density as the input.
The output fill value will be the result of applying the
mapping to ``self.fill_value``
Examples
--------
>>> arr = pd.SparseArray([0, 1, 2])
>>> arr.apply(lambda x: x + 10)
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.apply({0: 10, 1: 11, 2: 12})
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.apply(pd.Series([10, 11, 12], index=[0, 1, 2]))
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
"""
# this is used in apply.
# We get hit since we're an "is_extension_type" but regular extension
# types are not hit. This may be worth adding to the interface.
if isinstance(mapper, ABCSeries):
mapper = mapper.to_dict()
if isinstance(mapper, compat.Mapping):
fill_value = mapper.get(self.fill_value, self.fill_value)
sp_values = [mapper.get(x, None) for x in self.sp_values]
else:
fill_value = mapper(self.fill_value)
sp_values = [mapper(x) for x in self.sp_values]
return type(self)(sp_values, sparse_index=self.sp_index,
fill_value=fill_value)
def to_dense(self):
"""
Convert SparseArray to a NumPy array.
Returns
-------
arr : NumPy array
"""
return np.asarray(self, dtype=self.sp_values.dtype)
# TODO: Look into deprecating this in favor of `to_dense`.
get_values = to_dense
# ------------------------------------------------------------------------
# IO
# ------------------------------------------------------------------------
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, tuple):
# Compat for pandas < 0.24.0
nd_state, (fill_value, sp_index) = state
sparse_values = np.array([])
sparse_values.__setstate__(nd_state)
self._sparse_values = sparse_values
self._sparse_index = sp_index
self._dtype = SparseDtype(sparse_values.dtype, fill_value)
else:
self.__dict__.update(state)
def nonzero(self):
if self.fill_value == 0:
return self.sp_index.to_int_index().indices,
else:
return self.sp_index.to_int_index().indices[self.sp_values != 0],
# ------------------------------------------------------------------------
# Reductions
# ------------------------------------------------------------------------
def _reduce(self, name, skipna=True, **kwargs):
method = getattr(self, name, None)
if method is None:
raise TypeError("cannot perform {name} with type {dtype}".format(
name=name, dtype=self.dtype))
if skipna:
arr = self
else:
arr = self.dropna()
# we don't support these kwargs.
# They should only be present when called via pandas, so do it here.
# instead of in `any` / `all` (which will raise if they're present,
# thanks to nv.validate
kwargs.pop('filter_type', None)
kwargs.pop('numeric_only', None)
kwargs.pop('op', None)
return getattr(arr, name)(**kwargs)
def all(self, axis=None, *args, **kwargs):
"""
Tests whether all elements evaluate True
Returns
-------
all : bool
See Also
--------
numpy.all
"""
nv.validate_all(args, kwargs)
values = self.sp_values
if len(values) != len(self) and not np.all(self.fill_value):
return False
return values.all()
def any(self, axis=0, *args, **kwargs):
"""
Tests whether at least one of elements evaluate True
Returns
-------
any : bool
See Also
--------
numpy.any
"""
nv.validate_any(args, kwargs)
values = self.sp_values
if len(values) != len(self) and np.any(self.fill_value):
return True
return values.any().item()
def sum(self, axis=0, *args, **kwargs):
"""
Sum of non-NA/null values
Returns
-------
sum : float
"""
nv.validate_sum(args, kwargs)
valid_vals = self._valid_sp_values
sp_sum = valid_vals.sum()
if self._null_fill_value:
return sp_sum
else:
nsparse = self.sp_index.ngaps
return sp_sum + self.fill_value * nsparse
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum of non-NA/null values.
When performing the cumulative summation, any non-NA/null values will
be skipped. The resulting SparseArray will preserve the locations of
NaN values, but the fill value will be `np.nan` regardless.
Parameters
----------
axis : int or None
Axis over which to perform the cumulative summation. If None,
perform cumulative summation over flattened array.
Returns
-------
cumsum : SparseArray
"""
nv.validate_cumsum(args, kwargs)
if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour.
raise ValueError("axis(={axis}) out of bounds".format(axis=axis))
if not self._null_fill_value:
return SparseArray(self.to_dense()).cumsum()
return SparseArray(self.sp_values.cumsum(), sparse_index=self.sp_index,
fill_value=self.fill_value)
def mean(self, axis=0, *args, **kwargs):
"""
Mean of non-NA/null values
Returns
-------
mean : float
"""
nv.validate_mean(args, kwargs)
valid_vals = self._valid_sp_values
sp_sum = valid_vals.sum()
ct = len(valid_vals)
if self._null_fill_value:
return sp_sum / ct
else:
nsparse = self.sp_index.ngaps
return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)
def transpose(self, *axes):
"""
Returns the SparseArray.
"""
return self
@property
def T(self):
"""
Returns the SparseArray.
"""
return self
# ------------------------------------------------------------------------
# Ufuncs
# ------------------------------------------------------------------------
def __array_wrap__(self, array, context=None):
from pandas.core.dtypes.generic import ABCSparseSeries
ufunc, inputs, _ = context
inputs = tuple(x.values if isinstance(x, ABCSparseSeries) else x
for x in inputs)
return self.__array_ufunc__(ufunc, '__call__', *inputs)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.get('out', ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)):
return NotImplemented
special = {'add', 'sub', 'mul', 'pow', 'mod', 'floordiv', 'truediv',
'divmod', 'eq', 'ne', 'lt', 'gt', 'le', 'ge', 'remainder'}
if compat.PY2:
special.add('div')
aliases = {
'subtract': 'sub',
'multiply': 'mul',
'floor_divide': 'floordiv',
'true_divide': 'truediv',
'power': 'pow',
'remainder': 'mod',
'divide': 'div',
'equal': 'eq',
'not_equal': 'ne',
'less': 'lt',
'less_equal': 'le',
'greater': 'gt',
'greater_equal': 'ge',
}
flipped = {
'lt': '__gt__',
'le': '__ge__',
'gt': '__lt__',
'ge': '__le__',
'eq': '__eq__',
'ne': '__ne__',
}
op_name = ufunc.__name__
op_name = aliases.get(op_name, op_name)
if op_name in special and kwargs.get('out') is None:
if isinstance(inputs[0], type(self)):
return getattr(self, '__{}__'.format(op_name))(inputs[1])
else:
name = flipped.get(op_name, '__r{}__'.format(op_name))
return getattr(self, name)(inputs[0])
if len(inputs) == 1:
# No alignment necessary.
sp_values = getattr(ufunc, method)(self.sp_values, **kwargs)
fill_value = getattr(ufunc, method)(self.fill_value, **kwargs)
return self._simple_new(sp_values,
self.sp_index,
SparseDtype(sp_values.dtype, fill_value))
result = getattr(ufunc, method)(*[np.asarray(x) for x in inputs],
**kwargs)
if out:
if len(out) == 1:
out = out[0]
return out
if type(result) is tuple:
return tuple(type(self)(x) for x in result)
elif method == 'at':
# no return value
return None
else:
return type(self)(result)
def __abs__(self):
return np.abs(self)
# ------------------------------------------------------------------------
# Ops
# ------------------------------------------------------------------------
@classmethod
def _create_unary_method(cls, op):
def sparse_unary_method(self):
fill_value = op(np.array(self.fill_value)).item()
values = op(self.sp_values)
dtype = SparseDtype(values.dtype, fill_value)
return cls._simple_new(values, self.sp_index, dtype)
name = '__{name}__'.format(name=op.__name__)
return compat.set_function_name(sparse_unary_method, name, cls)
@classmethod
def _create_arithmetic_method(cls, op):
def sparse_arithmetic_method(self, other):
op_name = op.__name__
if isinstance(other, (ABCSeries, ABCIndexClass)):
# Rely on pandas to dispatch to us.
return NotImplemented
if isinstance(other, SparseArray):
return _sparse_array_op(self, other, op, op_name)
elif is_scalar(other):
with np.errstate(all='ignore'):
fill = op(_get_fill(self), np.asarray(other))
result = op(self.sp_values, other)
if op_name == 'divmod':
left, right = result
lfill, rfill = fill
return (_wrap_result(op_name, left, self.sp_index, lfill),
_wrap_result(op_name, right, self.sp_index, rfill))
return _wrap_result(op_name, result, self.sp_index, fill)
else:
other = np.asarray(other)
with np.errstate(all='ignore'):
# TODO: delete sparse stuff in core/ops.py
# TODO: look into _wrap_result
if len(self) != len(other):
raise AssertionError(
("length mismatch: {self} vs. {other}".format(
self=len(self), other=len(other))))
if not isinstance(other, SparseArray):
dtype = getattr(other, 'dtype', None)
other = SparseArray(other, fill_value=self.fill_value,
dtype=dtype)
return _sparse_array_op(self, other, op, op_name)
name = '__{name}__'.format(name=op.__name__)
return compat.set_function_name(sparse_arithmetic_method, name, cls)
@classmethod
def _create_comparison_method(cls, op):
def cmp_method(self, other):
op_name = op.__name__
if op_name in {'and_', 'or_'}:
op_name = op_name[:-1]
if isinstance(other, (ABCSeries, ABCIndexClass)):
# Rely on pandas to unbox and dispatch to us.
return NotImplemented
if not is_scalar(other) and not isinstance(other, type(self)):
# convert list-like to ndarray
other = np.asarray(other)
if isinstance(other, np.ndarray):
# TODO: make this more flexible than just ndarray...
if len(self) != len(other):
raise AssertionError("length mismatch: {self} vs. {other}"
.format(self=len(self),
other=len(other)))
other = SparseArray(other, fill_value=self.fill_value)
if isinstance(other, SparseArray):
return _sparse_array_op(self, other, op, op_name)
else:
with np.errstate(all='ignore'):
fill_value = op(self.fill_value, other)
result = op(self.sp_values, other)
return type(self)(result,
sparse_index=self.sp_index,
fill_value=fill_value,
dtype=np.bool_)
name = '__{name}__'.format(name=op.__name__)
return compat.set_function_name(cmp_method, name, cls)
@classmethod
def _add_unary_ops(cls):
cls.__pos__ = cls._create_unary_method(operator.pos)
cls.__neg__ = cls._create_unary_method(operator.neg)
cls.__invert__ = cls._create_unary_method(operator.invert)
@classmethod
def _add_comparison_ops(cls):
cls.__and__ = cls._create_comparison_method(operator.and_)
cls.__or__ = cls._create_comparison_method(operator.or_)
super(SparseArray, cls)._add_comparison_ops()
# ----------
# Formatting
# -----------
def __unicode__(self):
return '{self}\nFill: {fill}\n{index}'.format(
self=printing.pprint_thing(self),
fill=printing.pprint_thing(self.fill_value),
index=printing.pprint_thing(self.sp_index))
def _formatter(self, boxed=False):
# Defer to the formatter from the GenericArrayFormatter calling us.
# This will infer the correct formatter from the dtype of the values.
return None
SparseArray._add_arithmetic_ops()
SparseArray._add_comparison_ops()
SparseArray._add_unary_ops()
def _maybe_to_dense(obj):
"""
try to convert to dense
"""
if hasattr(obj, 'to_dense'):
return obj.to_dense()
return obj
def _maybe_to_sparse(array):
"""
array must be SparseSeries or SparseArray
"""
if isinstance(array, ABCSparseSeries):
array = array.values.copy()
return array
def _sanitize_values(arr):
"""
return an ndarray for our input,
in a platform independent manner
"""
if hasattr(arr, 'values'):
arr = arr.values
else:
# scalar
if is_scalar(arr):
arr = [arr]
# ndarray
if isinstance(arr, np.ndarray):
pass
elif is_list_like(arr) and len(arr) > 0:
arr = | maybe_convert_platform(arr) | pandas.core.dtypes.cast.maybe_convert_platform |
"""This module contains PlainFrame and PlainColumn tests.
"""
import collections
import datetime
import pytest
import numpy as np
import pandas as pd
from numpy.testing import assert_equal as np_assert_equal
from pywrangler.util.testing.plainframe import (
NULL,
ConverterFromPandas,
NaN,
PlainColumn,
PlainFrame
)
@pytest.fixture
def plainframe_standard():
cols = ["int", "float", "bool", "str", "datetime"]
data = [[1, 1.1, True, "string", "2019-01-01 10:00:00"],
[2, 2, False, "string2", "2019-02-01 10:00:00"]]
return PlainFrame.from_plain(data=data, dtypes=cols, columns=cols)
@pytest.fixture
def plainframe_missings():
cols = ["int", "float", "bool", "str", "datetime"]
data = [[1, 1.1, True, "string", "2019-01-01 10:00:00"],
[2, NaN, False, "string2", "2019-02-01 10:00:00"],
[NULL, NULL, NULL, NULL, NULL]]
return PlainFrame.from_plain(data=data, dtypes=cols, columns=cols)
@pytest.fixture
def df_from_pandas():
df = pd.DataFrame(
{"int": [1, 2],
"int_na": [1, np.NaN],
"bool": [True, False],
"bool_na": [True, np.NaN],
"float": [1.2, 1.3],
"float_na": [1.2, np.NaN],
"str": ["foo", "bar"],
"str_na": ["foo", np.NaN],
"datetime": [pd.Timestamp("2019-01-01"), pd.Timestamp("2019-01-02")],
"datetime_na": [ | pd.Timestamp("2019-01-01") | pandas.Timestamp |
import numpy as np
import pandas as pd
from pandas import read_csv
from matplotlib import pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, kpss
### load data in
train = | pd.read_csv('5P14clean.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
import requests
import json
'''
Author: <NAME>
Purpose: This program will geocode address in downtown detroit to specific x,y coordinates
Input: an excel table with address column(s)
ADDITIONAL FIELDS:
1. x,y: longitude and latitude OF the GEOCODing result(came from Google API)
2. flag: indicate whether the geocoder match exact address of the input
System Requirements:
1. Need pandas libraries
'''
# MAIN PARAMETERS
with open('config.json') as json_data_file:
data = json.load(json_data_file)
input_table = str(data['input_table'])
output_table = str(data['output_table'])
reference_path = str(data['reference_path'])
googleApiKey = str(data['googleApiKey'])
county = str(data['county'])
state = str(data['state'])
viewbox = str(data['viewbox'])
bound = str(data['bound'])
ref_data = pd.read_excel(reference_path) # load reference data
def OSM_geocode(address):
url = 'https://nominatim.openstreetmap.org/search'
global county, state, viewbox
params = {'q': address,
'county': county,
'state': state,
'viewbox': viewbox,
'bounded': 1,
'format': 'json',
'addressdetails': 0,
'countrycodes': 'US'}
try:
R = requests.get(url, params=params)
R.raise_for_status()
response = R.json()
display_name = response[0]['display_name']
except:
display_name = google_geocode(address)
return display_name
def google_geocode(intersect):
global bound, county, state, googleApiKey
GoogleApiKey = googleApiKey
params = {'address': '{},{}'.format(intersect, state),
'bounds': bound,
'key': GoogleApiKey}
url = 'https://maps.googleapis.com/maps/api/geocode/json'
R = requests.get(url, params=params)
R.raise_for_status()
response = R.json()
try:
display_name = response['results'][0]['formatted_address']
x = response['results'][0]['geometry']['location']['lng']
y = response['results'][0]['geometry']['location']['lat']
except:
display_name = False
x = False
y = False
return display_name, x, y
def match_ref(string, df):
ref_data = df
prefix = ['East', 'South', 'West', 'North']
first_word = string.strip().split(',')[0]
second_word = string.strip().split(',')[1]
if list(first_word)[0].isdigit() and list(first_word)[-1].isdigit():
parsed_range_r = first_word
parsed_name_r = ' '.join(second_word.strip().split(' ')[:-1])
reg_name = '^({}).*$'.format(parsed_name_r)
if first_word.strip().split(' ')[0] in prefix:
parsed_dir_r = first_word.strip().split(' ')[0]
else:
parsed_dir_r = False
else:
parsed_range_r = False
parsed_name_r = ' '.join(first_word.strip().split(' ')[:-1])
reg_name = '^.*\s({}).*$'.format(parsed_name_r)
if second_word.strip().split(' ')[0] in prefix:
parsed_dir_r = second_word.strip().split(' ')[0]
else:
parsed_dir_r = False
reg_name = '^.*\s({}).*$'.format(parsed_name_r)
if parsed_range_r:
matched_record = ref_data[(ref_data['ParsedRange'] == parsed_range_r)]
matched_record = matched_record[matched_record['Address'].str.contains(
reg_name)]
else:
matched_record = ref_data[ref_data['Address'].str.contains(reg_name)]
if parsed_dir_r:
matched_record = matched_record[(
ref_data['ParsedPreDir'] == parsed_dir_r)]
else:
pass
return matched_record
def google_match_ref(string, x, y, df):
ref_data = df
flag = None
first_word = string.strip().split(',')[0]
first_word_1st_word = first_word.strip().split(' ')[0]
second_word = string.strip().split(',')[1]
if list(first_word_1st_word)[0].isdigit() and list(first_word_1st_word)[-1].isdigit():
parsed_address = ' '.join(first_word.strip().split(' ')[:-1])
reg_name = '^({}).*$'.format(parsed_address)
else:
if list(second_word.strip().split(' ')[0])[0].isdigit() and list(second_word.strip().split(' ')[0])[
-1].isdigit():
parsed_address = ' '.join(second_word.strip().split(' ')[:-1])
reg_name = '^({}).*$'.format(parsed_address)
else:
flag = 'Do not match exact address.'
parsed_address = ' '.join(second_word.strip().split(' ')[:-1])
reg_name = '^.*({}).*$'.format(parsed_address)
matched_record = ref_data[ref_data['Address'].str.contains(reg_name)]
matched_record['flag'] = flag
matched_record['x'] = x
matched_record['y'] = y
return matched_record
def geocode(address_input):
global ref_data
output_df = ref_data[ref_data['Address'] == False]
for i, address in enumerate(address_input):
print('Geocoding <{}>...'.format(address))
google_output, x, y = google_geocode(address)
if google_output:
selected_record = google_match_ref(google_output, x, y, ref_data)
if selected_record.shape[0] > 0:
selected_record = selected_record.iloc[0]
output_df = output_df.append(selected_record)
print(' Complete.')
else:
print(' No matching record found in the reference database.', )
empty_output = ref_data.iloc[0].copy()
empty_output['flag'] = 'No matching record found in the reference database.'
empty_output['x'] = x
empty_output['y'] = y
output_df = output_df.append(empty_output)
else:
print(' Google GeoCoding Error: Address can\'t be found.', )
empty_output = ref_data.iloc[0].copy()
empty_output['flag'] = 'Google Address can\'t be found'
empty_output['x'] = np.nan
empty_output['y'] = np.nan
output_df = output_df.append(empty_output)
return output_df.reset_index()
def main():
# read input excel table
global input_table, output_table
input = pd.read_excel(input_table)
input_list = input.values.reshape((1, -1))[0]
output = geocode(input_list)
output['input_address'] = | pd.Series(input_list) | pandas.Series |
import datetime
from numbers import Number
from typing import Any
import numpy as np
import pandas as pd
def convert_indices(df: pd.DataFrame):
"""
extract all indices to columns if all are not numerical
and don't clash with existing column names
"""
if df.index.nlevels > 1:
# always reset multi-index
df.reset_index(inplace=True)
elif df.index.dtype == np.dtype("int64"):
# Int64Index -> RangeIndex if possible
df.reset_index(inplace=True, drop=True)
# col_names: List[str] = df.columns.values.tolist()
# if (
# all([df.index.get_level_values(x).dtype != np.dtype("int64") for x in range(df.index.nlevels)])
# and len(set(df.index.names)) == len(df.index.names)
# and not any([x in col_names for x in df.index.names])
# ):
# df.reset_index(inplace=True)
def downcast_numbers(data: pd.DataFrame):
"""Downcast numerics"""
def downcast(ser: pd.Series) -> pd.Series:
try:
ser = pd.to_numeric(ser, downcast="signed")
ser = pd.to_numeric(ser, downcast="unsigned")
except Exception:
pass # catch failure on Int64Dtype
return ser
# A result of downcast(timedelta64[ns]) is int <ns> and hard to understand.
# e.g.) 0 days 00:54:38.777572 -> 3278777572000 [ns]
df_num = data.select_dtypes("number", exclude=["timedelta"]) # , pd.Int64Dtype])
data[df_num.columns] = df_num.apply(downcast)
def timedelta_to_str(df: pd.DataFrame):
"""
convert timedelta to str - NOTE - only until arrow.js supports Duration type
"""
df_td = df.select_dtypes("timedelta")
# df[df_timedelta.columns] = df_timedelta.astype("string")
df[df_td.columns] = np.where(pd.isnull(df_td), pd.NA, df_td.astype("string"))
def parse_categories(data: pd.DataFrame):
"""Detect and converts categories"""
def criteria(ser: pd.Series) -> bool:
"""Decides whether to convert into categorical"""
nunique: int = ser.nunique()
if nunique <= 20 and (nunique != ser.size):
# few unique values => make it a category regardless of the proportion
return True
prop_unique = (nunique + 1) / (ser.size + 1) # + 1 for nan
if prop_unique <= 0.05:
# a lot of redundant information => categories are more compact
return True
return False
def try_to_category(ser: pd.Series) -> pd.Series:
return ser.astype("category") if criteria(ser) else ser
potential_cats = data.select_dtypes(["string", "object"])
data[potential_cats.columns] = potential_cats.apply(try_to_category)
def obj_to_str(df: pd.DataFrame):
"""Converts remaining objects columns to str"""
# convert objects to str / NA
df_str = df.select_dtypes("object")
# df[df_str.columns] = np.where(pd.isnull(df_str), pd.NA, df_str.astype("string"))
df[df_str.columns] = df_str.astype("string")
# convert categorical values (as strings if object)
def to_str_cat_vals(x: pd.Series) -> pd.Series:
if x.cat.categories.dtype == np.dtype("object"):
# x.cat.categories = x.cat.categories.astype(str)
# x.cat.categories = np.where(pd.isnull(x.cat.categories), pd.NA, x.cat.categories.astype("string"))
x.cat.categories = x.cat.categories.astype("string")
return x
df_cat = df.select_dtypes("category")
df[df_cat.columns] = df_cat.apply(to_str_cat_vals)
def process_df(df: pd.DataFrame, copy: bool = False) -> pd.DataFrame:
"""
Processing steps needed before writing / after reading
We only modify the dataframe to optimise size,
rather than convert/infer types, e.g. no longer parsing dates from strings
NOTE - this mutates the dataframe by default
"""
if copy:
df = df.copy(deep=True)
convert_indices(df)
# convert timedelta
timedelta_to_str(df)
downcast_numbers(df)
# save timedeltas cols (unneeded whilst timedelta_to_str used)
# td_col = df.select_dtypes("timedelta")
df = df.convert_dtypes()
# df[td_col.columns] = td_col
obj_to_str(df)
parse_categories(df)
return df
def to_df(value: Any) -> pd.DataFrame:
"""
Converts a python object, i.e. a script's output, to a dataframe
NOTE - this returns a new DF each time
"""
if value is None:
# This return the empty dataframe, which atm is the same as
# the empty file object in the CAS.
# However this is not ensured as pyarrow changes
return pd.DataFrame()
if isinstance(value, pd.DataFrame):
return value.copy(deep=True)
if isinstance(value, (pd.Series, pd.Index)):
if value.name is not None:
return pd.DataFrame(value)
return | pd.DataFrame({"Result": value}) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
import solvers.statuses as statuses
MAX_TIMING = 1e8
def get_cumulative_data(solvers, problems):
for solver in solvers:
# Path where solver results are stored
path = os.path.join('.', 'results', 'benchmark_problems', solver)
# Initialize cumulative results
results = []
for problem in problems:
file_name = os.path.join(path, problem, 'full.csv')
results.append(pd.read_csv(file_name))
# Create cumulative dataframe
df = pd.concat(results)
# Store dataframe into results
solver_file_name = os.path.join(path, 'results.csv')
df.to_csv(solver_file_name, index=False)
def compute_performance_profiles(solvers, problems_type):
t = {}
status = {}
# Get time and status
for solver in solvers:
path = os.path.join('.', 'results', problems_type,
solver, 'results.csv')
df = | pd.read_csv(path) | pandas.read_csv |
import os
from calendar import month_name
from collections import OrderedDict
from operator import itemgetter
from pathlib import Path
from tkinter import N
import folium
import pandas as pd
import requests
from bs4 import BeautifulSoup, SoupStrainer
from loguru import logger
race_data_path = Path("data/race_data.csv")
def get_race_urls(index_url, base_url="https://races.fellrunner.org.uk"):
logger.debug("Getting race urls")
r = requests.get(index_url)
soup = BeautifulSoup(r.content, features="lxml")
race_urls = []
for tr in soup.find_all("tr")[2:]:
tds = tr.find_all("td")
race_url = base_url + tds[1].a["href"]
race_urls.append(race_url)
return race_urls
def scrape_race(race_url):
r = requests.get(race_url)
soup = BeautifulSoup(r.content, features="lxml")
lines = soup.find_all("li")
race_data = [list(line.stripped_strings) for line in lines]
race_dict = dict()
for line in race_data:
if line[0].endswith(":"):
field_name = line[0][:-1]
field_name = field_name.lower()
field_name = field_name.replace(" ", "_")
race_dict[field_name] = line[1]
race_dict["race_url"] = race_url
race_dict["title"] = soup.h1.string
return race_dict
def get_postcodes(race_data):
logger.debug("Geting postcode locations")
postcode_regex = r"([Gg][Ii][Rr] 0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y][0-9]{1,2})|(([A-Za-z][0-9][A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9][A-Za-z]?))))\s?[0-9][A-Za-z]{2})"
race_data["postcode"] = race_data.venue.str.extract(postcode_regex)[1]
postcodes = race_data.postcode[~race_data.postcode.isnull()].to_list()
batchsize = 99
postcode_locations = []
for i in range(0, len(postcodes), batchsize):
pc_batch = postcodes[i : i + batchsize]
postcode_locations += requests.post(
url="https://postcodes.io/postcodes", json={"postcodes": pc_batch}
).json()["result"]
fields = ["postcode", "latitude", "longitude"]
pcd = [
itemgetter(*fields)(location["result"])
for location in postcode_locations
if location["result"] is not None
]
pcd = | pd.DataFrame(pcd, columns=fields) | pandas.DataFrame |
import numpy as np
import pandas as pd
from numpy import nan
from pvlib import modelchain, pvsystem
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pytest
from test_pvsystem import sam_data
from conftest import requires_scipy
@pytest.fixture
def system(sam_data):
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_'].copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_snl_ac_system(sam_data):
modules = sam_data['cecmod']
module_parameters = modules['Canadian_Solar_CS5P_220M'].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_adr_ac_system(sam_data):
modules = sam_data['cecmod']
module_parameters = modules['Canadian_Solar_CS5P_220M'].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['adrinverter']
inverter = inverters['Zigor__Sunzet_3_TL_US_240V__CEC_2011_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_snl_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_pvwatts_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverter_parameters = {'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture()
def location():
return Location(32.2, -111, altitude=700)
def test_ModelChain_creation(system, location):
mc = ModelChain(system, location)
@pytest.mark.parametrize('strategy, expected', [
(None, (32.2, 180)), ('None', (32.2, 180)), ('flat', (0, 180)),
('south_at_latitude_tilt', (32.2, 180))
])
def test_orientation_strategy(strategy, expected, system, location):
mc = ModelChain(system, location, orientation_strategy=strategy)
# the || accounts for the coercion of 'None' to None
assert (mc.orientation_strategy == strategy or
mc.orientation_strategy is None)
assert system.surface_tilt == expected[0]
assert system.surface_azimuth == expected[1]
@requires_scipy
def test_run_model(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 183.522449305, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
def test_run_model_with_irradiance(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 1.90054749e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_perez(system, location):
mc = ModelChain(system, location, transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194545796, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_gueymard_perez(system, location):
mc = ModelChain(system, location, airmass_model='gueymard1993',
transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194760203, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
@requires_scipy
def test_run_model_with_weather(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
weather = pd.DataFrame({'wind_speed':5, 'temp_air':10}, index=times)
ac = mc.run_model(times, weather=weather).ac
expected = pd.Series(np.array([ 201.691634921, -2.00000000e-02]),
index=times)
| assert_series_equal(ac, expected, check_less_precise=2) | pandas.util.testing.assert_series_equal |
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import signal
import floris.tools as wfct
from floris.tools.optimization.scipy.yaw import YawOptimization
from floris.utils.visualization import property as ppt
from floris.utils.visualization import yaw_opt as yopt
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# YAW_EVALUATION #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
class YawSimulator(object):
def __init__(self, farm, wind, params=None, wakes=None, filted=True,
results=None):
self.fi = wfct.floris_interface.FlorisInterface(farm)
self.num_t = len(self.fi.floris.farm.turbine_map.turbines)
# yawing parameters setting
self.origin_f = 10
self.origin_t = 1 / self.origin_f
self.origin_factor = 10
self.f = 1 / 3 # sampling frequence (Hz) in time history curve
self.delt = 1 / self.f # sampling interval (s)
self.t_c = 3 # control interval time
self.n_c = np.ceil(self.t_c / self.delt) # control interval which temporalily set as the integer multiple
self.v_yaw = 0.5 # yawing speed of turbine
self.conv = np.arange(-5, 5) # convolution operation range [-i, ..., i]
self.theta_tol = 0.5 # tolerance for yawing control decision
self.theta_max = 30. # maximum yaw offset allowed in process
# optimization parametes setting
self.max_offset = 4.
self.max_time = 12.
# parsing the custom parameters if provided
if params is not None:
self.parse_params(params)
# wind data processing
self.filter = filted
self.origin_point = 18000
self.wd_param, self.ws_param = wind['wd'], wind['ws']
# wind direction data loading
if self.wd_param[0] == 'origin':
self.origin_wd = self.wind_load(self.wind_dataset[self.wd_param[0]],
'wd', num=self.origin_point)
self.wd = self.wind_processing(self.origin_wd, self.wd_param[1])
else:
self.wd = wind_generator(self.wd_param[0], self.wd_param[1],
num=1800, interval=self.delt)
# wind speed data loading
if self.ws_param[0] == 'origin':
self.origin_ws = self.wind_load(self.wind_dataset[self.ws_param[0]],
'ws', num=self.origin_point)
self.ws = self.wind_processing(self.origin_ws, self.ws_param[1])
else:
self.ws = wind_generator(self.ws_param[0], self.ws_param[1],
num=1800, interval=self.delt)
# yaw optimization configuration
self.options = {'maxiter': 20, 'disp': False, 'iprint': 2, 'ftol': 1e-5, 'eps': 0.01}
self.opt_fi = copy.deepcopy(self.fi)
self.optimizer = YawOptimization(self.opt_fi,
minimum_yaw_angle=0.0,
maximum_yaw_angle=20.0,
opt_method="SLSQP",
opt_options=self.options,
include_unc=False)
# simulation results packing or load the results file
if results:
self.results = self.data_load(results)
self.ws, self.wd = self.results['ws'], self.results['wd']
else:
self.results = self.data_package()
self.results['ws'], self.results['wd'] = self.ws, self.wd
self.results.fillna(0, inplace=True)
# print(self.results.columns)
def parse_params(self, params):
pass
@property
def wind_dataset(self, ):
return {'origin': '../inputs/winds/201301010930.xlsx',}
def wind_load(self, wind_file, type='wd', num=18000):
num_point = 72000
userows = int(num) if num else num_point
data = pd.read_excel(wind_file, sheet_name=0, usecols=[1, 3],
nrows=userows, names=['ws', 'wd'], header=None)
return np.round(data[type].values, 2)
def wind_processing(self, data, params):
return self.wind_scale(self.wind_filter(self.wind_downsampling(data)), params)
def wind_downsampling(self, data, average='fixed'):
if average == 'fixed':
return time_average(data, self.origin_factor)
elif average == 'sliding':
return sliding_average(data, 30)
else:
raise ValueError("Invalid average method!")
def wind_filter(self, data, cut_f=0.002):
if self.filter:
W_n = 2 * cut_f / self.f
b, a = signal.butter(8, W_n, 'lowpass')
# w, h = signal.freqs(b, a)
# print(b, a)
# print(w, h)
return signal.filtfilt(b, a, data)
else:
return data
def wind_scale(self, data, params=(270., 5.)):
return data_centered(data, center=params[0], scale=params[1])
def convolution(self, ind, weighted=None):
scope = self.conv if ind != 0 else 0.
weights = weighted if weighted else np.ones(self.conv.shape)
assert weights.shape == self.conv.shape
return np.mean(self.wd[ind - scope] * weights)
def time_history(self, origin=False, save=None):
# plot the origin wind speed and direction data
if origin:
yopt.time_history_plot(self.origin_wd, self.origin_ws, 0.1)
return yopt.time_history_plot(self.wd, self.ws, self.delt, save=save)
def power_calculation(self, yaw_offset, no_wake=False):
power_fi = copy.deepcopy(self.fi)
powers = np.zeros(len(self.wd))
turbine_powers = np.zeros((self.num_t, len(self.wd)))
for i, wd in enumerate(self.wd):
yaw = list(-1 * yaw_offset[i]) if yaw_offset.ndim == 2 else -1 * float(yaw_offset[i])
power_fi.reinitialize_flow_field(wind_direction=[wd], wind_speed=[self.ws[i]])
power_fi.calculate_wake(yaw_angles=yaw, no_wake=no_wake)
powers[i] = list2array(power_fi.get_farm_power(), 1e-6, 4)
turbine_powers[:, i] = list2array(power_fi.get_turbine_power(), 1e-6, 4)
return powers, turbine_powers
def power_output(self, wd, ws, yaw_offset):
# power_fi = copy.deepcopy(self.fi)
yaw = - yaw_offset if isinstance(yaw_offset, float) else list(-1 * yaw_offset)
self.fi.reinitialize_flow_field(wind_direction=wd, wind_speed=ws)
self.fi.calculate_wake(yaw_angles=yaw)
power = list2array(self.fi.get_farm_power(), 1e-6, 4)
turbine_power = list2array(self.fi.get_turbine_power(), 1e-6, 4)
return power, turbine_power
def yaw_optimizer(self, ws, wd, yaw_limits=False):
# find the optimial yaw offset of turbines at specific wind direction
self.opt_fi.reinitialize_flow_field(wind_direction=[wd], wind_speed=[ws])
if yaw_limits:
self.optimizer.reinitialize_opt(maximum_yaw_angle=self.max_yaw_angle(ws))
yaw_opt = self.optimizer.optimize(verbose=False)
yaw_opt = -1 * np.round(np.where(np.abs(np.array(yaw_opt)) > self.theta_tol,
yaw_opt, 0.), 2)
return yaw_opt
def max_yaw_angle(self, ws):
pass
def baseline_simulator(self, ):
# simulate the yaw control process of wind turbines without positive control
# baseline_yaw_simulation(self)
acc_time, acc_count = 0., 0.
yaw_flag, yaw_speed, = False, 0.,
obj, turbine, offset = np.zeros(len(self.wd)), np.zeros(len(self.wd)), np.zeros(len(self.wd))
for i, wd in enumerate(self.wd):
if i == 0:
obj[0], turbine[0], offset[0] = wd, wd, 0.
continue
# accumulated yaw offset
acc_count += 1 if np.abs(wd - turbine[i - 1]) >= self.max_offset else 0.
acc_time = acc_count * self.delt
# determine the turine yawing target
if (acc_time >= self.max_time):
obj[i] = self.convolution(i)
acc_count, acc_time, yaw_flag = 0., 0., True
else:
obj[i] = obj[i - 1]
# yaw the turbine to target direction
turbine[i] = turbine[i - 1] + yaw_speed * self.delt
if (turbine[i] - obj[i]) * (turbine[i - 1] - obj[i]) <= 0:
turbine[i], yaw_flag = obj[i], False
# judge yawing or not and yawing direction in the next step
yaw_angle = obj[i] - turbine[i]
yaw_speed = np.sign(yaw_angle) * self.v_yaw if yaw_flag else 0.
offset[i] = turbine[i] - wd
power, turbine_power = self.power_calculation(offset)
cols = self.results.columns[2:self.num_t + 6]
data = [obj, turbine, offset, power] + [turbine_power[i] for i in range(self.num_t)]
for col, d in zip(cols, data):
self.results[col] = d
return obj, turbine, offset, power, turbine_power
def control_simulator(self, ):
# simulate the yaw control process of wind turbines with positive control
# control_yaw_simulation(self)
acc_time, acc_count = 0., 0.
obj = np.zeros((self.num_t, len(self.wd)))
turbine = np.zeros((self.num_t, len(self.wd)))
offset = np.zeros((self.num_t, len(self.wd)))
yaw_flag, yaw_speed, = np.full(self.num_t, False), np.zeros(self.num_t)
beta_opt, theta_opt = np.zeros((self.num_t, len(self.wd))), np.zeros(len(self.wd))
for i, wd in enumerate(self.wd):
if i == 0:
turbine[:, i], theta_opt[i] = wd, wd
beta_opt[:, i] = self.yaw_optimizer(self.ws[i], wd)
obj[:, i] = theta_opt[i] + beta_opt[:, i]
yaw_flag[:] = True
else:
# accumulated yaw offset
acc_count += 1 if np.abs(wd - theta_opt[i - 1]) >= self.max_offset else 0.
acc_time = acc_count * self.delt
# determine the turine yawing target
if (acc_time >= self.max_time):
theta_opt[i] = self.convolution(i)
beta_opt[:, i] = self.yaw_optimizer(self.ws[i], theta_opt[i])
obj[:, i] = theta_opt[i] + beta_opt[:, i]
acc_count, acc_time, yaw_flag = 0., 0., True
else:
theta_opt[i], obj[:, i] = theta_opt[i - 1], obj[:, i - 1]
# yaw the turbine to target direction
turbine[:, i] = turbine[:, i - 1] + yaw_speed * self.delt
turbine[:, i] = np.where((turbine[:, i] - obj[:, i]) * \
(turbine[:, i - 1] - obj[:, i]) <= 0, obj[:, i], turbine[:, i])
yaw_flag = np.where((turbine[:, i] - obj[:, i]) * \
(turbine[:, i - 1] - obj[:, i]) <= 0, False, yaw_flag)
# judge yawing or not and yawing direction in the next step
yaw_angle = obj[:, i] - turbine[:, i]
yaw_speed = np.where(yaw_flag == True, np.sign(yaw_angle) * self.v_yaw, 0)
offset[:, i] = turbine[:, i] - wd
power, turbine_power = self.power_calculation(offset.T)
cols = self.results.columns[self.num_t + 6 :]
data = [obj, turbine, offset, turbine_power]
for i in range(self.num_t):
for col, d in zip(cols[i * 4:(i + 1) * 4], data):
self.results[col] = d[i]
self.results[cols[-1]] = power
return obj, turbine, offset, power, turbine_power
def simple_yaw_simulator(self, num, ratio=30, speed=6.):
np.random.seed(12345)
def average_wind(wd, ws, ind, m=10):
if ind < m:
return wd[:ind + 1].mean(), ws[:ind + 1].mean()
else:
return wd[ind - m:ind].mean(), ws[ind - m:ind].mean()
def yaw_lookup_table(wd, ws):
yaw_data = np.ones((2, num)) * np.array([[10.], [0.25]])
yaw_data[0, :] = yaw_data[0, :] + np.random.randn(num) * 5
yaw_data[1, :] = yaw_data[1, :] + np.random.randn(num) * 0.1
return yaw_data
# wind speed and directions data
wind_data = np.random.randn(2, 300) * np.array([[10.], [2.]]) + np.array([[0.], [10]])
wd, ws = wind_data[0, :], wind_data[1, :]
target, yaw, status, actual = np.zeros((2, num, len(wd))), np.zeros((2, num, len(wd))), \
np.zeros((2, num, len(wd))), np.zeros((2, num, len(wd)))
control_point, yaw_flag, yaw_speed = True, np.full(num, 1.), np.ones(num) * speed
for i in range(len(wd)):
control_point = True if i % ratio == 0 else False
awd, aws = average_wind(wd, ws, i)
yaw[:, :, i] = yaw_lookup_table(awd, aws) if control_point else yaw[:, :, i - 1]
target[0, :, i] = wd[i] + yaw[0, :, i] if control_point else target[0, :, i - 1]
target[1, :, i] = yaw[1, :, i] if control_point else target[1, :, i - 1]
previous = status[0, :, i - 1] if i != 0 else np.ones(num) * wd[i]
current = status[0, :, i - 1] + yaw_flag * yaw_speed if i != 0 else previous
boundary = np.sign((current - target[0, :, i]) * (previous - target[0, :, i]))
status[0, :, i], status[1, :, i] = \
np.where(boundary >= 0, current, target[0, :, i]), yaw[1, :, i]
yaw_flag, yaw_speed = np.where(boundary >= 0, 1., 0.), \
np.where(boundary >= 0, np.sign(target[0, :, i] - current) * speed, 0.)
actual[0, :, i], actual[1, :, i] = status[0, :, i] - wd[i], status[1, :, i]
return target, yaw, status, actual
def simulator(self, save=None):
self.baseline_simulator()
self.control_simulator()
self.data_export()
self.power_plot(save=save)
def turbine_simulator(self, ):
pass
def data_package(self, ):
wind_cols = ['ws', 'wd']
data_cols = ['obj', 'turbine', 'yaw', 'power']
baseline_cols = ['baseline_' + col for col in data_cols]
baseline_cols += [baseline_cols[-1] + '_' + str(i) for i in range(self.num_t)]
control_cols = [tmp + '_' + str(i) for i in range(self.num_t) for tmp in data_cols]
control_cols += ['power']
return | pd.DataFrame(columns=wind_cols + baseline_cols + control_cols) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 14 17:14:01 2021
@author: lochan
"""
import wntr
import pandas as pd
import math
# Filepath
input_file = "11.WG_ADD SS-cpsd-24t-o.inp"
# Create network
wnet = wntr.network.WaterNetworkModel(input_file)
# Network description
#wnet.describe(level=0)
#wnet.describe(level=1)
# Pipe and pump name lists
pipe_list = wnet.pipe_name_list
pump_list = wnet.pump_name_list
# Store the start and end nodes of all the pumps in a list
node_list = list()
for i in pump_list:
pump = wnet.get_link(i)
node_list.append(pump.start_node_name)
node_list.append(pump.end_node_name)
# Explore pipes around the pumps that should not be altered
pipes_to_be_unchanged = list() # List to store pipes around the pumps that should stay unaltered
depth = 4 # How far from the pump should the pipes stay unaltered; 1 depth
# level means pipes immediately connected to the pump nodes
for k in range(depth):
for i in pipe_list:
pipe = wnet.get_link(i)
for j in node_list:
if pipe.start_node_name == j:
pipes_to_be_unchanged.append(i)
continue
if pipe.end_node_name == j:
pipes_to_be_unchanged.append(i)
continue
pipes_to_be_unchanged = list(set(pipes_to_be_unchanged)) # Remove any duplicates
for i in pipes_to_be_unchanged:
pipe = wnet.get_link(i)
node_list.append(pipe.start_node_name)
node_list.append(pipe.end_node_name)
node_list = list(set(node_list)) # Remove any duplicates
pipes_to_be_unchanged = list(set(pipes_to_be_unchanged)) # Remove any duplicates
# Filter out from the total pipe list the pipes that are eligible for change
pipe_filtered_list = list()
for i in pipe_list:
switch = True
for j in pipes_to_be_unchanged:
if i == j:
switch = False
break
if switch: pipe_filtered_list.append(i)
# Store the store the pipes that are eligible to be modified in a Dataframe
pipe_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from constants import *
from datetime import datetime
import lightgbm as lgb
import numpy as np
from sklearn.model_selection import KFold
import pandas as pd
import utils
import os
import ndcg_tools
import math
import gc
import sys
seed = SEED
cur_stage = CUR_STAGE
def get_scores(ans=None,shift=0.0,bottom=0.25,after_deal=True,save_version=None):
print(f'using bottom: {bottom}')
phase_item_degree = utils.load_pickle(phase_full_item_degree_path.format(cur_stage))
df_valid_stage = utils.load_pickle(all_valid_stage_data_path.format(cur_stage))
df_valid = utils.load_pickle(all_valid_stage_data_path.format(cur_stage))
phase2valid_item_degree = {}
phase2median = {}
for sta in range(cur_stage+1):
cur_df_valid = df_valid[df_valid['stage']==sta]
items = cur_df_valid['item_id'].values
item_degree = phase_item_degree[sta]
list_item_degress = []
for item_id in items:
list_item_degress.append(item_degree[item_id])
list_item_degress.sort()
median_item_degree = list_item_degress[len(list_item_degress) // 2]
phase2median[sta] = median_item_degree
for item in items:
phase2valid_item_degree[(sta,item)] = item_degree[item]
old = False
if after_deal:
ans = ans.groupby( ['user', 'item'] )['label'].max().reset_index()
if old:
user_item_label = ans[ ['user','item','label'] ].values
user2stage = df_valid_stage[ ['user_id','stage'] ]
user2stage['user'] = user2stage['user_id']
user2stage = user2stage.drop('user_id', axis=1)
ans = pd.merge( ans, user2stage, how='left', on='user' )
sta_list = []
item_list = []
degree_list = []
for sta in range(cur_stage+1):
item_degrees = phase_item_degree[sta]
for item in item_degrees.keys():
sta_list.append(sta)
item_list.append(item)
degree_list.append( item_degrees[item] )
df_degree = pd.DataFrame( {'stage':sta_list, 'item':item_list, 'degree':degree_list} )
ans = pd.merge( ans, df_degree, how='left', on=['stage','item'] )
phase_median = ans.groupby('stage')['degree'].median().reset_index()
phase_median['median_degree'] = phase_median['degree']
phase_median = phase_median.drop('degree', axis=1)
ans = pd.merge(ans, phase_median, how='left', on ='stage')
ans['is_rare'] = ans['degree'] <= (ans['median_degree']+shift)
else:
user2stage = df_valid_stage[ ['user_id','stage'] ]
user2stage['user'] = user2stage['user_id']
user2stage = user2stage.drop('user_id', axis=1)
ans = pd.merge( ans, user2stage, how='left', on='user' )
vals = ans[ ['item','stage'] ].values
is_rare = []
for val in vals:
is_rare.append( phase_item_degree[ val[1] ][ val[0] ] <= phase2median[ val[1] ] )
ans['is_rare'] = is_rare
ans['is_rare'] = ans['is_rare'].astype('float') / bottom
ans['is_rare'] = ans['is_rare']+1.0
ans['label'] = ans['label'] * ans['is_rare']
else:
ans = ans.groupby( ['user', 'item'] )['label'].max().reset_index()
ans['label'] = -ans['label']
ans = ans.sort_values( by=['user','label'] )
user2recall = ans.groupby('user')['item'].agg(list)
user2pos = df_valid_stage[ ['user_id','item_id'] ].set_index('user_id')
all_scores = []
all_pred_items = {}
pickup = 500
save_predictions = {}
for sta in range(cur_stage+1):
predictions = []
item_degree = phase_item_degree[sta]
now_users = df_valid_stage[ df_valid_stage['stage']==sta ]['user_id'].tolist()
answers = []
for now_user in now_users:
pos = user2pos.loc[now_user].values[0]
pred = user2recall.loc[now_user]
new_pred = []
for j in pred:
if len(new_pred) < pickup:
flag = 0
for k in new_pred:
if j == k:
flag = 1
break
if flag==0:
new_pred.append( j )
answers.append( ( pos, item_degree[ pos ] ) )
all_pred_items[now_user] = []
for pred in new_pred[:pickup]:
all_pred_items[now_user].append( pred )
predictions.append(new_pred[:50]+[0]*(50-len(new_pred)))
save_predictions[now_user] = new_pred[:50]+[0]*(50-len(new_pred))
scores = ndcg_tools.evaluate_each_phase(predictions, answers, at=50)
all_scores.append(scores)
utils.dump_pickle(all_pred_items, rerank_path.format(pickup, mode))
if save_version is None:
save_version = version
with open(prediction_result+f'{save_version}_result_{bottom}_tmp_valid.csv','w') as file:
for idx,user in enumerate(save_predictions.keys()):
file.write(str(user)+','+','.join([str(p) for p in save_predictions[user]])+'\n')
for scores in all_scores:
print(scores)
print('all_scores_sum',np.array(all_scores).sum(axis=0))
print('7_9_all_scores_sum',np.array(all_scores[-3:]).sum(axis=0))
print('0_6_all_scores_sum',np.array(all_scores[0:7]).sum(axis=0))
return all_scores
def get_result(ans=None,shift=0.0,bottom=0.7,after_deal=True,save_version=None):
print(f'using bottom: {bottom}')
phase_item_degree = utils.load_pickle(phase_full_item_degree_path.format(cur_stage))
df_test_stage = utils.load_pickle(online_all_test_data_path.format(cur_stage))
df_valid = utils.load_pickle(all_valid_stage_data_path.format(cur_stage))
phase2valid_item_degree = {}
phase2median = {}
for sta in range(cur_stage+1):
cur_df_valid = df_valid[df_valid['stage']==sta]
items = cur_df_valid['item_id'].values
item_degree = phase_item_degree[sta]
list_item_degress = []
for item_id in items:
list_item_degress.append(item_degree[item_id])
list_item_degress.sort()
median_item_degree = list_item_degress[len(list_item_degress) // 2]
phase2median[sta] = median_item_degree
for item in items:
phase2valid_item_degree[(sta,item)] = item_degree[item]
old = False
if after_deal:
ans = ans.groupby( ['user', 'item'] )['label'].max().reset_index()
if old:
user_item_label = ans[ ['user','item','label'] ].values
user2stage = df_test_stage[ ['user_id','stage'] ]
user2stage['user'] = user2stage['user_id']
user2stage = user2stage.drop('user_id', axis=1)
ans = pd.merge( ans, user2stage, how='left', on='user' )
sta_list = []
item_list = []
degree_list = []
for sta in range(cur_stage+1):
item_degrees = phase_item_degree[sta]
for item in item_degrees.keys():
sta_list.append(sta)
item_list.append(item)
degree_list.append( item_degrees[item] )
df_degree = pd.DataFrame( {'stage':sta_list, 'item':item_list, 'degree':degree_list} )
ans = pd.merge( ans, df_degree, how='left', on=['stage','item'] )
phase_median = ans.groupby('stage')['degree'].median().reset_index()
phase_median['median_degree'] = phase_median['degree']
phase_median = phase_median.drop('degree', axis=1)
ans = pd.merge(ans, phase_median, how='left', on ='stage')
ans['is_rare'] = ans['degree'] <= (ans['median_degree']+shift)
else:
user2stage = df_test_stage[ ['user_id','stage'] ]
user2stage['user'] = user2stage['user_id']
user2stage = user2stage.drop('user_id', axis=1)
ans = pd.merge( ans, user2stage, how='left', on='user' )
vals = ans[ ['item','stage'] ].values
is_rare = []
for val in vals:
is_rare.append( phase_item_degree[ val[1] ][ val[0] ] <= phase2median[ val[1] ] )
ans['is_rare'] = is_rare
ans['is_rare'] = ans['is_rare'].astype('float') / bottom
ans['is_rare'] = ans['is_rare']+1.0
ans['label'] = ans['label'] * ans['is_rare']
else:
ans = ans.groupby( ['user', 'item'] )['label'].max().reset_index()
ans['label'] = -ans['label']
ans = ans.sort_values( by=['user','label'] )
user2recall = ans.groupby('user')['item'].agg(list)
df_train_stage = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
all_scores = []
all_pred_items = {}
pickup = 500
predictions = {}
for sta in range(cur_stage+1):
now_users = df_test_stage[ df_test_stage['stage'] == sta ]['user_id'].tolist()
df_train = df_train_stage[ df_train_stage['stage'] == sta ]
hot_items = df_train['item_id'].value_counts().index.tolist()
answers = []
for now_user in now_users:
pred = user2recall.loc[now_user]
new_pred = []
for j in pred:
if (len(new_pred) < pickup) and (j not in new_pred):
new_pred.append( j )
all_pred_items[now_user] = []
for pred in new_pred[:pickup]:
all_pred_items[now_user].append(pred)
new_pred = new_pred[:50]
for j in hot_items:
if (len(new_pred) < 50) and (j not in new_pred):
new_pred.append( j )
predictions[now_user] = new_pred
utils.dump_pickle(all_pred_items, rerank_path.format(pickup, mode))
#check
'''
all_users = [ user for user in predictions.keys()]
np.random.seed(2020)
mask_indexs = np.random.choice(np.arange(len(all_users)),int(len(all_users)),replace=False)
mask_indexs = set(mask_indexs)
mask_preds = [200000+i for i in range(50)]
for user in predictions.keys():
if len(set(predictions[user])) != 50:
print('no')
print(1/0)
'''
with open(prediction_result+f'{save_version}.csv','w') as file:
for idx,user in enumerate(predictions.keys()):
file.write(str(user)+','+','.join([str(p) for p in predictions[user]])+'\n')
def cal_score(ans):
phase_item_degree = utils.load_pickle(phase_full_item_degree_path.format(cur_stage))
df_valid_stage = utils.load_pickle(all_valid_stage_data_path.format(cur_stage))
ans = ans.set_index(0)
user2pos = df_valid_stage[ ['user_id','item_id'] ].set_index('user_id')
all_scores = []
for sta in range(cur_stage+1):
predictions = []
item_degree = phase_item_degree[sta]
now_users = df_valid_stage[ df_valid_stage['stage']==sta ]['user_id'].tolist()
answers = []
for now_user in now_users:
pos = user2pos.loc[now_user].values[0]
new_pred = ans.loc[now_user].tolist()
answers.append( ( pos, item_degree[ pos ] ) )
predictions.append( new_pred )
scores = ndcg_tools.evaluate_each_phase(predictions, answers, at=50)
all_scores.append(scores)
for scores in all_scores:
print(scores)
print('all_scores_sum',np.array(all_scores).sum(axis=0))
print('7_9_all_scores_sum',np.array(all_scores[-3:]).sum(axis=0))
print('0_6_all_scores_sum',np.array(all_scores[0:7]).sum(axis=0))
def data_read(ver, sta='0.005'):
datas = []
for block_id in range(block_num):
if sta == '0.005':
datas.append( utils.load_pickle( lgb_ans_dir+'{}_{}_{}_{}_0.005_ans.pkl'.format(ver, 'test', cur_stage, block_id ) ) )
else:
print(1/0)
datas.append( utils.load_pickle( lgb_ans_dir+'{}_{}_{}_{}_ans.pkl'.format(ver, 'test', cur_stage, block_id ) ) )
data = pd.concat( datas )
return data
if __name__ == '__main__':
mode = cur_mode
used_recall_source = cur_used_recall_source
sum_mode = 'nosum'
used_recall_source = used_recall_source+'-'+sum_mode
print( f'Recall Source Use {used_recall_source} now mode {mode}')
'''
#model1 = lgb.Booster(model_file=lgb_model_dir+'0608185502.model')
#model2 = lgb.Booster(model_file=lgb_model_dir+'0611052612.model')
ans1 = data_read('0608185502', sta="")
ans2 = data_read('0611052612', sta='0.005')
ans3 = data_read('0611111606', sta='0.005')
import pdb
pdb.set_trace()
'''
online = True
ensemble_mode = 'no-ensemble'
if len(sys.argv)>0:
ensemble_mode = sys.argv[1]
if online == False:
#带有坚强的特征的。
#big:0608081804
#fut1:0608081755
#fut4:0608075849
versions = { 'big':'0608081804', 'fut1':'0608081755', 'fut4':'0608075849' }
#save
'''
for name in versions:
datas = []
for block_id in range(block_num):
datas.append( utils.load_pickle( lgb_ans_dir+'{}_{}_{}_{}_ans.pkl'.format(versions[name], 'valid', cur_stage, block_id ) ) )
data = pd.concat( datas )
get_scores(ans=data,shift=0.0,bottom=0.25,after_deal=True,save_version=versions[name])
get_scores(ans=data,shift=0.0,bottom=0.7,after_deal=True,save_version=versions[name])
'''
#using
anss = {}
for name in versions:
data = pd.read_csv(prediction_result+f'{versions[name]}_result_0.7_tmp_valid.csv',header=None)
anss[name] = data
df_valid_stage = utils.load_pickle(all_valid_stage_data_path.format(cur_stage))
tdata = df_valid_stage.groupby('user_id').first()
user2stage = dict( tdata['stage'] )
users = anss['big'][0].tolist()
ans = []
for i in range(len(users)):
stage = user2stage[ users[i] ]
if stage==9 :
ans.append( anss['fut1'].iloc[i] )
elif stage==8 :
ans.append( anss['fut4'].iloc[i] )
else:
ans.append( anss['big'].iloc[i] )
ans = | pd.concat(ans,axis=1) | pandas.concat |
import pandas as pd
import csv
import types
df = pd.read_csv("/home/bench/notebooks/data/IoT_Botnet/UNSW_2018_IoT_Botnet_Dataset_1.csv",header = None)
df.columns = ["pkSeqID","stime","flgs","proto","saddr","sport","daddr","dport","pkts","bytes","state","ltime","seq","dur","mean","stddev","smac","dmac","sum","min","max","soui","doui","sco","dco","spkts","dpkts","sbytes","dbytes","rate","srate","drate","attack","category","subcategory"]
df.to_csv("/home/bench/notebooks/data/IoT_Botnet/Complete.csv")
i=2
while(i<74):
file_to_work = "/home/bench/notebooks/data/IoT_Botnet/UNSW_2018_IoT_Botnet_Dataset_" + str(i) +".csv"
df_to_add = pd.read_csv(file_to_work,quoting=csv.QUOTE_NONE,header=None)
df_to_add.columns = ["pkSeqID","stime","flgs","proto","saddr","sport","daddr","dport","pkts","bytes","state","ltime","seq","dur","mean","stddev","smac","dmac","sum","min","max","soui","doui","sco","dco","spkts","dpkts","sbytes","dbytes","rate","srate","drate","attack","category","subcategory"]
df = pd.read_csv("/home/bench/notebooks/data/IoT_Botnet/Complete.csv")
df = | pd.concat([df,df_to_add]) | pandas.concat |
import pandas as pd
import numpy as np
import os
import sys
import json
import time
import gc
gc.enable()
dataPath = '../../../data/avito-demand-prediction'
def add_features_from_active(df_train, df_test, df_act, field, gpname, newname, navl, stat):
"""
df_train, df_test: main tables
df_act: active table
field: features to engineer
gpname: feature to group
newname: new name for the feature after engineering
navl: values to replace all null
stat: built-in stats operation, eg: mean, std, nunique, etc
"""
tmpgp = df_act[[gpname, field]].groupby(gpname)
tmpdf = getattr(tmpgp[field], stat)().fillna(navl)
tmpdf = tmpdf.reset_index(gpname)
print(f'{field} grouped by {gpname} has completed. {time.time()-ss:.2f} s')
tmpdf.rename(index=str, columns={field: newname}, inplace=True)
print(f'Rename {field} to {newname}. {time.time()-ss:.2f} s')
df_train_out = pd.merge(df_train, tmpdf, how='left', on=gpname)
df_test_out = | pd.merge(df_test, tmpdf, how='left', on=gpname) | pandas.merge |
import os
from collections import Counter
import sys
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
plt.rcParams["font.family"] = "Verdana"
import warnings
import matplotlib.cbook
warnings.filterwarnings("ignore", category=matplotlib.cbook.mplDeprecation)
plt.rcParams["font.family"] = "Verdana"
list_aa = []
list_all = []
Size = 10
Fontsize = 8
Width = 0.4
base_dir = r"D:\Dropbox\tm_homodimer_dropbox"
drive_dir = r"D:\drive\TMD_homodimer"
thoipa_dir = r"D:\data_thoipapy"
out_dir = os.path.join(drive_dir, "figs\\FigXY25_residues_composition")
# excel ="I:\sets\set05_ETRA_NMR_crystal_nr.xlsx"
excel = os.path.join(base_dir, "sets\\set05_ETRA_NMR_crystal_nr.xlsx")
df_set = pd.read_excel(excel, index_col=0)
df_set.reset_index(inplace=True)
for nn in df_set.index:
sour = df_set.loc[nn, "database"]
uniprot_acc = df_set.loc[nn, "acc"]
sys.stdout.write('{}, '.format(uniprot_acc))
sys.stdout.flush()
# read for crystal dataset.
# read combined data which include conservation, covaritation, lipophilicity and the interface from structure.
Prediction_path = os.path.join(thoipa_dir, "Features\\combined\\{}\\{}.surr20.gaps5.combined_features.csv".format(sour, uniprot_acc))
prediction = pd.read_csv(Prediction_path, index_col=0)
prediction.columns = prediction.columns.str.replace('residue_num', 'aa_position')
prediction.columns = prediction.columns.str.replace('residue_name', 'orig_aa')
prediction['orig_aa'].replace('Q', 'B', inplace=True)
prediction['orig_aa'].replace('N', 'B', inplace=True)
prediction['orig_aa'].replace('H', 'B', inplace=True)
prediction['orig_aa'].replace('D', 'B', inplace=True)
prediction['orig_aa'].replace('E', 'B', inplace=True)
prediction['orig_aa'].replace('K', 'B', inplace=True)
prediction['orig_aa'].replace('R', 'B', inplace=True)
# collect interfac eresidue
list_aaa = []
for n in prediction.index:
if prediction.loc[n, "interface"] == 1:
Orig_aa = prediction.loc[n, "orig_aa"]
list_aaa.extend(Orig_aa)
list_aa.extend(list_aaa)
# collect all residue
list_all.extend(prediction["orig_aa"].tolist())
# for all residues in 3 datasets.
# calculate total residue numbers for 3 datast seperate
# ETRA
total_number_interface = len(list_aa)
total_number_non_interface = len(list_all) - len(list_aa)
b = Counter(list_all)
c = Counter(list_aa)
Index_all = np.arange(len(b.items()))
Index = np.arange(len(c.items()))
Columns_all = ['Residue_all', 'Frequence_all']
Columns = ['Residue', 'Frequence']
df_count = | pd.DataFrame(index=Index, columns=Columns) | pandas.DataFrame |
import pandas as pd
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from vital_sqi.app.app import app
import pathlib
layout = html.Div([
html.Div([
dcc.Input(
id='editing-columns-name',
placeholder='Enter a column name...',
value='',
style={'padding': 10}
),
html.Button('Add Column', id='editing-columns-button', n_clicks=0)
], style={'height': 50}),
html.Div(id='data-table'),
# html.Div(id='summary-table')
])
# @app.callback(
# Output('summary-table', 'children'),
# Input('dataframe', 'data')
# )
# def on_summary_table():
# return
@app.callback(Output('data-table', 'children'),
Input('dataframe', 'data'))
def on_data_set_table(data):
if data is None:
raise PreventUpdate
df = pd.DataFrame(data)
df_2 = | pd.DataFrame(data) | pandas.DataFrame |
import pathlib
import tempfile
import ftplib
import numpy
import pandas
from unittest import mock
import pytest
import numpy.testing as nptest
import pandas.testing as pdtest
from cloudside import asos
from cloudside.tests import get_test_file
@pytest.fixture
def fake_rain_data():
rain_raw = [
0.0,
1.0,
2.0,
3.0,
4.0,
4.0,
4.0,
4.0,
4.0,
4.0,
4.0,
4.0,
0.0,
0.0,
0.0,
0.0,
0.0,
5.0,
5.0,
5.0,
5.0,
5.0,
5.0,
5.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
2.0,
3.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
daterange = pandas.date_range(
start="2001-01-01 11:55", end="2001-01-01 15:50", freq=asos.FIVEMIN
)
return pandas.Series(rain_raw, index=daterange)
@pytest.fixture
def asos_metar():
teststring = (
"24229KPDX PDX20170108090014901/08/17 09:00:31 5-MIN KPDX 081700Z "
"10023G35KT 7SM -FZRA OVC065 00/M01 A2968 250 96 -1400 080/23G35 RMK "
"AO2 PK WND 10035/1654 P0005 I1000 T00001006"
)
return asos.MetarParser(teststring, strict=False)
def retr_error(cmd, action):
raise ftplib.error_perm
def test_MetarParser_datetime(asos_metar):
expected = pandas.Timestamp(year=2017, month=1, day=8, hour=9, minute=0, second=31)
assert asos_metar.datetime == expected
def test_MetarParser_asos_dict(asos_metar):
result = asos_metar.asos_dict()
# the "dict" rounds down the timestamp to the nearest 5 min
dateval = pandas.Timestamp(year=2017, month=1, day=8, hour=9, minute=0, second=0)
expected = asos.Obs(
datetime=dateval,
raw_precipitation=0.05,
temperature=0.0,
dew_point=-0.6,
wind_speed=23.0,
wind_direction=100,
air_pressure=250.0,
sky_cover=1.0,
)
assert result == expected
@pytest.mark.parametrize(
("exists", "force", "call_count"),
[(True, True, 1), (True, False, 0), (False, True, 1), (False, False, 1)],
)
@pytest.mark.parametrize("datestr", ["2016-01-01", "1999-01-01"])
@mock.patch("ftplib.FTP")
def test__fetch_file(ftp, exists, force, call_count, datestr):
ts = pandas.Timestamp("2016-01-01")
with tempfile.TemporaryDirectory() as rawdir:
std_path = pathlib.Path(rawdir).joinpath(f"64010KPDX{ts.year}01.dat")
if exists:
std_path.touch()
if ts.year == 1999 and call_count == 1:
expected_path = None
else:
expected_path = std_path
if expected_path is None:
ftp.retrlines.side_effect = retr_error
dst_path = asos._fetch_file("KPDX", ts, ftp, rawdir, force_download=force)
assert dst_path == expected_path
assert ftp.retrlines.call_count == call_count
@mock.patch.object(ftplib.FTP, "retrlines")
@mock.patch.object(ftplib.FTP, "login")
def test_fetch_files(ftp_login, ftp_retr):
with tempfile.TemporaryDirectory() as rawdir:
raw_paths = asos.fetch_files(
"KPDX", "1999-10-01", "2000-02-01", "<EMAIL>", rawdir
)
assert isinstance(raw_paths, filter)
assert all([(isinstance(rp, pathlib.Path) or (rp is None)) for rp in raw_paths])
assert ftp_login.called_once_with("<EMAIL>")
assert ftp_retr.call_count == 5
@pytest.mark.parametrize(("all_na", "expected"), [(False, 55), (True, 0)])
def test__find_reset_time(fake_rain_data, all_na, expected):
if all_na:
fake_rain_data.loc[:] = numpy.nan
result = asos._find_reset_time(fake_rain_data)
assert result == expected
def test_process_precip(fake_rain_data):
precip = fake_rain_data.to_frame("raw_precip")
result = asos._process_precip(precip, 55, "raw_precip")
expected = numpy.array(
[
0.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
5.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
)
nptest.assert_array_almost_equal(result, expected)
def test_parse_file():
datpath = pathlib.Path(get_test_file("sample_asos.dat"))
csvpath = pathlib.Path(get_test_file("sample_asos.csv"))
result = asos.parse_file(datpath)
expected = (
| pandas.read_csv(csvpath, parse_dates=True, index_col=["datetime"]) | pandas.read_csv |
#------------------------------------------------------------------------------#
# (1)
# Write min. 2 functions which handle the reading, processing and visualization
# of a time series of transactions for one location (dependet on an argument)
# (you can use the sum, mean or median) for the transactions on one day.
import pandas as pd
import matplotlib.pyplot as plt
def load_data(loc):
df = pd.read_csv("data/cc_data.csv")
df = df.query(f"location == \"{loc}\"")
df = df.reset_index(drop = True)
df = df.groupby("date").sum("price")
return df[["price"]]
def plot(loc):
df = load_data(loc)
df.plot()
plt.show()
# test your function(s) for <Coffee Cameleon> and <Brew've Been Served>
# plot("Coffee Cameleon")
# plot("Brew've Been Served")
# (2) - optional
# Create a heatmap (https://seaborn.pydata.org/generated/seaborn.heatmap.html)
# using seaborn for every location (y) and every date (x) and the
# sum of the price on the specific day (z).
# Hints: 1 - first build a list of dictionaires and convert it then to
# a df to visualize
# 2 - use iterrows in this way to iterate through a df:
# for index, row in df.iterrows:
# row.location
import numpy as np
import seaborn as sns
cc_df = | pd.read_csv("data/cc_data.csv") | pandas.read_csv |
import os
import unittest
from unittest import mock
from unittest.mock import Mock, MagicMock
import numpy as np
import pandas as pd
import pygrams
from scripts import FilePaths
from scripts.utils.pygrams_exception import PygramsException
class TestPyGrams(unittest.TestCase):
data_source_name = 'dummy.pkl.bz2'
out_name = 'out'
def setUp(self):
self.global_stopwords = '''the
'''
self.ngram_stopwords = '''with
'''
self.unigram_stopwords = '''of
'''
def assertListAlmostEqual(self, list_a, list_b, places=7):
self.assertEqual(len(list_a), len(list_b), 'Lists must be same length')
for a, b in zip(list_a, list_b):
self.assertAlmostEqual(a, b, places=places)
def preparePyGrams(self, fake_df_data, mock_read_pickle, mock_open, mock_bz2file, mock_path_isfile):
self.number_of_rows = len(fake_df_data['abstract'])
self.patent_id_auto_tested = 'patent_id' not in fake_df_data
self.application_id_auto_tested = 'application_id' not in fake_df_data
self.application_date_auto_tested = 'application_date' not in fake_df_data
self.publication_date_auto_tested = 'publication_date' not in fake_df_data
self.invention_title_auto_tested = 'invention_title' not in fake_df_data
self.classifications_cpc_auto_tested = 'classifications_cpc' not in fake_df_data
self.inventor_names_auto_tested = 'inventor_names' not in fake_df_data
self.inventor_countries_auto_tested = 'inventor_countries' not in fake_df_data
self.inventor_cities_auto_tested = 'inventor_cities' not in fake_df_data
self.applicant_organisation_auto_tested = 'applicant_organisation' not in fake_df_data
self.applicant_countries_auto_tested = 'applicant_countries' not in fake_df_data
self.applicant_cities_auto_tested = 'applicant_cities' not in fake_df_data
if self.patent_id_auto_tested:
fake_df_data['patent_id'] = [f'patent_id-{pid}' for pid in range(self.number_of_rows)]
if self.application_id_auto_tested:
fake_df_data['application_id'] = [f'application_id-{pid}' for pid in range(self.number_of_rows)]
if self.application_date_auto_tested:
fake_df_data['application_date'] = [pd.Timestamp('1998-01-01 00:00:00') + pd.DateOffset(weeks=row) for row
in range(self.number_of_rows)]
if self.publication_date_auto_tested:
fake_df_data['publication_date'] = [pd.Timestamp('2000-12-28 00:00:00') - pd.DateOffset(weeks=row) for row
in range(self.number_of_rows)]
if self.invention_title_auto_tested:
fake_df_data['invention_title'] = [f'invention_title-{pid}' for pid in range(self.number_of_rows)]
if self.classifications_cpc_auto_tested:
fake_df_data['classifications_cpc'] = [[f'Y{row:02}'] for row in range(self.number_of_rows)]
if self.inventor_names_auto_tested:
fake_df_data['inventor_names'] = [[f'Fred {row:02}'] for row in range(self.number_of_rows)]
if self.inventor_countries_auto_tested:
fake_df_data['inventor_countries'] = [['GB']] * self.number_of_rows
if self.inventor_cities_auto_tested:
fake_df_data['inventor_cities'] = [['Newport']] * self.number_of_rows
if self.applicant_organisation_auto_tested:
fake_df_data['applicant_organisation'] = [['Neat and tidy']] * self.number_of_rows
if self.applicant_countries_auto_tested:
fake_df_data['applicant_countries'] = [['GB']] * self.number_of_rows
if self.applicant_cities_auto_tested:
fake_df_data['applicant_cities'] = [['Newport']] * self.number_of_rows
df = | pd.DataFrame(data=fake_df_data) | pandas.DataFrame |
import random
import sys
import time
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from copy import copy
import itertools
from collections import deque
import pandas as pd
from tqdm import tqdm
def coalition_keys_to_str(c):
c_list = [c_t + 1 for c_t in c]
r = '_'.join([str(c_int) for c_int in c_list])
return r
def str_to_coalition_keys(s):
r = sorted(list(set([int(t) - 1 for t in s.split('_')])))
return r
class MetaCoalition:
curr_idx = 0
def __init__(self, key, value, number_of_players=3):
self.idx = MetaCoalition.curr_idx + 1
self.number_of_players = number_of_players
MetaCoalition.curr_idx += 1
key_int_l = [int(t) for t in key.split('_')]
key_int_l.sort()
self.coalition_members = key_int_l
self.key = '_'.join([str(t) for t in key_int_l])
self.sig = self.key
self.coalition_vectors = [0] * self.number_of_players
for p in key_int_l:
self.coalition_vectors[p - 1] = 1
self.possible_adversaries_vector = [0] * self.number_of_players
self.possible_adversaries = list()
for i in range(len(self.coalition_vectors)):
if self.coalition_vectors[i] == 0:
self.possible_adversaries += [i]
self.possible_adversaries_vector[i] = 1
self.value = value
def __str__(self):
return self.sig
def __contains__(self, key):
return key in self.coalition_members
def check_overlap(self, mc):
overlaps = list(set(self.coalition_members) & set(mc.coalition_members))
return len(overlaps) > 0
class Coalition:
@staticmethod
def reduce_coalition(coalitions):
if len(coalitions) == 0:
return list()
optimals = list()
adversaries = list(set([c.last_adv for c in coalitions]))
for adv_p in adversaries:
coalitions_with_leader = [c for c in coalitions if c.last_adv == adv_p]
optimal_coalition = max(coalitions_with_leader, key=lambda c: c.get_last_adv_value())
# Option B: split in case of equilibrium
optimal_value = optimal_coalition.get_last_adv_value()
optimals_p = [c for c in coalitions_with_leader if c.get_last_adv_value() == optimal_value]
optimals += optimals_p
return optimals
def __init__(self, payoffs, subcoalitions, last_adv, info):
self.info = info
self.last_adv = last_adv
self.all_metacoalitions = info.get('metacoalitions', dict())
self.subcoalitions = subcoalitions
self.players_count = info.get('players_count', 50)
self.bargain_step = info.get('bargain_step', 50)
self.payoff = np.array(payoffs)
self.players_vector = [int(t > 0) for t in self.payoff]
self.players = [p + 1 for p in range(len(self.players_vector)) if self.players_vector[p] > 0]
self.adversaries_vector = [int(t == 0) for t in self.payoff]
self.adversaries = [p + 1 for p in range(len(self.adversaries_vector)) if self.adversaries_vector[p] > 0]
self.next = list()
self.sig = None
self.sig = self._sig()
def get_last_adv_value(self):
return self.payoff[self.last_adv - 1]
def _sig(self):
if self.sig is None:
self.sig = '_'.join([str(po) for po in self.payoff])
self.sig = f'({self.last_adv})' + self.sig
return self.sig
def __str__(self):
return self.sig
def expand(self):
self.next = list()
returns = list()
for adv_p in self.adversaries:
known_metacoalitions_with_leader = [k for k, mc in self.all_metacoalitions.items() if
adv_p in mc]
for mc_key in known_metacoalitions_with_leader:
mc = self.all_metacoalitions[mc_key]
mc_value = mc.value
payoffs_c = [0] * self.players_count
# Set value of all non-leaders to the bargaining step
for player_j in mc.coalition_members:
if player_j != adv_p:
payoffs_c[player_j - 1] = self.payoff[player_j - 1] + bargain_step
adv_value = mc_value - sum(payoffs_c)
payoffs_c[adv_p - 1] = adv_value
if adv_value > 0:
current_mcs = self.subcoalitions
remain_mcs = [mc_t for mc_t in current_mcs if not mc.check_overlap(mc_t)]
for r_mc in remain_mcs:
for old_player in r_mc.coalition_members:
payoffs_c[old_player - 1] = self.payoff[old_player - 1]
n_coalition = Coalition(payoffs_c, [mc] + remain_mcs, last_adv=adv_p, info=self.info)
self.next.append(n_coalition.sig)
returns.append(n_coalition)
else:
# leader <= 0, coalition not relevant
pass
return returns
def set_edges(self, edges):
self.next = copy(edges)
def get_as_edge(self):
r = [(self.sig, t_next) for t_next in self.next]
return r
def get_all_root(bargain_step, metacoalitions, info):
roots = list()
players = list(range(1, players_count + 1)) # Note, the key of player N, is N+1
for leader_p in players:
known_coalitions_with_leader = [c for c in metacoalitions.values() if leader_p in c.coalition_members]
for coalition_c in known_coalitions_with_leader:
coalition_c_sig = coalition_c.sig
coalition_c_value = coalition_c.value
root = [0] * players_count
# Set value of all non-leaders to the bargaining step
for player_j in coalition_c.coalition_members:
if player_j != leader_p:
root[player_j - 1] = bargain_step
leader_value = coalition_c_value - sum(root)
root[leader_p - 1] = leader_value
if leader_value > 0:
coalition = Coalition(root, [coalition_c], leader_p, info=info)
roots.append(coalition)
return roots
if __name__ == '__main__':
coalitions_value = dict()
coalitions_value['1_2'] = 1000
coalitions_value['2_3'] = 1000
coalitions_value['3_4'] = 1000
coalitions_value['1_4'] = 1000
bargain_step = 10
default_coalition_value = 0
players_count = len(
list(set(itertools.chain.from_iterable([str_to_coalition_keys(t) for t in coalitions_value.keys()]))))
print(f"Players in game: {players_count}")
metacoalitions = [MetaCoalition(k, v, number_of_players=players_count) for k, v in coalitions_value.items()]
metacoalitions = {mc.sig: mc for mc in metacoalitions}
info = dict()
info['metacoalitions'] = metacoalitions
info['players_count'] = players_count
info['bargain_step'] = bargain_step
info['default_coalition_value'] = default_coalition_value
roots = get_all_root(
bargain_step, metacoalitions,
info=info,
)
print(f"Roots detected: {len(roots)}")
# BFS
g = dict()
q = []
visited = dict()
for root in roots:
g[root.sig] = root
q.append(root.sig)
while len(q) > 0:
node_sig = q.pop(0)
node = g[node_sig]
if node_sig in visited:
continue
else:
visited[node_sig] = True
n_nodes = node.expand()
n_nodes = Coalition.reduce_coalition(n_nodes)
node.set_edges([t._sig() for t in n_nodes])
for n_node in n_nodes:
if n_node is None:
continue
elif n_node.sig in visited:
continue
else:
g[n_node.sig] = n_node
q.append(n_node.sig)
# Build graph
nodes = list(g.keys())
edges = list()
for n_node_sig in nodes:
edges += g[n_node_sig].get_as_edge()
OG = nx.DiGraph()
for n_node in nodes:
OG.add_node(n_node, name=n_node)
OG.add_edges_from(edges)
# Find end points
end_points = list()
for node_sig, node in g.items():
e_t = node.get_as_edge()
if e_t is None or len(e_t) == 0:
end_points.append(node_sig)
# Find cycles
cycles = list(nx.algorithms.simple_cycles(OG))
# COLORING
color_map = []
for node in OG:
color = 'BLUE'
for cycle in cycles:
if node in cycle:
color = 'RED'
if node in end_points:
color = 'YELLOW'
color_map += [color]
print(f"Cycles: {len(cycles)}")
print(f"DeadEnds: {len(end_points)}")
possible_endings = len(cycles) + len(end_points)
print(f"Possible endings: {possible_endings}")
# # Plot all graph
# node_size = 500
# font_size = 10
# plt.figure(3, figsize=(12, 12))
#
# nx.draw(G, node_color=color_map, node_size=node_size, with_labels=True)
# plt.show()
# Plot just loops
nodes_sig = list()
if len(cycles) > 0:
nodes_sig = list(set.union(*[set(s) for s in cycles]))
edges = list()
for n_node_sig in nodes_sig:
e = g[n_node_sig].get_as_edge()
e = [et for et in e if et[1] in nodes_sig]
edges += e
G = nx.DiGraph()
for n_node in nodes_sig:
G.add_node(n_node, name=n_node)
G.add_edges_from(edges)
# Print components
components = [list(qt) for qt in nx.strongly_connected_components(G)]
for idx, comp in enumerate(components):
print(f'[{idx + 1}/{len(components)}]\t')
for c in components[0]:
print(c)
# Plot all graph
node_size = 500
font_size = 10
plt.figure(3, figsize=(12, 12))
color_map = []
for node in G:
color = 'ORANGE'
for idx, cycle in enumerate(cycles):
if node in cycle:
color = 'RED'
if node in end_points:
color = 'YELLOW'
color_map += [color]
nx.draw(G, node_color=color_map, node_size=node_size, with_labels=True)
# plt.show()
histdf = pd.DataFrame(index=OG.nodes, columns=range(1, len(components) + 1), data=0)
for comp_idx, comp in enumerate(components):
s_node = np.random.choice(comp)
hist_comp = dict()
hist_comp[s_node] = 1
itrs = 10000
for i in tqdm(range(itrs), desc=f'Random walk on component {comp_idx + 1}/{len(components)}'):
options_edges = list(OG.out_edges(s_node))
if len(options_edges) == 0:
time.sleep(0.1)
print(f"Component {comp_idx + 1} was broken")
break
selected_edge = options_edges[np.random.randint(len(options_edges))]
s_node = selected_edge[1]
hist_comp[s_node] = hist_comp.get(s_node, 0) + 1
sr = pd.Series(hist_comp)
histdf.loc[sr.index, comp_idx + 1] = sr
histdf = histdf[histdf.sum(axis=1) > 0]
histdf_f = histdf / histdf.sum(axis=0)
# adjacency matrix calculations
adj = nx.adjacency_matrix(OG).todense()
adj = adj / adj.sum(axis=1)
def steady_state_prop(p):
dim = p.shape[0]
q = (p - np.eye(dim))
ones = np.ones(dim)
q = np.c_[q, ones]
QTQ = np.dot(q, q.T)
bQT = np.ones(dim)
return np.linalg.solve(QTQ, bQT)
steady_state_matrix = steady_state_prop(adj)
steady_state_matrix = steady_state_matrix.round(4)
eps = 0.00001
ssdf = | pd.Series(index=OG.nodes, data=steady_state_matrix, dtype=float) | pandas.Series |
'''
Module docstring
'''
import sys
import os
from importlib import reload
import tempfile
import string
import pyutilib
import contextlib
from collections import namedtuple
import numbers
import numpy as np
import pandas as pd
import pyomo.environ as po
from pyomo.core.base.objective import SimpleObjective
from pyomo.opt import SolverFactory
import grimsel.auxiliary.maps as maps
import grimsel.auxiliary.timemap as timemap
import grimsel.core.constraints as constraints
import grimsel.core.variables as variables
import grimsel.core.parameters as parameters
import grimsel.core.sets as sets
import grimsel.core.io as io # for class methods
from grimsel import _get_logger
logger = _get_logger(__name__)
def get_random_suffix():
return ''.join(np.random.choice(list(string.ascii_lowercase), 4))
#
#TEMP_DIR = 'grimsel_temp_' + get_random_suffix()
#def create_tempfile(self, suffix=None, prefix=None, text=False, dirc=None):
# """
# Return the absolute path of a temporary filename that is_init_pf_dicts
# guaranteed to be unique. This function generates the file and returns
# the filename.
# """
#
# logger.warn('!!!!!!!!tempfiles.TempfileManagerPlugin.create_tempfile '
# 'is monkey patched!!!!!!!!')
#
# if suffix is None:
# suffix = ''
# if prefix is None:
# prefix = 'tmp'
#
# ans = tempfile.mkstemp(suffix=suffix, prefix=prefix, text=text, dir=dirc)
# ans = list(ans)
# if not os.path.isabs(ans[1]): #pragma:nocover
# fname = os.path.join(dirc, ans[1])
# else:
# fname = ans[1]
# os.close(ans[0])
#
# dirc = TEMP_DIR
#
# if not os.path.isdir(dirc):
# os.mkdir(dirc)
#
# new_fname = os.path.join(dirc, 'grimsel_temp_' + get_random_suffix() + suffix)
# # Delete any file having the sequential name and then
# # rename
# if os.path.exists(new_fname):
# os.remove(new_fname)
# fname = new_fname
#
# self._tempfiles[-1].append(fname)
# return fname
#
#import pyutilib.component.config.tempfiles as tempfiles
#tempfiles.TempfileManagerPlugin.create_tempfile = create_tempfile
reload(constraints)
reload(variables)
reload(parameters)
reload(sets)
class ModelBase(po.ConcreteModel, constraints.Constraints,
parameters.Parameters, variables.Variables, sets.Sets):
# class attributes as defaults for presolve_fixed_capacities
list_vars = [('var_yr_cap_pwr_rem', 'cap_pwr_rem'),
('var_yr_cap_pwr_new', 'cap_pwr_new')]
list_constr_deact = ['set_win_sol']
# db = get_config('sql_connect')['db']
def __init__(self, **kwargs):
'''
Keyword arguments:
nhours -- time resolution of the model, used for profile scaling
sc_warmstart -- input database schema for presolving
slct_node -- limit node selection
slct_encar -- limit to energy carrier selection
skip_runs -- boolean; if True, solver calls are skipped, also
stops the IO instance from trying to write the model
variables.
'''
super(ModelBase, self).__init__() # init of po.ConcreteModel
defaults = {'slct_node': [],
'slct_pp_type': [],
'slct_node_connect': [],
'slct_encar': [],
'nhours': 1,
'unq_code': '',
'mps': None,
'tm_filt': False,
'verbose_solver': True,
'constraint_groups': None,
'symbolic_solver_labels': False,
'skip_runs': False,
'nthreads': False,
'keepfiles': True,
'tempdir': None}
for key, val in defaults.items():
setattr(self, key, val)
self.__dict__.update(kwargs)
self._check_contraint_groups()
logger.info('self.slct_encar=' + str(self.slct_encar))
logger.info('self.slct_pp_type=' + str(self.slct_pp_type))
logger.info('self.slct_node=' + str(self.slct_node))
logger.info('self.slct_node_connect=' + str(self.slct_node_connect))
logger.info('self.nhours=' + str(self.nhours))
logger.info('self.constraint_groups=' + str(self.constraint_groups))
self.warmstartfile = self.solutionfile = None
# attributes for presolve_fixed_capacities
self.list_vars = ModelBase.list_vars
self.list_constr_deact = ModelBase.list_constr_deact
# def _update_slct_lists(self):
# ''''''
# for attr, series in ((self.slct_encar, self.df_def_encar.ca),
# (self.slct_encar_id, self.df_def_encar.ca_id),
# (self.slct_pp_type_id, self.df_def_pp_type.pt),
# (self.slct_pp_type_id, self.df_def_pp_type.pt),
# (self.slct_node, self.df_def_node.nd)):
# if not attr:
# attr = series.tolist()
def build_model(self):
'''
Call the relevant model methods to get everything set up.
This consists in:
1. call self.get_setlst (in Sets mixin class) to initialize
the self.setlst dictionary
2. call self.define_sets (in Sets mixin class) to initialize
Pyomo set objects
3. call self.define_parameters (in Parameters mixin class)
4. call self.define_variables (in Variables mixin class)
5. call self.add_all_constraints
6. call self.init_solver
.. note::
io needs to have loaded all data, i.e. set the ModelBase
DataFrames.
'''
self.get_setlst()
self.define_sets()
self.define_parameters()
if not self.skip_runs:
self.define_variables()
self.add_all_constraints()
self.init_solver()
@classmethod
def get_constraint_groups(cls, excl=None):
'''
Returns list names of methods defining constraint groups.
This classmethod can also be used to define the constraint_groups
parameter to initialize the ModelBase object by selecting certain
groups to be excluded.
Parameters
----------
excl : list
exclude certain group names from the returned list
Returns
-------
list
Names of constraint groups. These correspond to the
methods in the :class:`Constraints` class without the prefix
`add_` and the suffix `_rules`
'''
cg_lst = [mth.replace('add_', '').replace('_rules', '')
for mth in dir(cls)
if mth.startswith('add_') and 'rule' in mth]
if excl:
cg_lst = [cg for cg in cg_lst if not cg in excl]
return cg_lst
def _check_contraint_groups(self):
'''
Verification and completion of the constraint group selection.
Verifies constraint groups if the ``constraint_groups`` argument
is not None. Otherwise it gathers all accordingly named
methods from the class attributes and populates the list thusly.
Raises
------
ValueError
If the :class:`ModelBase` instance attribute ``constraint_groups``
contains invalid entries.
'''
cg_options = self.get_constraint_groups()
if self.constraint_groups is None:
self.constraint_groups = cg_options
else:
# get invalid constraint groups in input
nv = [cg for cg in self.constraint_groups
if not cg in cg_options]
if nv:
estr = ('Invalid constraint group(s): {nv}.'
+ '\nPossible choices are:\n{cg}'
).format(nv=', '.join(nv), cg=',\n'.join(cg_options))
raise ValueError(estr)
def add_all_constraints(self):
'''
Call all selected methods from the constraint mixin class.
Loops through the `constraint_groups` list and calls the corresponding
methods in the :class:`.Constraints` mixing class.
'''
for cg in set(self.constraint_groups):
logger.info('##### Calling constraint group {}'.format(cg.upper()))
getattr(self, 'add_%s_rules'%cg)()
def _limit_prof_to_cap(self):
if len(self.chp) > 0:
self.limit_prof_to_cap()
def limit_prof_to_cap(self, param_mod='cap_pwr_leg'):
'''
Make sure CHP profiles don't ask for more power than feasible.
This operates on the parameters and is called before each model run.
'''
logger.info('Limiting chp profiles to cap_pwr_leg')
# get list of plants relevant for chp from corresponding set
pp_chp = self.setlst['chp']
df_chpprof = io.IO.param_to_df(self.chpprof, ('sy', 'nd_id', 'ca_id'))
df_erg_chp = io.IO.param_to_df(self.erg_chp, ('pp_id', 'ca_id'))
df_erg_chp = df_erg_chp.loc[df_erg_chp.pp_id.isin(pp_chp)]
df_erg_chp['nd_id'] = df_erg_chp.pp_id.replace(self.mps.dict_plant_2_node_id)
# outer join profiles and energy to get a profile for each fuel
df_chpprof_tot = pd.merge(df_erg_chp.rename(columns={'value': 'erg'}),
df_chpprof.rename(columns={'value': 'prof'}),
on=['nd_id', 'ca_id'])
# scale profiles
df_chpprof_tot['prof_sc'] = df_chpprof_tot['erg'] * df_chpprof_tot['prof']
# get capacities from parameter
df_cap_pwr_leg = io.IO.param_to_df(self.cap_pwr_leg, ('pp_id', 'ca_id'))
# keep only chp-related fuels
df_cap_pwr_leg = df_cap_pwr_leg.loc[df_cap_pwr_leg.pp_id.isin(self.chp)]
# pivot_by fl_id
df_cappv = df_cap_pwr_leg.pivot_table(values='value',
index=['ca_id', 'pp_id'],
aggfunc=np.sum)['value']
# rename
df_cappv = df_cappv.rename('cap').reset_index()
# add capacity to profiles
df_chpprof_tot = pd.merge(df_cappv, df_chpprof_tot, on=['ca_id', 'pp_id'])
# find occurrences of capacity zero and chp erg non-zero
df_slct = df_chpprof_tot[['pp_id', 'ca_id', 'cap', 'erg']].drop_duplicates().copy()
df_slct = df_slct.loc[df_slct.cap.isin([0])
& -df_slct.erg.isin([0])]
str_erg_cap = ''
if len(df_slct > 0):
for nrow, row in df_slct.iterrows():
str_erg_cap += 'pp_id=%d, ca_id=%d: cap_pwr_leg=%f, erg_chp=%f\n'%tuple(row.values)
raise ValueError ('limit_prof_to_cap: one or more cap_pwr_leg are zero '
'while erg_chp is greater 0: \n' + str_erg_cap)
# find occurrences of capacity violations
mask_viol = df_chpprof_tot.prof_sc > df_chpprof_tot.cap
if mask_viol.sum() == 0:
logger.info('ok, nothing changed.')
else:
# REPORTING
df_profviol = df_chpprof_tot.loc[mask_viol]
dict_viol = df_profviol.pivot_table(index=['pp_id', 'ca_id'],
values='sy', aggfunc=len)['sy'].to_dict()
for kk, vv in dict_viol.items():
logger.warning('limit_prof_to_cap: \n(pp, ca)='
'{}: {} violations'.format(kk, vv))
logger.warning('limit_prof_to_cap: Modifing model '
'parameter ' + param_mod)
if param_mod == 'chpprof':
df_profviol['prof'] *= 0.999 * df_chpprof_tot.cap / df_chpprof_tot.prof_sc
dict_chpprof = (df_profviol.pivot_table(index=['sy', 'nd_id', 'ca_id'],
values='prof', aggfunc=min)['prof']
.to_dict())
for kk, vv in dict_chpprof.items():
self.chpprof[kk] = vv
elif param_mod == 'cap_pwr_leg':
# calculate capacity scaling factor
df_capsc = df_profviol.pivot_table(index=['pp_id', 'ca_id'],
values=['cap', 'prof_sc'], aggfunc=np.max)
df_capsc['cap_sc'] = df_capsc.prof_sc / df_capsc.cap
# merge scaling factor with capacity table
df_cap_pwr_leg = df_cap_pwr_leg.join(df_capsc,
on=df_capsc.index.names)
df_cap_pwr_leg = df_cap_pwr_leg.loc[-df_cap_pwr_leg.cap_sc.isnull()]
# apply scaling factor to all capacity with the relevant fuel
df_cap_pwr_leg['cap'] *= df_cap_pwr_leg.cap_sc * 1.0001
# dictionary
dict_cap_pwr_leg = df_cap_pwr_leg.set_index(['pp_id', 'ca_id'])['cap']
dict_cap_pwr_leg = dict_cap_pwr_leg.to_dict()
for kk, vv in dict_cap_pwr_leg.items():
self.cap_pwr_leg[kk] = vv
def _init_pf_dicts(self):
'''
Initializes dicts mapping the profile ids to other model ids.
This results in dictionaries which are assigned as :class:`ModelBase`
instance attributes:
* ``dict_pricesll_pf``: (fl_id, nd_id, ca_id) |rarr| (pricesll_pf_id)
* ``dict_pricebuy_pf``: (fl_id, nd_id, ca_id) |rarr| (pricebuy_pf_id)
* ``dict_dmnd_pf``: (nd_id, ca_id) |rarr| (dmnd_pf_id)
* ``dict_supply_pf``: (pp_id, ca_id) |rarr| (supply_pf_id)
Purpose
---------
The resulting dictionaries are used for filtering the profile tables
in the :module:`io` module and to access the profile parameters
in the model :class:`Constraints`.
'''
list_pf = [(self.df_fuel_node_encar,
['fl_id', 'nd_id', 'ca_id'], 'pricebuy'),
(self.df_fuel_node_encar,
['fl_id', 'nd_id', 'ca_id'], 'pricesll'),
(self.df_node_encar,
['nd_id', 'ca_id'], 'dmnd'),
(self.df_plant_encar,
['pp_id', 'ca_id'], 'supply')]
df, ind, name = list_pf[-1]
for df, ind, name in list_pf:
col = '%s_pf_id'%name
if df is not None and col in df.columns:
ind_df = df.loc[~df[col].isna()].set_index(ind)[col]
dct = ind_df.to_dict()
else:
dct = {}
setattr(self, 'dict_%s_pf'%name, dct)
def translate_pf_id(self, df):
'''
Adds model id columns for the profile ids in the input DataFrame.
Searches vars(self) for the pf_dict corresponding to the pf_ids
in the input DataFrame. Then uses this dictionary to add additional
columns to the output table.
Parameters
----------
df (DataFrame): DataFrame with pf_id column.
Returns
-------
:obj:`pandas.DataFrame`
Input DataFrame with added model ids corresponding to the pf_id.
Raises
------
IndexError: If multiple pf dictionaries correspond to the pf_id
values in the input DataFrame.
IndexError: If no pf dictionary can be found for the pf_id values.
'''
# identify corresponding pf dict
list_pf_id = set(df.pf_id.unique().tolist())
pf_arrs = {name_dict:
list_pf_id
.issubset(set(getattr(self, name_dict).values()))
for name_dict in vars(self)
if name_dict.startswith('dict_')
and name_dict.endswith('_pf')}
if sum(pf_arrs.values()) > 1:
raise ValueError('Ambiguous pf array in translate_pf_id '
'or df empty.')
elif sum(pf_arrs.values()) == 0:
raise ValueError('No pf array found for table with columns '
'%s'%df.columns.tolist() + '. Maybe you are '
'trying to translate a table with pf_ids which '
'are not included in the original model.')
else:
pf_dict = {val: key for key, val in pf_arrs.items()}[True]
new_cols = {'dict_pricesll_pf': ['fl_id', 'nd_id', 'ca_id'],
'dict_pricebuy_pf': ['fl_id', 'nd_id', 'ca_id'],
'dict_price_pf': ['fl_id', 'nd_id', 'ca_id'],
'dict_dmnd_pf': ['nd_id', 'ca_id'],
'dict_supply_pf': ['pp_id', 'ca_id']}[pf_dict]
pf_dict = getattr(self, pf_dict)
df_new = | pd.Series(pf_dict) | pandas.Series |
import influxdb
import pandas as pd
class InfluxDBInput():
def __init__(self, config_dict):
self.client = influxdb.InfluxDBClient(**config_dict)
def run_query(self, field, measurement, tags, pagesize=10000):
collect = []
times = []
values = []
q = True
pagenum = 0
# Single quotes around tags might not always work
tag_str = ' AND '.join(["{key}='{value}'".format(key=key, value=value) for key, value
in tags.items()])
while q:
q = self.client.query(("SELECT {field} FROM {measurement} WHERE {tags} "
"LIMIT {pagesize} OFFSET {page}")
.format(field=field, measurement=measurement, tags=tag_str,
pagesize=pagesize, page=pagenum*pagesize))
if q:
collect.append(q[measurement])
pagenum += 1
for resultset in collect:
for reading in resultset:
times.append(reading['time'])
values.append(reading[field])
s = pd.Series(values, index=times)
s.index = pd.to_datetime(s.index)
return s
def run(self, ingredient_dict):
return self.run_query(**ingredient_dict)
class InfluxDBOutput():
def __init__(self, config_dict):
self.client = influxdb.InfluxDBClient(**config_dict)
def _stringify_dataframe(self,
dataframe,
numeric_precision,
datatype='field',
upcast_to_float=True):
# Find int and string columns for field-type data
int_columns = dataframe.select_dtypes(include=['integer']).columns
string_columns = dataframe.select_dtypes(include=['object']).columns
# Convert dataframe to string
if numeric_precision is None:
# If no precision specified, convert directly to string (fast)
dataframe = dataframe.astype(str)
elif numeric_precision == 'full':
# If full precision, use repr to get full float precision
float_columns = (dataframe.select_dtypes(include=['floating'])
.columns)
nonfloat_columns = dataframe.columns[~dataframe.columns.isin(
float_columns)]
dataframe[float_columns] = dataframe[float_columns].applymap(repr)
dataframe[nonfloat_columns] = (dataframe[nonfloat_columns]
.astype(str))
elif isinstance(numeric_precision, int):
# If precision is specified, round to appropriate precision
float_columns = (dataframe.select_dtypes(include=['floating'])
.columns)
nonfloat_columns = dataframe.columns[~dataframe.columns.isin(
float_columns)]
dataframe[float_columns] = (dataframe[float_columns]
.round(numeric_precision))
# If desired precision is > 10 decimal places, need to use repr
if numeric_precision > 10:
dataframe[float_columns] = (dataframe[float_columns]
.applymap(repr))
dataframe[nonfloat_columns] = (dataframe[nonfloat_columns]
.astype(str))
else:
dataframe = dataframe.astype(str)
else:
raise ValueError('Invalid numeric precision.')
if datatype == 'field':
# If dealing with fields, format ints and strings correctly
if not upcast_to_float:
dataframe[int_columns] = dataframe[int_columns] + 'i'
dataframe[string_columns] = '"' + dataframe[string_columns] + '"'
dataframe.columns = dataframe.columns.values.astype(str)
return dataframe
def _convert_series_to_lines(self,
measurement,
fields,
tags=None,
field_names=[],
tag_names=[],
global_tags={},
time_precision=None,
numeric_precision=None):
if isinstance(fields, pd.Series):
if not (isinstance(fields.index, pd.PeriodIndex) or
isinstance(fields.index, pd.DatetimeIndex)):
raise TypeError('Must be Series with DatetimeIndex or \
PeriodIndex.')
elif isinstance(fields, list):
for field in fields:
if not isinstance(field, pd.Series):
raise TypeError('Must be Series, but type was: {0}.'
.format(type(field)))
if not (isinstance(field.index, pd.tseries.period.PeriodIndex) or
isinstance(field.index, pd.tseries.index.DatetimeIndex)):
raise TypeError('Must be Series with DatetimeIndex or \
PeriodIndex.')
precision_factor = {
"n": 1,
"u": 1e3,
"ms": 1e6,
"s": 1e9,
"m": 1e9 * 60,
"h": 1e9 * 3600,
}.get(time_precision, 1)
# Make array of timestamp ints
# TODO: Doesn't support multiple fields
time = ((pd.to_datetime(fields.index).values.astype(int) /
precision_factor).astype(int).astype(str))
# If tag columns exist, make an array of formatted tag keys and values
if tags is not None:
if isinstance(tags, pd.Series):
tag_df = pd.DataFrame(list(zip(tags)), columns=[tag_names])
elif isinstance(tags, list):
tag_df = pd.DataFrame(list(zip(*tags)), columns=tag_names)
else:
print(type(tags))
raise ValueError
tag_df = self._stringify_dataframe(
tag_df, numeric_precision, datatype='tag')
tags = (',' + (
(tag_df.columns.values + '=').tolist() + tag_df)).sum(axis=1)
del tag_df
else:
tags = ''
# Make an array of formatted field keys and values
if isinstance(fields, pd.Series):
field_df = pd.DataFrame(list(zip(fields)), columns=[field_names])
elif isinstance(fields, list):
field_df = pd.DataFrame(list(zip(*fields)), columns=field_names)
field_df = self._stringify_dataframe(
field_df, numeric_precision, datatype='field')
field_df = (field_df.columns.values + '=').tolist() + field_df
field_df[field_df.columns[1:]] = ',' + field_df[field_df.columns[1:]]
fields = field_df.sum(axis=1)
del field_df
# Add any global tags to formatted tag strings
if global_tags:
global_tags = ','.join(['='.join([tag, global_tags[tag]])
for tag in global_tags])
if tags is not None:
tags = tags + ',' + global_tags
else:
tags = ',' + global_tags
# Generate line protocol string
points = (measurement + tags + ' ' + fields + ' ' + time).tolist()
return points
def _convert_dataframe_to_lines(self,
dataframe,
measurement,
field_columns=[],
tag_columns=[],
global_tags={},
time_precision=None,
numeric_precision=None):
if not isinstance(dataframe, pd.DataFrame):
raise TypeError('Must be DataFrame, but type was: {0}.'
.format(type(dataframe)))
if not (isinstance(dataframe.index, pd.tseries.period.PeriodIndex) or
isinstance(dataframe.index, pd.tseries.index.DatetimeIndex)):
raise TypeError('Must be DataFrame with DatetimeIndex or \
PeriodIndex.')
# Create a Series of columns for easier indexing
column_series = | pd.Series(dataframe.columns) | pandas.Series |
__author__ = "<NAME>"
__project__ = "gme.estimate"
__created__ = "05-07-2018"
__all__ = ['format_regression_table']
from typing import List
import pandas as pd
def format_regression_table(results_dict: dict = None,
variable_list: List[str] = [],
format: str = 'txt',
se_below: bool = True,
significance_levels: List[float] = [0.01, 0.05, 0.10],
round_values: int = 3,
omit_fe_prefix: List[str] = [],
table_columns: list = [],
path: str = None,
include_index: bool = False,
latex_syntax: bool = False,
r_squared: bool = False,
note: str = None):
'''
Format estimation results into a standard table format with options for significance stars, LaTeX syntax, standard
error positioning, rounding, fixed effect ommission, and others options.
Args:
results_dict: Dict[statsmodels.genmod.generalized_linear_model.GLMResultsWrapper]
A dictionary of GLM fit objects from statsmodels
variable_list: (optional) List[str]
A list of variables to include in the results table. If none are provided, all variables are included. The
default is an empty list, which results in the inclusion of all estimated variables.
format: str
Determines the file formatting of text. Accepts 'tex' for LaTeX, 'txt' for plain text, or 'csv' for a
csv table. Default is 'txt'.
se_below: bool
If True, standard errors are presented below estimates. If False, they are presented in a column to the
right. The default is True.
significance_levels: List[float]
A list specifying the three percentages, from lowest to highest, on which to base significance stars. The
default value is [0.01, 0.05, 0.10].
round_values: int
The number of decimal points to include in the reported figures. The default is 3.
omit_fe_prefix: (optional) List[str]
A list of strings such that any variable starting with that string are omitted from the created table. The
value is an empty list that omits no variables.
table_columns: (optional) List[str]
A list of keys from the results_dict to be included in the created table. The default is an empty list, which
results in all values being created
path: (optional) str
A system path and file name to write the created table to. File extensions of .txt (format = 'txt'),
.tex or .txt (format = 'tex'), or .csv (format = 'csv') are recommended.
include_index: bool
If true, the outputed .csv file will contain row numbers. Default is False.
latex_syntax: bool
If true, the table will include LaTeX syntax, regardless of the chosen format. Default is False
variable_order: (optional) List[str]
If supplied, provides an specific ordering in which to list the variables in the table.
r_squared: bool
If True, it includes R^2 values in the table. This is primarily useful if OLS regression results are
supplied. Default is False.
note: (optional) str
Adds the supplied string as a note at the bottom of the table.
Returns: Pandas.DataFrame
A DataFrame containing the formatted results table with specified syntax.
Examples:
Create a .csv file.
>>> sample_estimation_model.format_regression_table(format = 'csv',
path = "c:\folder\saved_results.csv")
Create a LaTeX .tex table without fixed effects (with prefix 'imp_fe_' and 'exp_fe_')
>>> sample_estimation_model.format_regression_table(format = 'tex',
path = "c:\folder\saved_results.tex",
omit_fe_prefix = ['imp_fe_' , 'exp_fe_'])
'''
if results_dict is None:
raise ValueError('Must input a dictionary of regression (GLM.fit) objects')
if len(table_columns) == 0:
table_columns = list(results_dict.keys())
else:
for column in table_columns:
if column not in list(results_dict.keys()):
raise ValueError('Specified column {0} in table_columns is not a key in results_dict.'.format(column))
formatted_dict = {}
for key in table_columns:
results = results_dict[key]
if se_below is True:
compiled_results = pd.DataFrame(columns=['Variable', str(key)])
if se_below is False:
compiled_results = pd.DataFrame(columns=['Variable', str(key), (str(key) + ' SE')])
if len(variable_list) == 0:
variable_list_current = results.params.index
else:
variable_list_current = pd.Series(variable_list)
if len(omit_fe_prefix) > 0:
for prefix in omit_fe_prefix:
variable_list_current = variable_list_current[~variable_list_current.str.startswith(prefix)]
for variable in variable_list_current:
# Add significance stars to coefficient
beta = str(round(results.params[variable], round_values))
while (len(beta) - beta.index('.') - 1) < round_values: # pad trailing zeros if dropped
beta = beta + '0'
if format == 'tex' or latex_syntax is True:
if results.pvalues[variable] < significance_levels[0]:
formatted_coeff = beta + '$^{***}$'
if (results.pvalues[variable] < significance_levels[1]) & \
(results.pvalues[variable] >= significance_levels[0]):
formatted_coeff = beta + '$^{**}$'
if (results.pvalues[variable] < significance_levels[2]) & \
(results.pvalues[variable] >= significance_levels[1]):
formatted_coeff = beta + '$^{*}$'
if results.pvalues[variable] >= significance_levels[2]:
formatted_coeff = beta
else:
if results.pvalues[variable] < significance_levels[0]:
formatted_coeff = beta + '***'
if (results.pvalues[variable] < significance_levels[1]) & \
(results.pvalues[variable] >= significance_levels[0]):
formatted_coeff = beta + '**'
if (results.pvalues[variable] < significance_levels[2]) & \
(results.pvalues[variable] >= significance_levels[1]):
formatted_coeff = beta + '*'
if results.pvalues[variable] >= significance_levels[2]:
formatted_coeff = beta
# Format standard error
std_err = str(round(results.bse[variable], round_values))
while (len(std_err) - std_err.index('.') - 1) < round_values: # pad trailing zeros if dropped
std_err = std_err + '0'
formatted_se = '(' + std_err + ')'
if se_below is False:
row = pd.DataFrame({'Variable': variable,
str(key): formatted_coeff,
(str(key) + ' SE'): formatted_se},
index=[('a_' + variable)])
compiled_results = | pd.concat([compiled_results, row], axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
self.assertTrue((frame['bar'] == 5).all())
inp_frame2.dropna(subset=['bar'], inplace=True)
self.assert_index_equal(samesize_frame.index, self.frame.index)
self.assert_index_equal(inp_frame2.index, self.frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
dropped = df.dropna(axis=1, thresh=5)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, thresh=4, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=1, thresh=4)
assert_frame_equal(dropped, df)
dropped = df.dropna(axis=1, thresh=3)
assert_frame_equal(dropped, df)
# subset
dropped = df.dropna(axis=0, subset=[0, 1, 3])
inp = df.copy()
inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
assert_frame_equal(dropped, df)
assert_frame_equal(inp, df)
# all
dropped = df.dropna(axis=1, how='all')
assert_frame_equal(dropped, df)
df[2] = nan
dropped = df.dropna(axis=1, how='all')
expected = df.ix[:, [0, 1, 3]]
assert_frame_equal(dropped, expected)
# bad input
self.assertRaises(ValueError, df.dropna, axis=3)
def test_drop_and_dropna_caching(self):
# tst that cacher updates
original = Series([1, 2, np.nan], name='A')
expected = Series([1, 2], dtype=original.dtype, name='A')
df = pd.DataFrame({'A': original.values.copy()})
df2 = df.copy()
df['A'].dropna()
assert_series_equal(df['A'], original)
df['A'].dropna(inplace=True)
assert_series_equal(df['A'], expected)
df2['A'].drop([1])
assert_series_equal(df2['A'], original)
df2['A'].drop([1], inplace=True)
assert_series_equal(df2['A'], original.drop([1]))
def test_dropna_corner(self):
# bad input
self.assertRaises(ValueError, self.frame.dropna, how='foo')
self.assertRaises(TypeError, self.frame.dropna, how=None)
# non-existent column - 8303
self.assertRaises(KeyError, self.frame.dropna, subset=['A', 'X'])
def test_dropna_multiple_axes(self):
df = DataFrame([[1, np.nan, 2, 3],
[4, np.nan, 5, 6],
[np.nan, np.nan, np.nan, np.nan],
[7, np.nan, 8, 9]])
cp = df.copy()
result = df.dropna(how='all', axis=[0, 1])
result2 = df.dropna(how='all', axis=(0, 1))
expected = df.dropna(how='all').dropna(how='all', axis=1)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(df, cp)
inp = df.copy()
inp.dropna(how='all', axis=(0, 1), inplace=True)
assert_frame_equal(inp, expected)
def test_fillna(self):
self.tsframe.ix[:5, 'A'] = nan
self.tsframe.ix[-5:, 'A'] = nan
zero_filled = self.tsframe.fillna(0)
self.assertTrue((zero_filled.ix[:5, 'A'] == 0).all())
padded = self.tsframe.fillna(method='pad')
self.assertTrue(np.isnan(padded.ix[:5, 'A']).all())
self.assertTrue((padded.ix[-5:, 'A'] == padded.ix[-5, 'A']).all())
# mixed type
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
result = self.mixed_frame.fillna(value=0)
result = self.mixed_frame.fillna(method='pad')
self.assertRaises(ValueError, self.tsframe.fillna)
self.assertRaises(ValueError, self.tsframe.fillna, 5, method='ffill')
# mixed numeric (but no float16)
mf = self.mixed_float.reindex(columns=['A', 'B', 'D'])
mf.ix[-10:, 'A'] = nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype=dict(C=None))
result = mf.fillna(method='pad')
_check_mixed_float(result, dtype=dict(C=None))
# empty frame (GH #2778)
df = DataFrame(columns=['x'])
for m in ['pad', 'backfill']:
df.x.fillna(method=m, inplace=1)
df.x.fillna(method=m)
# with different dtype (GH3386)
df = DataFrame([['a', 'a', np.nan, 'a'], [
'b', 'b', np.nan, 'b'], ['c', 'c', np.nan, 'c']])
result = df.fillna({2: 'foo'})
expected = DataFrame([['a', 'a', 'foo', 'a'],
['b', 'b', 'foo', 'b'],
['c', 'c', 'foo', 'c']])
assert_frame_equal(result, expected)
df.fillna({2: 'foo'}, inplace=True)
assert_frame_equal(df, expected)
# limit and value
df = DataFrame(np.random.randn(10, 3))
df.iloc[2:7, 0] = np.nan
df.iloc[3:5, 2] = np.nan
expected = df.copy()
expected.iloc[2, 0] = 999
expected.iloc[3, 2] = 999
result = df.fillna(999, limit=1)
assert_frame_equal(result, expected)
# with datelike
# GH 6344
df = DataFrame({
'Date': [pd.NaT, Timestamp("2014-1-1")],
'Date2': [Timestamp("2013-1-1"), pd.NaT]
})
expected = df.copy()
expected['Date'] = expected['Date'].fillna(df.ix[0, 'Date2'])
result = df.fillna(value={'Date': df['Date2']})
assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = df.get_dtype_counts().sort_values()
expected = Series({'object': 5})
assert_series_equal(result, expected)
result = df.fillna(1)
expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = result.get_dtype_counts().sort_values()
expected = Series({'int64': 5})
assert_series_equal(result, expected)
# empty block
df = DataFrame(index=lrange(3), columns=['A', 'B'], dtype='float64')
result = df.fillna('nan')
expected = DataFrame('nan', index=lrange(3), columns=['A', 'B'])
assert_frame_equal(result, expected)
# equiv of replace
df = DataFrame(dict(A=[1, np.nan], B=[1., 2.]))
for v in ['', 1, np.nan, 1.0]:
expected = df.replace(np.nan, v)
result = df.fillna(v)
assert_frame_equal(result, expected)
def test_fillna_datetime_columns(self):
# GH 7095
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=date_range('20130110', periods=3))
self.assert_frame_equal(result, expected)
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': [pd.Timestamp('2013-01-01'),
pd.Timestamp('2013-01-02'), pd.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': [pd.Timestamp('2013-01-01'),
pd.Timestamp('2013-01-02'), '?'],
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=pd.date_range('20130110', periods=3))
self.assert_frame_equal(result, expected)
def test_ffill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.ffill(),
self.tsframe.fillna(method='ffill'))
def test_bfill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.bfill(),
self.tsframe.fillna(method='bfill'))
def test_fillna_skip_certain_blocks(self):
# don't try to fill boolean, int blocks
df = DataFrame(np.random.randn(10, 4).astype(int))
# it works!
df.fillna(np.nan)
def test_fillna_inplace(self):
df = DataFrame(np.random.randn(10, 4))
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(value=0)
self.assertIsNot(expected, df)
df.fillna(value=0, inplace=True)
assert_frame_equal(df, expected)
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(method='ffill')
self.assertIsNot(expected, df)
df.fillna(method='ffill', inplace=True)
assert_frame_equal(df, expected)
def test_fillna_dict_series(self):
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]})
result = df.fillna({'a': 0, 'b': 5})
expected = df.copy()
expected['a'] = expected['a'].fillna(0)
expected['b'] = expected['b'].fillna(5)
assert_frame_equal(result, expected)
# it works
result = df.fillna({'a': 0, 'b': 5, 'd': 7})
# Series treated same as dict
result = df.fillna(df.max())
expected = df.fillna(df.max().to_dict())
assert_frame_equal(result, expected)
# disable this for now
with assertRaisesRegexp(NotImplementedError, 'column by column'):
df.fillna(df.max(1), axis=1)
def test_fillna_dataframe(self):
# GH 8377
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]},
index=list('VWXYZ'))
# df2 may have different index and columns
df2 = DataFrame({'a': [nan, 10, 20, 30, 40],
'b': [50, 60, 70, 80, 90],
'foo': ['bar'] * 5},
index=list('VWXuZ'))
result = df.fillna(df2)
# only those columns and indices which are shared get filled
expected = DataFrame({'a': [nan, 1, 2, nan, 40],
'b': [1, 2, 3, nan, 90],
'c': [nan, 1, 2, 3, 4]},
index=list('VWXYZ'))
assert_frame_equal(result, expected)
def test_fillna_columns(self):
df = DataFrame(np.random.randn(10, 10))
df.values[:, ::2] = np.nan
result = df.fillna(method='ffill', axis=1)
expected = df.T.fillna(method='pad').T
assert_frame_equal(result, expected)
df.insert(6, 'foo', 5)
result = df.fillna(method='ffill', axis=1)
expected = df.astype(float).fillna(method='ffill', axis=1)
assert_frame_equal(result, expected)
def test_fillna_invalid_method(self):
with assertRaisesRegexp(ValueError, 'ffil'):
self.frame.fillna(method='ffil')
def test_fillna_invalid_value(self):
# list
self.assertRaises(TypeError, self.frame.fillna, [1, 2])
# tuple
self.assertRaises(TypeError, self.frame.fillna, (1, 2))
# frame with series
self.assertRaises(ValueError, self.frame.iloc[:, 0].fillna,
self.frame)
def test_fillna_col_reordering(self):
cols = ["COL." + str(i) for i in range(5, 0, -1)]
data = np.random.rand(20, 5)
df = DataFrame(index=lrange(20), columns=cols, data=data)
filled = df.fillna(method='ffill')
self.assertEqual(df.columns.tolist(), filled.columns.tolist())
def test_fill_corner(self):
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
filled = self.mixed_frame.fillna(value=0)
self.assertTrue((filled.ix[5:20, 'foo'] == 0).all())
del self.mixed_frame['foo']
empty_float = self.frame.reindex(columns=[])
# TODO(wesm): unused?
result = empty_float.fillna(value=0) # noqa
def test_fill_value_when_combine_const(self):
# GH12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')
df = DataFrame({'foo': dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
assert_frame_equal(res, exp)
class TestDataFrameInterpolate(tm.TestCase, TestData):
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
| tm._skip_if_no_scipy() | pandas.util.testing._skip_if_no_scipy |
f"""基本操作"""
import json
import re
import threading
from collections import defaultdict
from io import BytesIO
from itertools import count as iter_count
from bson import ObjectId
import jieba
import pandas
import json
import itertools
from opencc import OpenCC
import pykakasi
from transliterate import translit
from PyMongoWrapper import F, QueryExprParser
from PyMongoWrapper.dbo import DbObject, DbObjectCollection
from .utils import execute_query_expr, language_iso639
from models import Paragraph, Collection, parser, db
from pipeline import PipelineStage
class Passthrough(PipelineStage):
"""直接通过
"""
def resolve(self, p : Paragraph) -> Paragraph:
return p
class TradToSimpChinese(PipelineStage):
"""繁体中文转为简体中文
"""
t2s = OpenCC('t2s')
def resolve(self, p: Paragraph) -> Paragraph:
p.content = TradToSimpChinese.t2s.convert(p.content)
if p.lang == 'cht': p.lang = 'chs'
return p
class JiebaCut(PipelineStage):
"""使用结巴分词生成检索词
"""
def resolve(self, p: Paragraph) -> Paragraph:
p.tokens = list(jieba.cut_for_search(p.content) if self.for_search else jieba.cut(p.content))
return p
class WordStemmer(PipelineStage):
"""附加词干到 tokens 中(需要先进行切词)
"""
_language_stemmers = {}
@staticmethod
def get_stemmer(lang):
from nltk.stem.snowball import SnowballStemmer
if lang not in WordStemmer._language_stemmers:
stemmer = SnowballStemmer(language_iso639.get(lang, lang).lower())
WordStemmer._language_stemmers[lang] = stemmer
return WordStemmer._language_stemmers[lang]
def __init__(self, append=True):
"""
Args:
append (bool): 将词干添加到结尾,否则直接覆盖
"""
self.append = append
def resolve(self, p : Paragraph) -> Paragraph:
tokens = [WordStemmer.get_stemmer(p.lang).stem(_) for _ in p.tokens]
if self.append:
p.tokens += tokens
else:
p.tokens = tokens
return p
class WordCut(PipelineStage):
"""多语种分词
"""
t2s = OpenCC('t2s')
kks = pykakasi.kakasi()
stmr = WordStemmer(append=True)
def __init__(self, for_search=False, **kwargs):
"""
Args:
for_search (bool): 是否用于搜索(添加冗余分词结果或西文词干/转写)
"""
self.for_search = for_search
def resolve(self, p: Paragraph) -> Paragraph:
if p.lang == 'cht':
p.content = WordCut.t2s.convert(p.content)
if p.lang in ('chs', 'cht'):
p.tokens = list(jieba.cut_for_search(p.content) if self.for_search else jieba.cut(p.content))
elif p.lang == 'ja':
p.tokens = []
for i in WordCut.kks.convert(p.content):
p.tokens.append(i['orig'])
if self.for_search: p.tokens.append(i['hepburn'])
else:
p.tokens = [_.lower() for _ in re.split(r'[^\w]', p.content)]
if self.for_search:
WordCut.stmr.resolve(p)
if self.for_search and p.lang == 'ru':
p.tokens += [translit(_, 'ru', reversed=True).lower() for _ in p.tokens]
return p
class KeywordsFromTokens(PipelineStage):
"""将词袋中的分词结果加入到检索词中并删除词袋
"""
def resolve(self, p: Paragraph) -> Paragraph:
p.keywords = list(set(p.tokens))
delattr(p, 'tokens')
p.save()
return p
class FilterPunctuations(PipelineStage):
"""过滤标点符号
"""
re_punctuations = re.compile(r'[,。「」·;□■•●『』[]【】()\s\(\)、“”‘’《》——\-!?\.\?\!\,\'\":\/\\\n\u3000…]')
def resolve(self, p: Paragraph) -> Paragraph:
p.content = FilterPunctuations.re_punctuations.sub('', p.content)
return p
class AccumulateParagraphs(PipelineStage):
"""将遍历的段落保存起来以备下一步骤使用(通常用于导出)
"""
def __init__(self):
self.paragraphs = []
self.lock = threading.Lock()
def resolve(self, p : Paragraph):
with self.lock:
self.paragraphs.append(p)
def summarize(self, *args):
return self.paragraphs
class Export(PipelineStage):
"""结果导出为文件
"""
def __init__(self, format='xlsx', limit=0) -> None:
"""导出结果
Args:
format (xlsx|json|csv): 输出格式。
limit (int, optional): 最多导出的记录数量,0表示无限制。
"""
self.format = format
self.limit = limit
def summarize(self, r):
def json_dump(v):
try:
return json.dump(v)
except:
return str(v)
r = [_.as_dict() if isinstance(_, DbObject) else _ for _ in r ]
if self.format == 'json':
return {
'__file_ext__': 'json',
'data': json_dump(r).encode('utf-8')
}
elif self.format == 'csv':
b = BytesIO()
| pandas.DataFrame(r) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 8 18:09:41 2019
@author: shuxinyu
"""
import numpy as np
import pandas as pd
import random
from sklearn.feature_selection import chi2, VarianceThreshold, SelectKBest
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings('ignore')
AUC_record= | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = | Index(['one', 'two']) | pandas.Index |
# This script creates the final pollution data set for the road networks and pollution project
# Importing required modules
import pandas as pd
# Defining username + filepaths
username = ''
filepath = 'C:/Users/' + username + '/Documents/Data/road_networks/'
# Reading in the raw pollution data
o3 = pd.read_csv(filepath + 'data/pollution_data/ozone_data.csv')
pm = pd.read_csv(filepath + 'data/pollution_data/pm_data.csv')
pm10 = | pd.read_csv(filepath + 'data/pollution_data/pm10_data.csv') | pandas.read_csv |
# Authors
# * <NAME>
# * <NAME>
# * <NAME>
#############
## Imports ##
#############
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from tensorflow.keras.preprocessing import timeseries_dataset_from_array
from sklearn.multioutput import MultiOutputRegressor, RegressorChain
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.compose import TransformedTargetRegressor
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit
from pandas.plotting import register_matplotlib_converters
from statsmodels.tsa.exponential_smoothing.ets import ETSModel
from statsmodels.tsa.seasonal import STL
from IPython.display import display
from matplotlib import pyplot as plt
from IPython.display import display
from itertools import product
from datetime import datetime
from tsa_benchmarks import *
from tsa_metrics import *
from tsa_wrappers import *
from tsa_preprocessing import *
import lightgbm as lgb
import seaborn as sns
import pandas as pd
import numpy as np
import warnings
import pickle
import tqdm
import json
warnings.filterwarnings("ignore")
register_matplotlib_converters()
sns.set_style('darkgrid')
np.set_printoptions(precision=4)
pd.set_option('precision', 4)
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# pd.set_option('display.max_colwidth', None)
###############
## Functions ##
###############
def score_reveal(methods: dict) -> DataFrame:
return (
pd.DataFrame({
method: {
col: np.mean(score_list)
for col, score_list
in info.items()
if col not in ['meta', 'model']
}
for method, info
in methods.items()
}))
def mslt(ts, s=[12], plot=False):
components = {'Data': ts}
series = ts.copy()
for t in s:
res = STL(
series, period=t, seasonal=t if t % 2 else t+1, robust=True).fit()
components[f'Trend'] = res.trend
components[f'Seasonal{t}'] = res.seasonal
series = res.trend + res.resid
components[f'Remainder'] = res.resid
res = pd.DataFrame(components)
if plot:
res.plot(
subplots=True, layout=(-1, 1), figsize=(12, 10), color='k',
title=[*res.columns], legend=False)
plt.tight_layout()
return res
def rateMyForecast(
train: DataFrame,
test: DataFrame,
forecast: DataFrame) -> DataFrame:
"""
Evalute the forcast per group, given train, test, and forecast tables.
The function evaluates the metrics per column of the provided table.
Parameters
----------
train : DataFrame
DataFrame contaning the train set.
test : DataFrame
DataFrame contaning the test set.
forecast : DataFrame
DataFrame contaning the forecast set.
Returns
-------
DataFrame
DataFrame contaning the metrics as columns, groups as rows,
and scores as values.
"""
res = pd.DataFrame([
{'Group': col,
'RMSE': rmse(np.array(test[col]), np.array(forecast[col])),
'MAE': mae(np.array(test[col]), np.array(forecast[col])),
'MASE': mase(np.array(test[col]), np.array(forecast[col]), np.array(train[col])),
'RMSSE': rmsse(np.array(test[col]), np.array(forecast[col]), np.array(train[col]))}
for col in test])
return res.set_index('Group')
def compute_bottomup(df_orig, df_pred, lvl_pred):
"""Pre-processes the original data by level and returns
a dictionary of RMSSEs for each time series in each level.
Parameters
----------
df_orig : DataFrame
DataFrame contaning the original data (index=date, columns=hts).
df_pred : DataFrame
DataFrame contaning the predictions using best model (index=date, columns=hts).
lvl_pred : int
Specified hierarchical level of the df_pred.
Returns
-------
res_bylvl : DataFrame
Nested dictionary of RMSSEs per time series per level
"""
levels = json.loads(open('levels.json', 'r').read())
res_bylvl = {}
lvl_preds = list(sorted(range(2, lvl_pred), reverse=True))
for x in list(sorted(range(1, lvl_pred), reverse=True)):
if x in lvl_preds:
orig = df_orig.sum(level=[levels[str(x)]], axis=1)
pred = df_pred.sum(level=[levels[str(x)]], axis=1)
else:
orig = df_orig.sum(level=levels[str(x)], axis=1)
pred = df_pred.sum(level=levels[str(x)], axis=1)
# Test and Train Split
train = orig.iloc[:1913, ]
test = orig.iloc[1913:, ]
# Initialize res dictionary by column
res_bycol = {}
if x in lvl_preds:
for col in orig.columns:
res_bycol[col] = rmsse(test[col], pred[col], train[col])
else:
res_bycol['Total'] = rmsse(test, pred, train)
res_bylvl[x] = res_bycol
return res_bylvl
def compute_topdown(df_full, df_pred, lvl_pred, approach='AHP'):
"""Pre-processes the original data by level and returns
a dictionary of RMSSEs for each time series in each level.
Parameters
----------
df_orig : DataFrame
DataFrame contaning the original data (index=date, columns=hts).
df_pred : DataFrame
DataFrame contaning the predictions using best model (index=date, columns=hts).
lvl_pred : int
Specified hierarchical level of the df_pred.
Returns
-------
res_bylvl : DataFrame
Nested dictionary of RMSSEs per time series per level
"""
levels = json.loads(open('levels.json', 'r').read())
lvl_preds = list(levels.keys())[9:]
ldf_pred_tot = df_pred.sum(axis=1)
if approach == 'AHP':
res_bylvl = {}
forc_bylvl = {}
for x in tqdm.tqdm(lvl_preds):
propors = {}
next_lvl_forc = {}
res_bycol = {}
lvl = df_full.sum(level=levels[x], axis=1)
# Test and Train Split
train = lvl.iloc[:1913, ]
test = lvl.iloc[1913:, ]
for col in tqdm.tqdm(lvl.columns.tolist()):
propors[col] = sum(lvl[col]/lvl.sum(axis=1)) * (1/len(lvl))
next_lvl_forc[col] = ldf_pred_tot * propors[col]
res_bycol[col] = (rmsse(test[col],
next_lvl_forc[col],
train[col]))
forc_bylvl[x] = next_lvl_forc
res_bylvl[x] = res_bycol
return res_bylvl
#############################################
## Model Selection ##
#############################################
class EndogenousTransformer(BaseEstimator, TransformerMixin):
"""
Transform a univariate `X` into `X_train` of leght `w` and
`y_train` of length `h`. The total no. of data points will be:
>>> len(X) - w - h + 1
By default returns X, and y but can be configured to return
only `X` or `y`.
This runs on `TimeseriesGenerator` backend.
"""
def __init__(
self, w: int,
h: int,
return_X: bool = True,
return_y: bool = True,
reshape: bool = False) -> None:
"""Initializes the transformer"""
self.w = w
self.h = h
self.return_X = return_X
self.return_y = return_y
self.reshape = reshape
def fit(self, X, y=None):
self.X = X if not self.reshape else np.array(X).reshape(-1)
# print('fitting', self.X.shape, (self.return_X, self.return_y))
self.y = y
return self
def transform(self, X, y=None):
self.X = X if not self.reshape else np.array(X).reshape(-1)
# print('transforming', ((X, len(X)) if isinstance(X, list) else self.X.shape), (self.return_X, self.return_y))
if len(self.X) == self.w:
return np.array([self.X])
X_train, _, y_train, _ = TimeseriesGenerator(
self.X, self.y, self.w, self.h)
# print('into', y_train.shape, (self.return_X, self.return_y))
if self.return_X and self.return_y:
return X_train, y_train
elif self.return_X:
return X_train
elif self.return_y:
return y_train
else:
raise ValueError
def inverse_transform(self, X):
return X.flatten()
def cross_val_score(X, est, config, scoring, cv):
"""
Splits `X` using `cv` and predicts using `est` with `config` params.
The output will be scored based on `scoring`.
"""
param = config.copy()
h = param.pop('h')
w = param.pop('w')
folds = cv.split(X, h)
scores = {metric: [] for metric in scoring}
for train, val in folds:
X_train, X_test, y_train, y_test = TimeseriesGenerator(
train, val, w, h)
est.set_params(**param)
est.fit(X_train, y_train)
y_hat = est.predict(X_test)
for metric in scores:
scores[metric].append(scoring[metric](y_test, y_hat))
return scores
def cross_val_predict(X, est, config, cv):
param = config.copy()
h = param.pop('h')
w = param.pop('w', None)
folds = cv.split(X, h)
fit_params = {}
res = {}
for k, (train, val) in enumerate(folds):
if w:
X_train, X_test, y_train, y_test = TimeseriesGenerator(
train, val, w, h)
est.set_params(**param)
est.fit(X_train, y_train)
y_hat = est.predict(X_test)[0]
else:
try:
model = est(X, **param)
fit = model.fit(**fit_params)
y_hat = fit.forecast(h)
except:
y_hat = np.full(len(val), np.nan)
res.update({(k, i): y for i, y in enumerate(y_hat)})
return res
class OverlappingTimeSeriesSplit:
"""
Creates overlapping timeseries splits of the dataset for use in
cross validation.
Can be used to return only the index for operations that require
only the indices.
Usage:
>>> tscv = OverlappingTimeSeriesSplit(val_size=39, h=35, return_type='index')
>>> tscv.split(train[col])
This returns six splits of the data given 40 available observations
for cross validation and horizon of 35 given by:
>>> splits = val_size - h + 1
"""
def __init__(self, val_size, h, return_type='value'):
self.val_size = val_size
self.return_type = return_type
self.h = h
def split(self, design_set, h=None):
h = h if h else self.h
val_end = len(design_set)
divider = val_end - h
dataset = []
while len(design_set) - divider <= self.val_size:
dataset.append(
(np.arange(0, divider), np.arange(divider, val_end))
if self.return_type == 'index' else
(design_set[np.arange(0, divider)],
design_set[np.arange(divider, val_end)])
)
val_end -= 1
divider -= 1
return dataset[::-1]
class GridSearch:
def __init__(self, estimator, param_grid, cv, scoring=[]):
self.est = estimator
self.param_grid = param_grid
self.param_list = [
dict(zip(param_grid.keys(), params))
for params in product(*param_grid.values())]
self.cv = cv
self.scoring = scoring
def fit(self, X, scores=False):
self.cv_results_ = []
self.df_records_ = []
for param in tqdm.tqdm(self.param_list):
if scores:
res = {
'params': param.copy(),
**cross_val_score(
X, self.est, param, self.scoring, self.cv)}
rec = {
'Lookback': res['params']['w'],
'Horizon': res['params']['h'],
'Average RMSE': np.mean(res['rmse']),
'Stdev RMSE': np.std(res['rmse'])}
rec['Sum'] = (rec['Average RMSE'] + rec['Stdev RMSE'])
# self.best_params = (
# self.df.nsmallest(1, 'Sum').iloc[0].to_dict())
else:
res = {
'params': param.copy(),
**cross_val_predict(
X, self.est, param, self.cv)}
rec = res
self.cv_results_.append(res)
self.df_records_.append(rec)
self.df = pd.DataFrame(self.df_records_)
def evaluate_methods(
methods: dict,
X: Series,
XOG: Series,
tscv,
col,
w: int,
h: int,
scoring=rmse) -> dict:
"""
Evaluates the different forecasting methods defined in a dict:
>>> methods = {
... str(method_name): {
... `meta`: 'base' | 'stat' | 'ml_recursive' | 'ml_direct',
... `model`: model
... }
... }
Parameters
----------
methods: dict
A dict containing information about the methods and an instance
of each.
X: Series
Training set which may or may not be pre-processed
XOG: Series
Test set where the ground truth will come.
tscv
A splitter that returns `train_index` and `test_index`
col
Will be used as key for scores.
w: int
Lookback window
h: int
Forecast horizon
Returns
-------
methods: dict
Updated dict containing RMSE scores.
"""
for method, model in methods.items():
for train_index, test_index in tscv.split(X):
X_train = X.iloc[train_index]
X_test = XOG.iloc[train_index].iloc[-w:]
y_test = XOG.iloc[test_index]
if model['meta'] in ['stat', 'base']:
y_pred = model['model'].fit(X_train).forecast(h)
if model['meta'] in ['ml_recursive']:
model['model'].fit(None, X_train)
clear_output()
y_pred = model['model'].predict(X_test).squeeze()
if model['meta'] in ['ml_direct']:
X_train, _, y_train, _ = TimeseriesGenerator(
X=X_train,
y=None,
w=w,
h=h)
model['model'].fit(X_train, y_train)
y_pred = model['model'].predict([X_test]).squeeze()
if model['meta'] in ['combo']:
model['model'][0].fit(None, X_train)
clear_output()
y_pred1 = model['model'][0].predict(X_test).squeeze()
X_train, _, y_train, _ = TimeseriesGenerator(
X=X_train,
y=None,
w=w,
h=h)
model['model'][1].fit(X_train, y_train)
y_pred2 = model['model'][1].predict([X_test]).squeeze()
y_pred = (y_pred2 + y_pred1) / 2
methods[method].setdefault(col, []).append(
scoring(np.array(y_test), y_pred))
return methods
###############
## Ensembles ##
###############
def forecastUsingConfig(est, regions, design_set, test_set):
forecast = {}
for region in regions:
train = design_set[region['Region']]
test = test_set[region['Region']]
w = int(region['Lookback'])
h = int(region['Horizon'])
X_train, X_test, y_train, y_test = TimeseriesGenerator(
train, test, w, h)
# est.set_params(**param)
fit = est.fit(X_train, y_train)
forecast[region['Region']] = fit.predict(X_test)[0]
forecast_set = pd.DataFrame(forecast)
forecast_set.index = test_set.index
return forecast_set
class ensemble1:
def __init__(self, w, s):
self.w = w
self.s = s = [7, 30, 365]
def fit(self, ts, lower=np.NINF, upper=np.inf, ):
series = timeSeriesFiltering(ts, lower, upper)
self.res = mslt(series, s=self.s)
# Seasonal
# Trend
self.trend_fit = ETSModel(self.res.Trend, trend='add').fit()
# Residuals
X_train, _, y_train, _ = TimeseriesGenerator(
self.res.Remainder, y=None, w=self.w, h=1)
resid_model = lgb.LGBMRegressor(random_state=1)
self.resid_fit = resid_model.fit(X_train, y_train)
return self
def forecast(self, h):
forecasts = {'Data': np.nan}
forecasts['Trend'] = self.trend_fit.forecast(h)
for seasonality in self.s:
forecasts[f'Seasonal{seasonality}'] = snaivef(
self.res[f'Seasonal{seasonality}'], h, seasonality)
resid = self.res.Remainder.tolist()
for _ in range(h):
f = self.resid_fit.predict([resid[-self.w:]])
forecasts.setdefault('Remainder', []).extend(f)
resid.extend(f)
return pd.DataFrame(forecasts).assign(
Data=lambda x: np.nansum(x, axis=1))
class ensemble2:
def __init__(self, col_assignment: dict, methods: dict, h: int, w: int) -> None:
self.methods = methods
self.col_assignment = col_assignment
self.h = h
self.w = w
def fit(self, df_train: DataFrame):
"""Lazy fit."""
self.df_train = df_train
# for label, content in df_train.items():
# X_train = content
# model_name = self.col_assignment[label]
# model = self.methods[model_name].copy()
# if model['meta'] in ['stat', 'base']:
# self.fits[label] = model['model'].fit(X_train)
# if model['meta'] in ['ml_recursive']:
# self.fits[label] = model['model'].fit(None, X_train)
# if model['meta'] in ['ml_direct']:
# X_train, _, y_train, _ = TimeseriesGenerator(
# X=X_train,
# y=None,
# w=self.w,
# h=self.h)
# self.fits[label] = model['model'].fit(X_train, y_train)
# clear_output()
def predict(self, df_test: DataFrame):
df_train = self.df_train
res = {}
for label in df_test:
X_train = df_train[label]
X_test = df_test[label].iloc[-self.w:]
model_name = self.col_assignment[label]
model = self.methods[model_name]
if model['meta'] in ['stat', 'base']:
y_pred = model['model'].fit(X_train).forecast(self.h)
if model['meta'] in ['ml_recursive']:
model['model'].fit(None, X_train)
clear_output()
y_pred = model['model'].predict(X_test).squeeze()
if model['meta'] in ['ml_direct']:
X_train, _, y_train, _ = TimeseriesGenerator(
X=X_train,
y=None,
w=self.w,
h=self.h)
model['model'].fit(X_train, y_train)
y_pred = model['model'].predict([X_test]).squeeze()
if model['meta'] in ['combo']:
model['model'][0].fit(None, X_train)
clear_output()
y_pred1 = model['model'][0].predict(X_test).squeeze()
X_train, _, y_train, _ = TimeseriesGenerator(
X=X_train,
y=None,
w=self.w,
h=self.h)
model['model'][1].fit(X_train, y_train)
y_pred2 = model['model'][1].predict([X_test]).squeeze()
y_pred = (y_pred2 + y_pred1) / 2
res[label] = np.array(y_pred)
return | pd.DataFrame(res) | pandas.DataFrame |
import logging
import yaml
import os
import docker
import re
import sys
from tempfile import NamedTemporaryFile
import numpy as np
import pandas as pd
from pandas.errors import EmptyDataError
from docker.errors import NotFound, APIError
from io import StringIO
# from pynomer.client import NomerClient
# from ..core import URIMapper, URIManager, TaxId
from ..util.taxo_helper import *
pd.options.mode.chained_assignment = None
"""
https://github.com/globalbioticinteractions/globalbioticinteractions/wiki/Taxonomy-Matching
"""
class NoValidColumnException(Exception):
pass
class ConfigurationError(Exception):
pass
def create_mapping(df):
"""
Return a dict that keeps track of duplicated items in a DataFrame
"""
return (
df.reset_index()
.groupby(df.columns.tolist(), dropna=False)["index"]
.agg(["first", tuple])
.set_index("first")["tuple"]
.to_dict()
)
class TaxonomicEntityValidator:
def __init__(self, config):
self.logger = logging.getLogger(__name__)
self.config = config
self.taxo_to_matcher = {
"GBIF": "gbif",
"NCBI": "ncbi",
"IF": "indexfungorum",
"SILVA": "ncbi",
}
self.default_name_matcher = "globalnames"
self.nomer = NomerHelper()
def validate(self, df):
"""For a subset of columns (e.g. consumers and resources),
validate taxonomic ids and/or names against a source taxonomy.
Returns the input DataFrame with new columns containing the valid
ids and names for each query column.
"""
for column_config in self.config.columns:
# Set default values
assert column_config.uri_column != None
column_config.id_column = (
column_config.id_column if "id_column" in column_config else None
)
column_config.name_column = (
column_config.name_column if "name_column" in column_config else None
)
column_config.source_taxonomy = (
column_config.source_taxonomy
if "source_taxonomy" in column_config
else None
)
if not (column_config.id_column or column_config.name_column):
raise NoValidColumnException(
"You should specify at least one valid column containing the taxon names or ids."
)
# Map taxa to target taxonomy
self.logger.info(
f"Validate {df.shape[0]} taxa from columns ({column_config.id_column},{column_config.name_column})"
)
valid_df = self.validate_columns(
df,
id_column=column_config.id_column,
name_column=column_config.name_column,
source_taxonomy=column_config.source_taxonomy,
)
df[column_config.uri_column] = valid_df["iri"]
df[column_config.valid_name_column] = valid_df["valid_name"]
df[column_config.valid_id_column] = valid_df["valid_id"]
return df
def validate_columns(
self, df, id_column=None, name_column=None, source_taxonomy=None
):
"""
Taxonomic entity validation consists in checking that the pair (taxid, name)
is valid in a given taxonomy (both taxid and name are optional, but at least
one of them must exist). This function adds a column "valid_id" and a column
"valid_name" to the input DataFrame. If both values are NaN, the corresponding
entity is considered invalid.
"""
def add_prefix(col, src_taxo):
"""
Add the source taxonomy name as a prefix to all taxids in a column
"""
def return_prefixed(id, src_taxo):
if (
pd.notnull(id) and len(str(id).split(":")) == 2
): # .startswith(src_taxo + ":"):
return (
id
if not pd.isna(
pd.to_numeric(str(id).split(":")[-1], errors="coerce")
)
else np.nan
)
elif pd.notnull(id) and pd.isna(pd.to_numeric(id, errors="coerce")):
return np.nan
elif pd.notnull(id):
return f"{src_taxo}:{id}"
else:
return None
return col.map(lambda id: return_prefixed(id, src_taxo))
assert id_column or name_column
subset = [col for col in [id_column, name_column] if col]
sub_df = df[subset].astype(pd.StringDtype(), errors="ignore")
mapping = create_mapping(
sub_df
) # Mapping from items in drop_df to all duplicates in sub_df
drop_df = sub_df.drop_duplicates(subset=subset).replace("", np.nan)
id_df = None
name_df = None
if id_column:
assert source_taxonomy
if source_taxonomy in self.taxo_to_matcher:
drop_df[id_column] = add_prefix(drop_df[id_column], source_taxonomy)
id_df = drop_df.dropna(subset=[id_column])
if name_column:
drop_df["canonical_name"] = drop_df[name_column]
names = drop_df["canonical_name"].dropna().to_list()
norm_names = self.normalize_names(names)
drop_df.replace({"canonical_name": norm_names}, inplace=True)
if id_df is not None:
name_df = drop_df.loc[~drop_df.index.isin(id_df.index)]
else:
name_df = drop_df.dropna(subset=["canonical_name"])
sub_df["valid_id"] = None
sub_df["valid_name"] = None
sub_df["iri"] = None
if id_df is not None and not id_df.empty:
valid_ids = self.validate_taxids(
id_df, id_column, name_column, source_taxonomy
)
valid_ids = valid_ids.groupby(
["queryId"], dropna=False
) # Get all matches for each id
for index, row in drop_df.iterrows():
id = row[id_column]
if pd.notnull(id) and id in valid_ids.groups:
valid = valid_ids.get_group(id).iloc[0]
for i in mapping[index]:
sub_df.at[i, "valid_id"] = valid["matchId"]
sub_df.at[i, "valid_name"] = valid["matchName"]
sub_df.at[i, "iri"] = valid["iri"]
if name_df is not None and not name_df.empty:
valid_names = self.validate_names(name_df, "canonical_name")
valid_names = valid_names.groupby(
["queryName"], dropna=False
) # Get all matches for each name
for index, row in drop_df.iterrows():
name = row["canonical_name"] # name_column]
if pd.notnull(name) and name in valid_names.groups:
valid = valid_names.get_group(name).iloc[0]
for i in mapping[index]:
sub_df.at[i, "valid_id"] = valid["matchId"]
sub_df.at[i, "valid_name"] = valid["matchName"]
sub_df.at[i, "iri"] = valid["iri"]
if source_taxonomy == "SILVA":
self.logger.debug("SILVA : all names and ids are valid by default")
for index, row in drop_df.iterrows():
for i in mapping[index]:
if id_column:
sub_df.at[i, "valid_id"] = (
row[id_column]
if row[id_column].startswith("SILVA:")
else sub_df.at[i, "valid_id"]
)
taxid = row[id_column].split(":")[-1]
sub_df.at[i, "iri"] = (
f"https://www.arb-silva.de/{taxid}"
if row[id_column].startswith("SILVA:")
else sub_df.at[i, "iri"]
)
if name_column:
sub_df.at[i, "valid_name"] = (
row[name_column]
if sub_df.at[i, "valid_id"].startswith("SILVA:")
else sub_df.at[i, "valid_name"]
)
self.logger.debug(sub_df[["valid_id", "valid_name", "iri"]])
# Get some statistics
df_drop = sub_df.drop_duplicates(subset=subset)
nb_unique = df_drop.shape[0]
nb_valid = df_drop.dropna(subset=["valid_id"]).shape[0]
self.logger.info(f"Found {nb_valid}/{nb_unique} valid taxonomic entities")
return sub_df
def normalize_names(self, names):
"""
Given a list of taxonomic names, return the corresponding canonical forms
"""
f_temp = NamedTemporaryFile(delete=True) # False)
self.logger.debug(f"Write names to {f_temp.name} for validation using gnparser")
names_str = "\n".join([name.replace("\n", " ") for name in names])
f_temp.write(names_str.encode())
f_temp.read() # I don't know why but it is needed or sometimes the file appears empty when reading
canonical_names = get_canonical_names(f_temp.name)
f_temp.close()
canonical_names = canonical_names["CanonicalSimple"].to_list()
assert len(names) == len(canonical_names)
return {names[i]: canonical_names[i] for i in range(len(names))}
def validate_names(self, df, name_column):
"""
Validate all taxonomic names in a DataFrame column
"""
names = df[name_column].to_list()
self.logger.debug(f"Validate names {names} using {self.default_name_matcher}")
query = self.nomer.df_to_query(
df=df,
name_column=name_column,
)
matching = self.nomer.ask_nomer(query, matcher=self.default_name_matcher)
if not matching.empty:
mask = matching["matchType"].isin(
["SAME_AS", "SYNONYM_OF", "HAS_ACCEPTED_NAME"] # , "SIMILAR_TO"]
)
return matching[mask]
return matching
def validate_taxids(self, df, id_column, name_column=None, source_taxonomy=None):
"""
Validate all taxonomic identifiers in a DataFrame column against a given taxonomy
"""
matcher = self.taxo_to_matcher[source_taxonomy]
taxids = df[id_column].to_list()
self.logger.debug(f"Validate taxids {taxids} using {matcher}")
query = self.nomer.df_to_query(
df=df,
id_column=id_column,
name_column=name_column,
)
matching = self.nomer.ask_nomer(query, matcher=matcher)
if not matching.empty:
mask = matching["matchType"].isin(
["SAME_AS", "SYNONYM_OF", "HAS_ACCEPTED_NAME"] # , "SIMILAR_TO"]
)
return matching[mask]
return matching
def test_validator():
df = pd.read_csv(
"/home/leguilln/workspace/KNOWLEDGE_INTEGRATION/inteGraph/taxo_valid_test.csv",
sep=";",
keep_default_na=False,
)
validator = TaxonomicEntityValidator()
df = validator.validate(
df, id_column="consumer_key", name_column="consumer_scientificName"
)
df.to_csv(
"/home/leguilln/workspace/KNOWLEDGE_INTEGRATION/inteGraph/taxo_valid_result.csv",
sep=";",
)
class TaxonomicEntityMapper:
def __init__(self, config):
self.logger = logging.getLogger(__name__)
self.config = config
self.default_matcher = "wikidata-web"
self.ncbi_matcher = "ncbi"
self.target_taxonomy = "NCBI"
self.keep_taxo = ["NCBI", "GBIF", "IF"]
self.nomer = NomerHelper()
def df_to_triples(self, df):
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import RDFS, OWL
g = Graph()
for index, row in df.iterrows():
if row["queryIRI"] != row["iri"]:
g.add((URIRef(row["queryIRI"]), OWL.sameAs, URIRef(row["iri"])))
g.add((URIRef(row["queryIRI"]), RDFS.label, Literal(row["queryName"])))
if row["matchId"].split(":")[0].startswith("NCBI"):
taxid = row["matchId"].split(":")[-1]
g.add(
(
URIRef(row["queryIRI"]),
OWL.sameAs,
URIRef(f"http://purl.obolibrary.org/obo/NCBITaxon_{taxid}"),
)
)
g.add((URIRef(row["iri"]), RDFS.label, Literal(row["matchName"])))
if | pd.notna(row["matchRank"]) | pandas.notna |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 8 12:30:11 2016
@author: tkc
"""
import re, sys
import pandas as pd
import numpy as np
from collections import defaultdict
import tkinter as tk
if 'C:\\Users\\tkc\\Documents\\Python_Scripts\\Utilities' not in sys.path:
sys.path.append('C:\\Users\\tkc\\Documents\\Python_Scripts\\Utilities')
#%%
# Testing row=EDXsumm.loc[index]
def compsummary(EDXcomp, Elements, Elemexcl):
''' Compositional summary that keeps at % and identifying fields only '''
mycols=['Filename', 'Sample', 'Comments', 'SEMbasis', 'Phase']
# Handle quant including the excluded elements
missing=[i for i in Elemexcl if i not in Elements]
if len(missing)>0:
print(','.join(missing),' excluded elements missing from Element list')
missing=[i for i in Elements if i not in EDXcomp.columns]
if len(missing)>0:
print(','.join(missing),' elements not present in EDXcomp... removed')
for i, elem in enumerate(missing):
Elements.remove(elem)
real=[i for i in Elements if i not in Elemexcl]
# order with real elems first and excluded second
for i, elem in enumerate(real):
mycols.append('%'+elem)
mycols.append('Total')
if 'Total' not in EDXcomp.columns:
EDXcomp['Total']=np.nan
for i, elem in enumerate(Elemexcl):
mycols.append('%'+elem)
for index, row in EDXcomp.iterrows():
elemsumm=0.0
# Compute at.% including all elems
for i, elem in enumerate(Elements):
elemsumm+=row['%'+elem]
for i, elem in enumerate(Elements):
EDXcomp=EDXcomp.set_value(index,'%'+elem,
100*EDXcomp.loc[index]['%'+elem]/elemsumm)
# Redo the sum only for included (real) elems
elemsumm=0.0
for i, elem in enumerate(real):
elemsumm+=row['%'+elem]
for i, elem in enumerate(real):
EDXcomp=EDXcomp.set_value(index,'%'+elem,
100*EDXcomp.loc[index]['%'+elem]/elemsumm)
EDXcomp=EDXcomp.set_value(index,'Total',
elemsumm)
EDXsumm=EDXcomp[mycols]
return EDXsumm
def calcadjcnts(Integquantlog, elem1, elem2, adjcntratio, adjerr, kfacts):
'''Performs count, corrected count and error adjustment using pair of elements; elem1 is quant element in question
elem2 is other interfering peak, adjcntratio and adjerr from SEM_interferences
kfacts is list of params for this elem from SEMquantparams '''
filelist=np.ndarray.tolist(Integquantlog.Filename.unique())
kfactor, errkfact, mass=kfacts # unpack kfactor main params
for i, fname in enumerate(filelist):
match1=Integquantlog[(Integquantlog['Filename']==fname) & (Integquantlog['Element']==elem1)]
match2=Integquantlog[(Integquantlog['Filename']==fname) & (Integquantlog['Element']==elem2)]
if len(match2)!=1 or len(match1)!=1:
print('Problem finding ', elem1,' and/or', elem2, 'for ', fname)
continue
elem2cnts=match2.iloc[0]['Subtractedcounts']
elem1cnts=match1.iloc[0]['Subtractedcounts']
err1=match1.iloc[0]['% err'] # fractional/relative error for element in question
err2=match2.iloc[0]['% err'] # fractional/relative error for interfering element
term2fracterr=np.sqrt(err2**2+adjerr**2) # fractional error in term 2
term2err=term2fracterr*elem2cnts*adjcntratio # absolute error in term2 correction
adjcnts=elem1cnts-elem2cnts*adjcntratio # adjusted counts ratio for element 1
newerr=np.sqrt(err1**2+term2err**2) # absolute error in adjusted counts
newadjerr=newerr/adjcnts # New fractional error in elem 1
match1=match1.set_value(match1.index[0], 'Adjcounts', max(adjcnts,0))
if adjcnts>0:
match1=match1.set_value(match1.index[0], '% err', newadjerr) # reset error to include that due to interference
# now recalculate corrected counts and associated error
backcnts=match1.iloc[0]['Backcounts']
newcorrcnts=adjcnts*kfactor/mass # standard value
if adjcnts<2*np.sqrt(backcnts): # 2sigma of background as lower limit
newcorrcnts=2*np.sqrt(backcnts)*kfactor/mass # set to lower limit
print ('2sigma of background limiting value used for ', elem1, fname)
match1=match1.set_value(match1.index[0],'Significance',0) # set to zero as marker of limiting value
match1=match1.set_value(match1.index[0],'Correctedcounts',newcorrcnts)
# find combined 2 sigma error (counts and k-factor) as percent error
comberr=np.sqrt(errkfact**2+newadjerr**2) # combine the fractional errors
match1=match1.set_value(match1.index[0],'Errcorrcnts',newcorrcnts*comberr)
Integquantlog.loc[match1.index,match1.columns]=match1 # copies altered row back to main log
return Integquantlog
def recalcadjbatch(df, Interferences, SEMquantparams):
'''Calculate adjusted counts (from unresolvable interferences), then recalculate corrected counts with updated k-factor/ k-fact error
and mass result stored in corrcnts column and used for subsequent compositional determinations
can change AESquantresults and recalc at any time '''
if 'Correctedcounts' not in df:
df['Correctedcounts']=0.0 # new column for adjusted amplitude Correctedcounts (if not already present)
if 'Errcorrcnts' not in df:
df['Errcorrcnts']=0.0 # new column for error
# loop for each element, mask df, get appropriate k-factor & mass
df=df.reset_index(drop=True) # go ahead and reset index
elemlist=np.ndarray.tolist(df.Element.unique()) # list of unique elements from df
for index, row in Interferences.iterrows():
elem1=Interferences.loc[index]['Elem1']
elem2=Interferences.loc[index]['Elem2']
if elem1 in elemlist and elem2 in elemlist: # do the subtraction
adjratio=Interferences.loc[index]['Adjcntratio']
adjerr=Interferences.loc[index]['Percenterr'] # error as percentage for this adjustment
matchelem=SEMquantparams[SEMquantparams['element']==elem1]
if not matchelem.empty:
kfacts=[matchelem.iloc[0]['kfactor'],matchelem.iloc[0]['errkfact'],matchelem.iloc[0]['mass']] # kfactor and mass for this element/peak
df= calcadjcnts(df, elem1, elem2, adjratio, adjerr, kfacts) # makes correction for all in list, incl new corrcnts and error estimates
return df
def assembledataset(paramloglist, integloglist):
'''Construct master paramlog, integlog, backfitlog and peakslog for a list of directories '''
mycols=['Project','Basename','Filenumber','Filename','FilePath','Sample','Point','Comments','Date','Time','Beamkv','Livetime','Realtime','Detected','Converted','Stored','Timeconst','Deadfraction']
Masterparamlog=pd.DataFrame(columns=mycols)
mycols2=['Basename','Filenumber', 'Point', 'Filename', 'Filepath', 'Sample', 'Comments', 'Element', 'Energy', 'Shift', 'Rawcounts',
'Backcounts', 'Subtractedcounts', 'Adjcounts', '% err', 'Significance' , 'Correctedcounts', 'Errcorrcnts',] # for integration results
Masterinteglog=pd.DataFrame(columns=mycols2) # empty frame
for i, logfile in enumerate(paramloglist):
thisparam=pd.read_csv(logfile)
Masterparamlog= | pd.concat([Masterparamlog,thisparam], ignore_index=True) | pandas.concat |
from util.processor import fill_final_table
import config as config
import pandas as pd
import os
def restore_backup(db_conn):
table_names = list(config.WEATHER_URL_MAP.keys())
historical_data = {}
for table_name in table_names:
historical_data[table_name] = pd.read_sql_table(f"h_{table_name}", db_conn)
historical_data[table_name]['WeatherDate'] = | pd.to_datetime(historical_data[table_name]['WeatherDate']) | pandas.to_datetime |
from peakaboo.peak_classify import data_grouping
from peakaboo.peak_classify import cluster_classifier
import numpy as np
import pandas as pd
def test_data_grouping():
index_df = np.zeros((2, 2))
height_df = pd.DataFrame([1, 2, 3])
fwhm_df = pd.DataFrame([4, 5, 6])
threshold = 1
try:
data_grouping(index_df, height_df, fwhm_df, threshold)
except AttributeError:
pass
else:
print('Incorrect data type passed', 'Check peak_finding_master output')
index_df = pd.DataFrame()
height_df = pd.DataFrame([1, 2, 3])
fwhm_df = pd.DataFrame([4, 5, 6])
threshold = 1
t = data_grouping(index_df, height_df, fwhm_df, threshold)
assert len(t) == 0, "Index data frame is empty"
index_df = pd.DataFrame([1, 2, 3])
height_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
import pandas as pd
from os import path
from glob import glob
# merge result files from process.py into one file
dfs = []
for fn in glob('results/*'):
if path.basename(fn) == '_SUCCESS':
continue
with open(fn) as f:
dfs.append( | pd.read_csv(fn, sep='\t', names=['word', 'freq'], index_col='word') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 30 01:27:20 2018
@author: shugo
"""
import numpy as np
#import matplotlib.pyplot as plt
import pandas as pd
from scipy import interpolate
#from mpl_toolkits.mplot3d import Axes3D
def _convert(df,dfConverted,m): #m = 1から16まで
nrow = 60973 #格子点数(気圧面データのやつ)地上データは格子点数がことなるので注意
if m>13:
n =6*12+1+(m-13)*5
else:
n = 6*(m-1)+1
#HGT
df_tmp = df.loc[nrow*(n-1):nrow*n-1,[0,1,4,5,3,6]].reset_index(drop = True) #カラム名は抜き出した元のものが引き継がれる
dfConverted["date1"] = df_tmp[0] # inital date of MSM model #なのでここで指定するインデックスは注意0,1,4,5,3,6の順だよ
dfConverted["date2"] = df_tmp[1] # forecasted time in this file
dfConverted["log"] = df_tmp[4] # longtitute
dfConverted["lat"] = df_tmp[5] # altitude
dfConverted["hPa"] = df_tmp[3] # pressure
dfConverted["HGT"] = df_tmp[6] # altitude
#print(m,"HGT")
#UGRD: vel. in x-direction (EW)
df_tmp = df.loc[nrow*n:nrow*(n+1)-1,[6]]
dfConverted["UGRD"] = df_tmp.reset_index(drop = True)[6] #reset_index()でインデックスを振り直している(そのままだとindexはnrow~nrow*2-1となる)そのままだと代入先にそんなindexないよって怒られる
#print(df_tmp)
#VGRD: vel in y-direction (NS)
df_tmp = df.loc[nrow*(n+1):nrow*(n+2)-1,[6]]
dfConverted["VGRD"] = df_tmp.reset_index(drop = True)[6]
#TMP: temperature
df_tmp = df.loc[nrow*(n+2):nrow*(n+3)-1,[6]]
dfConverted["TMP"] = df_tmp.reset_index(drop = True)[6]
#VVEL: vertical vel.
df_tmp = df.loc[nrow*(n+3):nrow*(n+4)-1,[6]]
dfConverted["VVEL"] = df_tmp.reset_index(drop = True)[6]
del df_tmp
return None
def convert_csv(csv_tmp_name, csv_output_filename):
# --------------------------
# convert wgrib2 raw output csv into better-formatted csv.
# input:
# csv_tmp_name = csv file converted from. raw output of wgrib2
# csv_output_filename = csv file converted to.
# --------------------------
# convert tmp_csv to another csv
df = pd.read_csv(csv_tmp_name,delimiter = ',',header=None,skiprows = 0,engine='python', error_bad_lines=False, encoding = "Shift-JIS")
dfConverted_1000 = pd.DataFrame(columns = ['date1','date2','log','lat','hPa','HGT','UGRD','VGRD','TMP','VVEL'])
dfConverted_975 = pd.DataFrame(columns = ['date1','date2','log','lat','hPa','HGT','UGRD','VGRD','TMP','VVEL'])
dfConverted_950 = pd.DataFrame(columns = ['date1','date2','log','lat','hPa','HGT','UGRD','VGRD','TMP','VVEL'])
dfConverted_925 = pd.DataFrame(columns = ['date1','date2','log','lat','hPa','HGT','UGRD','VGRD','TMP','VVEL'])
dfConverted_900 = pd.DataFrame(columns = ['date1','date2','log','lat','hPa','HGT','UGRD','VGRD','TMP','VVEL'])
dfConverted_850 = pd.DataFrame(columns = ['date1','date2','log','lat','hPa','HGT','UGRD','VGRD','TMP','VVEL'])
dfConverted_800 = pd.DataFrame(columns = ['date1','date2','log','lat','hPa','HGT','UGRD','VGRD','TMP','VVEL'])
dfConverted_700 = pd.DataFrame(columns = ['date1','date2','log','lat','hPa','HGT','UGRD','VGRD','TMP','VVEL'])
dfConverted_600 = pd.DataFrame(columns = ['date1','date2','log','lat','hPa','HGT','UGRD','VGRD','TMP','VVEL'])
dfConverted_500 = | pd.DataFrame(columns = ['date1','date2','log','lat','hPa','HGT','UGRD','VGRD','TMP','VVEL']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 31 14:51:24 2020
@author: <NAME>
"""
import os
import itertools
import json
import shutil
from datetime import datetime
import utils.support as sup
from operator import itemgetter
import numpy as np
import pandas as pd
import readers.log_splitter as ls
from extraction import log_replayer as rpl
from extraction import role_discovery as rl
from core_modules.times_allocator import embedding_trainer as em
from core_modules.times_allocator import times_model_optimizer as to
from core_modules.times_allocator import model_hpc_optimizer as hpc_op
from core_modules.times_allocator import intercase_features_calculator as it
from core_modules.times_allocator import times_predictor as tp
from sklearn.preprocessing import MaxAbsScaler
from pickle import dump
class TimesGenerator():
"""
This class evaluates the inter-arrival times
"""
def __init__(self, process_graph, log, parms):
"""constructor"""
self.log = log
self.process_graph = process_graph
self.parms = parms
self.one_timestamp = parms['read_options']['one_timestamp']
self.timeformat = parms['read_options']['timeformat']
self.model_metadata = dict()
self._load_model()
# =============================================================================
# Generate traces
# =============================================================================
def _load_model(self) -> None:
model_exist = True
model_path = self._define_model_path(self.parms)
# Save path(s) if the model exists
if isinstance(model_path, tuple):
self.proc_model_path = model_path[0]
self.wait_model_path = model_path[1]
model_exist = (
os.path.exists(model_path[0]) and os.path.exists(model_path[1]))
self.parms['proc_model_path'] = model_path[0]
self.parms['wait_model_path'] = model_path[1]
else:
self.model_path = model_path
model_exist = os.path.exists(model_path)
self.parms['model_path'] = model_path
# Discover and compare
if not model_exist or self.parms['update_times_gen']:
times_optimizer = self._discover_model()
save, metadata_file = self._compare_models(
times_optimizer.best_loss,
self.parms['update_times_gen'], model_path)
if save:
self._save_model(metadata_file, times_optimizer, model_path)
# Save basic features scaler
name = metadata_file.replace('_meta.json', '')
dump(self.scaler, open(name+'_scaler.pkl','wb'))
# clean output folder
shutil.rmtree(self.parms['output'])
def generate(self, sequences, iarr):
model_path = (self.model_path
if self.parms['model_type'] in ['basic', 'inter', 'inter_nt']
else (self.proc_model_path, self.wait_model_path))
predictor = tp.TimesPredictor(model_path,
self.parms,
sequences,
iarr)
return predictor.predict(self.parms['model_type'])
@staticmethod
def _define_model_path(parms):
path = parms['times_gen_path']
fname = parms['file'].split('.')[0]
inter = parms['model_type'] in ['inter', 'dual_inter', 'inter_nt']
is_dual = parms['model_type'] == 'dual_inter'
arpool = parms['all_r_pool']
next_ac = parms['model_type'] == 'inter_nt'
if inter:
if is_dual:
if arpool:
return (os.path.join(path, fname+'_dpiapr.h5'),
os.path.join(path, fname+'_dwiapr.h5'))
else:
return (os.path.join(path, fname+'_dpispr.h5'),
os.path.join(path, fname+'_dwispr.h5'))
else:
if next_ac:
if arpool:
return os.path.join(path, fname+'_inapr.h5')
else:
return os.path.join(path, fname+'_inspr.h5')
else:
if arpool:
return os.path.join(path, fname+'_iapr.h5')
else:
return os.path.join(path, fname+'_ispr.h5')
else:
return os.path.join(path, fname+'.h5')
def _compare_models(self, acc, model_exist, file):
if isinstance(file, tuple):
model = os.path.splitext(os.path.split(file[0])[1])[0]
model = (model.replace('dpiapr', 'diapr')
if self.parms['all_r_pool'] else
model.replace('dpispr', 'dispr'))
metadata_file = os.path.join(self.parms['times_gen_path'],
model+'_meta.json')
else:
model = os.path.splitext(os.path.split(file)[1])[0]
metadata_file = os.path.join(self.parms['times_gen_path'],
model+'_meta.json')
# compare with existing model
save = True
if model_exist:
# Loading of parameters from existing model
if os.path.exists(metadata_file):
with open(metadata_file) as file:
data = json.load(file)
if data['loss'] < acc:
save = False
return save, metadata_file
def _save_model(self, metadata_file, times_optimizer, model_path):
model_metadata = dict()
# best structure mining parameters
model_metadata['loss'] = times_optimizer.best_loss
model_metadata['generated_at'] = (
datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
model_metadata['ac_index'] = self.ac_index
model_metadata['usr_index'] = self.usr_index
model_metadata['log_size'] = len(pd.DataFrame(self.log).caseid.unique())
model_metadata = {**model_metadata,
**times_optimizer.best_parms}
model_name = metadata_file.replace('_meta.json', '')
if self.parms['model_type'] in ['inter', 'dual_inter', 'inter_nt']:
model_metadata['roles'] = self.roles
model_metadata['roles_table'] = self.roles_table.to_dict('records')
model_metadata['inter_mean_states'] = self.mean_states
# Save intecase scalers
dump(self.inter_scaler, open(model_name+'_inter_scaler.pkl', 'wb'))
if self.parms['model_type'] == 'dual_inter':
dump(self.end_inter_scaler,
open(model_name+'_end_inter_scaler.pkl', 'wb'))
# Save models
if isinstance(model_path, tuple):
shutil.copyfile(os.path.join(times_optimizer.best_output,
os.path.split(model_path[0])[1]),
self.proc_model_path)
shutil.copyfile(os.path.join(times_optimizer.best_output,
os.path.split(model_path[1])[1]),
self.wait_model_path)
else:
# Copy best model to destination folder
source = os.path.join(times_optimizer.best_output,
self.parms['file'].split('.')[0]+'.h5')
shutil.copyfile(source, self.model_path)
# Save metadata
sup.create_json(model_metadata, metadata_file)
# =============================================================================
# Train model
# =============================================================================
def _discover_model(self, **kwargs):
# indexes creation
self.ac_index, self.index_ac = self._indexing(self.log.data, 'task')
self.usr_index, self.index_usr = self._indexing(self.log.data, 'user')
# replay
self._replay_process()
if self.parms['model_type'] in ['inter', 'dual_inter', 'inter_nt']:
self._add_intercases()
self._split_timeline(0.8, self.one_timestamp)
self.log_train = self._add_calculated_times(self.log_train)
self.log_valdn = self._add_calculated_times(self.log_valdn)
# Add index to the event log
ac_idx = lambda x: self.ac_index[x['task']]
self.log_train['ac_index'] = self.log_train.apply(ac_idx, axis=1)
self.log_valdn['ac_index'] = self.log_valdn.apply(ac_idx, axis=1)
if self.parms['model_type'] in ['inter_nt', 'dual_inter']:
ac_idx = lambda x: self.ac_index[x['n_task']]
self.log_train['n_ac_index'] = self.log_train.apply(ac_idx, axis=1)
self.log_valdn['n_ac_index'] = self.log_valdn.apply(ac_idx, axis=1)
# Load embedding matrixes
emb_trainer = em.EmbeddingTrainer(self.parms,
pd.DataFrame(self.log),
self.ac_index,
self.index_ac,
self.usr_index,
self.index_usr)
self.ac_weights = emb_trainer.load_embbedings()
# Scale features
self._transform_features()
# Optimizer
self.parms['output'] = os.path.join('output_files', sup.folder_id())
if self.parms['opt_method'] == 'rand_hpc':
times_optimizer = hpc_op.ModelHPCOptimizer(self.parms,
self.log_train,
self.log_valdn,
self.ac_index,
self.ac_weights)
times_optimizer.execute_trials()
elif self.parms['opt_method'] == 'bayesian':
times_optimizer = to.TimesModelOptimizer(self.parms,
self.log_train,
self.log_valdn,
self.ac_index,
self.ac_weights)
times_optimizer.execute_trials()
return times_optimizer
# =============================================================================
# Support modules
# =============================================================================
def _replay_process(self) -> None:
"""
Process replaying
"""
replayer = rpl.LogReplayer(self.process_graph,
self.log.get_traces(),
self.parms,
msg='reading conformant training traces:')
self.log = replayer.process_stats.rename(columns={'resource':'user'})
self.log['user'] = self.log['user'].fillna('sys')
self.log = self.log.to_dict('records')
@staticmethod
def _indexing(log, feat):
log = pd.DataFrame(log)
# Activities index creation
if feat=='task':
log = log[~log[feat].isin(['Start', 'End'])]
else:
log[feat] = log[feat].fillna('sys')
subsec_set = log[feat].unique().tolist()
subsec_set = [x for x in subsec_set if not x in ['Start', 'End']]
index = dict()
for i, _ in enumerate(subsec_set):
index[subsec_set[i]] = i + 1
index['Start'] = 0
index['End'] = len(index)
index_inv = {v: k for k, v in index.items()}
return index, index_inv
def _split_timeline(self, size: float, one_ts: bool) -> None:
"""
Split an event log dataframe by time to peform split-validation.
prefered method time splitting removing incomplete traces.
If the testing set is smaller than the 10% of the log size
the second method is sort by traces start and split taking the whole
traces no matter if they are contained in the timeframe or not
Parameters
----------
size : float, validation percentage.
one_ts : bool, Support only one timestamp.
"""
# Split log data
splitter = ls.LogSplitter(self.log)
train, valdn = splitter.split_log('timeline_contained', size, one_ts)
total_events = len(self.log)
# Check size and change time splitting method if necesary
if len(valdn) < int(total_events*0.1):
train, valdn = splitter.split_log('timeline_trace', size, one_ts)
# Set splits
key = 'end_timestamp' if one_ts else 'start_timestamp'
valdn = pd.DataFrame(valdn)
train = pd.DataFrame(train)
valdn = valdn[~valdn.task.isin(['Start', 'End'])]
train = train[~train.task.isin(['Start', 'End'])]
self.log_valdn = (valdn.sort_values(key, ascending=True)
.reset_index(drop=True))
self.log_train = (train.sort_values(key, ascending=True)
.reset_index(drop=True))
def _add_intercases(self):
"""Appends the indexes and relative time to the dataframe.
parms:
log: dataframe.
Returns:
Dataframe: The dataframe with the calculated features added.
"""
log = | pd.DataFrame(self.log) | pandas.DataFrame |
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.losses import mse
from keras.preprocessing.text import Tokenizer
import sys
import importlib
config_path = ".".join(["models", sys.argv[1]]) + "." if len(sys.argv) >= 2 else ""
config = importlib.import_module(config_path+"config")
attention_setting = importlib.import_module(config_path+"attention_setting")
pam_mapping = {}
def get_map(row):
pam_mapping[row[0]] = row[1]
class __NtsTokenizer(Tokenizer):
def __init__(self, nt):
Tokenizer.__init__(self)
if nt == 4:
self.dic = [a + b + c + d for a in 'ATCG' for b in 'ATCG' for c in 'ATCG' for d in 'ATCG']
elif nt == 3:
self.dic = [a + b + c for a in 'ATCG' for b in 'ATCG' for c in 'ATCG']
elif nt == 2:
self.dic = [a + b for a in 'ATCG' for b in 'ATCG']
elif nt == 1:
self.dic = [a for a in 'ATCG']
else:
self.dic = []
self.fit_on_texts(self.dic)
def split_seqs(seq, nt = config.word_len):
t = __NtsTokenizer(nt = nt)
result = ''
lens = len(seq)
for i in range(lens - nt + 1):
result += ' ' + seq[i:i+nt].upper()
seq_result = t.texts_to_sequences([result])
return pd.Series(seq_result[0]) - 1
def split_mismatchs(seq1, seq2):
t = __NtsTokenizer(nt=2)
result = ''
lens = len(seq1)
for i in range(lens):
result += ' ' + seq1[i] + seq2[i]
seq_result = t.texts_to_sequences([result.upper()])
return pd.Series(seq_result[0]) - 1
def __get_expand_table_3(rev = False):
possibilities = pd.Series([a + b + c + d for a in 'ATCG' for b in 'ATCG' for c in 'ATCG' for d in 'ATCG']).to_frame(
name='ori_seq')
possibilities['key'] = 0
change = pd.Series([a + b for a in 'ATCG' for b in 'ATCG']).to_frame(name='change')
change['key'] = 0
merged = | pd.merge(possibilities, change, on='key') | pandas.merge |
import pandas as pd
import numpy as np
import os
import sqlalchemy
import data_functions as datfunc
import utility_functions as utilfunc
import agent_mutation
from agents import Agents, Solar_Agents
from pandas import DataFrame
import json
# Load logger
logger = utilfunc.get_logger()
#%%
def check_table_exists(schema, table, con):
"""
Checks if table exists in schema
Parameters
----------
**schema** : 'SQL schema'
SQL schema in which to check if given table exists
**table** : 'SQL table'
SQL table to be searched
**con** : 'SQL connection'
SQL connection to connect to database
Returns
-------
**True or False** : 'bool'
Returns True if table exists in schema.
"""
sql = """SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema = '{}' AND table_name = '{}');""".format(schema, table)
return pd.read_sql(sql, con).values[0][0]
def get_psql_table_fields(engine, schema, name):
"""
Creates numpy array of columns from specified schema and table
Parameters
----------
**engine** : 'SQL engine'
SQL engine to intepret SQL query
**schema** : 'SQL schema'
SQL schema to pull table from
**name** : 'string'
Name of the table from which fields are retrieved
Returns
-------
numpy array : 'np.array'
Numpy array of columns
"""
sql = "SELECT column_name FROM information_schema.columns WHERE table_schema = '{}' AND table_name = '{}'".format(schema, name)
return np.concatenate(pd.read_sql_query(sql, engine).values)
def df_to_psql(df, engine, schema, owner, name, if_exists='replace', append_transformations=False):
"""
Uploads dataframe to database
Parameters
----------
**df** : 'pd.df'
Dataframe to upload to database
**engine** : 'SQL table'
SQL engine to intepret SQL query
**schema** : 'SQL schema'
Schema in which to upload df
**owner** : 'string'
Owner of schema
**name** : 'string'
Name to be given to table that is uploaded
**if_exists** : 'replace or append'
If table exists and if if_exists set to replace, replaces table in database. If table exists and if if_exists set to append, appendss table in database.
**append_transformations** : 'bool'
IDK
Returns
-------
**df** : 'pd.df'
Dataframe that was uploaded to database
"""
d_types = {}
transform = {}
f_d_type = {}
sql_type = {}
delete_list = []
orig_fields = df.columns.values
df.columns = [i.lower() for i in orig_fields]
for f in df.columns:
df_filter = pd.notnull(df[f]).values
if sum(df_filter) > 0:
f_d_type[f] = type(df[f][df_filter].values[0]).__name__.lower()
if f_d_type[f][0:3].lower() == 'int':
sql_type[f] = 'INTEGER'
if f_d_type[f][0:5].lower() == 'float':
d_types[f] = sqlalchemy.types.NUMERIC
sql_type[f] = 'NUMERIC'
if f_d_type[f][0:3].lower() == 'str':
sql_type[f] = 'VARCHAR'
if f_d_type[f] == 'list':
d_types[f] = sqlalchemy.types.ARRAY(sqlalchemy.types.STRINGTYPE)
transform[f] = lambda x: json.dumps(x)
sql_type[f] = 'VARCHAR'
if f_d_type[f] == 'ndarray':
d_types[f] = sqlalchemy.types.ARRAY(sqlalchemy.types.STRINGTYPE)
transform[f] = lambda x: json.dumps(list(x))
sql_type[f] = 'VARCHAR'
if f_d_type[f] == 'dict':
d_types[f] = sqlalchemy.types.STRINGTYPE
transform[f] = lambda x: json.dumps(
dict([(k_v[0], list(k_v[1])) if (type(k_v[1]).__name__ == 'ndarray') else (k_v[0], k_v[1]) for k_v in list(x.items())]))
sql_type[f] = 'VARCHAR'
if f_d_type[f] == 'interval':
d_types[f] = sqlalchemy.types.STRINGTYPE
transform[f] = lambda x: str(x)
sql_type[f] = 'VARCHAR'
if f_d_type[f] == 'dataframe':
d_types[f] = sqlalchemy.types.STRINGTYPE
transform[f] = lambda x: x.to_json() if isinstance(x,DataFrame) else str(x)
sql_type[f] = 'VARCHAR'
else:
orig_fields = [i for i in orig_fields if i.lower()!=f]
delete_list.append(f)
df = df.drop(delete_list, axis=1)
for k, v in list(transform.items()):
if append_transformations:
df[k + "_" + f_d_type[k]] = df[k].apply(v)
sql_type[k + "_" + f_d_type[k]] = sql_type[k]
del df[k]
del sql_type[k]
else:
df[k] = df[k].apply(v)
conn = engine.connect()
if if_exists == 'append':
fields = [i.lower() for i in get_psql_table_fields(engine, schema, name)]
for f in list(set(df.columns.values) - set(fields)):
sql = "ALTER TABLE {}.{} ADD COLUMN {} {}".format(schema, name, f, sql_type[f])
conn.execute(sql)
df.to_sql(name, engine, schema=schema, index=False, dtype=d_types, if_exists=if_exists)
sql = 'ALTER TABLE {}."{}" OWNER to "{}"'.format(schema, name, owner)
conn.execute(sql)
conn.close()
engine.dispose()
df.columns = orig_fields
return df
#%%
def get_scenario_settings(schema, con):
"""
Creates dataframe of default scenario settings from input_main_scenario_options table
Parameters
----------
**schema** : 'SQL schema'
Schema in which to look for the scenario settings
**con** : 'SQL connection'
SQL connection to connect to database
Returns
-------
**df** : 'pd.df'
Dataframe of default scenario settings
"""
sql = "SELECT * FROM {}.input_main_scenario_options".format(schema)
df = pd.read_sql(sql, con)
return df
def get_userdefined_scenario_settings(schema, table_name, con):
"""
Creates dataframe of user created scenario settings
Parameters
----------
**schema** : 'SQL schema'
Schema in which to look for the scenario settings
**con** : 'SQL connection'
SQL connection to connect to database
Returns
-------
**df** : 'pd.df'
Dataframe of user created scenario settings
"""
sql = "SELECT * FROM {}.{}".format(schema, table_name)
df = pd.read_sql(sql, con)
return df
#%%
def import_table(scenario_settings, con, engine, role, input_name, csv_import_function=None):
"""
Imports table from csv given the name of the csv
Parameters
----------
**scenario_settings** : 'SQL schema'
Schema in which to look for the scenario settings
**con** : 'SQL connection'
SQL connection to connect to database
**engine** : 'SQL engine'
SQL engine to intepret SQL query
**role** : 'string'
Owner of schema
**input_name** : 'string'
Name of the csv file that should be imported
**csv_import_function** : 'function'
Specific function to import and munge csv
Returns
-------
**df** : 'pd.df'
Dataframe of the table that was imported
"""
schema = scenario_settings.schema
shared_schema = 'diffusion_shared'
input_data_dir = scenario_settings.input_data_dir
user_scenario_settings = get_scenario_settings(schema, con)
scenario_name = user_scenario_settings[input_name].values[0]
if scenario_name == 'User Defined':
userdefined_table_name = "input_" + input_name + "_user_defined"
scenario_userdefined_name = get_userdefined_scenario_settings(schema, userdefined_table_name, con)
scenario_userdefined_value = scenario_userdefined_name['val'].values[0]
if check_table_exists(shared_schema, scenario_userdefined_value, con):
sql = 'SELECT * FROM {}."{}"'.format(shared_schema, scenario_userdefined_value)
df = pd.read_sql(sql, con)
else:
df = pd.read_csv(os.path.join(input_data_dir, input_name, scenario_userdefined_value + '.csv'), index_col=False)
if csv_import_function is not None:
df = csv_import_function(df)
df_to_psql(df, engine, shared_schema, role, scenario_userdefined_value)
else:
if input_name == 'elec_prices':
df = datfunc.get_rate_escalations(con, scenario_settings.schema)
elif input_name == 'load_growth':
df = datfunc.get_load_growth(con, scenario_settings.schema)
elif input_name == 'pv_prices':
df = datfunc.get_technology_costs_solar(con, scenario_settings.schema)
return df
#%%
def stacked_sectors(df):
"""
Takes dataframe and sorts table fields by sector
Parameters
----------
**df** : 'pd.df'
Dataframe to be sorted by sector.
Returns
-------
**output** : 'pd.df'
Dataframe of the table that was imported and split by sector
"""
sectors = ['res', 'ind','com','nonres','all']
output = pd.DataFrame()
core_columns = [x for x in df.columns if x.split("_")[-1] not in sectors]
for sector in sectors:
if sector in set([i.split("_")[-1] for i in df.columns]):
sector_columns = [x for x in df.columns if x.split("_")[-1] == sector]
rename_fields = {k:"_".join(k.split("_")[0:-1]) for k in sector_columns}
temp = df.loc[:,core_columns + sector_columns]
temp = temp.rename(columns=rename_fields)
if sector =='nonres':
sector_list = ['com', 'ind']
elif sector=='all':
sector_list = ['com', 'ind','res']
else:
sector_list = [sector]
for s in sector_list:
temp['sector_abbr'] = s
output = pd.concat([output, temp], ignore_index=True, sort=False)
return output
#%%
def deprec_schedule(df):
"""
Takes depreciation schedule and sorts table fields by depreciation year
Parameters
----------
**df** : 'pd.df'
Dataframe to be sorted by sector.
Returns
-------
**output** : 'pd.df'
Dataframe of depreciation schedule sorted by year
"""
columns = ['1', '2', '3', '4', '5', '6']
df['deprec_sch']=df.apply(lambda x: [x.to_dict()[y] for y in columns], axis=1)
max_required_year = 2050
max_input_year = np.max(df['year'])
missing_years = np.arange(max_input_year + 1, max_required_year + 1, 1)
last_entry = df[df['year'] == max_input_year]
for year in missing_years:
last_entry['year'] = year
df = df.append(last_entry)
return df.loc[:,['year','sector_abbr','deprec_sch']]
#%%
def melt_year(parameter_name):
"""
Returns a function to melt dataframe's columns of years and parameter values to the row axis
Parameters
----------
**parameter name** : 'string'
Name of the parameter value in dataframe.
Returns
-------
**function** : 'function'
Function that melts years and parameter value to row axis
"""
def function(df):
"""
Unpivots years and values from columns of dataframe to rows for each state abbreviation
Parameters
----------
**df** : 'pd.df'
Dataframe to be unpivot.
Returns
-------
**df_tidy** : 'pd.df'
Dataframe with every other year and the parameter value for that year as rows for each state
"""
years = np.arange(2014, 2051, 2)
years = [str(year) for year in years]
df_tidy = pd.melt(df, id_vars='state_abbr', value_vars=years, var_name='year', value_name=parameter_name)
df_tidy['year'] = df_tidy['year'].astype(int)
return df_tidy
return function
#%%
def import_agent_file(scenario_settings, con, cur, engine, model_settings, agent_file_status, input_name):
"""
Generates new agents or uses pre-generated agents from provided .pkl file
Parameters
----------
**scenario_settings** : 'SQL schema'
Schema of the scenario settings
**con** : 'SQL connection'
SQL connection to connect to database
**cur** : 'SQL cursor'
Cursor
**engine** : 'SQL engine'
SQL engine to intepret SQL query
**model_settings** : 'object'
Model settings that apply to all scenarios
**agent_file_status** : 'attribute'
Attribute that describes whether to use pre-generated agent file or create new
**input_name** : 'string'
.Pkl file name substring of pre-generated agent table
Returns
-------
**solar_agents** : 'Class'
Instance of Agents class with either user pre-generated or new data
"""
schema = scenario_settings.schema
input_agent_dir = model_settings.input_agent_dir
state_to_model = scenario_settings.state_to_model
ISO_List = ['ERCOT', 'NEISO', 'NYISO', 'CAISO', 'PJM', 'MISO', 'SPP']
if agent_file_status == 'Use pre-generated Agents':
userdefined_table_name = "input_" + input_name + "_user_defined"
scenario_userdefined_name = get_userdefined_scenario_settings(schema, userdefined_table_name, con)
scenario_userdefined_value = scenario_userdefined_name['val'].values[0]
solar_agents_df = pd.read_pickle(os.path.join(input_agent_dir, scenario_userdefined_value+".pkl"))
if scenario_settings.region in ISO_List:
solar_agents_df = pd.read_pickle(os.path.join(input_agent_dir, scenario_userdefined_value+".pkl"))
else:
solar_agents_df = solar_agents_df[solar_agents_df['state_abbr'].isin(state_to_model)]
if solar_agents_df.empty:
raise ValueError('Region not present within pre-generated agent file - Edit Inputsheet')
solar_agents = Agents(solar_agents_df)
solar_agents.on_frame(agent_mutation.elec.reassign_agent_tariffs, con)
else:
raise ValueError('Generating agents is not supported at this time. Please select "Use pre-generated Agents" in the input sheet')
return solar_agents
#%%
def process_elec_price_trajectories(elec_price_traj):
"""
Returns the trajectory of the change in electricity prices over time with 2018 as the base year
Parameters
----------
**elec_price_traj** : 'pd.df'
Dataframe of electricity prices by year and ReEDS BA
Returns
-------
**elec_price_change_traj** : 'pd.df'
Dataframe of annual electricity price change factors from base year
"""
county_to_ba_lkup = pd.read_csv('county_to_ba_mapping.csv')
# For SS19, when using Retail Electricity Prices from ReEDS
base_year_prices = elec_price_traj[elec_price_traj['year']==2018]
base_year_prices.rename(columns={'elec_price_res':'res_base',
'elec_price_com':'com_base',
'elec_price_ind':'ind_base'}, inplace=True)
elec_price_change_traj = pd.merge(elec_price_traj, base_year_prices[['res_base', 'com_base', 'ind_base', 'ba']], on='ba')
elec_price_change_traj['elec_price_change_res'] = elec_price_change_traj['elec_price_res'] / elec_price_change_traj['res_base']
elec_price_change_traj['elec_price_change_com'] = elec_price_change_traj['elec_price_com'] / elec_price_change_traj['com_base']
elec_price_change_traj['elec_price_change_ind'] = elec_price_change_traj['elec_price_ind'] / elec_price_change_traj['ind_base']
# Melt by sector
res_df = pd.DataFrame(elec_price_change_traj['year'])
res_df = elec_price_change_traj[['year', 'elec_price_change_res', 'ba']]
res_df.rename(columns={'elec_price_change_res':'elec_price_multiplier'}, inplace=True)
res_df['sector_abbr'] = 'res'
com_df = pd.DataFrame(elec_price_change_traj['year'])
com_df = elec_price_change_traj[['year', 'elec_price_change_com', 'ba']]
com_df.rename(columns={'elec_price_change_com':'elec_price_multiplier'}, inplace=True)
com_df['sector_abbr'] = 'com'
ind_df = pd.DataFrame(elec_price_change_traj['year'])
ind_df = elec_price_change_traj[['year', 'elec_price_change_ind', 'ba']]
ind_df.rename(columns={'elec_price_change_ind':'elec_price_multiplier'}, inplace=True)
ind_df['sector_abbr'] = 'ind'
elec_price_change_traj = pd.concat([res_df, com_df, ind_df], ignore_index=True, sort=False)
elec_price_change_traj = pd.merge(county_to_ba_lkup, elec_price_change_traj, how='left', on=['ba'])
elec_price_change_traj.drop(['ba'], axis=1, inplace=True)
return elec_price_change_traj
#%%
def process_wholesale_elec_prices(wholesale_elec_price_traj):
"""
Returns the trajectory of the change in wholesale electricity prices over time
Parameters
----------
**wholesale_elec_price_traj** : 'pd.df'
Dataframe of wholesale electricity prices by year and ReEDS BA
Returns
-------
**wholesale_elec_price_change_traj** : 'pd.df'
Dataframe of annual electricity price change factors from base year
"""
county_to_ba_lkup = pd.read_csv('county_to_ba_mapping.csv')
years = np.arange(2014, 2051, 2)
years = [str(year) for year in years]
wholesale_elec_price_change_traj = pd.melt(wholesale_elec_price_traj, id_vars='ba', value_vars=years, var_name='year', value_name='wholesale_elec_price_dollars_per_kwh')
wholesale_elec_price_change_traj['year'] = wholesale_elec_price_change_traj['year'].astype(int)
wholesale_elec_price_change_traj = | pd.merge(county_to_ba_lkup, wholesale_elec_price_change_traj, how='left', on=['ba']) | pandas.merge |
import os
import pandas as pd
from .billboard import clean_artist_col, clean_billboard
def test_prune_dummy():
dummy1 = pd.Series(data=['Major Lazer & DJ Snake Featuring MO'])
pruned1 = clean_artist_col(dummy1)
assert pruned1[0] == 'major lazer'
dummy2 = pd.Series(data=['<NAME> Featuring A$AP Rocky',
# The following two lines represent a single row
('Macklemore & <NAME> Featuring <NAME>,' +
'<NAME>, <NAME> & <NAME>'),
'<NAME> And <NAME> Featuring Quavo'])
pruned2 = clean_artist_col(dummy2)
assert pruned2[0] == '<NAME>'
assert pruned2[1] == 'macklemore'
assert pruned2[2] == 'young thug'
def test_prune_full(datadir):
hot100_path = os.path.join(datadir, 'hot100.csv')
df = | pd.read_csv(hot100_path) | pandas.read_csv |
import inspect
import os
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.model_understanding.graphs import visualize_decision_tree
from evalml.pipelines.components import ComponentBase
from evalml.utils.gen_utils import (
SEED_BOUNDS,
_convert_to_woodwork_structure,
_convert_woodwork_types_wrapper,
_rename_column_names_to_numeric,
classproperty,
convert_to_seconds,
drop_rows_with_nans,
get_importable_subclasses,
get_random_seed,
import_or_raise,
infer_feature_types,
jupyter_check,
pad_with_nans,
save_plot
)
@patch('importlib.import_module')
def test_import_or_raise_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml")
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message")
with pytest.raises(Exception, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib")
def test_import_or_raise_imports():
math = import_or_raise("math", "error message")
assert math.ceil(0.1) == 1
def test_convert_to_seconds():
assert convert_to_seconds("10 s") == 10
assert convert_to_seconds("10 sec") == 10
assert convert_to_seconds("10 second") == 10
assert convert_to_seconds("10 seconds") == 10
assert convert_to_seconds("10 m") == 600
assert convert_to_seconds("10 min") == 600
assert convert_to_seconds("10 minute") == 600
assert convert_to_seconds("10 minutes") == 600
assert convert_to_seconds("10 h") == 36000
assert convert_to_seconds("10 hr") == 36000
assert convert_to_seconds("10 hour") == 36000
assert convert_to_seconds("10 hours") == 36000
with pytest.raises(AssertionError, match="Invalid unit."):
convert_to_seconds("10 years")
def test_get_random_seed_rng():
def make_mock_random_state(return_value):
class MockRandomState(np.random.RandomState):
def __init__(self):
self.min_bound = None
self.max_bound = None
super().__init__()
def randint(self, min_bound, max_bound):
self.min_bound = min_bound
self.max_bound = max_bound
return return_value
return MockRandomState()
rng = make_mock_random_state(42)
assert get_random_seed(rng) == 42
assert rng.min_bound == SEED_BOUNDS.min_bound
assert rng.max_bound == SEED_BOUNDS.max_bound
def test_get_random_seed_int():
# ensure the invariant "min_bound < max_bound" is enforced
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=0)
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=-1)
# test default boundaries to show the provided value should modulate within the default range
assert get_random_seed(SEED_BOUNDS.max_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.max_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.max_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.max_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.max_bound + 2) == SEED_BOUNDS.min_bound + 2
assert get_random_seed(SEED_BOUNDS.min_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.min_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.min_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.min_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.min_bound + 2) == SEED_BOUNDS.min_bound + 2
# vectorize get_random_seed via a wrapper for easy evaluation
default_min_bound = inspect.signature(get_random_seed).parameters['min_bound'].default
default_max_bound = inspect.signature(get_random_seed).parameters['max_bound'].default
assert default_min_bound == SEED_BOUNDS.min_bound
assert default_max_bound == SEED_BOUNDS.max_bound
def get_random_seed_vec(min_bound=None, max_bound=None): # passing None for either means no value is provided to get_random_seed
def get_random_seed_wrapper(random_seed):
return get_random_seed(random_seed,
min_bound=min_bound if min_bound is not None else default_min_bound,
max_bound=max_bound if max_bound is not None else default_max_bound)
return np.vectorize(get_random_seed_wrapper)
# ensure that regardless of the setting of min_bound and max_bound, the output of get_random_seed always stays
# between the min_bound (inclusive) and max_bound (exclusive), and wraps neatly around that range using modular arithmetic.
vals = np.arange(-100, 100)
def make_expected_values(vals, min_bound, max_bound):
return np.array([i if (min_bound <= i and i < max_bound) else ((i - min_bound) % (max_bound - min_bound)) + min_bound
for i in vals])
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=None)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=10)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=None)(vals),
make_expected_values(vals, min_bound=-10, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=0, max_bound=5)(vals),
make_expected_values(vals, min_bound=0, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=0)(vals),
make_expected_values(vals, min_bound=-5, max_bound=0))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=5)(vals),
make_expected_values(vals, min_bound=-5, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=5, max_bound=10)(vals),
make_expected_values(vals, min_bound=5, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=-5)(vals),
make_expected_values(vals, min_bound=-10, max_bound=-5))
def test_class_property():
class MockClass:
name = "MockClass"
@classproperty
def caps_name(cls):
return cls.name.upper()
assert MockClass.caps_name == "MOCKCLASS"
def test_get_importable_subclasses_wont_get_custom_classes():
class ChildClass(ComponentBase):
pass
assert ChildClass not in get_importable_subclasses(ComponentBase)
@patch('importlib.import_module')
def test_import_or_warn_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml", warning=True)
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message", warning=True)
with pytest.warns(UserWarning, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib", warning=True)
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check_errors(mock_import_or_raise):
mock_import_or_raise.side_effect = ImportError
assert not jupyter_check()
mock_import_or_raise.side_effect = Exception
assert not jupyter_check()
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check(mock_import_or_raise):
mock_import_or_raise.return_value = MagicMock()
mock_import_or_raise().core.getipython.get_ipython.return_value = True
assert jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = False
assert not jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = None
assert not jupyter_check()
def _check_equality(data, expected, check_index_type=True):
if isinstance(data, pd.Series):
pd.testing.assert_series_equal(data, expected, check_index_type)
else:
pd.testing.assert_frame_equal(data, expected, check_index_type)
@pytest.mark.parametrize("data,num_to_pad,expected",
[(pd.Series([1, 2, 3]), 1, pd.Series([np.nan, 1, 2, 3])),
(pd.Series([1, 2, 3]), 0, pd.Series([1, 2, 3])),
(pd.Series([1, 2, 3, 4], index=pd.date_range("2020-10-01", "2020-10-04")),
2, pd.Series([np.nan, np.nan, 1, 2, 3, 4])),
(pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]}), 0,
pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]})),
(pd.DataFrame({"a": [4, 5, 6], "b": ["a", "b", "c"]}), 1,
pd.DataFrame({"a": [np.nan, 4, 5, 6], "b": [np.nan, "a", "b", "c"]})),
(pd.DataFrame({"a": [1, 0, 1]}), 2,
pd.DataFrame({"a": [np.nan, np.nan, 1, 0, 1]}))])
def test_pad_with_nans(data, num_to_pad, expected):
padded = pad_with_nans(data, num_to_pad)
_check_equality(padded, expected)
def test_pad_with_nans_with_series_name():
name = "data to pad"
data = pd.Series([1, 2, 3], name=name)
padded = pad_with_nans(data, 1)
_check_equality(padded, pd.Series([np.nan, 1, 2, 3], name=name))
@pytest.mark.parametrize("data, expected",
[([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [1., 2., 3, None]})],
[pd.Series([1., 2.], index=pd.Int64Index([1, 2])),
pd.DataFrame({"a": [2., 3.]}, index=pd.Int64Index([1, 2]))]),
([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [3., 4., None, None]})],
[pd.Series([1.], index=pd.Int64Index([1])),
pd.DataFrame({"a": [4.]}, index=pd.Int64Index([1]))]),
([pd.DataFrame(), pd.Series([None, 1., 2., 3.])],
[ | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/python
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""PortfolioOpt: Financial Portfolio Optimization
This module provides a set of functions for financial portfolio
optimization, such as construction of Markowitz portfolios, minimum
variance portfolios and tangency portfolios (i.e. maximum Sharpe ratio
portfolios) in Python. The construction of long-only, long/short and
market neutral portfolios is supported."""
import unittest
import sys
import numpy as np
import pandas as pd
import portfolioopt as pfopt
__all__ = ['create_test_data']
def create_test_data(my_seed=42, num_days=100):
"""
Creates some test returns data together with
its covariance matrix and average returns.
Parameters
----------
my_seed: integer, optional
Covariance matrix of asset returns.
num_days: integer, optional
Expected asset returns (often historical returns).
Returns
-------
returns: pandas.DataFrame
Test returns.
cov_mat: pandas.DataFrame
Test covariance matrix.
avg_rets: pandas.Series
Test average returns.
"""
np.random.seed(my_seed)
data = np.random.normal(loc=0.001, scale=0.05, size=(num_days, 5))
dates = | pd.date_range('1/1/2000', periods=num_days, freq='D', tz='UTC') | pandas.date_range |
from __future__ import annotations
from collections import namedtuple
from typing import TYPE_CHECKING
import warnings
from matplotlib.artist import setp
import numpy as np
from pandas.core.dtypes.common import is_dict_like
from pandas.core.dtypes.missing import remove_na_arraylike
import pandas as pd
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib.core import (
LinePlot,
MPLPlot,
)
from pandas.plotting._matplotlib.style import get_standard_colors
from pandas.plotting._matplotlib.tools import (
create_subplots,
flatten_axes,
maybe_adjust_figure,
)
if TYPE_CHECKING:
from matplotlib.axes import Axes
class BoxPlot(LinePlot):
_kind = "box"
_layout_type = "horizontal"
_valid_return_types = (None, "axes", "dict", "both")
# namedtuple to hold results
BP = namedtuple("BP", ["ax", "lines"])
def __init__(self, data, return_type="axes", **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
# Disable label ax sharing. Otherwise, all subplots shows last
# column label
if self.orientation == "vertical":
self.sharex = False
else:
self.sharey = False
@classmethod
def _plot(cls, ax, y, column_num=None, return_type="axes", **kwds):
if y.ndim == 2:
y = [remove_na_arraylike(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na_arraylike(y)
bp = ax.boxplot(y, **kwds)
if return_type == "dict":
return bp, bp
elif return_type == "both":
return cls.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _validate_color_args(self):
if "color" in self.kwds:
if self.colormap is not None:
warnings.warn(
"'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'"
)
self.color = self.kwds.pop("color")
if isinstance(self.color, dict):
valid_keys = ["boxes", "whiskers", "medians", "caps"]
for key in self.color:
if key not in valid_keys:
raise ValueError(
f"color dict contains invalid key '{key}'. "
f"The key must be either {valid_keys}"
)
else:
self.color = None
# get standard colors for default
colors = get_standard_colors(num_colors=3, colormap=self.colormap, color=None)
# use 2 colors by default, for box/whisker and median
# flier colors isn't needed here
# because it can be specified by ``sym`` kw
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = "k" # mpl default
def _get_colors(self, num_colors=None, color_kwds="color"):
pass
def maybe_color_bp(self, bp):
if isinstance(self.color, dict):
boxes = self.color.get("boxes", self._boxes_c)
whiskers = self.color.get("whiskers", self._whiskers_c)
medians = self.color.get("medians", self._medians_c)
caps = self.color.get("caps", self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
# GH 30346, when users specifying those arguments explicitly, our defaults
# for these four kwargs should be overridden; if not, use Pandas settings
if not self.kwds.get("boxprops"):
setp(bp["boxes"], color=boxes, alpha=1)
if not self.kwds.get("whiskerprops"):
setp(bp["whiskers"], color=whiskers, alpha=1)
if not self.kwds.get("medianprops"):
setp(bp["medians"], color=medians, alpha=1)
if not self.kwds.get("capprops"):
setp(bp["caps"], color=caps, alpha=1)
def _make_plot(self):
if self.subplots:
self._return_obj = pd.Series(dtype=object)
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
ret, bp = self._plot(
ax, y, column_num=i, return_type=self.return_type, **kwds
)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [pprint_thing(label)]
self._set_ticklabels(ax, label)
else:
y = self.data.values.T
ax = self._get_ax(0)
kwds = self.kwds.copy()
ret, bp = self._plot(
ax, y, column_num=0, return_type=self.return_type, **kwds
)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [left for left, _ in self._iter_data()]
labels = [pprint_thing(left) for left in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
def _set_ticklabels(self, ax: Axes, labels):
if self.orientation == "vertical":
ax.set_xticklabels(labels)
else:
ax.set_yticklabels(labels)
def _make_legend(self):
pass
def _post_plot_logic(self, ax, data):
pass
@property
def orientation(self):
if self.kwds.get("vert", True):
return "vertical"
else:
return "horizontal"
@property
def result(self):
if self.return_type is None:
return super().result
else:
return self._return_obj
def _grouped_plot_by_column(
plotf,
data,
columns=None,
by=None,
numeric_only=True,
grid=False,
figsize=None,
ax=None,
layout=None,
return_type=None,
**kwargs,
):
grouped = data.groupby(by)
if columns is None:
if not isinstance(by, (list, tuple)):
by = [by]
columns = data._get_numeric_data().columns.difference(by)
naxes = len(columns)
fig, axes = create_subplots(
naxes=naxes, sharex=True, sharey=True, figsize=figsize, ax=ax, layout=layout
)
_axes = flatten_axes(axes)
ax_values = []
for i, col in enumerate(columns):
ax = _axes[i]
gp_col = grouped[col]
keys, values = zip(*gp_col)
re_plotf = plotf(keys, values, ax, **kwargs)
ax.set_title(col)
ax.set_xlabel(pprint_thing(by))
ax_values.append(re_plotf)
ax.grid(grid)
result = pd.Series(ax_values, index=columns)
# Return axes in multiplot case, maybe revisit later # 985
if return_type is None:
result = axes
byline = by[0] if len(by) == 1 else by
fig.suptitle(f"Boxplot grouped by {byline}")
maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
return result
def boxplot(
data,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwds,
):
import matplotlib.pyplot as plt
# validate return_type:
if return_type not in BoxPlot._valid_return_types:
raise ValueError("return_type must be {'axes', 'dict', 'both'}")
if isinstance(data, pd.Series):
data = data.to_frame("x")
column = "x"
def _get_colors():
# num_colors=3 is required as method maybe_color_bp takes the colors
# in positions 0 and 2.
# if colors not provided, use same defaults as DataFrame.plot.box
result = get_standard_colors(num_colors=3)
result = np.take(result, [0, 0, 2])
result = np.append(result, "k")
colors = kwds.pop("color", None)
if colors:
if is_dict_like(colors):
# replace colors in result array with user-specified colors
# taken from the colors dict parameter
# "boxes" value placed in position 0, "whiskers" in 1, etc.
valid_keys = ["boxes", "whiskers", "medians", "caps"]
key_to_index = dict(zip(valid_keys, range(4)))
for key, value in colors.items():
if key in valid_keys:
result[key_to_index[key]] = value
else:
raise ValueError(
f"color dict contains invalid key '{key}'. "
f"The key must be either {valid_keys}"
)
else:
result.fill(colors)
return result
def maybe_color_bp(bp, **kwds):
# GH 30346, when users specifying those arguments explicitly, our defaults
# for these four kwargs should be overridden; if not, use Pandas settings
if not kwds.get("boxprops"):
setp(bp["boxes"], color=colors[0], alpha=1)
if not kwds.get("whiskerprops"):
setp(bp["whiskers"], color=colors[1], alpha=1)
if not kwds.get("medianprops"):
setp(bp["medians"], color=colors[2], alpha=1)
if not kwds.get("capprops"):
setp(bp["caps"], color=colors[3], alpha=1)
def plot_group(keys, values, ax: Axes):
keys = [pprint_thing(x) for x in keys]
values = [np.asarray(remove_na_arraylike(v), dtype=object) for v in values]
bp = ax.boxplot(values, **kwds)
if fontsize is not None:
ax.tick_params(axis="both", labelsize=fontsize)
if kwds.get("vert", 1):
ticks = ax.get_xticks()
if len(ticks) != len(keys):
i, remainder = divmod(len(ticks), len(keys))
assert remainder == 0, remainder
keys *= i
ax.set_xticklabels(keys, rotation=rot)
else:
ax.set_yticklabels(keys, rotation=rot)
maybe_color_bp(bp, **kwds)
# Return axes in multiplot case, maybe revisit later # 985
if return_type == "dict":
return bp
elif return_type == "both":
return BoxPlot.BP(ax=ax, lines=bp)
else:
return ax
colors = _get_colors()
if column is None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
# Prefer array return type for 2-D plots to match the subplot layout
# https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
result = _grouped_plot_by_column(
plot_group,
data,
columns=columns,
by=by,
grid=grid,
figsize=figsize,
ax=ax,
layout=layout,
return_type=return_type,
)
else:
if return_type is None:
return_type = "axes"
if layout is not None:
raise ValueError("The 'layout' keyword is not supported when 'by' is None")
if ax is None:
rc = {"figure.figsize": figsize} if figsize is not None else {}
with plt.rc_context(rc):
ax = plt.gca()
data = data._get_numeric_data()
if columns is None:
columns = data.columns
else:
data = data[columns]
result = plot_group(columns, data.values.T, ax)
ax.grid(grid)
return result
def boxplot_frame(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwds,
):
import matplotlib.pyplot as plt
ax = boxplot(
self,
column=column,
by=by,
ax=ax,
fontsize=fontsize,
grid=grid,
rot=rot,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwds,
)
plt.draw_if_interactive()
return ax
def boxplot_frame_groupby(
grouped,
subplots=True,
column=None,
fontsize=None,
rot=0,
grid=True,
ax=None,
figsize=None,
layout=None,
sharex=False,
sharey=True,
**kwds,
):
if subplots is True:
naxes = len(grouped)
fig, axes = create_subplots(
naxes=naxes,
squeeze=False,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
)
axes = flatten_axes(axes)
ret = pd.Series(dtype=object)
for (key, group), ax in zip(grouped, axes):
d = group.boxplot(
ax=ax, column=column, fontsize=fontsize, rot=rot, grid=grid, **kwds
)
ax.set_title(pprint_thing(key))
ret.loc[key] = d
maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
else:
keys, frames = zip(*grouped)
if grouped.axis == 0:
df = pd.concat(frames, keys=keys, axis=1)
else:
if len(frames) > 1:
df = frames[0].join(frames[1::])
else:
df = frames[0]
# GH 16748, DataFrameGroupby fails when subplots=False and `column` argument
# is assigned, and in this case, since `df` here becomes MI after groupby,
# so we need to couple the keys (grouped values) and column (original df
# column) together to search for subset to plot
if column is not None:
column = | com.convert_to_list_like(column) | pandas.core.common.convert_to_list_like |
"""Rank genes according to differential expression.
"""
from math import floor
from typing import Iterable, Union, Optional
import numpy as np
import pandas as pd
from anndata import AnnData
from scipy.sparse import issparse, vstack
from .. import _utils
from .. import logging as logg
from ..preprocessing._simple import _get_mean_var
from .._compat import Literal
from ..get import _get_obs_rep
_Method = Optional[Literal['logreg', 't-test', 'wilcoxon', 't-test_overestim_var']]
_CorrMethod = Literal['benjamini-hochberg', 'bonferroni']
def _select_top_n(scores, n_top):
n_from = scores.shape[0]
reference_indices = np.arange(n_from, dtype=int)
partition = np.argpartition(scores, -n_top)[-n_top:]
partial_indices = np.argsort(scores[partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
return global_indices
def _ranks(X, mask=None, mask_rest=None):
CONST_MAX_SIZE = 10000000
n_genes = X.shape[1]
if issparse(X):
merge = lambda tpl: vstack(tpl).toarray()
adapt = lambda X: X.toarray()
else:
merge = np.vstack
adapt = lambda X: X
masked = mask is not None and mask_rest is not None
if masked:
n_cells = np.count_nonzero(mask) + np.count_nonzero(mask_rest)
get_chunk = lambda X, left, right: merge(
(X[mask, left:right], X[mask_rest, left:right])
)
else:
n_cells = X.shape[0]
get_chunk = lambda X, left, right: adapt(X[:, left:right])
# Calculate chunk frames
max_chunk = floor(CONST_MAX_SIZE / n_cells)
for left in range(0, n_genes, max_chunk):
right = min(left + max_chunk, n_genes)
df = pd.DataFrame(data=get_chunk(X, left, right))
ranks = df.rank()
yield ranks, left, right
def _tiecorrect(ranks):
size = np.float64(ranks.shape[0])
if size < 2:
return np.repeat(ranks.shape[1], 1.0)
arr = np.sort(ranks, axis=0)
tf = np.insert(arr[1:] != arr[:-1], (0, arr.shape[0] - 1), True, axis=0)
idx = np.where(tf, np.arange(tf.shape[0])[:, None], 0)
idx = np.sort(idx, axis=0)
cnt = np.diff(idx, axis=0).astype(np.float64)
return 1.0 - (cnt ** 3 - cnt).sum(axis=0) / (size ** 3 - size)
class _RankGenes:
def __init__(
self,
adata,
groups,
groupby,
reference='rest',
use_raw=True,
layer=None,
comp_pts=False,
):
if 'log1p' in adata.uns_keys() and adata.uns['log1p']['base'] is not None:
self.expm1_func = lambda x: np.expm1(x * np.log(adata.uns['log1p']['base']))
else:
self.expm1_func = np.expm1
self.groups_order, self.groups_masks = _utils.select_groups(
adata, groups, groupby
)
adata_comp = adata
if layer is not None:
if use_raw:
raise ValueError("Cannot specify `layer` and have `use_raw=True`.")
X = adata_comp.layers[layer]
else:
if use_raw and adata.raw is not None:
adata_comp = adata.raw
X = adata_comp.X
# for correct getnnz calculation
if issparse(X):
X.eliminate_zeros()
self.X = X
self.var_names = adata_comp.var_names
self.ireference = None
if reference != 'rest':
self.ireference = np.where(self.groups_order == reference)[0][0]
self.means = None
self.vars = None
self.means_rest = None
self.vars_rest = None
self.comp_pts = comp_pts
self.pts = None
self.pts_rest = None
self.stats = None
# for logreg only
self.grouping_mask = adata.obs[groupby].isin(self.groups_order)
self.grouping = adata.obs.loc[self.grouping_mask, groupby]
def _basic_stats(self):
n_genes = self.X.shape[1]
n_groups = self.groups_masks.shape[0]
self.means = np.zeros((n_groups, n_genes))
self.vars = np.zeros((n_groups, n_genes))
self.pts = np.zeros((n_groups, n_genes)) if self.comp_pts else None
if self.ireference is None:
self.means_rest = np.zeros((n_groups, n_genes))
self.vars_rest = np.zeros((n_groups, n_genes))
self.pts_rest = np.zeros((n_groups, n_genes)) if self.comp_pts else None
else:
mask_rest = self.groups_masks[self.ireference]
X_rest = self.X[mask_rest]
self.means[self.ireference], self.vars[self.ireference] = _get_mean_var(
X_rest
)
# deleting the next line causes a memory leak for some reason
del X_rest
if issparse(self.X):
get_nonzeros = lambda X: X.getnnz(axis=0)
else:
get_nonzeros = lambda X: np.count_nonzero(X, axis=0)
for imask, mask in enumerate(self.groups_masks):
X_mask = self.X[mask]
if self.comp_pts:
self.pts[imask] = get_nonzeros(X_mask) / X_mask.shape[0]
if self.ireference is not None and imask == self.ireference:
continue
self.means[imask], self.vars[imask] = _get_mean_var(X_mask)
if self.ireference is None:
mask_rest = ~mask
X_rest = self.X[mask_rest]
self.means_rest[imask], self.vars_rest[imask] = _get_mean_var(X_rest)
# this can be costly for sparse data
if self.comp_pts:
self.pts_rest[imask] = get_nonzeros(X_rest) / X_rest.shape[0]
# deleting the next line causes a memory leak for some reason
del X_rest
def t_test(self, method):
from scipy import stats
self._basic_stats()
for group_index, mask in enumerate(self.groups_masks):
if self.ireference is not None and group_index == self.ireference:
continue
mean_group = self.means[group_index]
var_group = self.vars[group_index]
ns_group = np.count_nonzero(mask)
if self.ireference is not None:
mean_rest = self.means[self.ireference]
var_rest = self.vars[self.ireference]
ns_other = np.count_nonzero(self.groups_masks[self.ireference])
else:
mean_rest = self.means_rest[group_index]
var_rest = self.vars_rest[group_index]
ns_other = self.X.shape[0] - ns_group
if method == 't-test':
ns_rest = ns_other
elif method == 't-test_overestim_var':
# hack for overestimating the variance for small groups
ns_rest = ns_group
else:
raise ValueError('Method does not exist.')
# TODO: Come up with better solution. Mask unexpressed genes?
# See https://github.com/scipy/scipy/issues/10269
with np.errstate(invalid="ignore"):
scores, pvals = stats.ttest_ind_from_stats(
mean1=mean_group,
std1=np.sqrt(var_group),
nobs1=ns_group,
mean2=mean_rest,
std2=np.sqrt(var_rest),
nobs2=ns_rest,
equal_var=False, # Welch's
)
# I think it's only nan when means are the same and vars are 0
scores[np.isnan(scores)] = 0
# This also has to happen for <NAME>
pvals[np.isnan(pvals)] = 1
yield group_index, scores, pvals
def wilcoxon(self, tie_correct):
from scipy import stats
self._basic_stats()
n_genes = self.X.shape[1]
# First loop: Loop over all genes
if self.ireference is not None:
# initialize space for z-scores
scores = np.zeros(n_genes)
# initialize space for tie correction coefficients
if tie_correct:
T = np.zeros(n_genes)
else:
T = 1
for group_index, mask in enumerate(self.groups_masks):
if group_index == self.ireference:
continue
mask_rest = self.groups_masks[self.ireference]
n_active = np.count_nonzero(mask)
m_active = np.count_nonzero(mask_rest)
if n_active <= 25 or m_active <= 25:
logg.hint(
'Few observations in a group for '
'normal approximation (<=25). Lower test accuracy.'
)
# Calculate rank sums for each chunk for the current mask
for ranks, left, right in _ranks(self.X, mask, mask_rest):
scores[left:right] = np.sum(ranks.iloc[0:n_active, :])
if tie_correct:
T[left:right] = _tiecorrect(ranks)
std_dev = np.sqrt(
T * n_active * m_active * (n_active + m_active + 1) / 12.0
)
scores = (
scores - (n_active * ((n_active + m_active + 1) / 2.0))
) / std_dev
scores[np.isnan(scores)] = 0
pvals = 2 * stats.distributions.norm.sf(np.abs(scores))
yield group_index, scores, pvals
# If no reference group exists,
# ranking needs only to be done once (full mask)
else:
n_groups = self.groups_masks.shape[0]
scores = np.zeros((n_groups, n_genes))
n_cells = self.X.shape[0]
if tie_correct:
T = np.zeros((n_groups, n_genes))
for ranks, left, right in _ranks(self.X):
# sum up adjusted_ranks to calculate W_m,n
for imask, mask in enumerate(self.groups_masks):
scores[imask, left:right] = np.sum(ranks.iloc[mask, :])
if tie_correct:
T[imask, left:right] = _tiecorrect(ranks)
for group_index, mask in enumerate(self.groups_masks):
n_active = np.count_nonzero(mask)
if tie_correct:
T_i = T[group_index]
else:
T_i = 1
std_dev = np.sqrt(
T_i * n_active * (n_cells - n_active) * (n_cells + 1) / 12.0
)
scores[group_index, :] = (
scores[group_index, :] - (n_active * (n_cells + 1) / 2.0)
) / std_dev
scores[np.isnan(scores)] = 0
pvals = 2 * stats.distributions.norm.sf(np.abs(scores[group_index, :]))
yield group_index, scores[group_index], pvals
def logreg(self, **kwds):
# if reference is not set, then the groups listed will be compared to the rest
# if reference is set, then the groups listed will be compared only to the other groups listed
from sklearn.linear_model import LogisticRegression
# Indexing with a series causes issues, possibly segfault
X = self.X[self.grouping_mask.values, :]
if len(self.groups_order) == 1:
raise ValueError('Cannot perform logistic regression on a single cluster.')
clf = LogisticRegression(**kwds)
clf.fit(X, self.grouping.cat.codes)
scores_all = clf.coef_
for igroup, _ in enumerate(self.groups_order):
if len(self.groups_order) <= 2: # binary logistic regression
scores = scores_all[0]
else:
scores = scores_all[igroup]
yield igroup, scores, None
if len(self.groups_order) <= 2:
break
def compute_statistics(
self,
method,
corr_method='benjamini-hochberg',
n_genes_user=None,
rankby_abs=False,
tie_correct=False,
**kwds,
):
if method in {'t-test', 't-test_overestim_var'}:
generate_test_results = self.t_test(method)
elif method == 'wilcoxon':
generate_test_results = self.wilcoxon(tie_correct)
elif method == 'logreg':
generate_test_results = self.logreg(**kwds)
self.stats = None
n_genes = self.X.shape[1]
for group_index, scores, pvals in generate_test_results:
group_name = str(self.groups_order[group_index])
if n_genes_user is not None:
scores_sort = np.abs(scores) if rankby_abs else scores
global_indices = _select_top_n(scores_sort, n_genes_user)
first_col = 'names'
else:
global_indices = slice(None)
first_col = 'scores'
if self.stats is None:
idx = | pd.MultiIndex.from_tuples([(group_name, first_col)]) | pandas.MultiIndex.from_tuples |
# python ライブラリ
import pandas as pd
import numpy as np
import copy
import datetime
import os
trend_data_file_path = 'data/src/TREND_76_6904050_20210814_20210827_20210828183418.xlsx'
df = | pd.read_excel(trend_data_file_path,encoding="shift-jis") | pandas.read_excel |
"""
Helper functions for calculating standard meteorological quantities
"""
import numpy as np
import pandas as pd
import xarray as xr
# constants
epsilon = 0.622 # ratio of molecular weights of water to dry air
def e_s(T, celsius=False, model='Tetens'):
"""Calculate the saturation vapor pressure of water, $e_s$ [mb]
given the air temperature.
"""
if celsius:
# input is deg C
T_degC = T
T = T + 273.15
else:
# input is in Kelvin
T_degC = T - 273.15
if model == 'Bolton':
# Eqn 10 from Bolton (1980), Mon. Weather Rev., Vol 108
# - applicable from -30 to 35 deg C
svp = 6.112 * np.exp(17.67*T_degC / (T_degC + 243.5))
elif model == 'Magnus':
# Eqn 21 from Alduchov and Eskridge (1996), J. Appl. Meteorol., Vol 35
# - AERK formulation, applicable from -40 to 50 deg C
svp = 6.1094 * np.exp(17.625*T_degC / (243.04 + T_degC))
elif model == 'Tetens':
# Tetens' formula, e.g., from the National Weather Service:
# https://www.weather.gov/media/epz/wxcalc/vaporPressure.pdf
svp = 6.11 * 10**(7.5*T_degC/(237.3+T_degC))
else:
raise ValueError('Unknown model: {:s}'.format(model))
return svp
def T_d(T, RH, celsius=False, model='NWS'):
"""Calculate the dewpoint temperature, $T_d$, from air temperature
and relative humidity [%]. If celsius is True, input and output
temperatures are in degrees Celsius; otherwise, inputs and outputs
are in Kelvin.
"""
if model == 'NWS':
es = e_s(T, celsius, model='Tetens')
# From National Weather Service, using Tetens' formula:
# https://www.weather.gov/media/epz/wxcalc/virtualTemperature.pdf
# - note the expression for vapor pressure is the saturation vapor
# pressure expression, with Td instead of T
e = RH/100. * es
denom = 7.5*np.log(10) - np.log(e/6.11)
Td = 237.3 * np.log(e/6.11) / denom
if not celsius:
Td += 273.15
else:
raise ValueError('Unknown model: {:s}'.format(model))
return Td
def w_s(T,p,celsius=False):
"""Calculate the saturation mixing ratio, $w_s$ [kg/kg] given the
air temperature and station pressure [mb].
"""
es = e_s(T,celsius)
# From Wallace & Hobbs, Eqn 3.63
return epsilon * es / (p - es)
def T_to_Tv(T,p=None,RH=None,e=None,w=None,Td=None,
celsius=False,verbose=False):
"""Convert moist air temperature to virtual temperature.
Formulas based on given total (or "station") pressure (p [mbar]) and
relative humidity (RH [%]); mixing ratio (w [kg/kg]); or partial
pressures of water vapor and dry air (e, pd [mbar]); or dewpoint
temperature (Td).
"""
if celsius:
T_degC = T
T += 273.15
else:
T_degC = T - 273.15
if (p is not None) and (RH is not None):
# saturation vapor pressure of water, e_s [mbar]
es = e_s(T)
if verbose:
# sanity check!
es_est = e_s(T, model='Bolton')
print('e_s(T) =',es,'~=',es_est)
es_est = e_s(T, model='Magnus')
print('e_s(T) =',es,'~=',es_est)
# saturation mixing ratio, ws [-]
ws = w_s(T, p)
if verbose:
print('w_s(T,p) =',ws,'~=',epsilon*es/p)
# mixing ratio, w, from definition of relative humidity
w = (RH/100.) * ws
if verbose:
# we also have specific humidity, q, at this point (not needed)
q = w / (1+w)
print('q(T,p,RH) =',q)
# Using Wallace & Hobbs, Eqn 3.59
if verbose:
# sanity check!
print('Tv(T,p,RH) ~=',T*(1+0.61*w))
Tv = T * (w/epsilon + 1) / (1 + w)
elif (e is not None) and (p is not None):
# Definition of virtual temperature
# Wallace & Hobbs, Eqn 3.16
Tv = T / (1 - e/p*(1-epsilon))
elif w is not None:
# Using Wallace & Hobbs, Eqn 3.59 substituted into 3.16
Tv = T * (w/epsilon + 1) / (1 + w)
elif (Td is not None) and (p is not None):
# From National Weather Service, using Tetens' formula:
# https://www.weather.gov/media/epz/wxcalc/vaporPressure.pdf
Td_degC = Td
if not celsius:
Td_degC -= 273.15
e = e_s(Td_degC, celsius=True, model='Tetens')
# Calculate from definition of virtual temperature
Tv = T_to_Tv(T,e=e,p=p)
else:
raise ValueError('Specify (T,RH,p) or (T,e,p) or (T,w), or (T,Td,p)')
if celsius:
Tv -= 273.15
return Tv
def Ts_to_Tv(Ts,**kwargs):
"""TODO: Convert sonic temperature [K] to virtual temperature [K].
"""
def calc_wind(df,u='u',v='v'):
"""Calculate wind speed and direction from horizontal velocity
components, u and v.
"""
if not all(velcomp in df.columns for velcomp in [u,v]):
print(('velocity components u/v not found; '
'set u and/or v to calculate wind speed/direction'))
else:
wspd = np.sqrt(df[u]**2 + df[v]**2)
wdir = 180. + np.degrees(np.arctan2(df[u], df[v]))
return wspd, wdir
def calc_uv(df,wspd='wspd',wdir='wdir'):
"""Calculate velocity components from wind speed and direction.
"""
if not all(windvar in df.columns for windvar in [wspd,wdir]):
print(('wind speed/direction not found; '
'set wspd and/or wpd to calculate velocity components'))
else:
ang = np.radians(270. - df[wdir])
u = df[wspd] * np.cos(ang)
v = df[wspd] * np.sin(ang)
return u,v
def theta(T, p, p0=1000.):
"""Calculate (virtual) potential temperature [K], theta, from (virtual)
temperature T [K] and pressure p [mbar] using Poisson's equation.
Standard pressure p0 at sea level is 1000 mbar or hPa.
Typical assumptions for dry air give:
R/cp = (287 J/kg-K) / (1004 J/kg-K) = 0.286
"""
return T * (p0/p)**0.286
def covariance(a,b,interval='10min',resample=False):
"""Calculate covariance between two series (with datetime index) in
the specified interval, where the interval is defined by a pandas
offset string
(http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects).
Notes:
- The output data will have the same length as the input data by
default, because statistics are calculated with pd.rolling(). To
return data at the same intervals as specified, set
`resample=True`.
- Covariances may be simultaneously calculated at multiple heights
by inputting multi-indexed dataframes (with height being the
second index level)
- If the inputs have multiindices, this function will return a
stacked, multi-indexed dataframe.
Example:
heatflux = covariance(df['Ts'],df['w'],'10min')
"""
# handle multiindices
have_multiindex = False
if isinstance(a.index, pd.MultiIndex):
assert isinstance(b.index, pd.MultiIndex), \
'Both a and b should have multiindices'
assert len(a.index.levels) == 2
assert len(b.index.levels) == 2
# assuming levels 0 and 1 are time and height, respectively
a = a.unstack() # create unstacked copy
b = b.unstack() # create unstacked copy
have_multiindex = True
elif isinstance(b.index, pd.MultiIndex):
raise AssertionError('Both a and b should have multiindices')
# check index
if isinstance(interval, str):
# make sure we have a compatible index
assert isinstance(a.index, (pd.DatetimeIndex, pd.TimedeltaIndex, pd.PeriodIndex))
assert isinstance(b.index, (pd.DatetimeIndex, pd.TimedeltaIndex, pd.PeriodIndex))
# now, do the calculations
if resample:
a_mean = a.resample(interval).mean()
b_mean = b.resample(interval).mean()
ab_mean = (a*b).resample(interval).mean()
else:
a_mean = a.rolling(interval).mean()
b_mean = b.rolling(interval).mean()
ab_mean = (a*b).rolling(interval).mean()
cov = ab_mean - a_mean*b_mean
if have_multiindex:
return cov.stack()
else:
return cov
def power_spectral_density(df,tstart=None,interval=None,window_size='10min',
window_type='hanning',detrend='linear',scaling='density'):
"""
Calculate power spectral density using welch method and return
a new dataframe. The spectrum is calculated for every column
of the original dataframe.
Notes:
- Input can be a pandas series or dataframe
- Output is a dataframe with frequency as index
"""
from scipy.signal import welch
# Determine time scale
timevalues = df.index.get_level_values(0)
if isinstance(timevalues,pd.DatetimeIndex):
timescale = pd.to_timedelta(1,'s')
else:
# Assuming time is specified in seconds
timescale = 1
# Determine tstart and interval if not specified
if tstart is None:
tstart = timevalues[0]
if interval is None:
interval = timevalues[-1] - timevalues[0]
elif isinstance(interval,str):
interval = pd.to_timedelta(interval)
# Update timevalues
inrange = (timevalues >= tstart) & (timevalues <= tstart+interval)
timevalues = df.loc[inrange].index.get_level_values(0)
# Determine sampling rate and samples per window
dts = np.diff(timevalues.unique())/timescale
dt = dts[0]
nperseg = int( pd.to_timedelta(window_size)/ | pd.to_timedelta(dt,'s') | pandas.to_timedelta |
import sys
import numpy as np
import pandas as pd
from optparse import OptionParser
import os
from scipy.stats import entropy
from scipy import signal
import scipy.stats as spstats
import fnmatch
from datetime import datetime
from scipy.stats import skew
from scipy.stats import kurtosis
from scipy.stats import t
from scipy.optimize import fsolve
import scipy.special as sc
# Extracts aggregate features per run from raw eye tracking and oculomotor event data, and builds a single feature matrix for use as input to train and validate a predictive model. If the feature matrix file already exists from a prior run of getFeatureMatrix(), you can save time by specifying useExisting=True to load it directly from the file rather than recomputing it from scratch.
# Research was sponsored by the United States Air Force Research Laboratory and the
# United States Air Force Artificial Intelligence Accelerator and was accomplished
# under Cooperative Agreement Number FA8750-19-2-1000. The views and conclusions
# contained in this document are those of the authors and should not be interpreted
# as representing the official policies, either expressed or implied, of the United
# States Air Force or the U.S. Government. The U.S. Government is authorized to
# reproduce and distribute reprints for Government purposes notwithstanding any
# copyright notation herein.
# def main():
# parser = OptionParser()
# parser.add_option('-d', '--dataDir', action="store", dest="dataDir", default=None, help="The top level data directory containing all the raw signal files for each subject.")
# parser.add_option('-o', '--outFilePath', action="store", dest="outFilePath", default=None, help="File to write full feature matrix.");
# (options, args) = parser.parse_args()
# getFeatureMatrix(options.dataDir, options.outFilePath);
def getFeatureMatrix(dataDir, filePath, useExisting):
if useExisting:
if os.path.exists(filePath):
print("Found precomputed feature matrix.")
featMatDF = | pd.read_csv(filePath) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Copyright 2018 NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import time
from multiprocessing import Pool
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.model_selection import StratifiedShuffleSplit
from kor_char_parser import decompose_str_as_one_hot
from char import line_to_char_ids
from word import line_to_word_ids
def group_count(name, items):
df = | pd.DataFrame(data={name: items}) | pandas.DataFrame |
import pandas as pd
import requests
import ckanapi
import math
import re
from datetime import timedelta
import hashlib
import json
import logging
import os
from pathlib import Path
from airflow import DAG
from airflow.models import Variable
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import BranchPythonOperator, PythonOperator
from airflow.utils.dates import days_ago
from ckan_operators.datastore_operator import (
BackupDatastoreResourceOperator,
DeleteDatastoreResourceRecordsOperator,
InsertDatastoreResourceRecordsOperator,
RestoreDatastoreResourceBackupOperator,
)
from ckan_operators.package_operator import GetPackageOperator
from ckan_operators.resource_operator import (
GetOrCreateResourceOperator,
ResourceAndFileOperator,
)
from dateutil import parser
from utils import agol_utils, airflow_utils, ckan_utils
from utils_operators.directory_operator import CreateLocalDirectoryOperator
from utils_operators.file_operators import DownloadFileOperator
from utils_operators.slack_operators import task_success_slack_alert, task_failure_slack_alert, GenericSlackOperator
RESOURCE_NAME = "Toronto progress portal - Key metrics"
tpp_measure_url = "https://contrib.wp.intra.prod-toronto.ca/app_content/tpp_measures"
tpp_narratives_url = "https://contrib.wp.intra.prod-toronto.ca/app_content/tpp_narratives/"
PACKAGE_NAME = "toronto-s-dashboard-key-indicators"
EXPECTED_COLUMNS = [
"measure_id",
"measure_name",
"interval_type",
"value_type",
"measure_value",
"target",
"year_to_date_variance",
"budget_variance",
"decimal_accuracy",
"desired_direction",
"category",
"data_source_notes",
"city_perspective_note",
"year",
"period_number_in_year",
"keywords",
"notes"
]
mapping = {
"id": "measure_id",
"m": "measure_name",
"it": "interval_type",
"vt": "value_type",
"v": "variance",
"yv": "year_to_date_variance",
"bv": "budget_variance",
"da": "decimal_accuracy",
"dd": "desired_direction",
"c": "category",
"ds":"data_source_notes",
"cp": "city_perspective_note",
"y": "year",
"p": "period_number_in_year",
"v": "measure_value",
"target":"target",
"note":"note",
"c": "category",
}
def get_category_measures(measures, category):
subset = []
for m in measures:
assert len(m["c"]) == 1, f"Measure has more than 1 category: {m['c']}"
if m["c"][0].lower() == category.lower():
subset.append(m)
return subset
def make_measures_records(measures):
records = []
for i in measures:
item = { **i }
data_points = item.pop("vs")
assert len(i["c"]) == 1, f"Item '{i['m']}' ({i['id']}) belongs to more than 1 category: {item['c']}"
item["c"] = item["c"][0]
for dp in data_points:
r = { k: v for k, v in {**item, **dp}.items() if v == v }
r["m"] = r["m"].replace("\n", " ")
r["ds"] = r["ds"].replace("&", "&")
r.pop("ytd")
r.pop("ht")
r.pop("kw")
if "da" in r:
try:
r["da"] = int(r["da"])
except:
r.pop("da")
if "yv" in r:
try:
r["yv"] = float(r["yv"])
except:
r.pop("yv")
if "bv" in r:
try:
r["bv"] = float(r["bv"])
except:
r.pop("bv")
for original,updated in mapping.items():
if original in r:
r[updated] = r.pop(original)
records.append(r)
return records
def ds_fields():
fields=[{'info': {'notes': 'ID Number assigned to uniquely identify each Measure'}, 'type': 'float8', 'id': 'measure_id'},
{'info': {'notes': 'Measure Name'}, 'type': 'text', 'id': 'measure_name'},
{'info': {'notes': 'Interval Type for measure result collection'}, 'type': 'text', 'id': 'interval_type'},
{'info': {'notes': 'Value Type of measure result'}, 'type': 'text', 'id': 'value_type'},
{'info': {'notes': 'Actual value of the measure'}, 'type': 'float8', 'id': 'measure_value'},
{'info': {'notes': 'Year To Date Variance to compare % Changed for Current Year-To-Date vs. Previous Year to determine Analysis in Trend Analysis'}, 'type': 'float8', 'id': 'year_to_date_variance'},
{'info': {'notes': 'Budget Variance to compare % Changed for Current Period vs. Budget/Target to determine Analysis in Trend Analysis'}, 'type': 'float8', 'id': 'budget_variance'},
{'info': {'notes': 'Decimal Accuracy - number of decimals to display in the measure result'}, 'type': 'int4', 'id': 'decimal_accuracy'},
{'info': {'notes': 'Desired Direction - determines colour and direction of trend arrows'}, 'type': 'text', 'id': 'desired_direction'},
{'info': {'notes': 'Category to which measure is assigned'}, 'type': 'text', 'id': 'category'},
{'info': {'notes': 'DataSource Notes to display under the Chart'}, 'type': 'text', 'id': 'data_source_notes'},
{'info': {'notes': 'CityPerspective Note to display under Trend Analysis'}, 'type': 'text', 'id': 'city_perspective_note'},
{'info': {'notes': 'Year', 'label': ''}, 'type': 'int4', 'id': 'year'},
{'info': {'notes': 'Period number - Month'}, 'type': 'int4', 'id': 'period_number_in_year'},
{'info': {'notes': 'Target value of the measure'}, 'type': 'float8', 'id': 'target'},
{'info': {'notes': 'Note'}, 'type': 'text', 'id': 'note'}]
return fields
def string_to_dict(string, pattern):
regex = re.sub(r'{(.+?)}', r'(?P<_\1>.+)', pattern)
values = list(re.search(regex, string).groups())
keys = re.findall(r'{(.+?)}', pattern)
_dict = dict(zip(keys, values))
return _dict
def build_narratives_df(notes):
p_map = {
"January": 1,
"February":2,
"March":3,
"April":4,
"May":5,
"June":6,
"July":7,
"August":8,
"September":9,
"October":10,
"November":11,
"December":12,
"Spring":2,
"Summer":3,
"Fall":4,
"Winter":1,
}
pattern1 = {"a":"^\[Quarter {period_number_in_year} {year}\]{note}$", "b":"\[Quarter \d \d{4}].*"}
pattern2 = {"a":"^\[Annual {year}\]{note}$","b":"\[Annual \d{4}].*"}
pattern3 = {"a":"^\[{period_number_in_year} {year}\]{note}$","b":"\[\w{3,15} \d{4}].*"}
narratives=[]
for k,v in notes.items():
if len(v) > 10:
for n in v.split('<br /><br />'):
note = None
nn = n.replace("<br />", "").strip()
if re.fullmatch(pattern1["b"], nn, flags=0):
note = string_to_dict(nn,pattern1["a"])
elif re.fullmatch(pattern2["b"], nn, flags=0):
note = string_to_dict(nn,pattern2["a"])
note["period_number_in_year"] = note["year"]
elif re.fullmatch(pattern3["b"], nn, flags=0):
note = string_to_dict(nn,pattern3["a"])
note['period_number_in_year'] = p_map[note['period_number_in_year']]
else:
None
# print("note does not match pattern:", n)
if note:
note["year"] = int(note["year"])
note["period_number_in_year"] = int(note["period_number_in_year"])
note["measure_id"] = float(k)
narratives.append(note)
return pd.DataFrame(narratives)
def send_failure_message():
airflow_utils.message_slack(
name=PACKAGE_NAME,
message_type="error",
msg="Job not finished",
active_env=Variable.get("active_env"),
prod_webhook=Variable.get("active_env") == "prod",
)
with DAG(
PACKAGE_NAME,
default_args=airflow_utils.get_default_args(
{
"owner": "Gary",
"depends_on_past": False,
"email": ["<EMAIL>"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(seconds=600),
"on_failure_callback": task_failure_slack_alert,
"start_date": days_ago(1),
"retries": 0,
}
),
description="Take tpp json and narratives from progress portal",
schedule_interval="0 22 * * 1-5",
catchup=False,
tags=["dataset"],
) as dag:
def is_resource_new(**kwargs):
package = kwargs["ti"].xcom_pull(task_ids="get_package")
logging.info(f"resources found: {[r['name'] for r in package['resources']]}")
is_new = RESOURCE_NAME not in [r["name"] for r in package["resources"]]
if is_new:
return "resource_is_new"
return "resource_is_not_new"
def transform_data(**kwargs):
ti = kwargs["ti"]
data_file_measure = ti.xcom_pull(task_ids="get_measure")
data_file_narrative = ti.xcom_pull(task_ids="get_narrative")
tmp_dir = Path(ti.xcom_pull(task_ids="tmp_dir"))
with open(Path(data_file_measure["path"])) as f:
measure = json.load(f)
logging.info(f"tmp_dir: {tmp_dir} | data_file_measure: {data_file_measure}")
with open(Path(data_file_narrative["path"])) as f:
narrative = json.load(f)
logging.info(f"tmp_dir: {tmp_dir} | data_file_narrative: {data_file_narrative}")
df_measure = pd.DataFrame(make_measures_records(measure["measures"])) # measure without target
df_narrative = build_narratives_df(narrative) # narrative with measure id, year, period decoded
# build target df
targets=measure["targets"][0]
df_target = | pd.DataFrame() | pandas.DataFrame |
"""
DeepLabCut2.0 Toolbox (deeplabcut.org)
© <NAME>
https://github.com/AlexEMG/DeepLabCut
Please see AUTHORS for contributors.
https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
Licensed under GNU Lesser General Public License v3.0
"""
import os, pickle, yaml
import pandas as pd
from pathlib import Path
import numpy as np
from deeplabcut.utils import auxiliaryfunctions
def convertcsv2h5(config,userfeedback=True,scorer=None):
"""
Convert (image) annotation files in folder labeled-data from csv to h5.
This function allows the user to manually edit the csv (e.g. to correct the scorer name and then convert it into hdf format).
WARNING: conversion might corrupt the data.
config : string
Full path of the config.yaml file as a string.
userfeedback: bool, optional
If true the user will be asked specifically for each folder in labeled-data if the containing csv shall be converted to hdf format.
scorer: string, optional
If a string is given, then the scorer/annotator in all csv and hdf files that are changed, will be overwritten with this name.
Examples
--------
Convert csv annotation files for reaching-task project into hdf.
>>> deeplabcut.convertcsv2h5('/analysis/project/reaching-task/config.yaml')
--------
Convert csv annotation files for reaching-task project into hdf while changing the scorer/annotator in all annotation files to Albert!
>>> deeplabcut.convertcsv2h5('/analysis/project/reaching-task/config.yaml',scorer='Albert')
--------
"""
cfg = auxiliaryfunctions.read_config(config)
videos = cfg['video_sets'].keys()
video_names = [Path(i).stem for i in videos]
folders = [Path(config).parent / 'labeled-data' /Path(i) for i in video_names]
if scorer==None:
scorer=cfg['scorer']
for folder in folders:
try:
if userfeedback==True:
print("Do you want to convert the csv file in folder:", folder, "?")
askuser = input("yes/no")
else:
askuser="yes"
if askuser=='y' or askuser=='yes' or askuser=='Ja' or askuser=='ha': # multilanguage support :)
fn=os.path.join(str(folder),'CollectedData_' + cfg['scorer'] + '.csv')
data=pd.read_csv(fn)
#nlines,numcolumns=data.shape
orderofbpincsv=list(data.values[0,1:-1:2])
imageindex=list(data.values[2:,0])
#assert(len(orderofbpincsv)==len(cfg['bodyparts']))
print(orderofbpincsv)
print(cfg['bodyparts'])
#TODO: test len of images vs. len of imagenames for another sanity check
index = pd.MultiIndex.from_product([[scorer], orderofbpincsv, ['x', 'y']],names=['scorer', 'bodyparts', 'coords'])
frame = pd.DataFrame(np.array(data.values[2:,1:],dtype=float), columns = index, index = imageindex)
frame.to_hdf(os.path.join(str(folder),'CollectedData_'+ cfg['scorer']+".h5"), key='df_with_missing', mode='w')
frame.to_csv(fn)
except FileNotFoundError:
print("Attention:", folder, "does not appear to have labeled data!")
def analyze_videos_converth5_to_csv(videopath,videotype='.avi'):
"""
By default the output poses (when running analyze_videos) are stored as MultiIndex Pandas Array, which contains the name of the network, body part name, (x, y) label position \n
in pixels, and the likelihood for each frame per body part. These arrays are stored in an efficient Hierarchical Data Format (HDF) \n
in the same directory, where the video is stored. If the flag save_as_csv is set to True, the data is also exported as comma-separated value file. However,
if the flag was *not* set, then this function allows the conversion of all h5 files to csv files (without having to analyze the videos again)!
This functions converts hdf (h5) files to the comma-separated values format (.csv), which in turn can be imported in many programs, such as MATLAB, R, Prism, etc.
Parameters
----------
videopath : string
A strings containing the full paths to videos for analysis or a path to the directory where all the videos with same extension are stored.
videotype: string, optional
Checks for the extension of the video in case the input to the video is a directory.\nOnly videos with this extension are analyzed. The default is ``.avi``
Examples
--------
Converts all pose-output files belonging to mp4 videos in the folder '/media/alex/experimentaldata/cheetahvideos' to csv files.
deeplabcut.analyze_videos_converth5_to_csv('/media/alex/experimentaldata/cheetahvideos','.mp4')
"""
start_path=os.getcwd()
os.chdir(videopath)
Videos=[fn for fn in os.listdir(os.curdir) if (videotype in fn) and ('_labeled.mp4' not in fn)] #exclude labeled-videos!
Allh5files=[fn for fn in os.listdir(os.curdir) if (".h5" in fn) and ("resnet" in fn)]
for video in Videos:
vname = Path(video).stem
#Is there a scorer for this?
PutativeOutputFiles=[fn for fn in Allh5files if vname in fn]
for pfn in PutativeOutputFiles:
scorer=pfn.split(vname)[1].split('.h5')[0]
if "DeepCut" in scorer:
DC = pd.read_hdf(pfn, 'df_with_missing')
print("Found output file for scorer:", scorer)
print("Converting to csv...")
DC.to_csv(pfn.split('.h5')[0]+'.csv')
os.chdir(str(start_path))
print("All pose files were converted.")
def pathmagic(string):
parts=string.split('\\')
if len(parts)==1:
return string
elif len(parts)==3: #this is the expected windows case, it will split into labeled-data, video, imgNR.png
return os.path.join(*parts) #unpack arguments from list with splat operator
else:
return string
def convertpaths_to_unixstyle(Data,fn,cfg):
''' auxiliary function that converts paths in annotation files:
labeled-data\\video\\imgXXX.png to labeled-data/video/imgXXX.png '''
Data.to_csv(fn + "windows" + ".csv")
Data.to_hdf(fn + "windows" + '.h5','df_with_missing',format='table', mode='w')
imindex=[pathmagic(s) for s in Data.index]
for j,bpt in enumerate(cfg['bodyparts']):
index = pd.MultiIndex.from_product([[cfg['scorer']], [bpt], ['x', 'y']],names=['scorer', 'bodyparts', 'coords'])
frame = pd.DataFrame(Data[cfg['scorer']][bpt].values, columns = index, index = imindex)
if j==0:
dataFrame=frame
else:
dataFrame = pd.concat([dataFrame, frame],axis=1)
dataFrame.to_csv(fn + ".csv")
dataFrame.to_hdf(fn + '.h5','df_with_missing',format='table', mode='w')
return dataFrame
def merge_windowsannotationdataONlinuxsystem(cfg):
''' If a project was created on Windows (and labeled there,) but ran on unix then the data folders
corresponding in the keys in cfg['video_sets'] are not found. This function gets them directly by
looping over all folders in labeled-data '''
AnnotationData=None
data_path = Path(cfg['project_path'],'labeled-data')
annotationfolders=[fn for fn in os.listdir(data_path) if "_labeled" not in fn]
print("The following folders were found:", annotationfolders)
for folder in annotationfolders:
try:
data = pd.read_hdf(os.path.join(data_path , folder, 'CollectedData_'+cfg['scorer']+'.h5'),'df_with_missing')
if AnnotationData is None:
AnnotationData=data
else:
AnnotationData= | pd.concat([AnnotationData, data]) | pandas.concat |
from os import path
import os.path
from datetime import datetime as dt
import datetime
# import plotly.express as px
# from dash.dependencies import Input, Output, State
# import dash_html_components as html
# import dash_core_components as dcc
import json
import pandas as pd
import numpy as np
# from jupyter_dash import JupyterDash
# import plotly.graph_objs as go
# import dash
# import traceback
import sys
import os
import copy
import os
import glob
# import grasia_dash_components as gdc
import json
# import jsonify
# import ciso8601
old_env = os.environ['PATH']
df_return = []
# couleur(5,98,138)
# 72,145,118 #489176
# # 206,136,87 #ce8857
# 154,80,82 #9a5052
# 160,175,82 #a0af52
# 88,158,157 #589e9d
# 103,120,132 #677884
# 206,182,75 #ceb64b
# 40,72,101 #284865
# 166,135,103 #a68767
from flask import Flask, jsonify,render_template
# from flask_cors import CORS
from flask_cors import CORS, cross_origin
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# CORS(app, support_credentials=True)
today = dt.today().strftime("%m-%d-%Y")
# csv_today_path = 'C:/Users/Utilisateur/PycharmProjects/montee_en_competence/csv_vaccin/' + today + ".csv"
csv_today_path = today + ".csv"
if path.exists(csv_today_path):
df_vaccin_quotidien = pd.read_csv(csv_today_path)
@app.route('/somme/<reg>/<vaccin>')
def filter_data_somme_quotidien(reg,vaccin):
# print(type(reg))
reg=np.int64(reg)
vaccin=np.int64(vaccin)
return df_vaccin_quotidien.query('reg==@reg & vaccin==@vaccin').to_json()
@app.route('/detail/<reg>/<vaccin>')
def filter_data_detail(reg,vaccin):
# print(type(reg))
reg=np.int64(reg)
vaccin=np.int64(vaccin)
# response=df_vaccin_detail.query('reg==@reg & vaccin==@vaccin').to_json()
# response.headers.add("Access-Control-Allow-Origin", "*")
return df_vaccin_detail.query('reg==@reg & vaccin==@vaccin').reset_index().to_json()
@app.route("/")
def helloWorld():
return "Hello, cross-origin-world!"
@app.route('/color')
def choose_color(i):
color_list = ["#489176", "#ce8857", "#9a5052", "#a0af52", " #589e9d", "#677884", "#ceb64b", "#284865", "#a68767"]
if i>=len(color_list):
return color_list[len(color_list)-i]
else:
return color_list[i]
# liste_des_vaccins
df_liste_vaccin = ["Tous","COMIRNATY Pfizer/BioNTech", "Moderna", "AstraZeneka"]
print(df_liste_vaccin)
# CHargement liste des régions
src = 'C:/Users/Utilisateur/PycharmProjects/montee_en_competence/stat_pop.csv'
# df_population=pd.read_csv(src,sep=";")///////////////a remettre
# bilan des données actuelle
# Le fichier est mis à jour quotidiennement et il comprend la somme par région et par vaccin. Cependant pour certaines
# régions et vaccins,la valeur peut manquer à un jour donné. On créee donc un fichier à jour cumulant les données
# de la journée selon data-france et celles données par data-fr&ance il y a pluysieurs jours pour les valeurs manquantes
date_min=datetime.datetime.strptime("27/12/2020","%d/%m/%Y").timestamp()
date_max=datetime.datetime.timestamp(datetime.datetime.today()-datetime.timedelta(days=1))
print(date_min)
labels = ["A1", "A2", "A3", "A4", "A5", "B1", "B2"]
parents = ["", "", "", "", "", "", ""]
values = ["11", "12", "13", "14", "15", "20", "30"]
# fig = go.Figure(go.Treemap(
# labels = labels,
# values = values,
# parents = parents,
# marker_colors = [ choose_color(i) for i in range(len(labels))]
# ))
@app.route('/req/proutos')
def maj_data_complete():
today = dt.today().strftime("%m-%d-%Y")
csv_today_path = 'C:/Users/Utilisateur/PycharmProjects/montee_en_competence/csv_vaccin/' + today + ".csv"
if path.exists(csv_today_path + "r"):
df_vaccin_quotidien = pd.read_csv(csv_today_path)
else:
src = 'https://www.data.gouv.fr/fr/datasets/r/900da9b0-8987-4ba7-b117-7aea0e53f530'
df_vaccin_quotidien = pd.read_csv(src, sep=";")
dernier_jour = df_vaccin_quotidien.tail(1)
dernier_jour = str(dernier_jour['jour'].values[0])
df_vaccin_quotidien = df_vaccin_quotidien.query('jour==@dernier_jour')
df_vaccin_quotidien.query('reg!=6', inplace=True)
df_vaccin_quotidien.query('reg!=8', inplace=True)
# df_vaccin_quotidien.to_csv('C:/Users/Utilisateur/PycharmProjects/montee_en_competence/csv_vaccin/01-03-2021.csv')
df_vaccin_quotidien.query('reg!=1', inplace=True)
reg_sans_data = [
x for x in pd.unique(
df_population["id"]) if x not in pd.unique(
df_vaccin_quotidien["reg"])]
# recherche de données dans des anciennes stats
day_diff = 0
# for reg in reg_sans_data:
data_full = False
# boucle sur les archives de données
while not data_full:
day_diff += 1
today = dt.today().strftime("%m-%d-%Y")
day_delta = dt.today() + datetime.timedelta(days=-day_diff)
day_delta_str = day_delta.strftime("%m-%d-%Y")
files = glob.glob(
"C:/Users/Utilisateur/PycharmProjects/montee_en_competence/csv_vaccin/*-*-*.csv")
df_reg_manquant = pd.read_csv(files[-1])
df_reg_manquant = df_reg_manquant[df_reg_manquant.reg.isin(
reg_sans_data)]
if pd.unique(df_reg_manquant.reg) == len(reg_sans_data):
data_full = True
if not data_full:
print("data pas complète")
exit()
df_reg_manquant.sort_values(['jour'], inplace=True)
# on reprend la dernière ligne du fichier de données manquante
dernier_jour = df_reg_manquant.tail(1)
dernier_jour = str(dernier_jour['jour'].values[0])
df_reg_manquant.query('jour==@dernier_jour', inplace=True)
df_vaccin_quotidien = pd.concat([df_vaccin_quotidien, df_reg_manquant])
# sauvegarde des données
df_vaccin_quotidien.iloc[:-2, : 7].to_csv(csv_today_path)
return df_vaccin_quotidien.iloc[:-2, : 7].to_json()
# return "e"
# df_vaccin_quotidien=maj_data_complete()
# somme des datas par jour même incomplète pour pouvoir viusaliser les changements depuis lavaeille
#dans df_vaccin_detail on peut avoir des journées manquantes
@app.route('/b')
def make_vaccin_detail():
today = dt.today().strftime("%m-%d-%Y")
csv_today_path = 'C:/Users/Utilisateur/PycharmProjects/montee_en_competence/csv_vaccin_detail/' + today + ".csv"
if path.exists(csv_today_path):
return pd.read_csv(csv_today_path)
else:
src = 'https://www.data.gouv.fr/fr/datasets/r/900da9b0-8987-4ba7-b117-7aea0e53f530'
df_vaccin_detail = pd.read_csv(src, sep=";")
df_vaccin_detail['datetime']= | pd.to_datetime(df_vaccin_detail['jour'], format='%Y-%m-%d') | pandas.to_datetime |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.