prompt (string, lengths 19-1.03M) | completion (string, lengths 4-2.12k) | api (string, lengths 8-90)
---|---|---
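Each sample below is one row of this corpus: a truncated Python source file as the prompt, followed by the pandas expression that completes it and the fully qualified API name, rendered inline as `| completion | api |`. A minimal sketch of how such a table might be inspected with pandas (the file name and storage format are assumptions; adjust them to wherever the dataset actually lives):

    import pandas as pd

    # Load the corpus; expected columns: prompt, completion, api
    df = pd.read_parquet("pandas_api_completions.parquet")
    print(df["api"].value_counts().head(10))    # most frequently completed pandas APIs
    print(df["prompt"].str.len().describe())    # prompt lengths (19 to ~1.03M characters)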
import os, glob, gc, time, yaml, shutil, random
import addict
import argparse
from collections import defaultdict
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.preprocessing import StandardScaler, LabelEncoder, QuantileTransformer, KBinsDiscretizer
from datasets import (Features, transform_joint, normalize_npnan,
NormalizeFeats, get_feat_cols, get_folds, split2folds_user_viral,
save_preprocessed
)
import torch  # used by seed_everything() and save_model() below
from catboost import CatBoostClassifier, Pool
import warnings
warnings.filterwarnings('ignore')
pd.set_option("display.max_colwidth", 100)
pd.set_option("display.max_rows", 20)
np.set_printoptions(precision=4)
osj = os.path.join; osl = os.listdir
def read_yaml(config_path='./config.yaml'):
with open(config_path) as f:
config = yaml.safe_load(f)
return config
def parse_args():
parser = argparse.ArgumentParser()
#parser.add_argument('--kernel_type', type=str, required=True)
parser.add_argument('--debug', type=str, default="False")
parser.add_argument('--seed', type=int, default=24)
args, _ = parser.parse_known_args()
return args
def gettime(t0):
"""return a string of time passed since t0 in min.
Ensure no spaces inside (for using as the name in files and dirs"""
hours = int((time.time() - t0) / 60 // 60)
mins = int((time.time() - t0) / 60 % 60)
return f"{hours:d}h{mins:d}min"
def logprint(log_str, add_new_line=True):
# os.makedirs(out_dir, exist_ok=True)
if add_new_line:
log_str += '\n'
print(log_str)
with open(os.path.join(out_dir, f'log.txt'), 'a') as appender:
appender.write(log_str + '\n')
def copy_code(out_dir: str, src_dir='./'):
code_dir = os.path.join(out_dir, 'code')
os.makedirs(code_dir, exist_ok=False)
py_fns = glob.glob(os.path.join(src_dir, '*.py'))
py_fns += glob.glob(os.path.join(src_dir, '*.yaml'))
for fn in py_fns:
shutil.copy(fn, code_dir)
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True # for faster training, but not deterministic
def compare_data(test,
compare_path='/home/isakev/challenges/viral_tweets/data/processed/test_data_lgb_NoImpute_NoOhe_jun23.csv'):
print(f"preprocessed path:\n{compare_path}")
# print("cfg.test_preprocessed_path\n:", cfg.test_preprocessed_path)
# assert os.path.basename(compare_path) == os.path.basename(cfg.test_preprocessed_path)
test_compare = pd.read_csv(compare_path)
cols_here_not_in_preproc = set(test.columns).difference(set(test_compare.columns))
cols_preproc_not_in_here = set(test_compare.columns).difference(set(test.columns))
print(f"cols_preproc_not_in_here:\n{cols_preproc_not_in_here}")
print(f"cols_here_not_in_preproc:\n{cols_here_not_in_preproc}")
print(f"test.isnull().sum().sort_values().tail():\n{test.isnull().sum().sort_values().tail()}")
print(f"\ntest_compare.isnull().sum().sort_values().tail():\n{test_compare.isnull().sum().sort_values().tail()}")
# print()
minus_ones_compare, minus_ones_here = [], []
for col in test_compare.columns:
minus_ones_compare.append((test_compare[col] == -1).sum())
        minus_ones_here.append((test[col] == -1).sum())
print(f"minus_ones_compare:{sum(minus_ones_compare)}")
print(f"minus_ones_here:{sum(minus_ones_here)}")
assert len(cols_preproc_not_in_here) == 0
    assert len(cols_here_not_in_preproc) == 0
if len(test) > 5000:
assert len(test) == len(test_compare), f"len test = {len(test)} , len test_compare = {len(test_compare)}"
assert sum(minus_ones_compare) == sum(minus_ones_here)
min_len = min(len(test), len(test_compare))
test = test.iloc[:min_len].reset_index(drop=True)
test_compare = test_compare[:min_len]
unequals = test.compare(test_compare)
print(f"test.compare(test_compare).shape[1] = {unequals.shape[1]}")
print(f"test.compare(test_compare).columns: {unequals.columns}")
diffs_ls = []
for col0 in unequals.columns.get_level_values(0):
diffs = unequals[(col0, 'self')] - unequals[(col0, 'other')]
diffs_ls.append(np.sum(diffs) / len(diffs))
argsorted_cols = unequals.columns.get_level_values(0)[np.argsort(diffs_ls)]
print(f"np.sum(diffs_ls = {np.sum(diffs_ls)}")
cols_diff_ = [(col, diff_) for (col, diff_) in zip(argsorted_cols[-10:], np.sort(diffs_ls)[-10:])]
print(f"some diffs_ls[-10:]:\n{cols_diff_}")
# assert test.compare(test_compare).shape[1] == 0, "test.compare(test_compare).shape[1] == 0"
def create_out_dir(experiment_name, model_arch_name, n_folds, folds_to_train, debug):
# datetime_str = time.strftime("%d_%m_time_%H_%M", time.localtime())
# folds_str = '_'.join([str(fold) for fold in folds_to_train])
# out_dir = '../../submissions/{}_m_{}_ep{}_bs{}_nf{}_t_{}'.format(
# experiment_name, model_arch_name, cfg.n_epochs, cfg.batch_size, n_folds, datetime_str) # bs, weight_decay, , folds_str,
# if debug:
# out_dir = osj(os.path.dirname(out_dir), 'debug_' + os.path.basename(out_dir))
out_dir = cfg.out_dir
models_outdir = osj(out_dir, 'models')
os.makedirs(out_dir)
os.makedirs(models_outdir)
return out_dir, models_outdir
def save_model(path, model, epoch, best_score, save_weights_only=False):
if save_weights_only:
state_dict = {
'model': model.state_dict(),
'epoch': epoch,
'best_score': best_score,
}
else:
scheduler_state = scheduler.state_dict() if scheduler else None
state_dict = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler_state,
'epoch': epoch,
'best_score': best_score,
}
torch.save(state_dict, path)
def rename_outdir_w_metric(out_dir, ave_metric, ave_epoch):
# renames out_dir - prefix_ = f"cv{ave_metric_str}_"
# after cross-validation:
# rename out_dir adding ave_epoch_cv(metric) to the name
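    # Worked example: ave_metric=0.681554 and ave_epoch=873 give prefix 'cv681554_' and
    # suffix '_e873_cv681554', so '<out_dir>' becomes 'cv681554_<out_dir>_e873_cv681554'.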
if ave_metric < 1:
ave_metric_str = f"{ave_metric:.6f}"[2:]
elif ave_metric < 1000:
ave_metric_str = f"{ave_metric:.5f}".replace('.', '_')
else:
ave_metric_str = f"{int(ave_metric)}"
if ave_epoch: ave_epoch = int(ave_epoch)
prefix_ = f"cv{ave_metric_str}_"
suffix_ = f"_e{ave_epoch}_cv{ave_metric_str}"
new_base_name = prefix_ + os.path.basename(out_dir) + suffix_
out_dir_new_name = osj(os.path.dirname(out_dir), new_base_name)
# os.rename(out_dir, out_dir_new_name)
    assert not os.path.exists(out_dir_new_name), f"\nCan't rename: the path exists ({out_dir_new_name})"
print(f"new out_dir directory name:\n{os.path.basename(out_dir_new_name)}")
shutil.move(out_dir, out_dir_new_name)
return out_dir_new_name
def get_cols2normalize(feat_cols):
cols2normalize = [col for col in feat_cols if (not col.startswith('img_feature_'))
and (not col.startswith('feature_'))
and (not col.startswith('user_des_feature_'))
and (not col.startswith('user_img_feature_'))
]
return cols2normalize
def drop_duplicates_func(train, feat_cols):
n = train.shape[0]
train.drop_duplicates(subset=feat_cols, inplace=True)
train.reset_index(drop=True, inplace=True)
# [~train.duplicated(subset=feat_cols)].reset_index(drop=True)
print(f"Dropped {n - train.shape[0]} duplicated rows from train. train.shape = {train.shape}")
return train
def run(fold, train, test, feats, categorical_columns): # cat_idxs):
def feats2list(df, feats, categorical_columns):
# separate categorical and non-categorical features into lists and concatenate
# pass new categorical indices
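        # Example: feats=['a', 'b', 'c'] with categorical_columns=['b'] reorders each row
        # to [b, a, c] (categorical values first, then the rest), so cat_new_idxs == [0],
        # i.e. the categorical features always occupy the leading positions.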
noncat_cols = [col for col in feats if col not in categorical_columns]
X_cat_list = df[categorical_columns].values.tolist()
X_noncat_list = df[noncat_cols].values.tolist()
X_list = [cat+noncat for (cat,noncat) in zip(X_cat_list,X_noncat_list)]
cat_new_idxs = np.array(range(len(X_cat_list[0])))
return X_list, cat_new_idxs
t_start_fold = time.time()
train_fold = train[train['fold'] != fold].copy()
val_fold = train[train['fold'] == fold]
# test_fold = test.copy()
if isinstance(cfg.adversarial_drop_thresh, float) and cfg.adversarial_valid_path:
        # Adversarial validation: drop the train samples that a train-vs-test classifier
        # scored as least test-like (prediction below the threshold).
        # Full fold-0 (of 5) train set = 23662 rows.
        thresh = cfg.adversarial_drop_thresh
        # Experiment log (apparently threshold = rows kept = fold accuracy = CV score):
        # 0.23 = ?     = ?       = cv68.1013
        # 0.24 = 23489 = 0.68425 = cv68.1554
        # 0.25 = 23177 = 0.6830  = cv67.9593
        # 0.27 = 21342 = 0.68636 = cv68.0463
adv_preds = pd.read_csv(cfg.adversarial_valid_path)
drop_ids = adv_preds.loc[(adv_preds['is_test'] == 0) & (adv_preds['preds'] < thresh), 'tweet_id'].values
print(f"Before adversarial cutoff, train_fold.shape = {train_fold.shape}")
train_fold = train_fold[~train_fold['tweet_id'].isin(drop_ids)]
# X_train = train_fold[feats].values
# X_valid = val_fold[feats].values
y_train = train_fold['virality'].values
y_valid = val_fold['virality'].values
# X_test = test[feats].copy().values
X_train_list, cat_new_idxs = feats2list(train_fold, feats, categorical_columns)
X_valid_list, _ = feats2list(val_fold, feats, categorical_columns)
X_test_list, _ = feats2list(test, feats, categorical_columns)
del train_fold, val_fold, test; _ = gc.collect()
# baseline_value = y_train_first.mean()
# train_baseline = np.array([baseline_value] * y_train_second.shape[0])
# test_baseline = np.array([baseline_value] * y_test.shape[0])
train_pool = Pool(X_train_list, y_train, cat_features=cat_new_idxs)
valid_pool = Pool(X_valid_list, y_valid, cat_features=cat_new_idxs)
test_pool = Pool(X_test_list, cat_features=cat_new_idxs)
logprint(f"Train set n = {len(y_train)}, Valid set n = {len(y_valid)}, Num feats = {len(X_train_list[0])}")
logprint(f"{time.ctime()} ===== Fold {fold} starting ======")
del X_train_list, X_valid_list, X_test_list; _ = gc.collect()
model_file = os.path.join(osj(models_outdir, f'model_best_fold{fold}.cbm'))
stats = {}
model = CatBoostClassifier(**cfg.catboost_params, cat_features=cat_new_idxs,
train_dir=out_dir)
params_from_model = model._init_params
model.fit(train_pool, eval_set=valid_pool, use_best_model=True)
params_from_model = model.get_all_params()
params_from_model = {k: v for (k, v) in params_from_model.items() if k not in ['cat_features']}
# print(f"params from model:\n{params_from_model}")
if fold == cfg.folds_to_train[0]:
print(f"model params: {model}")
print(f'Best Iteration: {model.best_iteration_}')
# print(f"evals_result:\n{evals_result}")
stats['best_iter'] = model.best_iteration_
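    # NOTE: the 'Accuracy' keys below exist only if Accuracy is among the metrics configured
    # in cfg.catboost_params (eval_metric / custom_metric); [::100] keeps every 100th iteration.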
evals_res_df = pd.DataFrame({'train_accuracy': model.evals_result_['learn']['Accuracy'][::100],
'valid_accuracy': model.evals_result_['validation']['Accuracy'][::100]})
evals_res_df.to_csv(osj(out_dir, f"evals_result_fold{fold}.csv"), index=False)
    # train score
    preds_train_fold = model.predict_proba(train_pool)  # class probabilities, to mirror the validation path
    # train_rmse = np.sqrt(mean_squared_error(y_train, preds_train_fold))
    acc_train = accuracy_score(y_train, np.argmax(preds_train_fold, axis=1))
# validation score
preds_val_fold = model.predict_proba(valid_pool)
acc_valid = accuracy_score(y_valid, np.argmax(preds_val_fold, axis=1))
# y_pred_valid = rankdata(y_pred_valid) / len(y_pred_valid)
# save model
model.save_model(model_file, format="cbm") # "json"
# predict test
preds_test_fold = model.predict_proba(test_pool)
stats['acc_train'] = acc_train
stats['acc_valid'] = acc_valid
stats['fold'] = fold
    content = f'lr:{model.learning_rate_}, accuracy train: {acc_train:.5f}, accuracy valid: {acc_valid:.5f}'
print(content)
print(f"From model.best_score_: {model.best_score_}")
print(f"ACCURACY: {acc_valid: .5f} \tFold train duration = {gettime(t_start_fold)}\n\n {'-' * 30}")
feature_imp_fold_df = pd.DataFrame({'feature': model.feature_names_,
f'fold_{fold}': model.feature_importances_})
return preds_val_fold, preds_test_fold, stats, feature_imp_fold_df
def main(out_dir, cfg):
t_get_data = time.time()
if cfg.load_train_test:
print(f"Loading preprocessed train and test...Path train:\n{cfg.train_preprocessed_path}")
train = pd.read_csv(cfg.train_preprocessed_path, nrows=n_samples)
test = pd.read_csv(cfg.test_preprocessed_path, nrows=n_samples)
if not cfg.add_user_virality:
del train['user_virality'], test['user_virality']
_ = gc.collect()
(feat_cols, media_img_feat_cols, text_feat_cols,
user_des_feat_cols, user_img_feat_cols, feats_some) = get_feat_cols(train)
# train = drop_duplicates_func(train, feat_cols)
else: # preprocess raw data
# assert not os.path.exists(cfg.train_preprocessed_path), f"file exists: {cfg.train_preprocessed_path}"
# assert not os.path.exists(cfg.test_preprocessed_path), f"file exists: {cfg.test_preprocessed_path}"
features = Features()
t_get_data = time.time()
traintest = features.get_data_stage1(cfg, base_dir, n_samples=n_samples)
train, test = features.get_data_stage2(cfg, traintest)
(feat_cols, media_img_feat_cols, text_feat_cols,
user_des_feat_cols, user_img_feat_cols, feats_some) = get_feat_cols(train)
train = drop_duplicates_func(train, feat_cols)
if cfg.save_then_load:
# saving and loading preprocessed data in order to reproduce the score
# if cfg.save_train_test: # and (not cfg.debug): # "../../data/preprocessed/"
save_preprocessed(cfg, train, test, path_train=cfg.train_preprocessed_path,
path_test=cfg.test_preprocessed_path)
train = pd.read_csv(cfg.train_preprocessed_path, nrows=n_samples)
test = pd.read_csv(cfg.test_preprocessed_path, nrows=n_samples)
# os.remove(cfg.train_preprocessed_path)
# os.remove(cfg.train_preprocessed_path)
# if cfg.save_train_test: # and (not cfg.debug): # "../../data/preprocessed/"
# save_preprocessed(cfg, train, test, path_train=cfg.train_preprocessed_path,
# path_test=cfg.test_preprocessed_path)
# print(f"Saved train and test after initial preprocess at:\n{cfg.train_preprocessed_path}")
train = get_folds(cfg, train)
(feat_cols, media_img_feat_cols, text_feat_cols,
user_des_feat_cols, user_img_feat_cols, feats_some) = get_feat_cols(train)
if cfg.drop_tweet_user_id:
        train.drop('tweet_user_id', axis=1, inplace=True)
        test.drop('tweet_user_id', axis=1, inplace=True)
# compare_data(test)
# drop low feat_imps features
if cfg.n_drop_feat_imps_cols and cfg.n_drop_feat_imps_cols > 0:
feat_imps = pd.read_csv(cfg.feat_imps_path).sort_values(by='importance_mean', ascending=False).reset_index(drop=False)
feat_imps_drops = feat_imps['feature'].iloc[-cfg.n_drop_feat_imps_cols:].values
cols_drop_fi = [col for col in feat_cols if col in feat_imps_drops if col not in ['tweet_user_id']]
train.drop(cols_drop_fi, axis=1, inplace=True)
test.drop(cols_drop_fi, axis=1, inplace=True)
print(f"Dropped {len(cols_drop_fi)} features on feature importance and add'l criteria")
(feat_cols, media_img_feat_cols, text_feat_cols,
user_des_feat_cols, user_img_feat_cols, feats_some) = get_feat_cols(train)
# print(f"Some features list: {train[feats_some].columns}\n")
cols2quantile_tfm = [col for col in train.columns if col in media_img_feat_cols+text_feat_cols
+user_img_feat_cols+user_des_feat_cols]
if cfg.quantile_transform:
train, test = transform_joint(train, test, cols2quantile_tfm,
tfm=QuantileTransformer(n_quantiles=cfg.n_quantiles,
random_state=cfg.seed_other,
))
if cfg.impute_nulls:
train = train.fillna(cfg.impute_value)
test = test.fillna(cfg.impute_value)
print(f"Imputed Nulls in train.py with {cfg.impute_value}")
# standardize feats
(feat_cols, media_img_feat_cols, text_feat_cols,
user_des_feat_cols, user_img_feat_cols, feats_some) = get_feat_cols(train)
categorical_columns_initial = [col for col in feat_cols if col.startswith('topic_id')
# or col.startswith('tweets_in_')
or col.startswith('tweet_language_id')
or col.startswith('tweet_attachment_class')
or col.startswith('ohe_')
or col in ['user_has_location', 'tweet_has_attachment',
'tweet_has_media', 'tweet_id_hthan1_binary','user_verified', 'user_has_url']
]
print("Normalizing feats ....")
if cfg.normalize_jointtraintest:
cols2normalize = [col for col in feat_cols if
col not in categorical_columns_initial] # get_cols2normalize(feat_cols)
if cfg.quantile_transform:
cols2normalize = [col for col in cols2normalize if col not in cols2quantile_tfm]
is_nulls = (train[cols2normalize].isnull().sum().sum()>0).astype(bool)
if is_nulls:
train, test = normalize_npnan(train, test, cols2normalize)
else:
train, test = transform_joint(train, test, cols2normalize, tfm=StandardScaler())
del cols2normalize
if cfg.kbins_discretizer:
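        # KBinsDiscretizer bins each continuous embedding feature (media/text/user vectors)
        # into cfg.kbins_n_bins ordinal buckets; NaNs are imputed first because the
        # discretizer does not accept missing values.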
cols2discretize_tfm = [col for col in train.columns if col in media_img_feat_cols + text_feat_cols
+ user_img_feat_cols + user_des_feat_cols]
train.loc[:,cols2discretize_tfm] = train.loc[:,cols2discretize_tfm].fillna(cfg.impute_value)
test.loc[:, cols2discretize_tfm] = test.loc[:, cols2discretize_tfm].fillna(cfg.impute_value)
train, test = transform_joint(train, test, cols2discretize_tfm,
tfm=KBinsDiscretizer(n_bins=cfg.kbins_n_bins,
strategy=cfg.kbins_strategy, # {'uniform', 'quantile', 'kmeans'}
encode='ordinal'))
print(f"KBinsDiscretize {len(cols2discretize_tfm)} cols, e.g. nunique 1 col of train: {train[cols2discretize_tfm[0]].nunique()}")
del cols2discretize_tfm; _ = gc.collect()
# cat columns for TABNET
categorical_columns = []
categorical_dims = {}
len_train = len(train)
train = pd.concat([train, test])
for col in categorical_columns_initial:
# print(col, train[col].nunique())
l_enc = LabelEncoder()
if cfg.model_arch_name=='tabnet':
train[col] = train[col].fillna("VV_likely") # after normalize unlikely
else:
pass
# train[col] = train[col].fillna(cfg.impute_value)
train[col] = l_enc.fit_transform(train[col].values)
categorical_columns.append(col)
categorical_dims[col] = len(l_enc.classes_)
test = train.iloc[len_train:]
train = train.iloc[:len_train]
cat_idxs = [i for i, f in enumerate(feat_cols) if f in categorical_columns]
cat_dims = [categorical_dims[f] for i, f in enumerate(feat_cols) if f in categorical_columns]
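    # The block above concatenates train and test, label-encodes each categorical column on
    # the combined frame (so a given raw category gets the same integer code in both), then
    # splits the frames back; cat_idxs/cat_dims record positions and cardinalities in
    # feat_cols order, as expected by models such as TabNet.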
if cfg.extracted_feats_path and (cfg.extracted_feats_path.lower()!='none'):
extracted_feats = | pd.read_csv(cfg.extracted_feats_path) | pandas.read_csv |
# author: zhanghan
'''
This is the trading calendar for stocks in China. In this version
we only consider day-level data.
'''
import pandas as pd
import pytz
from datetime import datetime
from dateutil import rrule
from functools import partial
start = pd.Timestamp('1990-01-01', tz='UTC')
end_base = pd.Timestamp('today', tz='UTC')
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
end = end_base + pd.Timedelta(days=365)
def canonicalize_datetime(dt):
# Strip out any HHMMSS or timezone info in the user's datetime, so that
# all the datetimes we return will be 00:00:00 UTC.
return datetime(dt.year, dt.month, dt.day, tzinfo=pytz.utc)
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
    # weekends: Saturday and Sunday
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
    # New Year's Day (January 1)
new_years = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years)
    # Labour Day (May 1)
may_1st = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
bymonthday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(may_1st)
    # National Day holiday (October 1-3)
oct_1st=rrule.rrule(
rrule.MONTHLY,
bymonth=10,
bymonthday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(oct_1st)
oct_2nd=rrule.rrule(
rrule.MONTHLY,
bymonth=10,
bymonthday=2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(oct_2nd)
oct_3rd=rrule.rrule(
rrule.MONTHLY,
bymonth=10,
bymonthday=3,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(oct_3rd)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
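    # The rrules above only cover weekends and fixed-date holidays; the hard-coded dates
    # below add the remaining exchange closures (e.g. Spring Festival and other multi-day
    # holidays whose calendar dates vary from year to year).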
non_trading_days.append(datetime(1991, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1991, 2, 15, tzinfo=pytz.utc))
non_trading_days.append(datetime(1991, 2, 18, tzinfo=pytz.utc))
non_trading_days.append(datetime(1991, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1991, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1991, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(1992, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1992, 2, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(1992, 2, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(1992, 2, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(1992, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1992, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1992, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(1993, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1993, 1, 25, tzinfo=pytz.utc))
non_trading_days.append(datetime(1993, 1, 26, tzinfo=pytz.utc))
non_trading_days.append(datetime(1993, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1994, 2, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(1994, 2, 8, tzinfo=pytz.utc))
non_trading_days.append(datetime(1994, 2, 9, tzinfo=pytz.utc))
non_trading_days.append(datetime(1994, 2, 10, tzinfo=pytz.utc))
non_trading_days.append(datetime(1994, 2, 11, tzinfo=pytz.utc))
non_trading_days.append(datetime(1994, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(1994, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(1994, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(1995, 1, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(1995, 1, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(1995, 1, 31, tzinfo=pytz.utc))
non_trading_days.append(datetime(1995, 2, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1995, 2, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(1995, 2, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(1995, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1995, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(1995, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 2, 19, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 2, 20, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 2, 21, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 2, 22, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 2, 23, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 2, 26, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 2, 27, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 2, 28, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 2, 29, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 3, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 9, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1996, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 2, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 2, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 2, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 2, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 2, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 2, 10, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 2, 11, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 2, 12, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 2, 13, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 2, 14, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 6, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 7, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(1997, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 1, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 1, 26, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 1, 27, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 1, 28, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 1, 29, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 1, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 2, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 2, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 2, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 2, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 2, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1998, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 10, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 11, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 12, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 15, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 16, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 17, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 18, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 19, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 22, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 23, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 24, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 25, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 2, 26, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 5, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 10, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 10, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 12, 20, tzinfo=pytz.utc))
non_trading_days.append(datetime(1999, 12, 31, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 1, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 1, 31, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 2, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 2, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 2, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 2, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 2, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 2, 8, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 2, 9, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 2, 10, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 2, 11, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 5, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 5, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 5, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2000, 10, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 1, 22, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 1, 23, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 1, 24, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 1, 25, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 1, 26, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 1, 29, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 1, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 1, 31, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 2, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 2, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 5, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 5, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 5, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2001, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 1, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 1, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 2, 11, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 2, 12, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 2, 13, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 2, 14, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 2, 15, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 2, 18, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 2, 19, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 2, 20, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 2, 21, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 2, 22, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 5, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 5, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 5, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 9, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2002, 10, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 1, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 1, 31, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 2, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 2, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 2, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 2, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 2, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 5, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 5, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 5, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 5, 8, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 5, 9, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 10, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2003, 10, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 1, 19, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 1, 20, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 1, 21, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 1, 22, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 1, 23, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 1, 26, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 1, 27, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 1, 28, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 5, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 5, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 5, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 5, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 5, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 10, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2004, 10, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 1, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 2, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 2, 8, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 2, 9, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 2, 10, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 2, 11, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 2, 14, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 2, 15, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 5, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 5, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 5, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 5, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 10, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2005, 10, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 1, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 1, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 1, 26, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 1, 27, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 1, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 1, 31, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 2, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 2, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 2, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 5, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 5, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 5, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2006, 10, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 1, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 1, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 2, 19, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 2, 20, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 2, 21, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 2, 22, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 2, 23, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 5, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 5, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 5, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2007, 12, 31, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 2, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 2, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 2, 8, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 2, 11, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 2, 12, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 4, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 6, 9, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 9, 15, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 9, 29, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 9, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2008, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 1, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 1, 26, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 1, 27, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 1, 28, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 1, 29, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 1, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 4, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 5, 28, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 5, 29, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 10, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 10, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2009, 10, 8, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 2, 15, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 2, 16, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 2, 17, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 2, 18, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 2, 19, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 4, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 5, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 6, 14, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 6, 15, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 6, 16, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 9, 22, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 9, 23, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 9, 24, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 10, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2010, 10, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 1, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 2, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 2, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 2, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 2, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 2, 8, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 4, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 4, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 6, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 9, 12, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 10, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2011, 10, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 1, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 1, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 1, 23, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 1, 24, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 1, 25, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 1, 26, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 1, 27, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 4, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 4, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 4, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 4, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 6, 22, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2012, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 1, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 1, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 2, 11, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 2, 12, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 2, 13, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 2, 14, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 2, 15, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 4, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 4, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 4, 29, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 4, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 6, 10, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 6, 11, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 6, 12, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 9, 19, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 9, 20, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2013, 10, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 1, 31, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 2, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 2, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 2, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 2, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 4, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 6, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 9, 8, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 10, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2014, 10, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 1, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 2, 18, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 2, 19, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 2, 20, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 2, 23, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 2, 24, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 4, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 6, 22, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 9, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 9, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 10, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 10, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2015, 10, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 1, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 2, 8, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 2, 9, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 2, 10, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 2, 11, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 2, 12, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 4, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 5, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 6, 9, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 6, 10, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 9, 15, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 9, 16, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 10, 6, tzinfo=pytz.utc))
non_trading_days.append(datetime(2016, 10, 7, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 1, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 1, 27, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 1, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 1, 31, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 2, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 2, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 4, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 4, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 5, 1, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 5, 29, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 5, 30, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 10, 2, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 10, 3, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 10, 4, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 10, 5, tzinfo=pytz.utc))
non_trading_days.append(datetime(2017, 10, 6, tzinfo=pytz.utc))
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
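# Usage sketch (comments only, not executed): with the objects defined above,
#   pd.Timestamp('2017-09-29', tz='UTC') + trading_day   # -> 2017-10-09, the first session
#                                                        #    after the National Day break
#   len(trading_days)                                    # number of sessions in [start, end]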
def get_early_closes(start, end):
# 1:00 PM close rules based on
# http://quant.stackexchange.com/questions/4083/nyse-early-close-rules-july-4th-and-dec-25th # noqa
# and verified against http://www.nyse.com/pdfs/closings.pdf
# These rules are valid starting in 1993
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc))
end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc))
# Not included here are early closes prior to 1993
# or unplanned early closes
early_close_rules = []
day_after_thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
# 4th Friday isn't correct if month starts on Friday, so restrict to
# day range:
byweekday=(rrule.FR),
bymonthday=range(23, 30),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_after_thanksgiving)
christmas_eve = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(christmas_eve)
friday_after_christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# valid 1993-2007
until=min(end, datetime(2007, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(friday_after_christmas)
day_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=(rrule.MO, rrule.TU, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_before_independence_day)
day_after_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# starting in 2013: wednesday before independence day
until=min(end, datetime(2012, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(day_after_independence_day)
wednesday_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.WE,
cache=True,
# starting in 2013
dtstart=max(start, datetime(2013, 1, 1, tzinfo=pytz.utc)),
until=max(end, datetime(2013, 1, 1, tzinfo=pytz.utc))
)
early_close_rules.append(wednesday_before_independence_day)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
# Misc early closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# New Year's Eve
nye_1999 = datetime(1999, 12, 31, tzinfo=pytz.utc)
if start <= nye_1999 and nye_1999 <= end:
early_closes.append(nye_1999)
early_closes.sort()
return | pd.DatetimeIndex(early_closes) | pandas.DatetimeIndex |
import pandas as pd
import yaml
import os
from . import DATA_FOLDER, SCHEMA, SYNONYM_RULES
def run(
rule_file: str = SYNONYM_RULES,
schema_file: str = SCHEMA,
data_folder: str = DATA_FOLDER,
):
"""Add rules to capture more terms as synonyms during named entity
recognition (NER)
:param rule_file: YAML file that contains the rules.,
defaults to SYNONYM_RULES
:param schema_file: YAML file that provides schema., defaults to SCHEMA
:param data_folder: Data folder where the input termlists are located and
the ouput files are saved.,
defaults to DATA_FOLDER
"""
with open(rule_file, "r") as rules, open(schema_file, "r") as sf:
try:
rule_book = yaml.safe_load(rules)
schema = yaml.safe_load(sf)
prefix_cols = ["id", "text"]
rules_cols = schema["classes"]["Rule"]["slots"]
prefix_df = pd.DataFrame(columns=prefix_cols)
rules_df = pd.DataFrame(columns=rules_cols)
terms_cols = [
"cui",
"source",
"id",
"match_term",
"preferred_term",
"category",
]
for key, value in rule_book["prefixes"].items():
row = pd.DataFrame([[value, key]], columns=prefix_cols)
prefix_df = pd.concat([prefix_df, row])
for idx, dic in enumerate(rule_book["rules"]):
row = pd.DataFrame(columns=rules_cols)
for col in row.columns:
if col in dic.keys():
row.loc[idx, col] = dic[col]
if len(row) > 0:
rules_df = pd.concat([rules_df, row])
rules_df = rules_df.reset_index()
rules_df.fillna("", inplace=True)
rules_exp_branch_df = rules_df.explode("branches")
# DEBUG BLOCK *****************************************
# rules_exp_branch_df.to_csv(
# os.path.join(data_folder, "rules.tsv"),
# sep="\t",
# index=None,
# )
# *****************************************************
ontologies = list(
set([x[0] for x in prefix_df["id"].str.split(":")])
)
print(f"Ontologies that need synonymization: {ontologies}")
for ont in ontologies:
terms_filename = ont.lower() + "_termlist.tsv"
new_terms_filename = ont.lower() + "_syn_termlist.tsv"
new_terms_df = | pd.DataFrame(columns=terms_cols) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import skbio
import pandas as pd
import pandas.testing as pdt
import qiime2
from q2_types.ordination import OrdinationFormat, ProcrustesStatisticsFmt
from qiime2.plugin.testing import TestPluginBase
class TestTransformers(TestPluginBase):
package = 'q2_types.ordination.tests'
def test_skbio_ordination_results_to_ordination_format(self):
filenames = ('pcoa-results-1x1.txt', 'pcoa-results-2x2.txt',
'pcoa-results-NxN.txt')
for filename in filenames:
filepath = self.get_data_path(filename)
transformer = self.get_transformer(skbio.OrdinationResults,
OrdinationFormat)
input = skbio.OrdinationResults.read(filepath)
obs = transformer(input)
self.assertIsInstance(obs, OrdinationFormat)
obs = skbio.OrdinationResults.read(str(obs))
self.assertEqual(str(obs), str(input))
def test_ordination_format_to_skbio_ordination_results(self):
filenames = ('pcoa-results-1x1.txt', 'pcoa-results-2x2.txt',
'pcoa-results-NxN.txt')
for filename in filenames:
input, obs = self.transform_format(OrdinationFormat,
skbio.OrdinationResults,
filename=filename)
exp = skbio.OrdinationResults.read(str(input))
self.assertEqual(str(exp), str(obs))
def test_1x1_ordination_format_to_metadata(self):
_, obs = self.transform_format(OrdinationFormat, qiime2.Metadata,
'pcoa-results-1x1.txt')
index = pd.Index(['s1'], name='Sample ID', dtype=object)
exp_df = pd.DataFrame([0.0], index=index, columns=['Axis 1'],
dtype=float)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_2x2_ordination_format_to_metadata(self):
_, obs = self.transform_format(OrdinationFormat, qiime2.Metadata,
'pcoa-results-2x2.txt')
index = pd.Index(['s1', 's2'], name='Sample ID', dtype=object)
exp_df = pd.DataFrame([[-20.999999999999996, -0.0],
[20.999999999999996, -0.0]], index=index,
columns=['Axis 1', 'Axis 2'], dtype=float)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_NxN_ordination_format_to_metadata(self):
# Not creating a reference dataframe here because manually populating
# that DataFrame is a pain. Specifically we just want to check the
# functionality of the dynamic column naming (e.g. Axis N).
_, obs = self.transform_format(OrdinationFormat, qiime2.Metadata,
'pcoa-results-NxN.txt')
columns = ['Axis %d' % i for i in range(1, 9)]
self.assertEqual(columns, list(obs.columns))
def test_df_to_procrustes_m2_stats_fmt(self):
input_df = pd.DataFrame({'true M^2 value': [1],
'p-value for true M^2 value': [0.2],
'number of Monte Carlo permutations': [300]},
index=pd.Index(['results'], name='id'))
exp = ['id\ttrue M^2 value\tp-value for true M^2 value\t'
'number of Monte Carlo permutations\n',
'#q2:types\tnumeric\tnumeric\tnumeric\n',
'results\t1\t0.2\t300\n']
transformer = self.get_transformer(pd.DataFrame,
ProcrustesStatisticsFmt)
fmt = transformer(input_df)
with open(str(fmt), 'r') as fh:
obs = fh.readlines()
self.assertEqual(exp, obs)
def test_procrustes_m2_stats_fmt_to_df(self):
filepath = self.get_data_path('m2stats-999-permus.tsv')
input_fmt = ProcrustesStatisticsFmt(filepath, mode='r')
exp = pd.DataFrame({'true M^2 value': [0.0789623748362618],
'p-value for true M^2 value': [0.001],
'number of Monte Carlo permutations': [999]},
index=pd.Index(['results'], name='id'))
transformer = self.get_transformer(ProcrustesStatisticsFmt,
pd.DataFrame)
obs = transformer(input_fmt)
pdt.assert_frame_equal(exp, obs)
def test_procrustes_m2_stats_fmt_to_md(self):
filepath = self.get_data_path('m2stats-999-permus.tsv')
input_fmt = ProcrustesStatisticsFmt(filepath, mode='r')
df = pd.DataFrame({'true M^2 value': [0.0789623748362618],
'p-value for true M^2 value': [0.001],
'number of Monte Carlo permutations': [999]},
index= | pd.Index(['results'], name='id') | pandas.Index |
import datetime as dt
import json
import os
import time
import pandas as pd
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
import datetime
# Statements with "." allow relative-path importing for the WebApp and WebAPI
# from .ImportSecurities import *
# from .utils.aws_util import *
# from .utils.data_util import *
# from .utils.indicators import *
# Statements without "." should be used when running the app/main function independently of the WebApp and WebAPI
from ImportSecurities import *
from utils.aws_util import *
from utils.data_util import *
from utils.indicators import *
import data_util_test
def gather_download_data(sd, ed, download_new_data=False):
symbols_config_fp = os.path.join(os.getcwd(), 'config', 'symbols_config.json')
with open(symbols_config_fp) as fp:
symbols_config = json.load(fp)
symbols_array = []
for category, array in symbols_config.items():
symbols_array.append(array)
flat_symbols = [item for sublist in symbols_array for item in sublist]
if download_new_data:
spaces_array = []
for array in symbols_array:
spaces = " ".join(array)
spaces_array.append(spaces)
gather_data(symbols_array, spaces_array, sd=sd, ed=ed)
def s3_upload_and_list():
# Set up variables
cwd = os.getcwd()
data_directory = os.path.join(cwd, 'data')
# Read Config
aws_config_fp = os.path.join(os.getcwd(), 'config', 'aws_config.json')
with open(aws_config_fp) as fp:
aws_config = json.load(fp)
# Set up Session & Resource
session = start_session(aws_config['access_key'], aws_config['secret_access_key'])
s3 = get_s3_resource(session)
bucket = aws_config['bucket_name']
# List current Buckets & Objects per Bucket
print_bucket_objects(s3, bucket)
# Upload files to Bucket
files = [f for f in os.listdir(data_directory) if f.endswith('.csv')]
for file in files:
upload_file_to_bucket(s3, bucket, os.path.join(data_directory, file), file)
# (Optional) Delete files from Bucket
# for file in files:
# delete_object(s3, bucket, file)
# List Buckets & Objects after Upload
print_bucket_objects(s3, bucket)
def get_technical_indicators_for_date(symbol,
given_date,
start_date=dt.datetime(2012, 1, 31),
end_date=dt.datetime.today()):
stock_data = get_ohlcv(symbol, start_date, end_date, base_dir='trading_assistant_app/data')
technical_indicators = get_technical_indicators_for_symbol(stock_data)
try:
return_dict = {
'Price/SMA5': technical_indicators['Price/SMA5'][given_date],
'Price/SMA10': technical_indicators['Price/SMA10'][given_date],
'Price/SMA20': technical_indicators['Price/SMA20'][given_date],
'Price/SMA50': technical_indicators['Price/SMA50'][given_date],
'Price/SMA200': technical_indicators['Price/SMA200'][given_date],
'BB%10': technical_indicators['BB%10'][given_date],
'BB%20': technical_indicators['BB%20'][given_date],
'BB%50': technical_indicators['BB%50'][given_date],
'RSI5': technical_indicators['RSI5'][given_date],
'RSI10': technical_indicators['RSI10'][given_date],
'MACD9': technical_indicators['MACD9'][given_date],
'MOM5': technical_indicators['MOM5'][given_date],
'VAMA10': technical_indicators['VAMA10'][given_date]
}
except KeyError as e:
print(f'Invalid given_date index/key for {e}')
return_dict = {
'Price/SMA5': 0,
'Price/SMA10': 0,
'Price/SMA20': 0,
'Price/SMA50': 0,
'Price/SMA200': 0,
'BB%10': 0,
'BB%20': 0,
'BB%50': 0,
'RSI5': 0,
'RSI10': 0,
'MACD9': 0,
'MOM5': 0,
'VAMA10': 0
}
return return_dict
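# The zero-filled dict above is the KeyError fallback for dates missing from the
# indicator index (weekends, holidays, or dates outside the downloaded range), so
# callers get neutral indicator values instead of an exception.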
def get_wsb_volume_for_date(symbol, given_date):
# gather reddit mention counts
# This allows for relative path retrieval for WebApp and WebAPI
reddit_fp = os.path.join('trading_assistant_app', 'reddit_refined', f'{symbol}_rss_wc.csv')
# This should be used when running the app/main function independent of WebApp and WebAPI
# reddit_fp = os.path.join(os.getcwd(), 'reddit_data', f'{symbol}_rss_wc.csv')
try:
df_reddit = pd.read_csv(reddit_fp)
except FileNotFoundError as e:
return {
'wsb_volume': 0
}
df_reddit = df_reddit.set_index('Date')
df_reddit.index = pd.to_datetime(df_reddit.index)
df_reddit = df_reddit.drop('Ticker', axis=1)
try:
value = df_reddit['wsb_volume'][given_date].item()
return_dict = {
'wsb_volume': value
}
except KeyError as e:
# print(f'Invalid given_date index/key for {e}')
return_dict = {
'wsb_volume': 0
}
return return_dict
def get_technical_indicators_for_symbol(stock_data):
price_sma_5_symbol = get_price_sma(stock_data, window=5)
price_sma_10_symbol = get_price_sma(stock_data, window=10)
price_sma_20_symbol = get_price_sma(stock_data, window=20)
price_sma_50_symbol = get_price_sma(stock_data, window=50)
price_sma_200_symbol = get_price_sma(stock_data, window=200)
bb10_pct_symbol = get_bb_pct(stock_data, window=10)
bb20_pct_symbol = get_bb_pct(stock_data, window=20)
bb50_pct_symbol = get_bb_pct(stock_data, window=50)
rsi5_symbol = get_rsi(stock_data, window=5)
rsi10_symbol = get_rsi(stock_data, window=10)
macd_symbol = get_macd_signal(stock_data, signal_days=9)
mom_symbol = get_momentum(stock_data, window=5)
vama_symbol = get_vama(stock_data, window=10)
# Compile TA into joined DF & FFILL / BFILL
df_indicators = pd.concat([price_sma_5_symbol, price_sma_10_symbol, price_sma_20_symbol,
price_sma_50_symbol, price_sma_200_symbol, bb10_pct_symbol,
bb20_pct_symbol, bb50_pct_symbol, rsi5_symbol,
rsi10_symbol, macd_symbol, mom_symbol, vama_symbol], axis=1)
df_indicators.fillna(0, inplace=True)
return df_indicators
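# The column names produced here ('Price/SMA5', 'BB%10', 'RSI5', 'MACD9', ...) are
# assumed to come from the helpers in utils.indicators; they must match the keys
# looked up in get_technical_indicators_for_date above.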
def write_predictions_to_csv(start_date, end_date, percent_gain, path, debug=False):
date_range = pd.date_range(start_date, end_date)
buy_data = dict()
sell_data = dict()
for date in date_range:
predictions_dictionary = get_list_of_predicted_stocks(percent_gain, date)
buy_signal_recognized_list = predictions_dictionary['buy_signal_recognized_list']
buy_signal_recognized_str = '_'.join(buy_signal_recognized_list)
sell_signal_recognized_list = predictions_dictionary['sell_signal_recognized_list']
sell_signal_recognized_str = '_'.join(sell_signal_recognized_list)
buy_data[date] = buy_signal_recognized_str
sell_data[date] = sell_signal_recognized_str
df_buy = pd.DataFrame(buy_data.items(), columns=['Date', 'Symbols'])
df_buy = df_buy.set_index('Date')
df_buy.to_csv(os.path.join(path, f'buy_predictions.csv'))
df_sell = pd.DataFrame(sell_data.items(), columns=['Date', 'Symbols'])
df_sell = df_sell.set_index('Date')
df_sell.to_csv(os.path.join(path, f'sell_predictions.csv'))
def read_predictions(given_date, minimum_count=0, buy=True, debug=False):
df = pd.read_csv(f'trading_assistant_app/predictions/{"buy_predictions" if buy else "sell_predictions"}.csv')
df = df.set_index('Date')
try:
symbols = df['Symbols'][given_date]
except KeyError as e:
print(f'Invalid given_date index/key for {e}')
symbols = ''
if isinstance(symbols, float):
if np.isnan(symbols):
return []
elif isinstance(symbols, str):
predictions_list = symbols.split('_')
if buy:
filtered = filter(lambda symbol:
get_wsb_volume_for_date(symbol, given_date)['wsb_volume'] > minimum_count, predictions_list)
filtered_list = list(filtered)
else:
filtered_list = predictions_list
return filtered_list
def prepare_data(symbols, start_date, end_date, percent_gain, debug=False):
# df_array = list()
# initialize dictionary to hold dataframe per symbol
df_dict = {}
# remove the index from the list of symbols
if "SPY" in symbols:
symbols.remove("SPY")
for symbol in symbols:
# get stock data for a given time
# This allows for relative path retrieval for WebApp and WebAPI
# ***
# stock_data = get_ohlcv(symbol, start_date, end_date, base_dir=os.path.join('trading_assistant_app', 'data'))
# This should be used when running the app/main function independent of WebApp and WebAPI
stock_data = data_util_test.get_ohlcv(symbol, start_date, end_date, base_dir=os.path.join('data'))
# Filter out empty OHLCV DF
if len(stock_data) == 0:
continue
# calculate technical indicators
df_indicators = get_technical_indicators_for_symbol(stock_data)
# gather reddit mention counts
# This allows for relative path retrieval for WebApp and WebAPI
# ***
#reddit_fp = os.path.join('trading_assistant_app', 'reddit_refined', f'{symbol}_rss_wc.csv')
reddit_fp = os.path.join('reddit_refined', f'{symbol}_rss_wc.csv')
# This should be used when running the app/main function independent of WebApp and WebAPI
# reddit_fp = os.path.join(os.getcwd(), 'reddit_data', f'{symbol}_rss.csv')
        if os.path.isfile(reddit_fp):
df_reddit = pd.read_csv(reddit_fp)
df_reddit = df_reddit.set_index('Date')
df_reddit.index = pd.to_datetime(df_reddit.index)
df_reddit = df_reddit.drop('Ticker', axis=1)
else:
            # mirror the "file exists" branch: a Date-indexed frame holding only
            # wsb_volume, so the merge does not add stray all-NaN object columns
            df_reddit = pd.DataFrame(columns=["wsb_volume"],
                                     index=pd.DatetimeIndex([], name="Date"))
# merge and fill nan data
df_merged = pd.merge(df_indicators, df_reddit, how='left', left_index=True, right_index=True)
df_merged[['wsb_volume']] = df_merged[['wsb_volume']].fillna(value=0.0)
# initialize dataframe to hold indicators and signal
df = df_merged.copy(deep=True)
# extract closing prices
prices = stock_data["close"]
# initialize signal
signal = prices * 0
# target holding period to realize gain
holding_period = 5
# buy signal == 1 when price increases by percent_gain and sell == -1 when it decreases by percent_gain
for i in range(prices.shape[0] - holding_period):
ret = (prices.iloc[i + 5] / prices.iloc[i]) - 1
if ret > percent_gain:
signal.iloc[i] = 1
elif ret < (-1 * percent_gain):
signal.iloc[i] = -1
else:
signal.iloc[i] = 0
# ***
        df_signal = pd.DataFrame(signal)
        # the signal Series inherits the name 'close' from the price column;
        # rename it so the label is not also fed to train_model as a feature
        df_signal.columns = ["signal"]
        df = df.merge(df_signal, how='left', left_index=True, right_index=True)
        df["signal"] = df["signal"].fillna(0)
df_dict[symbol] = df
if debug:
print(stock_data.head(n=20), '\n')
print(df_indicators.head(n=20), '\n')
print(df_indicators.columns)
print(df_indicators.head(n=20), '\n')
return df_dict
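# Labeling sketch (assuming percent_gain=0.05 with the 5-day holding period above):
# a close of 100.0 followed by 106.0 five sessions later gives ret = 0.06 > 0.05,
# so signal = 1 (buy); 100.0 -> 94.0 gives ret = -0.06 < -0.05, so signal = -1
# (sell); anything in between stays 0. The final holding_period rows keep the
# initial 0 because no 5-day-ahead price exists for them.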
def train_model(df, symbol, debug=False):
feature_cols = df.columns[:-1]
label_cols = df.columns[-1:]
train, test = np.split(df, [int(.6 * len(df))])
X_train, y_train = train[feature_cols], train[label_cols]
X_test, y_test = test[feature_cols], test[label_cols]
# print('X_train\n', X_train.head(20))
# print('y_train\n', y_train.head(20))
# print('X_test\n', X_test.head(20))
# print('y_test\n', y_test.head(20))
clf = RandomForestClassifier(n_estimators=10, random_state=42)
# Workaround to get data with NAN/INF working
    if not np.any(np.isnan(X_train)) and \
            np.all(np.isfinite(X_train)) and \
            not np.any(np.isnan(y_train.values.ravel())) and \
            np.all(np.isfinite(y_train.values.ravel())):
clf.fit(X_train, y_train.values.ravel())
y_pred = clf.predict(X_test)
y_test_ravel = y_test.values.ravel()
df_y_pred = pd.DataFrame(y_pred, index=y_test.index, columns=[f'Y_{symbol}'])
if debug:
print(f'Feature Importances: '
f'{sorted(list(zip(X_train, clf.feature_importances_)), key=lambda tup: tup[1], reverse=True)}')
print(f'Mean Absolute Error: {metrics.mean_absolute_error(y_test_ravel, y_pred)}')
print(f'Mean Squared Error: {metrics.mean_squared_error(y_test_ravel, y_pred)}')
print(f'Root Mean Squared Error: {np.sqrt(metrics.mean_squared_error(y_test_ravel, y_pred))}')
else:
df_y_pred = pd.DataFrame(np.zeros(len(y_test)), index=y_test.index, columns=[f'Y_{symbol}'])
return df_y_pred
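# The 60/40 np.split above is positional, so with a date-ordered frame the forest
# is evaluated on the most recent 40% of rows; when NaN/inf are present the guard
# skips training and the function returns an all-zero "no signal" prediction
# instead of raising.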
def get_list_of_predicted_stocks(percent_gain, given_date, debug=False):
buy_signal_recognized_list = list()
sell_signal_recognized_list = list()
empty_df_count = 0
cwd = os.getcwd()
# This allows for relative path retrieval for WebApp and WebAPI
# ***
#data_directory = os.path.join(cwd, 'trading_assistant_app', 'data')
# This should be used when running the app/main function independent of WebApp and WebAPI
data_directory = os.path.join(cwd, 'data')
files = [f for f in os.listdir(data_directory) if f.endswith('.csv')]
symbols = [symbol.split('.csv')[0] for symbol in files]
start_date = dt.datetime(2012, 1, 31)
end_date = dt.date.today()
df_dictionary = prepare_data(symbols=symbols, start_date=start_date, end_date=end_date, percent_gain=percent_gain)
for symbol, df in df_dictionary.items():
if len(df) == 0:
print(f'len(df) == 0!!! for {symbol}')
empty_df_count += 1
continue
# Train model
df_prediction = train_model(df, symbol, debug=debug)
try:
if df_prediction[f'Y_{symbol}'][given_date] == 1:
buy_signal_recognized_list.append(symbol)
elif df_prediction[f'Y_{symbol}'][given_date] == -1:
sell_signal_recognized_list.append(symbol)
except KeyError as e:
# print(f'Invalid given_date index/key for {e}')
pass
return {
'buy_signal_recognized_list': buy_signal_recognized_list,
'len_buy_signal_list': len(buy_signal_recognized_list),
'sell_signal_recognized_list': sell_signal_recognized_list,
'len_sell_signal_list': len(sell_signal_recognized_list),
'len_files': len(files),
'empty_df_count': empty_df_count,
'given_date': given_date
}
def prepare_data(symbols, start_date, end_date, percent_gain, debug=False):
# df_array = list()
# initialize dictionary to hold dataframe per symbol
df_dict = {}
# remove the index from the list of symbols
if "SPY" in symbols:
symbols.remove("SPY")
for symbol in symbols:
# get stock data for a given time
# This allows for relative path retrieval for WebApp and WebAPI
# ***
# stock_data = get_ohlcv(symbol, start_date, end_date, base_dir=os.path.join('trading_assistant_app', 'data'))
# This should be used when running the app/main function independent of WebApp and WebAPI
stock_data = data_util_test.get_ohlcv(symbol, start_date, end_date, base_dir=os.path.join('data'))
# Filter out empty OHLCV DF
if len(stock_data) == 0:
continue
# calculate technical indicators
df_indicators = get_technical_indicators_for_symbol(stock_data)
# gather reddit mention counts
# This allows for relative path retrieval for WebApp and WebAPI
# ***
#reddit_fp = os.path.join('trading_assistant_app', 'reddit_refined', f'{symbol}_rss_wc.csv')
reddit_fp = os.path.join('reddit_refined', f'{symbol}_rss_wc.csv')
# This should be used when running the app/main function independent of WebApp and WebAPI
# reddit_fp = os.path.join(os.getcwd(), 'reddit_data', f'{symbol}_rss.csv')
        if os.path.isfile(reddit_fp):
df_reddit = pd.read_csv(reddit_fp)
df_reddit = df_reddit.set_index('Date')
df_reddit.index = pd.to_datetime(df_reddit.index)
df_reddit = df_reddit.drop('Ticker', axis=1)
else:
            # mirror the "file exists" branch: a Date-indexed frame holding only
            # wsb_volume, so the merge does not add stray all-NaN object columns
            df_reddit = pd.DataFrame(columns=["wsb_volume"],
                                     index=pd.DatetimeIndex([], name="Date"))
# merge and fill nan data
df_merged = pd.merge(df_indicators, df_reddit, how='left', left_index=True, right_index=True)
df_merged[['wsb_volume']] = df_merged[['wsb_volume']].fillna(value=0.0)
# initialize dataframe to hold indicators and signal
df = df_merged.copy(deep=True)
# extract closing prices
prices = stock_data["close"]
# initialize signal
signal = prices * 0
# target holding period to realize gain
holding_period = 5
# buy signal == 1 when price increases by percent_gain and sell == -1 when it decreases by percent_gain
for i in range(prices.shape[0] - holding_period):
ret = (prices.iloc[i + 5] / prices.iloc[i]) - 1
if ret > percent_gain:
signal.iloc[i] = 1
elif ret < (-1 * percent_gain):
signal.iloc[i] = -1
else:
signal.iloc[i] = 0
# ***
df_signal = | pd.DataFrame(signal) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pandas as pd
import operator
import sys
"""
python excel.py "file path" "target file path"
"""
def load_csv(csv_path):
data = pd.read_csv(csv_path,encoding="utf_8_sig")
return data
if __name__ == '__main__':
dir_path = sys.argv[1]
dst_path = sys.argv[2]
pd.set_option('mode.chained_assignment', None)
csv = load_csv(dir_path)
newcsv = []
last_row = pd.Series(csv.loc[0, :].shape)
j = 0
for i in range(csv.shape[0]):
row = csv.loc[i, :]
row_no_label = row[:3]
label = row[3]
if j == 0:
last_row = row
newcsv.append(last_row)
else:
            if all(operator.eq(row_no_label.to_numpy(), last_row[0:3].to_numpy())):
newcsv[-1].iloc[3] = '精彩配合得分'
else:
last_row = row
newcsv.append(last_row)
pass
pass
j += 1
newcsv = | pd.DataFrame(newcsv) | pandas.DataFrame |
# -*- coding: utf-8 -*-
__author__ = 'ni'
import pandas as pd
# import os
import numpy as np
import tushare as ts
import time
import globalSetting as gs
THIS_MODULE = 'ROESatistics'
dirfix= gs.g_data_dir + THIS_MODULE + gs.g_dir_separator
def get_years_report(start_year, end_year):
if(start_year > end_year):
tmp = start_year
start_year = end_year
end_year = tmp
for i in range(start_year, end_year + 1):
        print(str(i) + '\n')
df = ts.get_report_data(i, 4)
exclefile = str(i) + '-' + 'ROE' + '.xlsx'
df.to_excel(exclefile)
time.sleep(6)
        # 1,2,3,4: 1 = Q1 report, 2 = interim (H1) report, 3 = Q3 report, 4 = annual report
# for j in range(1, 4 + 1):
# print(j)
# df = ts.get_report_data(i, j)
# exclefile = str(i) + '-' + str(j) + '.xlsx'
# df.to_excel(exclefile)
# time.sleep(60)
def merge_report(start_year, end_year):
if(start_year > end_year):
tmp = start_year
start_year = end_year
end_year = tmp
frames = []
for i in range(start_year, end_year + 1):
exclefile = str(i) + '-' + 'ROE' + '.xlsx'
print(exclefile + '\n')
df = pd.read_excel(exclefile)
# data=open("test.txt",'w+')
# print(df.duplicated())
df.drop_duplicates(subset=['code'],keep='first',inplace=True)
df.set_index("code", drop=True, inplace=True)
#print(df)
frames.append(df)
mergedFrame = | pd.concat(frames, axis=1) | pandas.concat |
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import itertools
import pandas as pd
import numpy as np
from matplotlib import rcParams
import seaborn as sns
import config
def classification_report_df(report):
report_data = []
lines = report.split('\n')
for line in lines[2:-3]:
row = {}
row_data = list(filter(None, line.split(' ')))
print(row_data)
row['class'] = row_data[0]
row['precision'] = float(row_data[1])
row['recall'] = float(row_data[2])
row['f1_score'] = float(row_data[3])
row['support'] = float(row_data[4])
report_data.append(row)
# avg line
str_list = lines[-2].split(' ')
row_data = list(filter(None, str_list)) # fastest
row = {}
row['class'] = row_data[0]+row_data[1]+row_data[2]
row['precision'] = float(row_data[3])
row['recall'] = float(row_data[4])
row['f1_score'] = float(row_data[5])
row['support'] = float(row_data[6])
report_data.append(row)
# build final df
df_report = | pd.DataFrame.from_dict(report_data) | pandas.DataFrame.from_dict |
from wordcloud import WordCloud
from mastodon import Mastodon
from pytz import timezone
from os import path
import datetime as dt
import pandas as pd
import MeCab
import re
PATH = path.dirname(path.abspath(__file__))
if __name__ == "__main__":
mastodon = Mastodon(
client_id = PATH + "/clientcred.secret",
access_token = PATH + "/usercred.secret",
api_base_url = "https://gensokyo.town")
TODAY = dt.date.today()
YESTERDAY = TODAY - dt.timedelta(days=1)
def Extract_content(toots):
"""
    Collect the content of toots from the target day out of a list of toots.
    If a toot uses CW (content warning), collect its spoiler_text instead.
    Also count how many toots were used.
"""
    # end of the day (JST)
end = timezone("Asia/Tokyo").localize(dt.datetime(TODAY.year, TODAY.month, TODAY.day, 0, 0, 0, 0))
    # start of the day (JST)
start = timezone("Asia/Tokyo").localize(dt.datetime(YESTERDAY.year, YESTERDAY.month, YESTERDAY.day, 0, 0, 0, 0))
text = ""
num = 0
for toot in toots:
        # only add the content of toots inside the time window
time = toot["created_at"].astimezone(timezone("Asia/Tokyo"))
if start <= time and time < end:
            # for CW toots, add the visible spoiler_text instead of the hidden body
num += 1
if toot["sensitive"] == True:
text = text + " " + toot["spoiler_text"]
else:
text = text + " " + toot["content"]
    # strip HTML tags, URLs, LSEP/RSEP characters, emoji, and HTML special characters
text = re.sub(r"<[^>]*?>", "", text)
text = re.sub(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+\$,%#]+)", "", text)
    text = re.sub(r"[\u2028\u2029]", "", text)
text = re.sub(r"&[a-zA-Z0-9]+;", "", text)
return(text, num)
def Get_toots():
"""
    Fetch the previous day's toots from Mastodon and save their text content.
    Also return the number of toots fetched for the previous day.
"""
    # start of the day (JST)
start = timezone("Asia/Tokyo").localize(dt.datetime(YESTERDAY.year, YESTERDAY.month, YESTERDAY.day, 0, 0, 0, 0))
    # fetch toots
toots = mastodon.timeline(timeline="local", limit=40)
while True:
        # convert from UTC to JST
time = toots[-1]["created_at"].astimezone(timezone("Asia/Tokyo"))
        # stop once every fetched toot is older than 0:00
if time < start:
break
        # fetch additional toots
toots = toots + mastodon.timeline(timeline = "local", max_id = toots[-1]["id"] - 1, limit = 40)
    # extract content (spoiler_text for CW) from the fetched toots
text, num = Extract_content(toots)
with open(PATH + "/toots_log/" + str(YESTERDAY) + ".txt", 'w') as f:
f.write(text)
return(num)
def Emoji_lanking():
"""
    Ranking of emoji usage counts.
"""
with open(PATH + "/toots_log/" + str(YESTERDAY) + ".txt", "r") as f:
text = f.read()
    # pull only the emoji shortcodes out of the saved toots and build a Series of their counts
emoji = pd.Series(re.findall(r":[a-zA-Z0-9_-]+:", text)).value_counts()
emoji = | pd.DataFrame(emoji) | pandas.DataFrame |
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.preprocessing import StandardScaler
import random
import copy
anchors = ['anchor1', 'anchor2', 'anchor3', 'anchor4']
channels = ['37','38','39']
polarities = ['V','H']
def iq_processing(data):
"""
Input: Data
Output: Processed Data
Processing: Power Scaling, IQ shifting
"""
cols_real = ['pdda_input_real_{}'.format(x+1) for x in range(5)]
cols_imag = ['pdda_input_imag_{}'.format(x+1) for x in range(5)]
iq_values = pd.DataFrame(data['pdda_input_real'].tolist(), columns=cols_real, index=data.index)
iq_values[cols_imag] = pd.DataFrame(data['pdda_input_imag'].tolist(), columns=cols_imag, index=data.index)
phase = pd.DataFrame(np.arctan2(iq_values['pdda_input_imag_1'],iq_values['pdda_input_real_1']), columns=['phase_1'])
cos = np.cos(phase).values.ravel()
sin = np.sin(phase).values.ravel()
out = data.copy()
iq_ref = np.abs(iq_values[f'pdda_input_real_1']*cos + iq_values[f'pdda_input_imag_1']*sin)
for i in range(1,6):
out[f'pdda_input_real_{i}'] = (iq_values[f'pdda_input_real_{i}']*cos + iq_values[f'pdda_input_imag_{i}']*sin)
out[f'pdda_input_imag_{i}'] = (-iq_values[f'pdda_input_real_{i}']*sin + iq_values[f'pdda_input_imag_{i}']*cos)
iq_ref += iq_values[f'pdda_input_real_{i}']**2 + iq_values[f'pdda_input_imag_{i}']**2
power_norm = StandardScaler().fit_transform((out['reference_power'] + out['relative_power']).values.reshape(-1,1))/10
out.insert(22, 'power', power_norm)
out.insert(21, 'iq_ref', iq_ref)
out.drop(columns=['pdda_input_imag_1', 'pdda_input_real', 'pdda_input_imag'], inplace=True)
return out.iloc[:,-10:]
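# What iq_processing does, in short: each antenna's IQ pair is rotated by minus the
# phase of antenna 1 (the cos/sin terms above), pinning the reference antenna's
# imaginary part near zero so only phases relative to it remain; e.g. a reference
# sample (I, Q) = (0.6, 0.8) has phase atan2(0.8, 0.6) and rotates to about
# (1.0, 0.0). 'power' is the standardized total RSSI (scaled by 1/10) and 'iq_ref'
# accumulates the squared magnitudes as an extra amplitude feature.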
def create_set(data, rooms, points, augmentation=False):
"""
Input: Data and points for set that we want
Output: x and y for set that we want
"""
x = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
y = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
for room in rooms:
for anchor in anchors:
for channel in channels:
util_data = {polarity: points[['point']].merge(data[room][anchor][channel][polarity], on='point') for polarity in polarities}
h,v = util_data['H'], util_data['V']
m = h.where(h['relative_power']+h['reference_power'] > v['reference_power']+v['relative_power'], v)
x[room][anchor][channel] = iq_processing(m)
y[room][anchor][channel] = util_data['H'][['true_phi', 'true_theta']]
if augmentation:
x_reduced = [reduceAmplitude(x, rooms, scale_util=5) for _ in range(30)]
x_aug, y_aug = copy.deepcopy(x), copy.deepcopy(y)
for room in rooms:
for anchor in anchors:
for channel in channels:
x_reduced_concat = pd.concat([x_reduced[i][room][anchor][channel] for i in range(30)])
x_aug[room][anchor][channel] = pd.concat([x_aug[room][anchor][channel], x_reduced_concat])
y_reduced_concat = pd.concat([y[room][anchor][channel] for _ in range(30)])
y_aug[room][anchor][channel] = pd.concat([y_aug[room][anchor][channel], y_reduced_concat])
x,y = x_aug, y_aug
return x, y
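# Augmentation note: each point is duplicated 30 times through reduceAmplitude
# (assumed to be defined elsewhere in this module) and the labels are repeated the
# same number of times, so the augmented x and y stay row-aligned.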
def create_iq_images(data):
'''
Preprocess input for CNN model
'''
powers = [data[anchor][channel]['power'] for channel in channels for anchor in anchors]
chanls = []
for channel in channels:
iqs = [data[anchor][channel] for anchor in anchors]
chanls.append(pd.concat(iqs, axis=1).values.reshape((-1, 4, 10)))
iq_images = np.concatenate(chanls, axis=1).reshape((-1, 3, 4, 10)).transpose(0,3,2,1)
powers = pd.concat(powers, axis=1)
return iq_images, powers
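# Shape check: per sample the CNN input is (10, 4, 3) -- 10 processed IQ/power
# features x 4 anchors x 3 advertising channels -- which is what
# reshape((-1, 3, 4, 10)).transpose(0, 3, 2, 1) yields, while `powers` keeps the
# 12 per-anchor/per-channel RSSI columns side by side.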
def create_set_cnn(data, rooms, points, augmentation=False):
"""
Input: Data and points for set that we want
Output: x -> (IQ Image (10x4x3 : IQs + RSSI x anchors x channels)), y
"""
x = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
y = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
tmp = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
for room in rooms:
for anchor in anchors:
for channel in channels:
util_data = {polarity: points[['point']].merge(data[room][anchor][channel][polarity], on='point') for polarity in polarities}
h,v = util_data['H'], util_data['V']
m = h.where(h['relative_power']+h['reference_power'] > v['reference_power']+v['relative_power'], v)
tmp[room][anchor][channel] = iq_processing(m)
y[room][anchor][channel] = util_data['H'][['true_phi', 'true_theta']]
if not augmentation:
x[room]['iq_image'], x[room]['powers'] = create_iq_images(tmp[room])
if augmentation:
x_reduced = [reduceAmplitude(tmp, rooms, scale_util=5) for _ in range(30)]
x_aug, y_aug = tmp, copy.deepcopy(y)
for room in rooms:
for anchor in anchors:
for channel in channels:
x_reduced_concat = pd.concat([x_reduced[i][room][anchor][channel] for i in range(30)])
x_aug[room][anchor][channel] = | pd.concat([x_aug[room][anchor][channel], x_reduced_concat]) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 23 11:11:57 2018
@author: kazuki.onodera
-d- -> /
-x- -> *
-p- -> +
-m- -> -
nohup python -u 000.py 0 > LOG/log_000.py_0.txt &
nohup python -u 000.py 1 > LOG/log_000.py_1.txt &
nohup python -u 000.py 2 > LOG/log_000.py_2.txt &
nohup python -u 000.py 3 > LOG/log_000.py_3.txt &
nohup python -u 000.py 4 > LOG/log_000.py_4.txt &
nohup python -u 000.py 5 > LOG/log_000.py_5.txt &
nohup python -u 000.py 6 > LOG/log_000.py_6.txt &
"""
import numpy as np
import pandas as pd
from multiprocessing import Pool, cpu_count
NTHREAD = cpu_count()
from itertools import combinations
from tqdm import tqdm
import sys
argv = sys.argv
import os, utils, gc
utils.start(__file__)
#==============================================================================
folders = [
# '../data',
'../feature', '../feature_unused',
# '../feature_var0', '../feature_corr1'
]
for fol in folders:
os.system(f'rm -rf {fol}')
os.system(f'mkdir {fol}')
col_app_money = ['app_AMT_INCOME_TOTAL', 'app_AMT_CREDIT', 'app_AMT_ANNUITY', 'app_AMT_GOODS_PRICE']
col_app_day = ['app_DAYS_BIRTH', 'app_DAYS_EMPLOYED', 'app_DAYS_REGISTRATION', 'app_DAYS_ID_PUBLISH', 'app_DAYS_LAST_PHONE_CHANGE']
def get_trte():
usecols = ['SK_ID_CURR', 'AMT_INCOME_TOTAL', 'AMT_CREDIT', 'AMT_ANNUITY', 'AMT_GOODS_PRICE']
usecols += ['DAYS_BIRTH', 'DAYS_EMPLOYED', 'DAYS_REGISTRATION', 'DAYS_ID_PUBLISH', 'DAYS_LAST_PHONE_CHANGE']
rename_di = {
'AMT_INCOME_TOTAL': 'app_AMT_INCOME_TOTAL',
'AMT_CREDIT': 'app_AMT_CREDIT',
'AMT_ANNUITY': 'app_AMT_ANNUITY',
'AMT_GOODS_PRICE': 'app_AMT_GOODS_PRICE',
'DAYS_BIRTH': 'app_DAYS_BIRTH',
'DAYS_EMPLOYED': 'app_DAYS_EMPLOYED',
'DAYS_REGISTRATION': 'app_DAYS_REGISTRATION',
'DAYS_ID_PUBLISH': 'app_DAYS_ID_PUBLISH',
'DAYS_LAST_PHONE_CHANGE': 'app_DAYS_LAST_PHONE_CHANGE',
}
trte = pd.concat([pd.read_csv('../input/application_train.csv.zip', usecols=usecols).rename(columns=rename_di),
pd.read_csv('../input/application_test.csv.zip', usecols=usecols).rename(columns=rename_di)],
ignore_index=True)
return trte
def prep_prev(df):
df['AMT_APPLICATION'].replace(0, np.nan, inplace=True)
df['AMT_CREDIT'].replace(0, np.nan, inplace=True)
df['CNT_PAYMENT'].replace(0, np.nan, inplace=True)
df['AMT_DOWN_PAYMENT'].replace(np.nan, 0, inplace=True)
df.loc[df['NAME_CONTRACT_STATUS']!='Approved', 'AMT_DOWN_PAYMENT'] = np.nan
df['RATE_DOWN_PAYMENT'].replace(np.nan, 0, inplace=True)
df.loc[df['NAME_CONTRACT_STATUS']!='Approved', 'RATE_DOWN_PAYMENT'] = np.nan
# df['xxx'].replace(0, np.nan, inplace=True)
# df['xxx'].replace(0, np.nan, inplace=True)
return
p = int(argv[1])
if True:
#def multi(p):
if p==0:
# =============================================================================
# application
# =============================================================================
def f1(df):
df['CODE_GENDER'] = 1 - (df['CODE_GENDER']=='F')*1 # 4 'XNA' are converted to 'M'
df['FLAG_OWN_CAR'] = (df['FLAG_OWN_CAR']=='Y')*1
df['FLAG_OWN_REALTY'] = (df['FLAG_OWN_REALTY']=='Y')*1
df['EMERGENCYSTATE_MODE'] = (df['EMERGENCYSTATE_MODE']=='Yes')*1
df['AMT_CREDIT-d-AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-d-AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] / df['AMT_INCOME_TOTAL']
df['AMT_CREDIT-d-AMT_ANNUITY'] = df['AMT_CREDIT'] / df['AMT_ANNUITY'] # how long should user pay?(month)
df['AMT_GOODS_PRICE-d-AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] / df['AMT_ANNUITY']# how long should user pay?(month)
df['AMT_GOODS_PRICE-d-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['AMT_CREDIT']
df['AMT_GOODS_PRICE-m-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] - df['AMT_CREDIT']
df['AMT_GOODS_PRICE-m-AMT_CREDIT-d-AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE-m-AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
df['age_finish_payment'] = df['DAYS_BIRTH'].abs() + (df['AMT_CREDIT-d-AMT_ANNUITY']*30)
# df['age_finish_payment'] = (df['DAYS_BIRTH']/-365) + df['credit-d-annuity']
df.loc[df['DAYS_EMPLOYED']==365243, 'DAYS_EMPLOYED'] = np.nan
df['DAYS_EMPLOYED-m-DAYS_BIRTH'] = df['DAYS_EMPLOYED'] - df['DAYS_BIRTH']
df['DAYS_REGISTRATION-m-DAYS_BIRTH'] = df['DAYS_REGISTRATION'] - df['DAYS_BIRTH']
df['DAYS_ID_PUBLISH-m-DAYS_BIRTH'] = df['DAYS_ID_PUBLISH'] - df['DAYS_BIRTH']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_BIRTH'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_BIRTH']
df['DAYS_REGISTRATION-m-DAYS_EMPLOYED'] = df['DAYS_REGISTRATION'] - df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-m-DAYS_EMPLOYED'] = df['DAYS_ID_PUBLISH'] - df['DAYS_EMPLOYED']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_EMPLOYED'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-m-DAYS_REGISTRATION'] = df['DAYS_ID_PUBLISH'] - df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_REGISTRATION'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-m-DAYS_ID_PUBLISH'] = df['DAYS_LAST_PHONE_CHANGE'] - df['DAYS_ID_PUBLISH']
col = ['DAYS_EMPLOYED-m-DAYS_BIRTH',
'DAYS_REGISTRATION-m-DAYS_BIRTH',
'DAYS_ID_PUBLISH-m-DAYS_BIRTH',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_BIRTH',
'DAYS_REGISTRATION-m-DAYS_EMPLOYED',
'DAYS_ID_PUBLISH-m-DAYS_EMPLOYED',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_EMPLOYED',
'DAYS_ID_PUBLISH-m-DAYS_REGISTRATION',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_REGISTRATION',
'DAYS_LAST_PHONE_CHANGE-m-DAYS_ID_PUBLISH'
]
col_comb = list(combinations(col, 2))
for i,j in col_comb:
df[f'{i}-d-{j}'] = df[i] / df[j]
df['DAYS_EMPLOYED-d-DAYS_BIRTH'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['DAYS_REGISTRATION-d-DAYS_BIRTH'] = df['DAYS_REGISTRATION'] / df['DAYS_BIRTH']
df['DAYS_ID_PUBLISH-d-DAYS_BIRTH'] = df['DAYS_ID_PUBLISH'] / df['DAYS_BIRTH']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_BIRTH'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
df['DAYS_REGISTRATION-d-DAYS_EMPLOYED'] = df['DAYS_REGISTRATION'] / df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-d-DAYS_EMPLOYED'] = df['DAYS_ID_PUBLISH'] / df['DAYS_EMPLOYED']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_EMPLOYED'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
df['DAYS_ID_PUBLISH-d-DAYS_REGISTRATION'] = df['DAYS_ID_PUBLISH'] / df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_REGISTRATION'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_REGISTRATION']
df['DAYS_LAST_PHONE_CHANGE-d-DAYS_ID_PUBLISH'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_ID_PUBLISH']
df['OWN_CAR_AGE-d-DAYS_BIRTH'] = (df['OWN_CAR_AGE']*(-365)) / df['DAYS_BIRTH']
df['OWN_CAR_AGE-m-DAYS_BIRTH'] = df['DAYS_BIRTH'] + (df['OWN_CAR_AGE']*365)
df['OWN_CAR_AGE-d-DAYS_EMPLOYED'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['OWN_CAR_AGE-m-DAYS_EMPLOYED'] = df['DAYS_EMPLOYED'] + (df['OWN_CAR_AGE']*365)
df['cnt_adults'] = df['CNT_FAM_MEMBERS'] - df['CNT_CHILDREN']
df['CNT_CHILDREN-d-CNT_FAM_MEMBERS'] = df['CNT_CHILDREN'] / df['CNT_FAM_MEMBERS']
df['income_per_adult'] = df['AMT_INCOME_TOTAL'] / df['cnt_adults']
# df.loc[df['CNT_CHILDREN']==0, 'CNT_CHILDREN'] = np.nan
df['AMT_INCOME_TOTAL-d-CNT_CHILDREN'] = df['AMT_INCOME_TOTAL'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_CREDIT-d-CNT_CHILDREN'] = df['AMT_CREDIT'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_ANNUITY-d-CNT_CHILDREN'] = df['AMT_ANNUITY'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_GOODS_PRICE-d-CNT_CHILDREN'] = df['AMT_GOODS_PRICE'] / (df['CNT_CHILDREN']+0.000001)
df['AMT_INCOME_TOTAL-d-cnt_adults'] = df['AMT_INCOME_TOTAL'] / df['cnt_adults']
df['AMT_CREDIT-d-cnt_adults'] = df['AMT_CREDIT'] / df['cnt_adults']
df['AMT_ANNUITY-d-cnt_adults'] = df['AMT_ANNUITY'] / df['cnt_adults']
df['AMT_GOODS_PRICE-d-cnt_adults'] = df['AMT_GOODS_PRICE'] / df['cnt_adults']
df['AMT_INCOME_TOTAL-d-CNT_FAM_MEMBERS'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['AMT_CREDIT-d-CNT_FAM_MEMBERS'] = df['AMT_CREDIT'] / df['CNT_FAM_MEMBERS']
df['AMT_ANNUITY-d-CNT_FAM_MEMBERS'] = df['AMT_ANNUITY'] / df['CNT_FAM_MEMBERS']
df['AMT_GOODS_PRICE-d-CNT_FAM_MEMBERS'] = df['AMT_GOODS_PRICE'] / df['CNT_FAM_MEMBERS']
# EXT_SOURCE_x
df['EXT_SOURCES_prod'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']
df['EXT_SOURCES_sum'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].sum(axis=1)
df['EXT_SOURCES_sum'] = df['EXT_SOURCES_sum'].fillna(df['EXT_SOURCES_sum'].mean())
df['EXT_SOURCES_mean'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
df['EXT_SOURCES_mean'] = df['EXT_SOURCES_mean'].fillna(df['EXT_SOURCES_mean'].mean())
df['EXT_SOURCES_std'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
df['EXT_SOURCES_std'] = df['EXT_SOURCES_std'].fillna(df['EXT_SOURCES_std'].mean())
df['EXT_SOURCES_1-2-3'] = df['EXT_SOURCE_1'] - df['EXT_SOURCE_2'] - df['EXT_SOURCE_3']
df['EXT_SOURCES_2-1-3'] = df['EXT_SOURCE_2'] - df['EXT_SOURCE_1'] - df['EXT_SOURCE_3']
df['EXT_SOURCES_1-2'] = df['EXT_SOURCE_1'] - df['EXT_SOURCE_2']
df['EXT_SOURCES_2-3'] = df['EXT_SOURCE_2'] - df['EXT_SOURCE_3']
df['EXT_SOURCES_1-3'] = df['EXT_SOURCE_1'] - df['EXT_SOURCE_3']
# =========
# https://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features/code
# =========
df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
# =========
# https://www.kaggle.com/poohtls/fork-of-fork-lightgbm-with-simple-features/code
# =========
docs = [_f for _f in df.columns if 'FLAG_DOC' in _f]
live = [_f for _f in df.columns if ('FLAG_' in _f) & ('FLAG_DOC' not in _f) & ('_FLAG_' not in _f)]
inc_by_org = df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE').median()['AMT_INCOME_TOTAL']
df['alldocs_kurt'] = df[docs].kurtosis(axis=1)
df['alldocs_skew'] = df[docs].skew(axis=1)
df['alldocs_mean'] = df[docs].mean(axis=1)
df['alldocs_sum'] = df[docs].sum(axis=1)
df['alldocs_std'] = df[docs].std(axis=1)
df['NEW_LIVE_IND_SUM'] = df[live].sum(axis=1)
df['NEW_INC_PER_CHLD'] = df['AMT_INCOME_TOTAL'] / (1 + df['CNT_CHILDREN'])
df['NEW_INC_BY_ORG'] = df['ORGANIZATION_TYPE'].map(inc_by_org)
df['NEW_ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / (1 + df['AMT_INCOME_TOTAL'])
df['NEW_CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']
df['NEW_CAR_TO_EMPLOY_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['NEW_PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
df['NEW_PHONE_TO_EMPLOYED_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
# =============================================================================
# Maxwell features
# =============================================================================
bdg_avg = df.filter(regex='_AVG$').columns
bdg_mode = df.filter(regex='_MODE$').columns
bdg_medi = df.filter(regex='_MEDI$').columns[:len(bdg_avg)] # ignore FONDKAPREMONT_MODE...
df['building_score_avg_mean'] = df[bdg_avg].mean(1)
df['building_score_avg_std'] = df[bdg_avg].std(1)
df['building_score_avg_sum'] = df[bdg_avg].sum(1)
df['building_score_mode_mean'] = df[bdg_mode].mean(1)
df['building_score_mode_std'] = df[bdg_mode].std(1)
df['building_score_mode_sum'] = df[bdg_mode].sum(1)
df['building_score_medi_mean'] = df[bdg_medi].mean(1)
df['building_score_medi_std'] = df[bdg_medi].std(1)
df['building_score_medi_sum'] = df[bdg_medi].sum(1)
df['maxwell_feature_1'] = (df['EXT_SOURCE_1'] * df['EXT_SOURCE_3']) ** (1 / 2)
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
return
df = pd.read_csv('../input/application_train.csv.zip')
f1(df)
utils.to_pickles(df, '../data/train', utils.SPLIT_SIZE)
utils.to_pickles(df[['TARGET']], '../data/label', utils.SPLIT_SIZE)
df = pd.read_csv('../input/application_test.csv.zip')
f1(df)
utils.to_pickles(df, '../data/test', utils.SPLIT_SIZE)
df[['SK_ID_CURR']].to_pickle('../data/sub.p')
elif p==1:
# =============================================================================
# prev
# =============================================================================
"""
df = utils.read_pickles('../data/previous_application')
"""
df = pd.merge(pd.read_csv('../data/prev_new_v4.csv.gz'),
get_trte(), on='SK_ID_CURR', how='left')
# df = pd.merge(pd.read_csv('../input/previous_application.csv.zip'),
# get_trte(), on='SK_ID_CURR', how='left')
prep_prev(df)
df['FLAG_LAST_APPL_PER_CONTRACT'] = (df['FLAG_LAST_APPL_PER_CONTRACT']=='Y')*1
# day
for c in ['DAYS_FIRST_DRAWING', 'DAYS_FIRST_DUE', 'DAYS_LAST_DUE_1ST_VERSION',
'DAYS_LAST_DUE', 'DAYS_TERMINATION']:
df.loc[df[c]==365243, c] = np.nan
df['days_fdue-m-fdrw'] = df['DAYS_FIRST_DUE'] - df['DAYS_FIRST_DRAWING']
df['days_ldue1-m-fdrw'] = df['DAYS_LAST_DUE_1ST_VERSION'] - df['DAYS_FIRST_DRAWING']
df['days_ldue-m-fdrw'] = df['DAYS_LAST_DUE'] - df['DAYS_FIRST_DRAWING'] # total span
df['days_trm-m-fdrw'] = df['DAYS_TERMINATION'] - df['DAYS_FIRST_DRAWING']
df['days_ldue1-m-fdue'] = df['DAYS_LAST_DUE_1ST_VERSION'] - df['DAYS_FIRST_DUE']
df['days_ldue-m-fdue'] = df['DAYS_LAST_DUE'] - df['DAYS_FIRST_DUE']
df['days_trm-m-fdue'] = df['DAYS_TERMINATION'] - df['DAYS_FIRST_DUE']
df['days_ldue-m-ldue1'] = df['DAYS_LAST_DUE'] - df['DAYS_LAST_DUE_1ST_VERSION']
df['days_trm-m-ldue1'] = df['DAYS_TERMINATION'] - df['DAYS_LAST_DUE_1ST_VERSION']
df['days_trm-m-ldue'] = df['DAYS_TERMINATION'] - df['DAYS_LAST_DUE']
# money
df['total_debt'] = df['AMT_ANNUITY'] * df['CNT_PAYMENT']
df['AMT_CREDIT-d-total_debt'] = df['AMT_CREDIT'] / df['total_debt']
df['AMT_GOODS_PRICE-d-total_debt'] = df['AMT_GOODS_PRICE'] / df['total_debt']
df['AMT_GOODS_PRICE-d-AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['AMT_CREDIT']
# app & money
df['AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-d-app_AMT_INCOME_TOTAL'] = df['AMT_APPLICATION'] / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-m-app_AMT_INCOME_TOTAL'] = df['AMT_ANNUITY'] - df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_INCOME_TOTAL'] = df['AMT_APPLICATION'] - df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_INCOME_TOTAL'] = df['AMT_CREDIT'] - df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL'] = df['AMT_GOODS_PRICE'] - df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_CREDIT'] = df['AMT_ANNUITY'] / df['app_AMT_CREDIT']
df['AMT_APPLICATION-d-app_AMT_CREDIT'] = df['AMT_APPLICATION'] / df['app_AMT_CREDIT']
df['AMT_CREDIT-d-app_AMT_CREDIT'] = df['AMT_CREDIT'] / df['app_AMT_CREDIT']
df['AMT_GOODS_PRICE-d-app_AMT_CREDIT'] = df['AMT_GOODS_PRICE'] / df['app_AMT_CREDIT']
df['AMT_ANNUITY-m-app_AMT_CREDIT'] = df['AMT_ANNUITY'] - df['app_AMT_CREDIT']
df['AMT_APPLICATION-m-app_AMT_CREDIT'] = df['AMT_APPLICATION'] - df['app_AMT_CREDIT']
df['AMT_CREDIT-m-app_AMT_CREDIT'] = df['AMT_CREDIT'] - df['app_AMT_CREDIT']
df['AMT_GOODS_PRICE-m-app_AMT_CREDIT'] = df['AMT_GOODS_PRICE'] - df['app_AMT_CREDIT']
df['AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_CREDIT']) / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_ANNUITY'] = df['AMT_ANNUITY'] / df['app_AMT_ANNUITY']
df['AMT_APPLICATION-d-app_AMT_ANNUITY'] = df['AMT_APPLICATION'] / df['app_AMT_ANNUITY']
df['AMT_CREDIT-d-app_AMT_ANNUITY'] = df['AMT_CREDIT'] / df['app_AMT_ANNUITY']
df['AMT_GOODS_PRICE-d-app_AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] / df['app_AMT_ANNUITY']
df['AMT_ANNUITY-m-app_AMT_ANNUITY'] = df['AMT_ANNUITY'] - df['app_AMT_ANNUITY']
df['AMT_APPLICATION-m-app_AMT_ANNUITY'] = df['AMT_APPLICATION'] - df['app_AMT_ANNUITY']
df['AMT_CREDIT-m-app_AMT_ANNUITY'] = df['AMT_CREDIT'] - df['app_AMT_ANNUITY']
df['AMT_GOODS_PRICE-m-app_AMT_ANNUITY'] = df['AMT_GOODS_PRICE'] - df['app_AMT_ANNUITY']
df['AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_ANNUITY']) / df['app_AMT_INCOME_TOTAL']
df['AMT_ANNUITY-d-app_AMT_GOODS_PRICE'] = df['AMT_ANNUITY'] / df['app_AMT_GOODS_PRICE']
df['AMT_APPLICATION-d-app_AMT_GOODS_PRICE'] = df['AMT_APPLICATION'] / df['app_AMT_GOODS_PRICE']
df['AMT_CREDIT-d-app_AMT_GOODS_PRICE'] = df['AMT_CREDIT'] / df['app_AMT_GOODS_PRICE']
df['AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE'] = df['AMT_GOODS_PRICE'] / df['app_AMT_GOODS_PRICE']
df['AMT_ANNUITY-m-app_AMT_GOODS_PRICE'] = df['AMT_ANNUITY'] - df['app_AMT_GOODS_PRICE']
df['AMT_APPLICATION-m-app_AMT_GOODS_PRICE'] = df['AMT_APPLICATION'] - df['app_AMT_GOODS_PRICE']
df['AMT_CREDIT-m-app_AMT_GOODS_PRICE'] = df['AMT_CREDIT'] - df['app_AMT_GOODS_PRICE']
df['AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE'] = df['AMT_GOODS_PRICE'] - df['app_AMT_GOODS_PRICE']
df['AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_ANNUITY'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_APPLICATION'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_CREDIT'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
df['AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL'] = (df['AMT_GOODS_PRICE'] - df['app_AMT_GOODS_PRICE']) / df['app_AMT_INCOME_TOTAL']
# nejumi
f_name='nejumi'; init_rate=0.9; n_iter=500
df['AMT_ANNUITY_d_AMT_CREDIT_temp'] = df.AMT_ANNUITY / df.AMT_CREDIT
df[f_name] = df['AMT_ANNUITY_d_AMT_CREDIT_temp']*((1 + init_rate)**df.CNT_PAYMENT - 1)/((1 + init_rate)**df.CNT_PAYMENT)
for i in range(n_iter):
df[f_name] = df['AMT_ANNUITY_d_AMT_CREDIT_temp']*((1 + df[f_name])**df.CNT_PAYMENT - 1)/((1 + df[f_name])**df.CNT_PAYMENT)
df.drop(['AMT_ANNUITY_d_AMT_CREDIT_temp'], axis=1, inplace=True)
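        # 'nejumi' is a fixed-point estimate of the implied monthly interest rate r
        # of the previous loan: iterating
        # r <- (AMT_ANNUITY/AMT_CREDIT) * (1 - (1+r)^-CNT_PAYMENT)
        # from the 0.9 start converges, for typical values, to the r satisfying the
        # annuity formula AMT_ANNUITY = AMT_CREDIT * r / (1 - (1+r)^-CNT_PAYMENT).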
df.sort_values(['SK_ID_CURR', 'DAYS_DECISION'], inplace=True)
df.reset_index(drop=True, inplace=True)
col = [
'total_debt',
'AMT_CREDIT-d-total_debt',
'AMT_GOODS_PRICE-d-total_debt',
'AMT_GOODS_PRICE-d-AMT_CREDIT',
'AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-d-app_AMT_CREDIT',
'AMT_APPLICATION-d-app_AMT_CREDIT',
'AMT_CREDIT-d-app_AMT_CREDIT',
'AMT_GOODS_PRICE-d-app_AMT_CREDIT',
'AMT_ANNUITY-d-app_AMT_ANNUITY',
'AMT_APPLICATION-d-app_AMT_ANNUITY',
'AMT_CREDIT-d-app_AMT_ANNUITY',
'AMT_GOODS_PRICE-d-app_AMT_ANNUITY',
'AMT_ANNUITY-d-app_AMT_GOODS_PRICE',
'AMT_APPLICATION-d-app_AMT_GOODS_PRICE',
'AMT_CREDIT-d-app_AMT_GOODS_PRICE',
'AMT_GOODS_PRICE-d-app_AMT_GOODS_PRICE',
'AMT_ANNUITY-m-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_CREDIT',
'AMT_APPLICATION-m-app_AMT_CREDIT',
'AMT_CREDIT-m-app_AMT_CREDIT',
'AMT_GOODS_PRICE-m-app_AMT_CREDIT',
'AMT_ANNUITY-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_CREDIT-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_ANNUITY',
'AMT_APPLICATION-m-app_AMT_ANNUITY',
'AMT_CREDIT-m-app_AMT_ANNUITY',
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY',
'AMT_ANNUITY-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_ANNUITY-d-app_AMT_INCOME_TOTAL',
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE',
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE',
'AMT_CREDIT-m-app_AMT_GOODS_PRICE',
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE',
'AMT_ANNUITY-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_APPLICATION-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_CREDIT-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'AMT_GOODS_PRICE-m-app_AMT_GOODS_PRICE-d-app_AMT_INCOME_TOTAL',
'nejumi'
]
def multi_prev(c):
ret_diff = []
ret_pctchng = []
key_bk = x_bk = None
for key, x in df[['SK_ID_CURR', c]].values:
# for key, x in tqdm(df[['SK_ID_CURR', c]].values, mininterval=30):
if key_bk is None:
ret_diff.append(None)
ret_pctchng.append(None)
else:
if key_bk == key:
ret_diff.append(x - x_bk)
ret_pctchng.append( (x_bk-x) / x_bk)
else:
ret_diff.append(None)
ret_pctchng.append(None)
key_bk = key
x_bk = x
ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
ret = pd.concat([ret_diff, ret_pctchng], axis=1)
return ret
pool = Pool(len(col))
callback = pd.concat(pool.map(multi_prev, col), axis=1)
print('===== PREV ====')
print(callback.columns.tolist())
pool.close()
df = pd.concat([df, callback], axis=1)
# app & day
col_prev = ['DAYS_FIRST_DRAWING', 'DAYS_FIRST_DUE', 'DAYS_LAST_DUE_1ST_VERSION',
'DAYS_LAST_DUE', 'DAYS_TERMINATION']
for c1 in col_prev:
for c2 in col_app_day:
# print(f"'{c1}-m-{c2}',")
df[f'{c1}-m-{c2}'] = df[c1] - df[c2]
df[f'{c1}-d-{c2}'] = df[c1] / df[c2]
df['cnt_paid'] = df.apply(lambda x: min( np.ceil(x['DAYS_FIRST_DUE']/-30), x['CNT_PAYMENT'] ), axis=1)
df['cnt_paid_ratio'] = df['cnt_paid'] / df['CNT_PAYMENT']
df['cnt_unpaid'] = df['CNT_PAYMENT'] - df['cnt_paid']
df['amt_paid'] = df['AMT_ANNUITY'] * df['cnt_paid']
# df['amt_paid_ratio'] = df['amt_paid'] / df['total_debt'] # same as cnt_paid_ratio
df['amt_unpaid'] = df['total_debt'] - df['amt_paid']
df['active'] = (df['cnt_unpaid']>0)*1
df['completed'] = (df['cnt_unpaid']==0)*1
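        # cnt_paid above approximates the number of installments already paid as the
        # count of ~30-day months since DAYS_FIRST_DUE, capped at CNT_PAYMENT; the
        # amt_paid/amt_unpaid amounts and the future_/past_payment_Xm columns below
        # are all derived from that approximation rather than from actual repayments.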
# future payment
df_tmp = pd.DataFrame()
print('future payment')
rem_max = df['cnt_unpaid'].max() # 80
# rem_max = 1
df['cnt_unpaid_tmp'] = df['cnt_unpaid']
for i in range(int( rem_max )):
c = f'future_payment_{i+1}m'
df_tmp[c] = df['cnt_unpaid_tmp'].map(lambda x: min(x, 1)) * df['AMT_ANNUITY']
df_tmp.loc[df_tmp[c]==0, c] = np.nan
df['cnt_unpaid_tmp'] -= 1
df['cnt_unpaid_tmp'] = df['cnt_unpaid_tmp'].map(lambda x: max(x, 0))
# df['prev_future_payment_max'] = df.filter(regex='^prev_future_payment_').max(1)
del df['cnt_unpaid_tmp']
df = pd.concat([df, df_tmp], axis=1)
# past payment
df_tmp = pd.DataFrame()
print('past payment')
rem_max = df['cnt_paid'].max() # 72
df['cnt_paid_tmp'] = df['cnt_paid']
for i in range(int( rem_max )):
c = f'past_payment_{i+1}m'
df_tmp[c] = df['cnt_paid_tmp'].map(lambda x: min(x, 1)) * df['AMT_ANNUITY']
df_tmp.loc[df_tmp[c]==0, c] = np.nan
df['cnt_paid_tmp'] -= 1
df['cnt_paid_tmp'] = df['cnt_paid_tmp'].map(lambda x: max(x, 0))
# df['prev_past_payment_max'] = df.filter(regex='^prev_past_payment_').max(1)
del df['cnt_paid_tmp']
df = pd.concat([df, df_tmp], axis=1)
df['APP_CREDIT_PERC'] = df['AMT_APPLICATION'] / df['AMT_CREDIT']
#df.filter(regex='^amt_future_payment_')
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
utils.to_pickles(df, '../data/previous_application', utils.SPLIT_SIZE)
elif p==2:
# =============================================================================
# POS
# =============================================================================
"""
df = utils.read_pickles('../data/POS_CASH_balance')
"""
df = pd.read_csv('../input/POS_CASH_balance.csv.zip')
# data cleansing!!!
## drop signed. sample SK_ID_PREV==1769939
df = df[df.NAME_CONTRACT_STATUS!='Signed']
## Zombie NAME_CONTRACT_STATUS=='Completed' and CNT_INSTALMENT_FUTURE!=0. 1134377
df.loc[(df.NAME_CONTRACT_STATUS=='Completed') & (df.CNT_INSTALMENT_FUTURE!=0), 'NAME_CONTRACT_STATUS'] = 'Active'
## CNT_INSTALMENT_FUTURE=0 and Active. sample SK_ID_PREV==1998905, 2174168
df.loc[(df.CNT_INSTALMENT_FUTURE==0) & (df.NAME_CONTRACT_STATUS=='Active'), 'NAME_CONTRACT_STATUS'] = 'Completed'
## remove duplicated CNT_INSTALMENT_FUTURE=0. sample SK_ID_PREV==2601827
df_0 = df[df['CNT_INSTALMENT_FUTURE']==0]
df_1 = df[df['CNT_INSTALMENT_FUTURE']>0]
df_0['NAME_CONTRACT_STATUS'] = 'Completed'
df_0.sort_values(['SK_ID_PREV', 'MONTHS_BALANCE'], ascending=[True, False], inplace=True)
df_0.drop_duplicates('SK_ID_PREV', keep='last', inplace=True)
df = pd.concat([df_0, df_1], ignore_index=True)
del df_0, df_1; gc.collect()
# TODO: end in active. 1002879
# df['CNT_INSTALMENT_FUTURE_min'] = df.groupby('SK_ID_PREV').CNT_INSTALMENT_FUTURE.transform('min')
# df['MONTHS_BALANCE_max'] = df.groupby('SK_ID_PREV').MONTHS_BALANCE.transform('max')
# df.loc[(df.CNT_INSTALMENT_FUTURE_min!=0) & (df.MONTHS_BALANCE_max!=-1)]
df['CNT_INSTALMENT-m-CNT_INSTALMENT_FUTURE'] = df['CNT_INSTALMENT'] - df['CNT_INSTALMENT_FUTURE']
df['CNT_INSTALMENT_FUTURE-d-CNT_INSTALMENT'] = df['CNT_INSTALMENT_FUTURE'] / df['CNT_INSTALMENT']
df.sort_values(['SK_ID_PREV', 'MONTHS_BALANCE'], inplace=True)
df.reset_index(drop=True, inplace=True)
col = ['CNT_INSTALMENT_FUTURE', 'SK_DPD', 'SK_DPD_DEF']
def multi_pos(c):
ret_diff = []
ret_pctchng = []
key_bk = x_bk = None
for key, x in df[['SK_ID_PREV', c]].values:
# for key, x in tqdm(df[['SK_ID_CURR', c]].values, mininterval=30):
if key_bk is None:
ret_diff.append(None)
ret_pctchng.append(None)
else:
if key_bk == key:
ret_diff.append(x - x_bk)
ret_pctchng.append( (x_bk-x) / x_bk)
else:
ret_diff.append(None)
ret_pctchng.append(None)
key_bk = key
x_bk = x
ret_diff = pd.Series(ret_diff, name=f'{c}_diff')
ret_pctchng = pd.Series(ret_pctchng, name=f'{c}_pctchange')
ret = pd.concat([ret_diff, ret_pctchng], axis=1)
return ret
pool = Pool(len(col))
callback = pd.concat(pool.map(multi_pos, col), axis=1)
print('===== POS ====')
print(callback.columns.tolist())
pool.close()
df = pd.concat([df, callback], axis=1)
df['SK_DPD-m-SK_DPD_DEF'] = df['SK_DPD'] - df['SK_DPD_DEF']
# df['SK_DPD_diff_over0'] = (df['SK_DPD_diff']>0)*1
# df['SK_DPD_diff_over5'] = (df['SK_DPD_diff']>5)*1
# df['SK_DPD_diff_over10'] = (df['SK_DPD_diff']>10)*1
# df['SK_DPD_diff_over15'] = (df['SK_DPD_diff']>15)*1
# df['SK_DPD_diff_over20'] = (df['SK_DPD_diff']>20)*1
# df['SK_DPD_diff_over25'] = (df['SK_DPD_diff']>25)*1
df.replace(np.inf, np.nan, inplace=True) # TODO: any other plan?
df.replace(-np.inf, np.nan, inplace=True)
utils.to_pickles(df, '../data/POS_CASH_balance', utils.SPLIT_SIZE)
elif p==3:
# =============================================================================
# ins
# =============================================================================
"""
df = utils.read_pickles('../data/installments_payments')
"""
df = pd.read_csv('../input/installments_payments.csv.zip')
trte = get_trte()
df = pd.merge(df, trte, on='SK_ID_CURR', how='left')
prev = pd.read_csv('../input/previous_application.csv.zip',
usecols=['SK_ID_PREV', 'CNT_PAYMENT', 'AMT_ANNUITY'])
prev['CNT_PAYMENT'].replace(0, np.nan, inplace=True)
# prep_prev(prev)
df = | pd.merge(df, prev, on='SK_ID_PREV', how='left') | pandas.merge |
import sys
sys.path.append(".")
import time
import sys
import benchmark.model.model as model_module
import pandas
from simpy import Environment
RESULTS_DIR = "./benchmark/results/"
runtime = (
"Casymda@SimPy(PyPy73)" if "PyPy" in sys.version else "Casymda@SimPy(CPython38)"
) # to be changed
n_entities = [10, 100, 1000, 10_000, 50_000, 100_000, 200_000] # ~1 min i5
inter_arrival_times = [0, 10]
def run_benchmark():
# warmup run?
results = []
for n_entity in n_entities:
for iat in inter_arrival_times:
sequential_proc_time = 10
overall_seq_time = iat + (n_entity / 2) * sequential_proc_time
last_time = (n_entity - 1) * iat + sequential_proc_time
expected_end = max(last_time, overall_seq_time)
t = run(
max_entities=n_entity,
inter_arrival_time=iat,
sequential_proc_time=sequential_proc_time,
expected_end=expected_end,
)
result = {
"runtime": runtime,
"n_entities": n_entity,
"inter_arrival_time": iat,
"time": t,
}
results.append(result)
| pandas.DataFrame(results) | pandas.DataFrame |
import pandas as pd
import numpy as np
import sys
import datetime
import seaborn as sns
from tqdm import tqdm
from sklearn import tree
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from keras import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
from sklearn.utils import resample
def get_feature_balance_to_montlhypayment_percentage(loan,test):
df_cur_transactions = df_all_transactions_test if test else df_all_transactions
df_loan_transactions = df_cur_transactions.loc[df_cur_transactions['account_id'] == loan['account_id']] #get all transactions for the account of the loan
year = []
month = []
for index, transaction in df_loan_transactions.iterrows(): #gets year and month for each transaction
trans_date = datetime.datetime.strptime(str(transaction['date']), "%y%m%d")
year.append(trans_date.year)
month.append(trans_date.month)
df_loan_transactions['year'] = year
df_loan_transactions['month'] = month
df_mean_balance_bymonth = df_loan_transactions.groupby(['year','month'])['balance'].mean().reset_index(name='balance')
df_mean_balance_allmonth = df_mean_balance_bymonth['balance'].mean()
return df_mean_balance_allmonth / loan['payments']
def get_client_district_from_account(account_id):
df_disposition = df_dispositions.loc[(df_dispositions['account_id'] == account_id) & (df_dispositions['type'] == 'OWNER')] #get the disposition of the owner of the account
df_client = df_clients.loc[df_clients['client_id'] == df_disposition.iloc[0]['client_id']] #get the info of the owner of the account
return df_districts.loc[df_districts['code '] == df_client.iloc[0]['district_id']].iloc[0] #get the district info of the owner of the account
def get_feature_average_no_crimes_per_100_habitants(loan):
district = get_client_district_from_account(loan['account_id'])
no_crimes_95 = district['no. of commited crimes \'95 ']
no_crimes_96 = district['no. of commited crimes \'96 ']
no_crimes_95 = no_crimes_96 if no_crimes_95 == '?' else no_crimes_95
no_crimes_96 = no_crimes_95 if no_crimes_96 == '?' else no_crimes_96
return ((int(no_crimes_95)+int(no_crimes_96))/2)/int(district['no. of inhabitants'])*100
def get_feature_average_unemployment_rate(loan):
district = get_client_district_from_account(loan['account_id'])
unemploymant_rate_95 = district['unemploymant rate \'95 ']
unemploymant_rate_96 = district['unemploymant rate \'96 ']
unemploymant_rate_95 = unemploymant_rate_96 if unemploymant_rate_95 == '?' else unemploymant_rate_95
unemploymant_rate_96 = unemploymant_rate_95 if unemploymant_rate_96 == '?' else unemploymant_rate_96
return (float(unemploymant_rate_95)+float(unemploymant_rate_96))/2
def get_feature_proportion_avgsalary_monthlypayments(loan):
district = get_client_district_from_account(loan['account_id'])
return int(district['average salary '])/int(loan['payments'])
def get_feature_account_credit_Card_type(loan,test):
df_cur_credit_cards = df_credit_cards_test if test else df_credit_cards
df_loan_disposition = df_dispositions.loc[(df_dispositions['account_id'] == loan['account_id'])& (df_dispositions['type'] == 'OWNER')]
df_credit_card_disposition = df_cur_credit_cards.loc[df_cur_credit_cards['disp_id'] == df_loan_disposition.iloc[0]['disp_id']]
if (len(df_credit_card_disposition.index) == 1):
return df_credit_card_disposition.iloc[0]["type"]
else:
return "no credit card"
def get_feature_sex(loan):
df_loan_disposition = df_dispositions.loc[df_dispositions['account_id'] == loan['account_id']]
df_client_disposition = df_clients.loc[df_clients['client_id'] == df_loan_disposition.iloc[0]['client_id']]
trans_date = list(str(df_client_disposition.iloc[0]['birth_number']))
month = int(trans_date[2] + trans_date[3])
#print(month)
if (month > 12):
return 'F'
else:
return 'M'
def get_feature_age(loan):
df_loan_disposition = df_dispositions.loc[df_dispositions['account_id'] == loan['account_id']]
df_client_disposition = df_clients.loc[df_clients['client_id'] == df_loan_disposition.iloc[0]['client_id']]
trans_date = list(str(df_client_disposition.iloc[0]['birth_number']))
year = int(trans_date[0] + trans_date[1])
age = 97 - year
return age
df_train = pd.read_csv(r'C:\Users\39327\Desktop\ARTIFICIAL INTELLIGENCE\YEAR 2\SEMESTER 1 (PORTO)\KE & ML\loan_train.csv',sep=';')
df_test = pd.read_csv(r'C:\Users\39327\Desktop\ARTIFICIAL INTELLIGENCE\YEAR 2\SEMESTER 1 (PORTO)\KE & ML\loan_test.csv',sep=';')
df_dispositions = pd.read_csv(r'C:\Users\39327\Desktop\ARTIFICIAL INTELLIGENCE\YEAR 2\SEMESTER 1 (PORTO)\KE & ML\disp.csv',sep=';')
df_clients = pd.read_csv(r'C:\Users\39327\Desktop\ARTIFICIAL INTELLIGENCE\YEAR 2\SEMESTER 1 (PORTO)\KE & ML\client.csv',sep=';')
df_districts = pd.read_csv(r'C:\Users\39327\Desktop\ARTIFICIAL INTELLIGENCE\YEAR 2\SEMESTER 1 (PORTO)\KE & ML\district.csv',sep=';')
df_all_transactions = pd.read_csv(r'C:\Users\39327\Desktop\ARTIFICIAL INTELLIGENCE\YEAR 2\SEMESTER 1 (PORTO)\KE & ML\trans_train.csv',sep=';')
df_all_transactions_test = pd.read_csv(r'C:\Users\39327\Desktop\ARTIFICIAL INTELLIGENCE\YEAR 2\SEMESTER 1 (PORTO)\KE & ML\trans_test.csv',sep=';')
df_credit_cards = pd.read_csv(r'C:\Users\39327\Desktop\ARTIFICIAL INTELLIGENCE\YEAR 2\SEMESTER 1 (PORTO)\KE & ML\card_train.csv', sep=';', header=0)
df_credit_cards_test = pd.read_csv(r'C:\Users\39327\Desktop\ARTIFICIAL INTELLIGENCE\YEAR 2\SEMESTER 1 (PORTO)\KE & ML\card_test.csv', sep=';', header=0)
'''
df_train = pd.read_csv(filepath_or_buffer='../input/to-loan-or-not-to-loan-that-is-the-question-7/public data/loan_train.csv', sep=';', header=0)
df_test = pd.read_csv(filepath_or_buffer='../input/to-loan-or-not-to-loan-that-is-the-question-7/public data/loan_test.csv', sep=';', header=0)
df_dispositions = pd.read_csv(filepath_or_buffer='../input/to-loan-or-not-to-loan-that-is-the-question-7/public data/disp.csv', sep=';', header=0)
df_clients = pd.read_csv(filepath_or_buffer='../input/to-loan-or-not-to-loan-that-is-the-question-7/public data/client.csv', sep=';', header=0)
df_districts = pd.read_csv(filepath_or_buffer='../input/to-loan-or-not-to-loan-that-is-the-question-7/public data/district.csv', sep=';', header=0)
df_all_transactions = pd.read_csv(filepath_or_buffer='../input/to-loan-or-not-to-loan-that-is-the-question-7/public data/trans_train.csv', sep=';', header=0)
df_all_transactions_test = pd.read_csv(filepath_or_buffer='../input/to-loan-or-not-to-loan-that-is-the-question-7/public data/trans_test.csv', sep=';', header=0)
df_credit_cards = pd.read_csv(filepath_or_buffer='../input/to-loan-or-not-to-loan-that-is-the-question-7/public data/card_train.csv', sep=';', header=0)
df_credit_cards_test = pd.read_csv(filepath_or_buffer='../input/to-loan-or-not-to-loan-that-is-the-question-7/public data/card_test.csv', sep=';', header=0)
'''
df_train_processed = pd.DataFrame(columns=['amount', 'duration', 'payments', 'balance_monthlypayment_percentage', 'average_no_crimes_per_100_habitants', 'average_unemployment_rate', 'proportion_avgsalary_monthlypayments','account_credit_Card_type','sex','age', 'status'])
df_test_processed = pd.DataFrame(columns=['amount', 'duration', 'payments', 'balance_monthlypayment_percentage', 'average_no_crimes_per_100_habitants', 'average_unemployment_rate', 'proportion_avgsalary_monthlypayments','account_credit_Card_type','sex','age', 'loan_id'])
for index_loan, loan in tqdm(df_train.iterrows()):
df_train_processed.loc[index_loan] = [loan['amount'], loan['duration'], loan['payments'], get_feature_balance_to_montlhypayment_percentage(loan,False), get_feature_average_no_crimes_per_100_habitants(loan), get_feature_average_unemployment_rate(loan), get_feature_proportion_avgsalary_monthlypayments(loan),get_feature_account_credit_Card_type(loan,False),get_feature_sex(loan),get_feature_age(loan), loan['status']]
#print(df_train_processed)
for index_loan, loan in tqdm(df_test.iterrows()):
df_test_processed.loc[index_loan] = [loan['amount'], loan['duration'], loan['payments'], get_feature_balance_to_montlhypayment_percentage(loan,True), get_feature_average_no_crimes_per_100_habitants(loan), get_feature_average_unemployment_rate(loan), get_feature_proportion_avgsalary_monthlypayments(loan),get_feature_account_credit_Card_type(loan,True),get_feature_sex(loan),get_feature_age(loan), loan['loan_id']]
#print(df_test_processed)
df_data = pd.get_dummies(df_train_processed.drop(columns=['status']), columns=['account_credit_Card_type','sex'])
df_target = df_train_processed[['status']]
df_target = df_target.astype(int)
df_test_target = pd.get_dummies(df_test_processed.drop(columns=['loan_id']), columns=['account_credit_Card_type','sex'])
df_test_id = df_test_processed[['loan_id']]
#UP-SAMPLING STEP
df_merged = pd.concat([df_data,df_target],axis=1)
# Separate majority and minority classes
df_merged_majority = df_merged[df_merged.status==1]
df_merged_minority = df_merged[df_merged.status==-1]
df_minority_upsampled = resample(df_merged_minority,
replace=True, # sample with replacement
n_samples=282, # to match majority class
random_state=123) # reproducible results
# Combine majority class with upsampled minority class
df_upsampled = pd.concat([df_merged_majority, df_minority_upsampled])
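# A minimal sketch of how the balanced frame could feed a classifier. The estimator choice
# and the column re-alignment below are assumptions for illustration, not part of the
# original script (which only shows the up-sampling step); `resample` above is assumed to
# come from sklearn.utils.
from sklearn.linear_model import LogisticRegression
X_bal = df_upsampled.drop(columns=['status'])
y_bal = df_upsampled['status'].astype(int)
clf = LogisticRegression(max_iter=1000).fit(X_bal, y_bal)
# Align the one-hot columns of the test frame with the training frame before predicting
X_test_aligned = df_test_target.reindex(columns=X_bal.columns, fill_value=0)
test_probabilities = clf.predict_proba(X_test_aligned)[:, 1]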
#!/usr/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generates a plot of crossword statistics generated by the crossword crate
It expects two positional arguments:
1. The path to a CSV file generated from the crossword crate
2. The output path and filename where the rendered plot should be saved. Both SVG and PNG formats
are supported.
"""
import datetime
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import sys
def parse_data(csv_path):
"""Parse crossword database stored at the given path into a pandas DataFrame. The DataFrame
only contains solve data for unaided, solved puzzles and is sorted by the index, the time when
each puzzle was solved.
Interesting columns in the returned DataFrame:
solve_time_secs
weekday
"""
    df = pd.read_csv(csv_path, parse_dates=["date"], index_col="date")
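    # A hedged sketch of the remaining steps, inferred from the docstring above; the
    # "cheated" flag and the day-name derivation are assumptions, not confirmed columns.
    if "cheated" in df.columns:
        df = df[~df["cheated"]]
    df = df[df["solve_time_secs"].notna()].sort_index()
    df["weekday"] = df.index.day_name()
    return df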
# -*- coding: utf-8 -*-
"""
Created on Thu May 23 13:51:15 2019
@author: Lieke
"""
import os
import numpy as np
import pandas as pd
import time as tm
import rpy2.robjects as robjects
import tensorflow as tf
import math
import scipy.io as sio
import optunity as opt
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.ops import resources
import SparseMatrix as sm
def run_LAmbDA(DataPath, LabelsPath, CV_RDataPath, OutputDir, GeneOrderPath = "", NumGenes = 0):
'''
run LAmbDA classifier
Wrapper script to run LAmbDA on a benchmark dataset with 5-fold cross validation,
outputs lists of true and predicted cell labels as csv files, as well as computation time.
Parameters
----------
DataPath : Data file path (.csv), cells-genes matrix with cell unique barcodes
as row names and gene names as column names.
LabelsPath : Cell population annotations file path (.csv).
CV_RDataPath : Cross validation RData file path (.RData), obtained from Cross_Validation.R function.
OutputDir : Output directory defining the path of the exported file.
GeneOrderPath : Gene order file path (.csv) obtained from feature selection,
defining the genes order for each cross validation fold, default is NULL.
NumGenes : Number of genes used in case of feature selection (integer), default is 0.
'''
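    # Example invocation (a sketch only; every path below is a hypothetical placeholder):
    #   run_LAmbDA("Data.csv", "Labels.csv", "CV_folds.RData", "Results/",
    #              GeneOrderPath="rank_genes.csv", NumGenes=500)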
# read the Rdata file
robjects.r['load'](CV_RDataPath)
nfolds = np.array(robjects.r['n_folds'], dtype = 'int')
tokeep = np.array(robjects.r['Cells_to_Keep'], dtype = 'bool')
col = np.array(robjects.r['col_Index'], dtype = 'int')
col = col - 1
test_ind = np.array(robjects.r['Test_Idx'])
train_ind = np.array(robjects.r['Train_Idx'])
# read the data
data = sm.importMM(DataPath)
labels = pd.read_csv(LabelsPath, header=0,index_col=None, sep=',', usecols = col)
labels = labels.iloc[tokeep]
data = data.iloc[tokeep]
data = data.fillna("0").astype(int)
# read the feature file
if (NumGenes > 0):
features = pd.read_csv(GeneOrderPath,header=0,index_col=None, sep=',')
# folder with results
os.chdir(OutputDir)
tr_time=[]
ts_time=[]
truelab = np.zeros([len(labels),1],dtype = int)
predlab = np.zeros([len(labels),1],dtype = int)
for i in range(np.squeeze(nfolds)):
global X, Y, Gnp, Dnp, train, test, prt, cv
test_ind_i = np.array(test_ind[i], dtype = 'int') - 1
train_ind_i = np.array(train_ind[i], dtype = 'int') - 1
X = np.array(data)
if (NumGenes > 0):
X = np.log2(X/10+1)
feat_to_use = features.iloc[0:NumGenes,i]
X = X[:,feat_to_use]
else:
X = np.log2(np.transpose(select_feats(np.transpose(X),0.5,80))/10+1)
uniq = np.unique(labels)
Y = np.zeros([len(labels),len(uniq)],int)
for j in range(len(uniq)):
Y[np.where(labels == uniq[j])[0],j] = 1
Y = np.array(Y)
Gnp = np.zeros([len(uniq),len(uniq)],int)
np.fill_diagonal(Gnp,1)
Gnp = np.array(Gnp)
Dnp = np.ones([len(uniq),1],int)
Dnp = np.array(Dnp)
train_samp = int(np.floor(0.75*len(train_ind_i)))
test_samp = len(train_ind_i) - train_samp
perm = np.random.permutation(len(train_ind_i))
train = perm[0:train_samp]
        test = perm[train_samp:train_samp+test_samp]
while(np.sum(np.sum(Y[train,:],0)<5)>0):
perm = np.random.permutation(X.shape[0])
train = perm[0:train_samp+1]
test = perm[train_samp+1:train_samp+test_samp+1]
cv = i
optunity_it = 0
prt = False
opt_params = None
start=tm.time()
opt_params, _, _ = opt.minimize(run_LAmbDA2,solver_name='sobol', gamma=[0.8,1.2], delta=[0.05,0.95], tau=[10.0,11.0], prc_cut=[20,50], bs_prc=[0.2,0.6], num_trees=[10,200], max_nodes=[100,1000], num_evals=50)
tr_time.append(tm.time()-start)
print("Finished training!")
prt = True
train = train_ind_i
test = test_ind_i
start=tm.time()
err = run_LAmbDA2(opt_params['gamma'], opt_params['delta'], opt_params['tau'], opt_params['prc_cut'], opt_params['bs_prc'], opt_params['num_trees'], opt_params['max_nodes'])
ts_time.append(tm.time()-start)
tf.reset_default_graph();
predfile = 'preds_cv' + str(cv) + '.mat'
truefile = 'truth_cv' + str(cv) + '.mat'
pred = sio.loadmat(predfile)
truth = sio.loadmat(truefile)
pred = pred['preds']
truth = truth['labels']
pred_ind = np.argmax(pred,axis=1)
truth_ind = np.argmax(truth,axis=1)
predlab[test_ind_i,0] = pred_ind
truelab[test_ind_i,0] = truth_ind
truelab = pd.DataFrame(truelab)
predlab = pd.DataFrame(predlab)
    tr_time = pd.DataFrame(tr_time)
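    # A hedged sketch of the export step described in the docstring ("outputs lists of true
    # and predicted cell labels as csv files, as well as computation time"); the exact file
    # names are assumptions.
    ts_time = pd.DataFrame(ts_time)
    truelab.to_csv("LAmbDA_True_Labels.csv", index=False)
    predlab.to_csv("LAmbDA_Pred_Labels.csv", index=False)
    tr_time.to_csv("LAmbDA_Training_Time.csv", index=False)
    ts_time.to_csv("LAmbDA_Testing_Time.csv", index=False)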
#!/usr/bin/env python
# coding: utf-8
# **About the Competition:**
#
# [Avito](https://www.avito.ru/), Russia’s largest classified advertisements website, is hosting its fourth Kaggle competition. The challenge is to predict demand for an online advertisement based on its full description (title, description, images, etc.), its context (geographically where it was posted, similar ads already posted) and historical demand for similar ads in similar contexts.
#
# **About the Notebook:**
#
# One more exciting competition is ahead, and this one involves both NLP (text data in Russian) and image data along with numerical features. In this notebook, let us get into the basic data exploration using Python.
#
# Thanks to [Yandex Translate](https://translate.yandex.com/), I was able to get English names for the Russian ones and used them wherever possible. Most of the plots are built with Plotly, so please hover over them to see more details.
# In[26]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import preprocessing, model_selection, metrics
import lightgbm as lgb
color = sns.color_palette()
get_ipython().run_line_magic('matplotlib', 'inline')
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999
# Now let us look at the input files present in the dataset.
# In[2]:
from subprocess import check_output
print(check_output(["ls", "../input/"]).decode("utf8"))
# The description of the data files from the data page:
#
# * train.csv - Train data.
# * test.csv - Test data. Same schema as the train data, minus deal_probability.
# * train_active.csv - Supplemental data from ads that were displayed during the same period as train.csv. Same schema as the train data, minus deal_probability.
# * test_active.csv - Supplemental data from ads that were displayed during the same period as test.csv. Same schema as the train data, minus deal_probability.
# * periods_train.csv - Supplemental data showing the dates when the ads from train_active.csv were activated and when they were displayed.
# * periods_test.csv - Supplemental data showing the dates when the ads from test_active.csv were activated and when they were displayed. Same schema as periods_train.csv, except that the item ids map to an ad in test_active.csv.
# * train_jpg.zip - Images from the ads in train.csv.
# * test_jpg.zip - Images from the ads in test.csv.
# * sample_submission.csv - A sample submission in the correct format.
#
# Let us start with the train file.
# In[3]:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
test_df = pd.read_csv("../input/test.csv", parse_dates=["activation_date"])
print("Train file rows and columns are : ", train_df.shape)
print("Test file rows and columns are : ", test_df.shape)
# In[4]:
train_df.head()
# The train dataset description is as follows:
#
# * item_id - Ad id.
# * user_id - User id.
# * region - Ad region.
# * city - Ad city.
# * parent_category_name - Top level ad category as classified by Avito's ad model.
# * category_name - Fine grain ad category as classified by Avito's ad model.
# * param_1 - Optional parameter from Avito's ad model.
# * param_2 - Optional parameter from Avito's ad model.
# * param_3 - Optional parameter from Avito's ad model.
# * title - Ad title.
# * description - Ad description.
# * price - Ad price.
# * item_seq_number - Ad sequential number for user.
# * activation_date- Date ad was placed.
# * user_type - User type.
# * image - Id code of image. Ties to a jpg file in train_jpg. Not every ad has an image.
# * image_top_1 - Avito's classification code for the image.
# * deal_probability - The target variable. This is the likelihood that an ad actually sold something. It's not possible to verify every transaction with certainty, so this column's value can be any float from zero to one.
#
# So deal probability is our target variable and is a float value between 0 and 1 as per the data page. Let us have a look at it.
# In[5]:
plt.figure(figsize=(12,8))
sns.distplot(train_df["deal_probability"].values, bins=100, kde=False)
plt.xlabel('Deal Probility', fontsize=12)
plt.title("Deal Probability Histogram", fontsize=14)
plt.show()
plt.figure(figsize=(8,6))
plt.scatter(range(train_df.shape[0]), np.sort(train_df['deal_probability'].values))
plt.xlabel('index', fontsize=12)
plt.ylabel('deal probability', fontsize=12)
plt.title("Deal Probability Distribution", fontsize=14)
plt.show()
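# The exact counts behind the statement below can be verified directly; this small
# sanity-check cell is an illustrative addition and was not part of the original notebook.
# In[ ]:
zero_deals = (train_df['deal_probability'] == 0).sum()
one_deals = (train_df['deal_probability'] == 1).sum()
print("Ads with deal probability of 0 : ", zero_deals)
print("Ads with deal probability of 1 : ", one_deals)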
# So almost 100K ads have a deal probability of 0 (meaning those ads did not sell anything) and a few ads have a probability of 1. The rest of the deal probabilities fall somewhere in between.
#
# **Region wise distribution of Ads:**
#
# Let us look at the region wise distribution of ads.
# In[6]:
from io import StringIO
temp_data = StringIO("""
region,region_en
Свердловская область, Sverdlovsk oblast
Самарская область, Samara oblast
Ростовская область, Rostov oblast
Татарстан, Tatarstan
Волгоградская область, Volgograd oblast
Нижегородская область, Nizhny Novgorod oblast
Пермский край, Perm Krai
Оренбургская область, Orenburg oblast
Ханты-Мансийский АО, Khanty-Mansi Autonomous Okrug
Тюменская область, Tyumen oblast
Башкортостан, Bashkortostan
Краснодарский край, Krasnodar Krai
Новосибирская область, Novosibirsk oblast
Омская область, Omsk oblast
Белгородская область, Belgorod oblast
Челябинская область, Chelyabinsk oblast
Воронежская область, Voronezh oblast
Кемеровская область, Kemerovo oblast
Саратовская область, Saratov oblast
Владимирская область, Vladimir oblast
Калининградская область, Kaliningrad oblast
Красноярский край, Krasnoyarsk Krai
Ярославская область, Yaroslavl oblast
Удмуртия, Udmurtia
Алтайский край, Altai Krai
Иркутская область, Irkutsk oblast
Ставропольский край, Stavropol Krai
Тульская область, Tula oblast
""")
region_df = pd.read_csv(temp_data)
train_df = pd.merge(train_df, region_df, how="left", on="region")
# In[7]:
temp_series = train_df['region_en'].value_counts()
labels = (np.array(temp_series.index))
sizes = (np.array((temp_series / temp_series.sum())*100))
trace = go.Pie(labels=labels, values=sizes)
layout = go.Layout(
title='Region distribution',
width=900,
height=900,
)
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="region")
# The regions' share of ads ranges from 1.71% to 9.41%. So the top regions are:
# 1. Krasnodar region - 9.41%
# 2. Sverdlovsk region - 6.28%
# 3. Rostov region - 5.99%
#
# In[8]:
plt.figure(figsize=(12,8))
sns.boxplot(y="region_en", x="deal_probability", data=train_df)
plt.xlabel('Deal probability', fontsize=12)
plt.ylabel('Region', fontsize=12)
plt.title("Deal probability by region")
plt.xticks(rotation='vertical')
plt.show()
# **City wise distribution of Ads:**
#
# Now let us have a look at the top 20 cities present in the dataset.
# In[9]:
cnt_srs = train_df['city'].value_counts().head(20)
trace = go.Bar(
y=cnt_srs.index[::-1],
x=cnt_srs.values[::-1],
orientation = 'h',
marker=dict(
color=cnt_srs.values[::-1],
colorscale = 'Blues',
reversescale = True
),
)
layout = dict(
title='City distribution of Ads',
)
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="CityAds")
# So the top cities where the ads are shown are
# 1. Krasnodar
# 2. Ekaterinburg
# 3. Novosibirsk
# **Parent Category Name:**
#
# Now let us look at the distribution of parent cateory names.
# In[10]:
temp_data = StringIO("""
parent_category_name,parent_category_name_en
Личные вещи,Personal belongings
Для дома и дачи,For the home and garden
Бытовая электроника,Consumer electronics
Недвижимость,Real estate
Хобби и отдых,Hobbies & leisure
Транспорт,Transport
Услуги,Services
Животные,Animals
Для бизнеса,For business
""")
temp_df = pd.read_csv(temp_data)
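# Following the same pattern as the region mapping above, the English parent-category names
# can be attached to the training frame (a hedged completion of the truncated cell):
train_df = pd.merge(train_df, temp_df, how="left", on="parent_category_name")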
"""Parallelized, single-point launch script to run DSR or GP on a set of benchmarks."""
import os
import sys
import json
import time
from datetime import datetime
import multiprocessing
from copy import deepcopy
from functools import partial
from pkg_resources import resource_filename
import zlib
import click
import numpy as np
import pandas as pd
from sympy.parsing.sympy_parser import parse_expr
from sympy import srepr
from dsr.program import Program
from dsr.dataset import Dataset
from dsr.baselines import gpsr
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
def train_dsr(name_and_seed, config_dataset, config_controller, config_training):
"""Trains DSR and returns dict of reward, expression, and traversal"""
name, seed = name_and_seed
try:
import tensorflow as tf
from dsr.controller import Controller
from dsr.train import learn
# Ignore TensorFlow warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
except:
pass
start = time.time()
# Rename the output file
config_training["output_file"] = "dsr_{}_{}.csv".format(name, seed)
# Define the dataset and library
dataset = get_dataset(name, config_dataset)
Program.clear_cache()
Program.set_training_data(dataset)
Program.set_library(dataset.function_set, dataset.n_input_var)
tf.reset_default_graph()
# Shift actual seed by checksum to ensure it's different across different benchmarks
tf.set_random_seed(seed + zlib.adler32(name.encode("utf-8")))
with tf.Session() as sess:
# Instantiate the controller
controller = Controller(sess, debug=config_training["debug"], summary=config_training["summary"], **config_controller)
# Train the controller
result = learn(sess, controller, **config_training) # r, base_r, expression, traversal
result["name"] = name
result["t"] = time.time() - start
result["seed"] = seed
return result
def train_gp(name_and_seed, logdir, config_dataset, config_gp):
"""Trains GP and returns dict of reward, expression, and program"""
name, seed = name_and_seed
config_gp["seed"] = seed + zlib.adler32(name.encode("utf-8"))
start = time.time()
# Load the dataset
dataset = get_dataset(name, config_dataset)
# Fit the GP
gp = gpsr.GP(dataset=dataset, **config_gp)
p, logbook = gp.train()
# Retrieve results
r = base_r = p.fitness.values[0]
r_test = base_r_test = gp.eval_test(p)[0]
str_p = str(p)
nmse = gp.nmse(p)
r_noiseless = base_r_noiseless = gp.eval_train_noiseless(p)[0]
r_test_noiseless = base_r_test_noiseless = gp.eval_test_noiseless(p)[0]
# Many failure cases right now for converting to SymPy expression
try:
expression = repr(parse_expr(str_p.replace("X", "x").replace("add", "Add").replace("mul", "Mul")))
except:
expression = "N/A"
# Save run details
drop = ["gen", "nevals"]
df_fitness = pd.DataFrame(logbook.chapters["fitness"]).drop(drop, axis=1)
df_fitness = df_fitness.rename({"avg" : "fit_avg", "min" : "fit_min"}, axis=1)
df_fitness["fit_best"] = df_fitness["fit_min"].cummin()
df_len = pd.DataFrame(logbook.chapters["size"]).drop(drop, axis=1)
df_len = df_len.rename({"avg" : "l_avg"}, axis=1)
df = pd.concat([df_fitness, df_len], axis=1, sort=False)
df.to_csv(os.path.join(logdir, "gp_{}_{}.csv".format(name, seed)), index=False)
result = {
"name" : name,
"nmse" : nmse,
"r" : r,
"base_r" : base_r,
"r_test" : r_test,
"base_r_test" : base_r_test,
"r_noiseless" : r_noiseless,
"base_r_noiseless" : base_r_noiseless,
"r_test_noiseless" : r_test_noiseless,
"base_r_test_noiseless" : base_r_test_noiseless,
"expression" : expression,
"traversal" : str_p,
"t" : time.time() - start,
"seed" : seed
}
return result
def get_dataset(name, config_dataset):
"""Creates and returns the dataset"""
config_dataset["name"] = name
dataset = Dataset(**config_dataset)
return dataset
@click.command()
@click.argument('config_template', default="config.json")
@click.option('--method', default="dsr", type=click.Choice(["dsr", "gp"]), help="Symbolic regression method")
@click.option('--mc', default=1, type=int, help="Number of Monte Carlo trials for each benchmark")
@click.option('--output_filename', default=None, help="Filename to write results")
@click.option('--num_cores', default=multiprocessing.cpu_count(), help="Number of cores to use")
@click.option('--seed_shift', default=0, type=int, help="Integer to add to each seed (i.e. to combine multiple runs)")
@click.option('--benchmark', '--b', '--only', multiple=True, type=str, help="Benchmark or benchmark prefix to include")
def main(config_template, method, mc, output_filename, num_cores, seed_shift, benchmark):
"""Runs DSR or GP on multiple benchmarks using multiprocessing."""
# Load the config file
with open(config_template, encoding='utf-8') as f:
config = json.load(f)
config_dataset = config["dataset"] # Problem specification parameters
config_training = config["training"] # Training hyperparameters
if "controller" in config:
config_controller = config["controller"] # Controller hyperparameters
if "gp" in config:
config_gp = config["gp"] # GP hyperparameters
# Create output directories
if output_filename is None:
output_filename = "benchmark_{}.csv".format(method)
timestamp = datetime.now().strftime("%Y-%m-%d-%H%M%S")
config_training["logdir"] += "_" + timestamp
logdir = config_training["logdir"]
os.makedirs(logdir, exist_ok=True)
output_filename = os.path.join(logdir, output_filename)
# Load the benchmark names
data_path = resource_filename("dsr", "data/")
benchmark_path = os.path.join(data_path, config_dataset["file"])
df = pd.read_csv(benchmark_path, encoding="ISO-8859-1")
names = df["name"].to_list()
# Load raw dataset names
# HACK: Exclude "benchmark" names
for f in os.listdir(data_path):
if f.endswith(".csv") and "benchmarks" not in f and "function_sets" not in f:
names.append(f.split('.')[0])
# Load raw dataset from external directory in config
if "extra_data_dir" in config_dataset:
if not config_dataset["extra_data_dir"] == None:
for f in os.listdir(config_dataset["extra_data_dir"]):
if f.endswith(".csv"):
names.append(f.split('.')[0])
# Parse the ground-truth sympy expressions, then keep only the requested benchmarks
expressions = [parse_expr(e) for e in df["sympy"]]
if len(benchmark) > 0:
keep = [False]*len(names)
for included_name in benchmark:
if '-' in included_name:
keep = [True if included_name == n else k for k,n in zip(keep, names)]
else:
keep = [True if n.startswith(included_name) else k for k,n in zip(keep, names)]
names = [n for k,n in zip(keep, names) if k]
unique_names = names.copy()
names *= mc
# When passed to RNGs, these seeds will actually be added to checksums on the name
seeds = (np.arange(mc) + seed_shift).repeat(len(unique_names)).tolist()
names_and_seeds = list(zip(names, seeds))
if num_cores > len(names):
print("Setting 'num_cores' to {} for batch because there are only {} expressions.".format(len(names), len(names)))
num_cores = len(names)
if method == "dsr":
if config_training["verbose"] and num_cores > 1:
print("Setting 'verbose' to False for parallelized run.")
config_training["verbose"] = False
if config_training["num_cores"] != 1 and num_cores > 1:
print("Setting 'num_cores' to 1 for training (i.e. constant optimization) to avoid nested child processes.")
config_training["num_cores"] = 1
print("Running {} for n={} on benchmarks {}".format(method, mc, unique_names))
# Write terminal command and config.json into log directory
cmd_filename = os.path.join(logdir, "cmd.out")
with open(cmd_filename, 'w') as f:
print(" ".join(sys.argv), file=f)
config_filename = os.path.join(logdir, "config.json")
with open(config_filename, 'w') as f:
json.dump(config, f, indent=4)
# Define the work
if method == "dsr":
work = partial(train_dsr, config_dataset=config_dataset, config_controller=config_controller, config_training=config_training)
elif method == "gp":
work = partial(train_gp, logdir=logdir, config_dataset=config_dataset, config_gp=config_gp)
# Farm out the work
columns = ["name", "nmse", "base_r", "r", "base_r_test", "r_test", "base_r_noiseless", "r_noiseless", "base_r_test_noiseless", "r_test_noiseless", "expression", "traversal", "t", "seed"]
pd.DataFrame(columns=columns).to_csv(output_filename, header=True, index=False)
if num_cores > 1:
pool = multiprocessing.Pool(num_cores)
for result in pool.imap_unordered(work, names_and_seeds):
            # Append each completed result to the shared CSV (header written above); the
            # append-mode write is an inferred completion of the truncated source line.
            pd.DataFrame(result, columns=columns, index=[0]).to_csv(output_filename, header=False, mode='a', index=False)
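    else:
        # A hedged sketch of the single-core path, which is truncated in the source:
        # run the work function serially and append each result to the same CSV.
        for name_and_seed in names_and_seeds:
            result = work(name_and_seed)
            pd.DataFrame(result, columns=columns, index=[0]).to_csv(output_filename, header=False, mode='a', index=False)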
import math
import itertools
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import scipy.stats as ss
import scikit_posthocs as sp
from dash_table.Format import Format, Scheme
from Bio import Phylo
from ete3 import Tree
from plotly.subplots import make_subplots
# -------------------------------------------------------------------------------------
# --------------------------------------- Classes -------------------------------------
class DrawTree():
def __init__(self, newicktree, template, topology, color_map, branch_len, font_family):
self.newicktree = Phylo.read(newicktree, "newick")
self.template = template
self.topology = topology
self.color_map = color_map
self.branch_len = branch_len
self.font_family = font_family
def create_square_tree(self):
def get_x_coordinates(tree):
"""Associates to each clade an x-coord.
returns dict {clade: x-coord}
"""
if self.branch_len:
xcoords = tree.depths(unit_branch_lengths=True)
else:
xcoords = tree.depths()
# tree.depth() maps tree clades to depths (by branch length).
# returns a dict {clade: depth} where clade runs over all Clade instances of the tree, and depth is the distance from root to clade
# If there are no branch lengths, assign unit branch lengths
if not max(xcoords.values()):
xcoords = tree.depths(unit_branch_lengths=True)
return xcoords
def get_y_coordinates(tree, dist=1.3):
"""
returns dict {clade: y-coord}
The y-coordinates are (float) multiple of integers (i*dist below)
dist depends on the number of tree leafs
"""
maxheight = tree.count_terminals() # Counts the number of tree leafs.
# Rows are defined by the tips/leafs
ycoords = dict(
(leaf, maxheight - i * dist)
for i, leaf in enumerate(reversed(tree.get_terminals()))
)
def calc_row(clade):
for subclade in clade:
if subclade not in ycoords:
calc_row(subclade)
# This is intermediate placement of internal nodes
ycoords[clade] = (ycoords[clade.clades[0]] + ycoords[clade.clades[-1]]) / 2
if tree.root.clades:
calc_row(tree.root)
return ycoords
def get_clade_lines(
orientation="horizontal",
y_curr=0,
x_start=0,
x_curr=0,
y_bot=0,
y_top=0,
line_color="white",
line_width=2,
root_clade = False
):
"""define a shape of type 'line', for branch
"""
branch_line = dict(
type="line", layer="below", line=dict(color=line_color, width=line_width)
)
if root_clade:
branch_line.update(x0=-0.01, y0=y_curr, x1=-0.01, y1=y_curr)
return branch_line
elif orientation == "horizontal":
branch_line.update(x0=x_start, y0=y_curr, x1=x_curr, y1=y_curr)
elif orientation == "vertical":
branch_line.update(x0=x_curr, y0=y_bot, x1=x_curr, y1=y_top)
else:
raise ValueError("Line type can be 'horizontal' or 'vertical'")
return branch_line
def draw_clade(
clade,
x_start,
line_shapes,
line_color="white",
line_width=2,
x_coords=0,
y_coords=0,
init_clade=False,
):
"""Recursively draw the tree branches, down from the given clade"""
x_curr = x_coords[clade]
y_curr = y_coords[clade]
# Draw a horizontal line from start to here
if init_clade:
branch_line = get_clade_lines(
orientation="horizontal",
y_curr=y_curr,
x_start=x_start,
x_curr=x_curr,
line_color=line_color,
line_width=line_width,
root_clade=True,
)
else:
branch_line = get_clade_lines(
orientation="horizontal",
y_curr=y_curr,
x_start=x_start,
x_curr=x_curr,
line_color=line_color,
line_width=line_width,
root_clade=False,
)
line_shapes.append(branch_line)
if clade.clades:
# Draw a vertical line connecting all children
y_top = y_coords[clade.clades[0]]
y_bot = y_coords[clade.clades[-1]]
line_shapes.append(
get_clade_lines(
orientation="vertical",
x_curr=x_curr,
y_bot=y_bot,
y_top=y_top,
line_color=line_color,
line_width=line_width,
)
)
# Draw descendants
for child in clade:
draw_clade(child, x_curr, line_shapes,
x_coords=x_coords, y_coords=y_coords,
line_color=line_color)
if 'dark' in self.template:
text_color = 'white'
else:
text_color = 'black'
line_color = self.color_map[self.topology]
tree = self.newicktree
tree.ladderize()
x_coords = get_x_coordinates(tree)
y_coords = get_y_coordinates(tree)
line_shapes = []
draw_clade(
tree.root,
0,
line_shapes,
line_color=line_color,
line_width=2,
x_coords=x_coords,
y_coords=y_coords,
init_clade=True,
)
my_tree_clades = x_coords.keys()
X = []
Y = []
text = []
for cl in my_tree_clades:
X.append(x_coords[cl])
Y.append(y_coords[cl])
            # Label with the clade name when present; unnamed internal nodes get a blank label
            if not cl.name:
                text.append(" ")
            else:
                text.append(cl.name)
axis = dict(
showline=False,
visible=False,
zeroline=False,
showgrid=False,
showticklabels=False,
title="", # y title
)
label_legend = ["Tree_1"]
nodes = []
for elt in label_legend:
node = dict(
type="scatter",
x=X,
y=Y,
mode="markers+text",
marker=dict(color=text_color, size=5),
text=text, # vignet information of each node
textposition='middle right',
textfont=dict(color=text_color, size=12),
showlegend=False,
name=elt,
)
nodes.append(node)
# Set graph x-range
if self.branch_len:
x_range = [-0.5, (max(x_coords.values())+2)]
show_xaxis = False
elif max(x_coords.values()) < 0.1:
x_range = [0, (max(x_coords.values())+(max(x_coords.values())*1.25))]
show_xaxis = True
elif max(x_coords.values()) < 0.5:
x_range = [0, 0.5]
show_xaxis = True
elif max(x_coords.values()) < 1:
x_range = [0, 1]
show_xaxis = True
elif max(x_coords.values()) == 1:
x_range = [0, max(x_coords.values())+2]
show_xaxis = False
else:
x_range = [0, max(x_coords.values())+2]
show_xaxis = False
layout = dict(
autosize=True,
showlegend=False,
template=self.template,
dragmode="pan",
margin=dict(t=20, b=10, r=20, l=10),
xaxis=dict(
showline=True,
zeroline=False,
visible=show_xaxis,
showgrid=False,
showticklabels=True,
range=x_range,
),
yaxis=axis,
hovermode="closest",
shapes=line_shapes,
font=dict(family=self.font_family,size=14),
)
fig = go.Figure(data=nodes, layout=layout)
return fig
def create_angular_tree(self):
def get_x_coordinates(tree):
"""Associates to each clade an x-coord.
returns dict {clade: x-coord}
"""
# xcoords = tree.depths(unit_branch_lengths=True)
# print("===========================")
# nodes = [n for n in tree.find_clades()]
# nodes = tree.get_terminals() + tree.get_nonterminals()
# print(tree.root.clades)
# root_xcoord = {tree.root.clades[1]:0}
terminal_nodes = tree.get_terminals()
internal_nodes = tree.get_nonterminals()
terminal_xcoords = dict((leaf, i) for i, leaf in enumerate(terminal_nodes))
internal_xcoords = dict(
(leaf, i+0.5) for leaf, i in zip(internal_nodes, range(1, len(internal_nodes)))
)
xcoords = {**terminal_xcoords, **internal_xcoords}
# print(xcoords)
# print("===========================")
# tree.depth() maps tree clades to depths (by branch length).
# returns a dict {clade: depth} where clade runs over all Clade instances of the tree, and depth
# is the distance from root to clade
# If there are no branch lengths, assign unit branch lengths
if not max(xcoords.values()):
xcoords = tree.depths(unit_branch_lengths=True)
return xcoords
def get_y_coordinates(tree, dist=1):
"""
returns dict {clade: y-coord}
The y-coordinates are (float) multiple of integers (i*dist below)
dist depends on the number of tree leafs
"""
maxheight = tree.count_terminals() # Counts the number of tree leafs.
# Rows are defined by the tips/leafs
# root_ycoord = {tree.root:maxheight}
terminal_nodes = tree.get_terminals()
internal_nodes = tree.get_nonterminals()
terminal_ycoords = dict((leaf, 1) for _, leaf in enumerate(terminal_nodes))
internal_ycoords = dict(
(leaf, i) for leaf, i in zip(internal_nodes, reversed(range(1, len(internal_nodes))))
)
ycoords = {**terminal_ycoords, **internal_ycoords}
def calc_row(clade):
for subclade in clade:
if subclade not in ycoords:
calc_row(subclade)
ycoords[clade] = (ycoords[clade.clades[0]] +
ycoords[clade.clades[-1]]) / 2
if tree.root.clades:
calc_row(tree.root)
return ycoords
def get_clade_lines(
orientation="horizontal",
y_curr=0,
last_y_curr=0,
x_start=0,
x_curr=0,
y_bot=0,
y_top=0,
line_color="rgb(25,25,25)",
line_width=0.5,
init_flag=False,
):
"""define a shape of type 'line', for branch
"""
branch_line = dict(
type="line", layer="below", line=dict(color=line_color, width=line_width)
)
if orientation == "horizontal":
if init_flag:
branch_line.update(x0=x_start, y0=y_curr,
x1=x_curr, y1=y_curr)
else:
branch_line.update(
x0=x_start, y0=last_y_curr, x1=x_curr, y1=y_curr)
elif orientation == "vertical":
branch_line.update(x0=x_curr, y0=y_bot, x1=x_curr, y1=y_top)
else:
raise ValueError("Line type can be 'horizontal' or 'vertical'")
return branch_line
def draw_clade(
clade,
x_start,
line_shapes,
line_color="rgb(15,15,15)",
line_width=1,
x_coords=0,
y_coords=0,
last_clade_y_coord=0,
init_flag=True
):
"""Recursively draw the tree branches, down from the given clade"""
x_curr = x_coords[clade]
y_curr = y_coords[clade]
# Draw a horizontal line from start to here
branch_line = get_clade_lines(
orientation="horizontal",
y_curr=y_curr,
last_y_curr=last_clade_y_coord,
x_start=x_start,
x_curr=x_curr,
line_color=line_color,
line_width=line_width,
init_flag=init_flag,
)
line_shapes.append(branch_line)
if clade.clades:
# Draw descendants
for child in clade:
draw_clade(child, x_curr, line_shapes, x_coords=x_coords,
y_coords=y_coords, last_clade_y_coord=y_coords[clade],
init_flag=False, line_color=line_color)
if 'dark' in self.template:
text_color = 'white'
else:
text_color = 'black'
line_color = self.color_map[self.topology]
# Load in Tree object and ladderize
tree = self.newicktree
tree.ladderize()
# Get coordinates + put into dictionary
# dict(keys=clade_names, values=)
x_coords = get_x_coordinates(tree)
y_coords = get_y_coordinates(tree)
line_shapes = []
draw_clade(
tree.root,
0,
line_shapes,
line_color=line_color,
line_width=2,
x_coords=x_coords,
y_coords=y_coords,
)
#
my_tree_clades = x_coords.keys()
X = []
Y = []
text = []
for cl in my_tree_clades:
X.append(x_coords[cl])
Y.append(y_coords[cl])
# Add confidence values if internal node
if not cl.name:
text.append(cl.confidence)
else:
text.append(cl.name)
axis = dict(
showline=False,
zeroline=False,
showgrid=False,
visible=False,
showticklabels=False,
)
label_legend = ["Tree_1"]
nodes = []
for elt in label_legend:
node = dict(
type="scatter",
x=X,
y=Y,
mode="markers+text",
marker=dict(color=text_color, size=5),
text=text, # vignet information of each node
textposition='right',
textfont=dict(color=text_color, size=25),
showlegend=False,
name=elt,
)
nodes.append(node)
layout = dict(
template=self.template,
dragmode="select",
autosize=True,
showlegend=True,
xaxis=dict(
showline=True,
zeroline=False,
visible=False,
showgrid=False,
showticklabels=True,
range=[0, (max(x_coords.values())+2)]
),
yaxis=axis,
hovermode="closest",
shapes=line_shapes,
legend={"x": 0, "y": 1},
font=dict(family="Open Sans"),
)
fig = dict(data=nodes, layout=layout)
return fig
def create_circular_tree(self):
def get_circular_tree_data(tree, order='level', dist=1, start_angle=0, end_angle=360, start_leaf='first'):
"""Define data needed to get the Plotly plot of a circular tree
Source code found at: https://chart-studio.plotly.com/~empet/14834.embed
"""
# tree: an instance of Bio.Phylo.Newick.Tree or Bio.Phylo.PhyloXML.Phylogeny
# order: tree traversal method to associate polar coordinates to its nodes
# dist: the vertical distance between two consecutive leafs in the associated rectangular tree layout
# start_angle: angle in degrees representing the angle of the first leaf mapped to a circle
# end_angle: angle in degrees representing the angle of the last leaf
# the list of leafs mapped in anticlockwise direction onto circles can be tree.get_terminals()
# or its reversed version tree.get_terminals()[::-1].
# start leaf: is a keyword with two possible values"
# 'first': to map the leafs in the list tree.get_terminals() onto a circle,
# in the counter-clockwise direction
# 'last': to map the leafs in the list, tree.get_terminals()[::-1]
start_angle *= np.pi/180 # conversion to radians
end_angle *= np.pi/180
def get_radius(tree):
"""
Associates to each clade root its radius, equal to the distance from that clade to the tree root
returns dict {clade: node_radius}
"""
if self.branch_len:
node_radius = tree.depths(unit_branch_lengths=True)
else:
node_radius = tree.depths()
# If the tree did not record the branch lengths assign the unit branch length
# (ex: the case of a newick tree "(A, (B, C), (D, E))")
                if not np.count_nonzero(list(node_radius.values())):
node_radius = tree.depths(unit_branch_lengths=True)
return node_radius
def get_vertical_position(tree):
"""
returns a dict {clade: ycoord}, where y-coord is the cartesian y-coordinate
of a clade root in a rectangular phylogram
"""
n_leafs = tree.count_terminals() # Counts the number of tree leafs.
# Assign y-coordinates to the tree leafs
if start_leaf == 'first':
node_ycoord = dict((leaf, k) for k, leaf in enumerate(tree.get_terminals()))
elif start_leaf == 'last':
node_ycoord = dict((leaf, k) for k, leaf in enumerate(reversed(tree.get_terminals())))
else:
raise ValueError("start leaf can be only 'first' or 'last'")
def assign_ycoord(clade):#compute the y-coord for the root of this clade
for subclade in clade:
if subclade not in node_ycoord: # if the subclade root hasn't a y-coord yet
assign_ycoord(subclade)
node_ycoord[clade] = 0.5 * (node_ycoord[clade.clades[0]] + node_ycoord[clade.clades[-1]])
if tree.root.clades:
assign_ycoord(tree.root)
return node_ycoord
node_radius = get_radius(tree)
node_ycoord = get_vertical_position(tree)
y_vals = node_ycoord.values()
ymin, ymax = min(y_vals), max(y_vals)
ymin -= dist # this dist subtraction is necessary to avoid coincidence of the first and last leaf angle
# when the interval [ymin, ymax] is mapped onto [0, 2pi],
def ycoord2theta(y):
# maps an y in the interval [ymin-dist, ymax] to the interval [radian(start_angle), radian(end_angle)]
return start_angle + (end_angle - start_angle) * (y-ymin) / float(ymax-ymin)
def get_points_on_lines(linetype='radial', x_left=0, x_right=0, y_right=0, y_bot=0, y_top=0):
"""
- define the points that generate a radial branch and the circular arcs, perpendicular to that branch
- a circular arc (angular linetype) is defined by 10 points on the segment of ends
(x_bot, y_bot), (x_top, y_top) in the rectangular layout,
mapped by the polar transformation into 10 points that are spline interpolated
- returns for each linetype the lists X, Y, containing the x-coords, resp y-coords of the
line representative points
"""
if linetype == 'radial':
theta = ycoord2theta(y_right)
X = [x_left*np.cos(theta), x_right*np.cos(theta), None]
Y = [x_left*np.sin(theta), x_right*np.sin(theta), None]
elif linetype == 'angular':
theta_b = ycoord2theta(y_bot)
theta_t = ycoord2theta(y_top)
t = np.linspace(0,1, 10)# 10 points that span the circular arc
theta = (1-t) * theta_b + t * theta_t
X = list(x_right * np.cos(theta)) + [None]
Y = list(x_right * np.sin(theta)) + [None]
else:
raise ValueError("linetype can be only 'radial' or 'angular'")
return X,Y
def get_line_lists(clade, x_left, xlines, ylines, xarc, yarc):
"""Recursively compute the lists of points that span the tree branches"""
# xlines, ylines - the lists of x-coords, resp y-coords of radial edge ends
# xarc, yarc - the lists of points generating arc segments for tree branches
x_right = node_radius[clade]
y_right = node_ycoord[clade]
X,Y = get_points_on_lines(linetype='radial', x_left=x_left, x_right=x_right, y_right=y_right)
xlines.extend(X)
ylines.extend(Y)
if clade.clades:
y_top = node_ycoord[clade.clades[0]]
y_bot = node_ycoord[clade.clades[-1]]
X,Y = get_points_on_lines(linetype='angular', x_right=x_right, y_bot=y_bot, y_top=y_top)
xarc.extend(X)
yarc.extend(Y)
# get and append the lists of points representing the branches of the descedants
for child in clade:
get_line_lists(child, x_right, xlines, ylines, xarc, yarc)
xlines = []
ylines = []
xarc = []
yarc = []
get_line_lists(tree.root, 0, xlines, ylines, xarc, yarc)
xnodes = []
ynodes = []
for clade in tree.find_clades(order='preorder'): #it was 'level'
theta = ycoord2theta(node_ycoord[clade])
xnodes.append(node_radius[clade]*np.cos(theta))
ynodes.append(node_radius[clade]*np.sin(theta))
return xnodes, ynodes, xlines, ylines, xarc, yarc
if 'dark' in self.template:
text_color = 'white'
else:
text_color = 'black'
line_color = self.color_map[self.topology]
tree = self.newicktree
tree.ladderize()
traverse_order = 'preorder'
all_clades=list(tree.find_clades(order=traverse_order))
for k in range(len((all_clades))):
all_clades[k].id=k
xnodes, ynodes, xlines, ylines, xarc, yarc = get_circular_tree_data(tree, order=traverse_order, start_leaf='last')
tooltip=[]
clade_names=[]
color=[]
for clade in tree.find_clades(order=traverse_order):
if self.branch_len:
branch_length = 1
else:
branch_length = clade.branch_length
if clade.name and clade.confidence and clade.branch_length:
tooltip.append(f"name: {clade.name}<br>branch-length: {branch_length}\
<br>confidence: {int(clade.confidence)}")
                color.append(clade.confidence)
clade_names.append(clade.name)
elif clade.name is None and clade.branch_length is not None and clade.confidence is not None:
color.append(clade.confidence)
clade_names.append(clade.name)
tooltip.append(f"branch-length: {branch_length}\
<br>confidence: {int(clade.confidence)}")
elif clade.name and clade.branch_length and clade.confidence is None:
tooltip.append(f"name: {clade.name}<br>branch-length: {branch_length}")
color.append(-1)
clade_names.append(clade.name)
else:
tooltip.append('')
color.append(-1)
clade_names.append(clade.name)
trace_nodes=dict(type='scatter',
x=xnodes,
y= ynodes,
mode='markers+text',
marker=dict(color=text_color, size=8),
text=clade_names,
textposition='top center',
textfont=dict(color=text_color, size=12),
hoverinfo='text',
hovertemplate=tooltip,
)
trace_radial_lines=dict(type='scatter',
x=xlines,
y=ylines,
mode='lines',
line=dict(color=line_color, width=1),
hoverinfo='none',
)
trace_arcs=dict(type='scatter',
x=xarc,
y=yarc,
mode='lines',
line=dict(color=line_color, width=1, shape='spline'),
hoverinfo='none',
)
layout=dict(
font=dict(family=self.font_family,size=14),
autosize=True,
showlegend=False,
template=self.template,
xaxis=dict(visible=False),
yaxis=dict(visible=False),
hovermode='closest',
margin=dict(t=20, b=10, r=20, l=10, pad=20),
)
fig = go.Figure(data=[trace_radial_lines, trace_arcs, trace_nodes], layout=layout)
return fig
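# Example construction (a sketch; the file name, template, topology ID, and colour below
# are hypothetical): DrawTree("tree.nwk", "plotly_dark", "Tree1", {"Tree1": "#1f77b4"},
# branch_len=False, font_family="Arial").create_square_tree()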
class RFDistance():
def __init__(self, t1, t2):
self.t1 = Tree(t1)
self.t2 = Tree(t2)
self.compare = self.t1.compare(self.t2)
def NormRF(self):
return self.compare['norm_rf']
def RF(self):
return self.compare['rf']
def MaxRF(self):
return self.compare['max_rf']
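# Minimal usage sketch (the newick strings below are illustrative only):
# rf = RFDistance("((A,B),(C,D));", "((A,C),(B,D));")
# print(rf.RF(), rf.MaxRF(), rf.NormRF())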
# -------------------------------------------------------------------------------------
# ------------------------------ Alt Data Graph Functions -----------------------------
def make_alt_data_str_figure(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
dataRange,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
whole_genome,
):
# sort dataframe
topology_df.sort_values(by=["Window"], inplace=True)
topology_df.fillna("NULL", inplace=True)
# Build graph
if whole_genome:
fig = px.histogram(
topology_df,
x="Window",
y=[1]*len(topology_df),
category_orders={"Chromosome": chromosome_df['Chromosome']},
color=alt_data_to_graph,
color_discrete_sequence=list(color_mapping.values()),
nbins=int(chromosome_df["End"].max()/window_size),
facet_row="Chromosome",
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_layout(
template=template,
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0
),
title={
'text': str(alt_data_to_graph),
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
},
hovermode="x unified",
font=dict(family=font_family,),
height=100*len(topology_df["Chromosome"].unique())
)
else:
fig = px.histogram(
topology_df,
x="Window",
y=[1]*len(topology_df),
color=alt_data_to_graph,
color_discrete_sequence=list(color_mapping.values()),
nbins=int(chromosome_df["End"].max()/window_size),
)
fig.update_layout(
template=template,
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0
),
title={
'text': str(alt_data_to_graph),
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
},
hovermode="x unified",
font=dict(family=font_family,),
)
if dataRange:
fig.update_xaxes(
title="Position",
range=dataRange,
showline=True,
showgrid=xaxis_gridlines,
linewidth=axis_line_width,
)
else:
fig.update_xaxes(
title="Position",
showline=True,
showgrid=xaxis_gridlines,
linewidth=axis_line_width,
)
fig.update_yaxes(
title="y-axis",
range=[0, 1],
nticks=1,
showline=True,
showgrid=yaxis_gridlines,
linewidth=axis_line_width,
)
return fig
def make_alt_data_int_figure(
alt_data_to_graph,
color_mapping,
topology_df,
chromosome_df,
template,
dataRange,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
whole_genome,
):
# sort dataframe
topology_df = topology_df.sort_values(by=["Window"])
y_range = [0, (y_max*1.1)]
# Build graph
if whole_genome:
fig = px.line(
topology_df,
x="Window",
y=alt_data_to_graph,
category_orders={"Chromosome": chromosome_df['Chromosome']},
color_discrete_sequence=list(color_mapping.values()),
facet_row="Chromosome",
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_layout(
template=template,
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
title={
'text': str(alt_data_to_graph),
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
},
hovermode="x unified",
font=dict(family=font_family,),
height=100*len(topology_df["Chromosome"].unique()),
)
else:
fig = px.line(
topology_df,
x="Window",
y=alt_data_to_graph,
color_discrete_sequence=list(color_mapping.values()),
)
fig.update_layout(
template=template,
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
title={
'text': str(alt_data_to_graph),
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
},
hovermode="x unified",
font=dict(family=font_family,),
)
# Update X-axis
if dataRange:
fig.update_xaxes(
title="Position",
range=dataRange,
showline=True,
showgrid=xaxis_gridlines,
linewidth=axis_line_width,
)
else:
fig.update_xaxes(
title="Position",
showline=True,
showgrid=xaxis_gridlines,
linewidth=axis_line_width,
)
if y_max < 0.1:
fig.update_yaxes(
fixedrange=True,
linewidth=axis_line_width,
range=y_range,
showgrid=yaxis_gridlines,
showline=True,
title="Edit me",
showexponent = 'all',
exponentformat = 'e',
)
else:
fig.update_yaxes(
fixedrange=True,
linewidth=axis_line_width,
range=y_range,
showgrid=yaxis_gridlines,
showline=True,
title="Edit me",
)
return fig
# ----------------------------------------------------------------------------------------
# -------------------------- Single Chromosome Graph Functions ---------------------------
def build_histogram_with_rug_plot(
topology_df,
chromosome,
chromosome_df,
template,
current_topologies,
window_size,
color_mapping,
dataRange,
topoOrder,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# --- Set up topology data ---
# Extract current topology data
if (type(current_topologies) == str) or (type(current_topologies) == int):
wanted_rows = topology_df[topology_df["TopologyID"] == current_topologies]
elif type(current_topologies) == list:
wanted_rows = topology_df[topology_df["TopologyID"].isin(current_topologies)]
    # Add in pseudodata for missing current_topologies (fixes issue where topology is dropped from legend)
if len(wanted_rows['TopologyID'].unique()) < len(current_topologies):
missing_topologies = [t for t in current_topologies if t not in wanted_rows['TopologyID'].unique()]
for mt in missing_topologies:
missing_row_data = [chromosome, 0, 'NA', mt] + ['NULL']*(len(wanted_rows.columns)-4)
missing_row = pd.DataFrame(data={i:j for i,j in zip(wanted_rows.columns, missing_row_data)}, index=[0])
wanted_rows = pd.concat([wanted_rows, missing_row])
# Group data by topology ID
grouped_topology_df = wanted_rows.sort_values(['TopologyID'],ascending=False).groupby(by='TopologyID')
# Set row heights based on number of current_topologies being shown
if len(current_topologies) <= 6:
subplot_row_heights = [1, 1]
elif len(current_topologies) <= 8:
subplot_row_heights = [4, 2]
else:
subplot_row_heights = [8, 2]
# Build figure
# fig = make_subplots(rows=2, cols=1, row_heights=subplot_row_heights, vertical_spacing=0.05, shared_xaxes=True)
fig = make_subplots(rows=2, cols=1, vertical_spacing=0.05, shared_xaxes=True)
for topology, data in grouped_topology_df:
fig.add_trace(
go.Scatter(
x=data['Window'],
y=data['TopologyID'],
name=topology,
legendgroup=topology,
mode='markers',
marker_symbol='line-ns-open',
marker_line_width=1,
marker_color=[color_mapping[topology]]*len(data),
),
# go.Box(
# x=data['Window'],
# y=data['TopologyID'],
# boxpoints='all',
# jitter=0,
# legendgroup=topology,
# marker_symbol='line-ns-open',
# marker_color=color_mapping[topology],
# name=topology,
# ),
row=1, col=1,
)
fig.add_trace(
go.Bar(
x=data['Window'],
y=[1]*len(data),
name=topology,
legendgroup=topology,
showlegend=False,
marker_color=color_mapping[topology],
marker_line_width=0,
),
row=2, col=1
)
# Update layout + axes
fig.update_layout(
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
itemsizing='constant'
),
hovermode="x unified",
font=dict(family=font_family,),
)
fig.update_xaxes(
rangemode="tozero",
range=dataRange,
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
row=1,
col=1
)
fig.update_xaxes(
rangemode="tozero",
range=dataRange,
linewidth=axis_line_width,
title='Position',
showgrid=xaxis_gridlines,
row=2,
col=1,
)
fig.update_yaxes(
rangemode="tozero",
categoryarray=topoOrder,
linewidth=axis_line_width,
showgrid=yaxis_gridlines,
showticklabels=False,
fixedrange=True,
ticklen=0,
title="",
type='category',
row=1,
col=1,
)
fig.update_yaxes(
rangemode="tozero",
fixedrange=True,
linewidth=axis_line_width,
nticks=1,
showgrid=yaxis_gridlines,
showticklabels=False,
ticklen=0,
title="",
row=2,
col=1,
)
return fig
def build_rug_plot(
topology_df,
chromosome,
template,
current_topologies,
color_mapping,
dataRange,
topoOrder,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# --- Group wanted data ---
if (type(current_topologies) == str) or (type(current_topologies) == int):
wanted_rows = topology_df[topology_df["TopologyID"] == current_topologies]
elif type(current_topologies) == list:
wanted_rows = topology_df[topology_df["TopologyID"].isin(current_topologies)]
    # Add in pseudodata for missing current_topologies (fixes issue where topology is dropped from legend)
if len(wanted_rows['TopologyID'].unique()) < len(current_topologies):
missing_topologies = [t for t in current_topologies if t not in wanted_rows['TopologyID'].unique()]
for mt in missing_topologies:
missing_row_data = [chromosome, 0, 'NA', mt] + ['NULL']*(len(wanted_rows.columns)-4)
missing_row = pd.DataFrame(data={i:j for i,j in zip(wanted_rows.columns, missing_row_data)}, index=[0])
wanted_rows = pd.concat([wanted_rows, missing_row])
else:
pass
# --- Group data by topology ID
grouped_topology_df = wanted_rows.groupby(by='TopologyID')
# --- Build figure ---
fig = go.Figure()
for topology, data in grouped_topology_df:
fig.add_trace(go.Scatter(
x=data['Window'],
y=data['TopologyID'],
name=topology,
legendgroup=topology,
mode='markers',
marker_symbol='line-ns-open',
marker_size=int(100/len(grouped_topology_df)),
marker_line_width=1,
marker_color=[color_mapping[topology]]*len(data),
))
# Update figure layout + axes
fig.update_layout(
template=template,
legend_title_text='Topology',
xaxis_title_text='Position',
margin=dict(
l=60,
r=60,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
hovermode="x unified",
font=dict(family=font_family,),
)
fig.update_xaxes(
rangemode="tozero",
range=dataRange,
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
showline=True,
)
fig.update_yaxes(
fixedrange=True,
title="",
showline=True,
showgrid=yaxis_gridlines,
linewidth=axis_line_width,
showticklabels=False,
type='category',
categoryarray=topoOrder,
)
fig.for_each_annotation(lambda a: a.update(text=""))
return fig
def build_tile_plot(
topology_df_filtered,
chromosome_df,
template,
current_topologies,
color_mapping,
dataRange,
window_size,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# Extract current topology data
if (type(current_topologies) == str) or (type(current_topologies) == int):
wanted_rows = topology_df_filtered[topology_df_filtered["TopologyID"] == current_topologies]
elif type(current_topologies) == list:
wanted_rows = topology_df_filtered[topology_df_filtered["TopologyID"].isin(current_topologies)]
# fig = px.histogram(
# wanted_rows,
# x="Window",
# y=[1]*len(wanted_rows),
# color="TopologyID",
# color_discrete_map=color_mapping,
# nbins=int(chromosome_df["End"].max()/window_size)
# )
grouped_topology_df = wanted_rows.groupby(by='TopologyID')
# Build figure
fig = go.Figure()
for topology, data in grouped_topology_df:
fig.add_trace(
go.Scatter(
x=data['Window'],
y=[1]*len(data),
name=topology,
legendgroup=topology,
mode='markers',
marker_symbol='line-ns-open',
marker_size=225,
# marker_line_width=2,
marker_color=[color_mapping[topology]]*len(data),
# showlegend = False
),
)
# Update layout + axes
fig.update_layout(
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
hovermode="x unified",
font=dict(family=font_family,),
)
fig.update_xaxes(
linewidth=axis_line_width,
rangemode="tozero",
range=dataRange,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
fixedrange=True,
linewidth=axis_line_width,
# range=[0, 1],
showline=False,
showgrid=yaxis_gridlines,
showticklabels=False,
ticklen=0,
title="",
)
return fig
def build_alt_data_graph(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
dataRange,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# Check input type and graph accordingly
try:
input_type = type(topology_df[alt_data_to_graph].dropna().to_list()[0])
except IndexError:
return no_data_graph(template)
if input_type == str:
alt_data_graph_data = make_alt_data_str_figure(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
dataRange,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
False,
)
else:
alt_data_graph_data = make_alt_data_int_figure(
alt_data_to_graph,
color_mapping,
topology_df,
chromosome_df,
template,
dataRange,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
False,
)
return alt_data_graph_data
def build_whole_genome_alt_data_graph(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# Check input type and graph accordingly
try:
input_type = type(topology_df[alt_data_to_graph].dropna().to_list()[0])
except IndexError:
return no_data_graph(template)
if input_type == str:
alt_data_graph_data = make_alt_data_str_figure(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
None,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
True,
)
else:
alt_data_graph_data = make_alt_data_int_figure(
alt_data_to_graph,
color_mapping,
topology_df,
chromosome_df,
template,
None,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
True,
)
return alt_data_graph_data
def build_gff_figure(
data,
dataRange,
template,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
regionStart, regionEnd = dataRange
    # Show gene names when the visible region is 10 Mb or less
    if abs(regionEnd - regionStart) <= 10000000:
show_gene_names = True
else:
show_gene_names = False
    # Group data by feature, attribute (gene name), and strand
attr_group = data.groupby(by=['feature', 'attribute', 'strand'])
positive_text_pos = "top center"
negative_text_pos = "top center"
features_graphed = list()
fig = go.Figure()
y_idx = 1
curr_feature = dict()
for fg, gene_data in attr_group:
feature, gene, strand = fg
feature_strand = f"{feature} ({strand})"
x_values = sorted(gene_data['start'].to_list() + gene_data['end'].to_list())
# Update y-axis value if new feature
if not curr_feature:
curr_feature[feature_strand] = y_idx
y_idx += 1
elif feature_strand in curr_feature.keys():
pass
else:
curr_feature[feature_strand] = y_idx
y_idx += 1
# Set legend show if feature in list already
if feature_strand in features_graphed:
show_legend = False
else:
show_legend = True
features_graphed.append(feature_strand)
# Set color, y-values, and arrow direction
if strand == '+':
colorValue = 'red'
y_values = [curr_feature[feature_strand]]*len(x_values)
markerSymbol = ['square']*(len(x_values)-1) + ['triangle-right']
text_pos = positive_text_pos
text_val = [gene] + ['']*(len(x_values)-1)
if positive_text_pos == "top center":
positive_text_pos = "bottom center"
elif positive_text_pos == "bottom center":
positive_text_pos = "top center"
else:
colorValue = '#009BFF'
y_values = [curr_feature[feature_strand]]*len(x_values)
markerSymbol = ['triangle-left'] + ['square']*(len(x_values)-1)
text_pos = negative_text_pos
text_val = ['']*(len(x_values)-1) + [gene]
if negative_text_pos == "top center":
negative_text_pos = "bottom center"
elif negative_text_pos == "bottom center":
negative_text_pos = "top center"
if show_gene_names:
fig.add_trace(go.Scatter(
x=x_values,
y=y_values,
name=feature_strand,
legendgroup=feature_strand,
mode='markers+lines+text',
marker_symbol=markerSymbol,
marker_size=8,
marker_color=colorValue,
text=text_val,
textposition=text_pos,
textfont=dict(
size=10,
),
hovertemplate=None,
showlegend=show_legend,
))
else:
fig.add_trace(go.Scatter(
x=x_values,
y=y_values,
name=feature_strand,
legendgroup=feature_strand,
mode='markers+lines',
marker_symbol=markerSymbol,
marker_size=8,
marker_color=colorValue,
# hoverinfo=['all'],
hovertemplate=None,
showlegend=show_legend,
))
fig.update_layout(
hovermode="x unified",
showlegend=True,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
template=template,
title='',
margin=dict(
l=62,
r=50,
b=20,
t=20,
),
height=150*len(features_graphed),
font=dict(family=font_family,),
)
fig.update_xaxes(
range=dataRange,
title='Position',
matches="x",
rangemode="tozero",
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0, len(features_graphed)+1],
fixedrange=True,
showticklabels=False,
showgrid=yaxis_gridlines,
title='',
linewidth=axis_line_width,
)
return fig
# ----------------------------------------------------------------------------------------
# ------------------------------- Quantile Graph Functions -------------------------------
def get_quantile_coordinates(
chromLengths,
QUANTILES,
WINDOWSIZE,
):
quantileCoordinates = pd.DataFrame(columns=chromLengths["Chromosome"], index=range(1, QUANTILES+1))
for row in chromLengths.itertuples(index=False):
chrom, _, end = row
chunkSize = end // QUANTILES
for i in range(QUANTILES):
q = i + 1
if q == 1:
quantileCoordinates.at[q, chrom] = [0, chunkSize]
else:
quantileCoordinates.at[q, chrom] = [chunkSize*(q-1) + WINDOWSIZE, chunkSize*q]
return quantileCoordinates
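# Shape of the result: rows are quantile numbers 1..QUANTILES, columns are chromosome
# names, and each cell is a [start, end] coordinate pair. Illustrative example with a
# hypothetical 1 kb chromosome (values are made up, not from real data):
#   >>> chromLengths = pd.DataFrame({"Chromosome": ["chr1"], "Start": [0], "End": [1000]})
#   >>> get_quantile_coordinates(chromLengths, QUANTILES=4, WINDOWSIZE=10)
#            chr1
#   1    [0, 250]
#   2  [260, 500]
#   3  [510, 750]
#   4 [760, 1000]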
def calculateFrequencies(
quantileCoordinates,
input_df,
chromLengths,
QUANTILES,
):
quantileFrequencies = pd.DataFrame(columns=chromLengths["Chromosome"], index=range(1, QUANTILES+1))
topos = input_df["TopologyID"].unique()
for chrom in quantileCoordinates.columns:
for q, quantile in enumerate(quantileCoordinates[chrom], 1):
quantileData = input_df[(input_df['Window'] >= quantile[0]) & (input_df['Window'] <= quantile[1]) & (input_df['Chromosome'] == chrom)]
topoQD = quantileData['TopologyID'].value_counts().to_dict()
# Add missing topologies as count=0
for i in topos:
if i not in topoQD.keys():
topoQD[i] = 0
quantileFrequencies.at[q, chrom] = topoQD
continue
return quantileFrequencies
def plot_frequencies(
quantileFrequencies,
n_quantiles,
template,
color_mapping,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
):
def reorganizeDF(df):
new_df = pd.DataFrame(columns=['Chr', 'Quantile', 'TopologyID', 'Frequency'])
nidx = 0
for c in df.columns:
for idx in df.index:
chromTotal = sum([v for v in df.at[idx, c].values()])
for topo, freq in zip(df.at[idx, c].keys(), df.at[idx, c].values()):
new_df.at[nidx, 'TopologyID'] = topo
new_df.at[nidx, 'Chr'] = c
new_df.at[nidx, 'Quantile'] = idx
try:
new_df.at[nidx, 'Frequency'] = int(freq)/chromTotal
except ZeroDivisionError:
new_df.at[nidx, 'Frequency'] = 0.0
nidx += 1
return new_df
# Organize DataFrame
organizedDF= reorganizeDF(quantileFrequencies)
# Create line graph
fig = px.line(
organizedDF,
x='Quantile',
y='Frequency',
color='TopologyID',
facet_col='Chr',
facet_col_wrap=1,
facet_row_spacing=0.01,
color_discrete_map=color_mapping,
)
fig.update_traces(texttemplate='%{text:.3}', textposition='top center')
if len(organizedDF["Chr"].unique()) == 1:
fig.update_layout(
uniformtext_minsize=12,
template=template,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
height=300,
)
else:
fig.update_layout(
uniformtext_minsize=12,
template=template,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
height=100*len(organizedDF["Chr"].unique()),
)
fig.update_xaxes(
range=[1, n_quantiles],
rangemode="tozero",
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0, 1],
fixedrange=True,
showgrid=yaxis_gridlines,
linewidth=axis_line_width,
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
return fig
def calculate_topo_quantile_frequencies(df, current_topologies, additional_data, n_quantiles):
final_df = pd.DataFrame(columns=["TopologyID", "Frequency", "Quantile"])
for topology in current_topologies:
topo_df = pd.DataFrame(columns=["TopologyID", "Frequency", "Quantile"])
tidx = 0
df = df.sort_values(by=additional_data)
df = df.assign(Quantile = pd.qcut(df[additional_data].rank(method='first'), q=n_quantiles, labels=False))
df['Quantile'] = df['Quantile'].apply(lambda x: x+1)
df_group = df.groupby(by="Quantile")
for rank, data in df_group:
counts = data["TopologyID"].value_counts()
for t, f in zip(counts.index, counts):
if t == topology:
topo_df.at[tidx, "TopologyID"] = t
topo_df.at[tidx, "Frequency"] = f/len(df)
topo_df.at[tidx, "Quantile"] = rank
tidx += 1
break
else:
continue
# -- Concat dfs --
final_df = pd.concat([final_df, topo_df])
return final_df
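# Note on the binning above: windows are assigned to quantiles with pd.qcut on
# rank(method='first') of the chosen additional-data column, so each bin holds an
# (approximately) equal number of windows rather than an equal value range, and
# "Frequency" is a topology's count within a bin divided by the total number of
# windows (len(df)), not by the bin size.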
def plot_frequencies_topo_quantile(
final_df,
template,
color_mapping,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
graph_title,
additional_data
):
fig = px.line(
final_df,
x="Quantile", y="Frequency",
color="TopologyID",
color_discrete_map=color_mapping,
markers=True,
)
fig.update_layout(
template=template,
title=graph_title,
title_x=0.5,
margin=dict(
t=80
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
# itemsizing='constant'
),
)
fig.update_xaxes(
title=f"{additional_data} Quantiles",
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
tick0=0,
dtick=1,
)
fig.update_yaxes(
rangemode="tozero",
linewidth=axis_line_width,
showgrid=yaxis_gridlines,
title='% Windows Observed',
)
return fig
# ---------------------------------------------------------------------------------
# -------------------------------- Whole Genome Graph Functions -------------------------------
def build_topology_frequency_pie_chart(
df,
template,
color_mapping,
font_family,
):
"""Returns pie graph for whole genome topology frequencies"""
fig = px.pie(
df,
values='Frequency',
names='TopologyID',
color="TopologyID",
color_discrete_map=color_mapping,
template=template,
title='Whole Genome Topology Frequencies',
)
fig.update_traces(textposition='inside')
fig.update_layout(
margin=dict(l=120, r=20, t=40, b=10),
uniformtext_minsize=12,
uniformtext_mode='hide',
legend=dict(itemclick=False, itemdoubleclick=False),
title_x=0.5,
font=dict(family=font_family,),
)
return fig
def build_rf_graph(
df,
ref_topo,
template,
color_mapping,
axis_line_width,
font_family,
):
fig = px.bar(
df, x="TopologyID", y="normRF-Distance",
color="TopologyID", color_discrete_map=color_mapping,
text='normRF-Distance')
fig.update_traces(texttemplate='%{text:.2f}', textposition='inside')
fig.update_layout(
title=f"Normalized RF-Distance from {ref_topo}",
title_x=0.5,
template=template,
font=dict(family=font_family,),
)
fig.update_xaxes(linewidth=axis_line_width)
fig.update_yaxes(linewidth=axis_line_width, range=[0, 1])
return fig
def build_whole_genome_rug_plot(
df,
chrom_df,
chromGroup,
template,
color_mapping,
currTopologies,
topoOrder,
window_size,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
wg_squish_expand,
font_family,
):
df = df[(df['TopologyID'].isin(currTopologies)) & (df['Chromosome'].isin(chromGroup))]
grouped_topology_df = df.groupby(by='TopologyID')
num_chroms = len(df['Chromosome'].unique())
chrom_row_dict = {chrom:i for chrom, i in zip(sorted(df['Chromosome'].unique()), range(1, len(df['Chromosome'].unique())+1, 1))}
chrom_shapes = []
row_height = [2]*num_chroms
# --- Build figure ---
    # If chromosome name longer than 5 characters, use subplot titles
    # instead of row titles
if df.Chromosome.map(len).max() > 5:
fig = make_subplots(
rows=num_chroms,
subplot_titles=chrom_row_dict.keys(),
shared_xaxes=True,
cols=1,
row_heights=row_height,
)
else:
fig = make_subplots(
rows=num_chroms,
row_titles=[c for c in chrom_row_dict.keys()],
shared_xaxes=True,
cols=1,
row_heights=row_height,
)
for topology, data in grouped_topology_df:
add_legend = True
for chrom in chrom_row_dict.keys():
chrom_data = data[data["Chromosome"] == chrom]
chrom_length_data = chrom_df[chrom_df['Chromosome'] == chrom]
chrom_length = chrom_length_data['End'].max()
if len(chrom_data) == 0:
fig.add_trace(
go.Scatter(
x=[0],
y=[topology],
name=topology,
legendgroup=topology,
mode='markers',
marker_symbol='line-ns-open',
marker_color=[color_mapping[topology]]*len(chrom_data),
showlegend = False,
),
row=chrom_row_dict[chrom], col=1,
)
elif add_legend:
fig.add_trace(
go.Scatter(
x=chrom_data['Window'],
y=chrom_data['TopologyID'],
name=topology,
legendgroup=topology,
mode='markers',
# marker_size=int(25/len(grouped_topology_df)),
marker_symbol='line-ns-open',
marker_color=[color_mapping[topology]]*len(chrom_data),
),
# go.Box(
# x=chrom_data['Window'],
# y=chrom_data['TopologyID'],
# boxpoints='all',
# jitter=0,
# legendgroup=topology,
# marker_symbol='line-ns-open',
# marker_color=color_mapping[topology],
# name=topology,
# ),
row=chrom_row_dict[chrom], col=1,
)
chrom_shapes.append(dict(type="line", xref="x", yref="y", x0=chrom_length, x1=chrom_length, y0=-1, y1=len(currTopologies), line_width=2))
add_legend = False
else:
fig.add_trace(
go.Scatter(
x=chrom_data['Window'],
y=chrom_data['TopologyID'],
name=topology,
legendgroup=topology,
mode='markers',
# marker_size=int(25/len(grouped_topology_df)),
marker_symbol='line-ns-open',
marker_color=[color_mapping[topology]]*len(chrom_data),
showlegend = False,
),
# go.Box(
# x=chrom_data['Window'],
# y=chrom_data['TopologyID'],
# boxpoints='all',
# jitter=0,
# marker_symbol='line-ns-open',
# marker_color=color_mapping[topology],
# legendgroup=topology,
# showlegend = False,
# name=topology,
# ),
row=chrom_row_dict[chrom], col=1,
)
chrom_ref = chrom_row_dict[chrom]
chrom_shapes.append(dict(type="rect", xref=f"x{chrom_ref}", yref=f"y{chrom_ref}", x0=chrom_length, x1=chrom_length, y0=-1, y1=len(currTopologies), line_width=2))
# Update layout + axes
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_xaxes(
rangemode="tozero",
range=[0, (chrom_df['End'].max()+(2*window_size))],
fixedrange=True,
linewidth=axis_line_width,
ticklen=0,
matches="x",
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
fixedrange=True,
title="",
showgrid=yaxis_gridlines,
showticklabels=False,
linewidth=axis_line_width,
categoryarray=topoOrder,
)
if wg_squish_expand == 'expand':
if num_chroms < 5:
fig.update_layout(
template=template,
legend_title_text='Topology',
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
height=160*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
fig.update_layout(
template=template,
legend_title_text='Topology',
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
height=100*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
elif wg_squish_expand == 'squish':
if num_chroms < 5:
fig.update_layout(
template=template,
legend_title_text='Topology',
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
height=125*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
fig.update_layout(
template=template,
legend_title_text='Topology',
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
height=50*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
if num_chroms < 5:
fig.update_layout(
template=template,
legend_title_text='Topology',
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
height=105*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
fig.update_layout(
template=template,
legend_title_text='Topology',
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
height=20*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
margin=dict(
t=10,
b=30,
),
font=dict(family=font_family,),
)
# Rotate chromosome names to 0-degrees
for annotation in fig['layout']['annotations']:
annotation['textangle']=0
annotation['align']="center"
return fig
def build_whole_genome_tile_plot(
df,
chrom_df,
template,
color_mapping,
currTopologies,
topoOrder,
window_size,
axis_line_width,
chromGroup,
xaxis_gridlines,
yaxis_gridlines,
wg_squish_expand,
font_family,
):
"""
Max chromosomes per graph if # current_topologies <= 3: 20
Max chromosomes per graph if # current_topologies > 3: 20/2
Returns: List of figures to display
"""
df = df[df['TopologyID'].isin(currTopologies)]
df = df[df['Chromosome'].isin(chromGroup)]
grouped_topology_df = df.groupby(by='TopologyID')
num_chroms = len(df['Chromosome'].unique())
chrom_row_dict = {chrom:i for chrom, i in zip(sorted(df['Chromosome'].unique()), range(1, len(df['Chromosome'].unique())+1, 1))}
chrom_shapes = []
# --- Build figure ---
# If longest chromosome name longer
# than 5 characters, use subplot titles
# instead of row titles
if df.Chromosome.map(len).max() > 5:
fig = make_subplots(
rows=num_chroms,
cols=1,
shared_xaxes=True,
subplot_titles=chrom_row_dict.keys(),
vertical_spacing=0.03,
)
else:
fig = make_subplots(
rows=num_chroms,
cols=1,
shared_xaxes=True,
row_titles=[c for c in chrom_row_dict.keys()],
vertical_spacing=0.001,
)
for topology, data in grouped_topology_df:
add_legend = True
for chrom in chrom_row_dict.keys():
chrom_data = data[data["Chromosome"] == chrom]
chrom_length_data = chrom_df[chrom_df['Chromosome'] == chrom]
chrom_length = chrom_length_data['End'].max()
if add_legend:
fig.add_trace(
go.Histogram(
x=chrom_data['Window'],
y=[1]*len(chrom_data),
nbinsx=int(chrom_length/window_size),
name=topology,
legendgroup=topology,
marker_line_width=0,
marker_color=color_mapping[topology],
),
row=chrom_row_dict[chrom], col=1,
)
chrom_shapes.append(dict(type="line", xref="x", yref="y", x0=chrom_length, x1=chrom_length, y0=0, y1=1, line_width=2))
add_legend = False
else:
fig.add_trace(
go.Histogram(
x=chrom_data['Window'],
y=[1]*len(chrom_data),
nbinsx=int(chrom_length/window_size),
name=topology,
legendgroup=topology,
marker_line_width=0,
marker_color=color_mapping[topology],
showlegend = False
),
row=chrom_row_dict[chrom], col=1,
)
chrom_ref = chrom_row_dict[chrom]
chrom_shapes.append(dict(type="rect", xref=f"x{chrom_ref}", yref=f"y{chrom_ref}", x0=chrom_length, x1=chrom_length, y0=0, y1=1, line_width=2))
# Update layout + axes
if wg_squish_expand == 'expand':
if num_chroms < 5:
fig.update_layout(
barmode="relative",
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
hovermode="x unified",
height=130*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
fig.update_layout(
barmode="relative",
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
hovermode="x unified",
height=100*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
elif wg_squish_expand == 'squish':
if num_chroms < 5:
fig.update_layout(
barmode="relative",
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
hovermode="x unified",
height=80*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
fig.update_layout(
barmode="relative",
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
hovermode="x unified",
height=50*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
if num_chroms < 5:
fig.update_layout(
barmode="relative",
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
hovermode="x unified",
height=55*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
fig.update_layout(
barmode="relative",
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
hovermode="x unified",
height=20*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
fig.update_xaxes(
linewidth=axis_line_width,
fixedrange=True,
rangemode="tozero",
range=[0, chrom_df['End'].max()],
ticklen=0,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
# categoryarray=topoOrder,
range=[0, 1],
fixedrange=True,
linewidth=axis_line_width,
showgrid=yaxis_gridlines,
showticklabels=False,
title="",
ticklen=0,
)
# Rotate chromosome names to 0-degrees
for annotation in fig['layout']['annotations']:
annotation['textangle']=0
annotation['align']="center"
return fig
def build_whole_genome_bar_plot(
df,
template,
color_mapping,
currTopologies,
axis_line_width,
chromGroup,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# Filter df to chromosomes in group
df = df[df['Chromosome'].isin(chromGroup)]
df = df[df['TopologyID'].isin(currTopologies)]
number_of_chrom_rows = len(df["Chromosome"].unique()) // 3
fig = px.bar(
df,
x='TopologyID',
y='Frequency',
facet_col='Chromosome',
facet_col_wrap=3,
facet_row_spacing=0.05,
color='TopologyID',
template=template,
color_discrete_map=color_mapping,
text='Frequency',
height=int(500*number_of_chrom_rows),
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_traces(texttemplate='%{text:.2}', textposition='outside')
# Remove y-axis labels
for axis in fig.layout:
if type(fig.layout[axis]) == go.layout.YAxis:
fig.layout[axis].title.text = ''
fig.update_layout(
uniformtext_minsize=12,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
margin=dict(l=10, r=10, t=10, b=10),
title="",
annotations = list(fig.layout.annotations) +
[go.layout.Annotation(
x=-0.07,
y=0.5,
font=dict(
size=12,
# color='white',
),
showarrow=False,
text="Frequency",
textangle=-90,
xref="paper",
yref="paper"
)
],
title_x=0.5,
font=dict(family=font_family,),
)
fig.update_xaxes(
title="",
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0, 1.1],
matches='y',
linewidth=axis_line_width,
showgrid=yaxis_gridlines,
)
return fig
def build_whole_genome_pie_charts(
df,
template,
color_mapping,
chromGroup,
font_family,
):
# Filter df to chromosomes in group
df = df[df['Chromosome'].isin(chromGroup)]
    number_of_chrom_rows = math.ceil(len(df["Chromosome"].unique()) / 3)
specs = [[{'type':'domain'}, {'type':'domain'}, {'type':'domain'}] for _ in range(number_of_chrom_rows)]
fig = make_subplots(
rows=number_of_chrom_rows,
cols=3,
specs=specs,
vertical_spacing=0.03,
horizontal_spacing=0.001,
subplot_titles=sorted(df["Chromosome"].unique()),
column_widths=[2]*3,
)
col_pos = 1
row_num = 1
for c in sorted(df['Chromosome'].unique()):
chrom_df = df[df["Chromosome"] == c]
fig.add_trace(go.Pie(labels=chrom_df["TopologyID"], values=chrom_df['Frequency'], marker_colors=list(color_mapping.values())), row=row_num, col=col_pos)
if col_pos == 3:
col_pos = 1
row_num += 1
else:
col_pos += 1
fig.update_traces(textposition='inside')
fig.update_layout(
uniformtext_minsize=12,
showlegend=True,
template=template,
height=int(200*number_of_chrom_rows),
font=dict(family=font_family,),
)
return fig
# ---------------------------------------------------------------------------------
# --------------------------- Stats DataFrame Generators --------------------------
def _get_valid_cols(topology_df):
valid_cols = list()
for i in topology_df.columns[4:]:
data = topology_df[i].unique()
flag = None
for j in data:
if type(j) == str:
flag = False
break
else:
flag = True
if flag:
valid_cols.append(i)
else:
continue
return valid_cols
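# Sketch of a pandas-idiomatic alternative (not used above, shown for clarity): assuming
# the first four columns are fixed metadata, numeric additional-data columns could also
# be selected with:
#   valid_cols = topology_df.iloc[:, 4:].select_dtypes(include="number").columns.tolist()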
def basic_stats_dfs(topology_df):
"""Generate dataframes of basic statistics
:param topology_df: Current View Tree Viewer input file dataframe
:type topology_df: Object
"""
# Calculate current view topologies
topo_freq_df = pd.DataFrame(topology_df["TopologyID"].value_counts()/len(topology_df))
if len(topo_freq_df) > 25: # If more than 25 topologies loaded, just show top 25
topo_freq_df = topo_freq_df.head(25)
remainder_freq = 1.0 - sum(topo_freq_df['TopologyID'])
topo_freq_df.at["Other", "TopologyID"] = remainder_freq
topo_names = [i for i in topo_freq_df.index]
topo_freqs = [round(i, 4) for i in topo_freq_df["TopologyID"]]
# Calculate median + average of additional data
if len(topology_df.columns) > 4:
valid_cols = _get_valid_cols(topology_df)
additional_dt_names = [i for i in valid_cols]
additional_dt_avg = [topology_df[i].mean() for i in valid_cols]
additional_dt_std = [topology_df[i].std() for i in valid_cols]
topo_freq_df = pd.DataFrame(
{
"TopologyID": topo_names,
"Frequency": topo_freqs,
}
)
additional_data_df = pd.DataFrame(
{
"Additional Data": additional_dt_names,
"Average": additional_dt_avg,
"Std Dev": additional_dt_std,
}
)
return topo_freq_df, additional_data_df
else: # No additional data types present in file
topo_freq_df = pd.DataFrame(
{
"TopologyID": topo_names,
"Frequency": topo_freqs,
}
)
return topo_freq_df, pd.DataFrame()
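# basic_stats_dfs returns a (topology frequency, additional-data summary) pair of
# DataFrames; the second frame is empty when the input carries only the four core columns.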
def current_view_topo_freq_chart(basic_stats_topo_freqs, template, color_mapping):
"""Return pie chart figure object for local topology frequencies
:param basic_stats_topo_freqs: Dataframe of topology frequencies
:type basic_stats_topo_freqs: DataFrame
:return: Plotly express pie chart
:rtype: Figure object
"""
if "Other" in basic_stats_topo_freqs["TopologyID"].to_list():
fig = px.bar(
basic_stats_topo_freqs,
x='TopologyID',
y="Frequency",
color="TopologyID",
color_discrete_map=color_mapping,
text="Frequency",
)
fig.update_layout(
template=template,
uniformtext_minsize=12,
uniformtext_mode='hide',
)
fig.update_traces(textposition='outside')
return fig
else:
fig = px.pie(
basic_stats_topo_freqs,
values="Frequency",
names="TopologyID",
color="TopologyID",
color_discrete_map=color_mapping,
template=template,
title="Current View Topology Frequencies",
)
fig.update_layout(
legend=dict(itemclick=False, itemdoubleclick=False),
margin=dict(l=120, r=20, t=40, b=10),
uniformtext_minsize=12,
uniformtext_mode='hide',
title_x=0.5,
)
fig.update_traces(textposition='inside')
return fig
def whole_genome_datatable(tv_df):
    valid_cols = _get_valid_cols(tv_df)
for i in tv_df.columns.to_list()[4:]:
if i in valid_cols:
continue
else:
tv_df.drop(labels=i, axis=1, inplace=True)
df_group = tv_df.groupby(by="TopologyID")
out_df = pd.DataFrame(columns=["TopologyID", "Additional Data", "Num. Windows", "Average", "Std Dev"])
idx = 0
for topology, data in df_group:
additional_datatypes = [i for i in data.columns[4:]]
for datatype in additional_datatypes:
dt_data = data[datatype]
mean = dt_data.mean()
stdev = dt_data.std()
out_df.at[idx, "TopologyID"] = topology
out_df.at[idx, "Additional Data"] = datatype
out_df.at[idx, "Num. Windows"] = len(dt_data)
out_df.at[idx, "Average"] = mean
out_df.at[idx, "Std Dev"] = stdev
idx += 1
continue
columns = [{'id': c, 'name': ["Per-Topology Whole Genome Comparison", c], 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal)} for c in out_df.columns]
data = out_df.to_dict('records')
return data, columns
# --- post-hoc tests ---
def mann_whitney_posthoc(tv_df, additional_data_type, pval_adjustment):
return sp.posthoc_mannwhitney(tv_df, val_col=additional_data_type, group_col='TopologyID', p_adjust=pval_adjustment)
def dunns_test_posthoc(tv_df, additional_data_type, pval_adjustment):
return sp.posthoc_dunn(tv_df, val_col=additional_data_type, group_col='TopologyID', p_adjust=pval_adjustment)
def tukeyHSD_posthoc(tv_df, additional_data_type, pval_adjustment, alpha):
return sp.posthoc_tukey_hsd(tv_df[additional_data_type], tv_df["TopologyID"], alpha=alpha)
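# These wrappers assume `sp` is scikit-posthocs and `ss` (used below) is scipy.stats,
# imported near the top of this module (not shown in this excerpt), e.g.:
#   import scikit_posthocs as sp
#   import scipy.stats as ss
# Each returns a square pairwise-comparison DataFrame indexed and columned by TopologyID.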
# --- Significance tests ---
def kruskal_wallis_H_test(tv_df, additional_data_type, posthoc_type, pval_adjustment, alpha):
"""Return dataframe with Kruskal-Wallis H test information for each topology
"""
d = [tv_df.loc[ids, additional_data_type].values for ids in tv_df.groupby('TopologyID').groups.values()]
H, p = ss.kruskal(*d, nan_policy='omit')
if posthoc_type == "Mann-Whitney rank test":
posthoc = mann_whitney_posthoc(tv_df, additional_data_type, pval_adjustment)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
if c1 == c2: # Remove self-self comparisons
continue
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
elif posthoc_type == "Dunn's test":
posthoc = dunns_test_posthoc(tv_df, additional_data_type, pval_adjustment)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
if c1 == c2: # Remove self-self comparisons
continue
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
elif posthoc_type == "TukeyHSD":
posthoc = tukeyHSD_posthoc(tv_df, additional_data_type, pval_adjustment, alpha)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
if c1 == c2: # Remove self-self comparisons
continue
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
else:
pass
return posthoc, data, columns, H, p
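# The `data`/`columns` pair returned above is shaped for a Dash DataTable (Format and
# Scheme are assumed to come from dash.dash_table.Format, imported elsewhere in this
# module), `posthoc` feeds stats_test_heatmap further below, and H/p report the omnibus
# Kruskal-Wallis result. Hypothetical call (column name is illustrative only):
#   posthoc, data, columns, H, p = kruskal_wallis_H_test(df, "dNdS", "Dunn's test", "bonferroni", 0.05)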
def one_way_anova(tv_df, additional_data_type, posthoc_type, pval_adjustment, alpha):
d = [tv_df.loc[ids, additional_data_type].values for ids in tv_df.groupby('TopologyID').groups.values()]
F, p = ss.f_oneway(*d)
if posthoc_type == "Mann-Whitney rank test":
posthoc = mann_whitney_posthoc(tv_df, additional_data_type, pval_adjustment)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
elif posthoc_type == "Dunn's test":
posthoc = dunns_test_posthoc(tv_df, additional_data_type, pval_adjustment)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
elif posthoc_type == "TukeyHSD":
posthoc = tukeyHSD_posthoc(tv_df, additional_data_type, pval_adjustment, alpha)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
else:
pass
return posthoc, data, columns, F, p
def stats_test_heatmap(posthoc, template):
fig = go.Figure(data=go.Heatmap(
z=posthoc.values,
x=posthoc.columns,
y=posthoc.index,
zmin=0,
zmax=1,
colorscale='Viridis',
colorbar=dict(title='p-value'),
hovertemplate = 'p-value: %{z}<extra></extra>',
))
fig.update_layout(
template=template,
coloraxis_colorbar=dict(title="log(p-value)"),
margin=dict(
t=60,
),
)
return fig
def frequency_distribution(data, name, template):
"""Return frequency density distribution"""
fig = px.histogram(data, x=name, histnorm='density')
fig.update_layout(template=template, margin=dict(t=20, pad=30))
return fig
def mean_frequency_of_alt_data_per_topology(tv_df, topologies, additional_data_type):
out_df = pd.DataFrame(columns=["TopologyID", "Total Windows", f"Mean ({additional_data_type})"])
idx = 1
for i in topologies:
topo_df = tv_df[tv_df["TopologyID"] == i]
additional_data_mean = topo_df[f"{additional_data_type}"].mean()
out_df.at[idx, "TopologyID"] = i
out_df.at[idx, "Total Windows"] = len(topo_df)
out_df.at[idx, f"Mean ({additional_data_type})"] = additional_data_mean
idx += 1
continue
return out_df.to_dict('records')
# ---------------------------------------------------------------------------------
# ------------------------- Graph Customization Functions -------------------------
def set_topology_colors(data, color):
    df = pd.read_json(data)
import pandas as pd
import plotly
from path import Path
from jinja2 import Environment, FileSystemLoader # html template engine
from flask import url_for
import visualize as bv
def generate_voc_html(feature: str, values: list, results: dict, template_name: str='voc.html'):
# express plots in html and JS
mutation_diversity = ''
# config = dict({'displaylogo': False})
config = {'displaylogo': False,
'scrollZoom': False,
'modeBarButtonsToAdd':['drawline',
'drawopenpath',
'drawrect',
'eraseshape'
],
'modeBarButtonsToRemove': ['toggleSpikelines','hoverCompareCartesian','lasso2d']}
# config = {'displayModeBar': False}
if results.get('mutation_diversity', None):
mutation_diversity = plotly.offline.plot(results['mutation_diversity'], include_plotlyjs=False, output_type='div', config=config)
sampling_img = plotly.offline.plot(results['sampling_fig'], include_plotlyjs=False, output_type='div', config=config)
world_time = plotly.offline.plot(results['world_time'], include_plotlyjs=False, output_type='div', config=config)
us_time = plotly.offline.plot(results['us_time'], include_plotlyjs=False, output_type='div', config=config)
ca_time = plotly.offline.plot(results['ca_time'], include_plotlyjs=False, output_type='div', config=config)
world_rtime = plotly.offline.plot(results['world_rtime'], include_plotlyjs=False, output_type='div', config=config)
us_rtime = plotly.offline.plot(results['us_rtime'], include_plotlyjs=False, output_type='div', config=config)
ca_rtime = plotly.offline.plot(results['ca_rtime'], include_plotlyjs=False, output_type='div', config=config)
world_map = plotly.offline.plot(results['world_map'],
include_plotlyjs=False, output_type='div', config=config)
state_map = plotly.offline.plot(results['state_map'], include_plotlyjs=False, output_type='div', config=config)
county_map = plotly.offline.plot(results['county_map'], include_plotlyjs=False, output_type='div', config=config)
# genetic_distance_plot = plotly.offline.plot(results['genetic_distance_plot'], include_plotlyjs=False, output_type='div')
strain_distance_plot = plotly.offline.plot(results['strain_distance_plot'], include_plotlyjs=False, output_type='div', config=config)
# aa_distance_plot = plotly.offline.plot(results['aa_distance_plot'], include_plotlyjs=False, output_type='div')
# s_aa_distance_plot = plotly.offline.plot(results['s_aa_distance_plot'], include_plotlyjs=False, output_type='div')
# generate output messages
#TODO: expt_name, first_detected
date = results['date']
strain = results['strain']
total_num = results['total_num']
num_countries = results['num_countries']
us_num = results['us_num']
num_states = results['num_states']
ca_num = results['ca_num']
num_lineages = results.get('num_lineages', '')
mutations = results.get('mutations', '')
# dir containing our template
file_loader = FileSystemLoader('templates')
# load the environment
env = Environment(loader=file_loader)
# load the template
template = env.get_template(template_name)
# render data in our template format
html_output = template.render(feature=feature, values=values,
total_num=total_num, num_countries=num_countries,
us_num=us_num, num_states=num_states, ca_num=ca_num,
num_lineages=num_lineages, strain=strain,
mutations=mutations,
date=date, world_time=world_time, us_time=us_time,
ca_time=ca_time, world_rtime=world_rtime,
ca_rtime=ca_rtime, us_rtime=us_rtime,
world_map=world_map,
state_map=state_map, county_map=county_map,
# genetic_distance_plot=genetic_distance_plot,
strain_distance_plot=strain_distance_plot,
# aa_distance_plot=aa_distance_plot,
# s_aa_distance_plot=s_aa_distance_plot,
first_detected=results['first_detected'],
sampling_img=sampling_img,
mutation_diversity=mutation_diversity)
print(f"Results for {values} embedded in HTML report")
return html_output
def generate_voc_data(feature, values, input_params):
results = pd.DataFrame()
res = pd.DataFrame()
if feature == 'mutation':
print(f"Loading variant data...")
gisaid_data = pd.read_csv(input_params['gisaid_data_fp'], compression='gzip')
if len(values) > 1:
res = (gisaid_data.groupby(['date', 'country', 'division',
'purpose_of_sequencing',
'location', 'pangolin_lineage', 'strain'])
.agg(mutations=('mutation', 'unique')).reset_index())
res['is_vui'] = res['mutations'].apply(bv.is_vui, args=(set(values),))
else:
print(f"Loading metadata...")
        gisaid_data = pd.read_csv(input_params['gisaid_meta_fp'], sep='\t', compression='gzip')
import pandas as pd
import os, requests, logging
import sys
import dask.dataframe as dd  # used by EdgarDownloader._update_master_db for reading multiple master.idx files
# from bs4 import BeautifulSoup as bs
from .utils import *
class EdgarBase(object):
def __init__(self, dir_edgar=None):
# self.dir_edgar =
# self.__dir_download = None
# self.__dir_data = None
self.__dir_output = None
self.ulr_sec = 'https://www.sec.gov/Archives/'
self.__dir_config = None
self.dir_curr = os.path.abspath(os.path.dirname(__file__))
self.dir_config = os.path.join(self.dir_curr, 'config')
self.today = pd.datetime.today()
self.__fact_mapping = None
self.__dir_edgar = dir_edgar
self.__cache_file = {}
@property
def dir_edgar(self):
if self.__dir_edgar is None:
logger.error('please set output data directory ')
if 'DIR_EDGAR' not in os.environ:
logger.error('please set environment variable DIR_EDGAR')
logger.error("os.environ['DIR_EDGAR']=/path/to/dir'")
import tempfile
self.__dir_edgar = tempfile.gettempdir()
else:
self.__dir_edgar = os.environ['DIR_EDGAR']
return self.__dir_edgar
def set_dir_edgar(self, dir_edgar):
if not os.path.exists(dir_edgar):
os.makedirs(dir_edgar)
self.__dir_edgar = dir_edgar
return self
@property
def _dir_download(self):
# dir_download = os.path.join(self.dir_edgar, 'download')
# if not os.path.isdir(dir_download):
# os.makedirs(dir_download)
return self.dir_edgar
def set_dir_config(self, dir_input):
logger.info('setting dir_config={f}'.format(f=dir_input))
self.dir_curr = dir_input
@property
def fact_mapping(self):
if self.__fact_mapping is None:
path_fact_mapping = os.path.join(self.dir_config, 'fact_mapping.csv')
logger.info('reading fact_mapping from {f}'.format(f=path_fact_mapping))
fm = pd.read_csv(path_fact_mapping).set_index('item')
self.__fact_mapping = fm
else:
fm = self.__fact_mapping
return fm
def get_cik(self, ticker):
return ticker2cik(ticker)
def get_filing_path(self, ticker, filing_type=None, start_date=None, end_date=None):
"""
:param ticker:
:param filing_type: '10-K', '10-Q', etc...
:param start_date: str or datetime
:param end_date: str or datetime
:return: data frame columns=ticker|cik|filing_type|date|filepath
"""
pass
def parse_filing(self, filepath, section):
pass
def reindex_master(self, start_date=None, end_date=None):
pass
class EdgarDownloader(EdgarBase):
def __init__(self, dir_edgar):
super(EdgarDownloader, self).__init__(dir_edgar)
self.__conn_master_db = None
self.valid_form_type = ['10-Q', '10-K', '8-K']
def __exit__(self):
self._close_master_db()
@property
def _dir_master(self):
dir_master = os.path.join(self.dir_edgar, 'master')
if not os.path.isdir(dir_master):
os.makedirs(dir_master)
return dir_master
@property
def conn_master_db(self):
file_master_db = os.path.join(self.dir_edgar, 'master_idx.db')
if self.__conn_master_db is None:
import sqlite3
if not os.path.exists(file_master_db):
conn = sqlite3.connect(file_master_db)
pd.DataFrame().to_sql('master_idx', conn)
else:
conn = sqlite3.connect(file_master_db)
self.__conn_master_db = conn
return self.__conn_master_db
def _close_master_db(self):
conn = self.__conn_master_db
conn.close()
self.__conn_master_db = None
def load_master_db(self, start_date, end_date=None, force_reload=False):
#start_date = pd.to_datetime(str(start_date))
#end_date = pd.datetime.today() if end_date is None else pd.to_datetime(str(end_date))
list_yyyyqq = self._yyyyqq_between(start_date, end_date)
"edgar/full-index/{yyyy}/QTR{q}/master.idx"
list_file_master = ["edgar/full-index/{y}/QTR{q}/master.idx".format(y=yq.split('Q')[0], q=yq.split('Q')[1])
for yq in list_yyyyqq]
#list_file_download = [f for f in list_file_master if not os.path.exists(f) or force_reload]
list_file_downloaded = download_list(list_file_master, self.dir_edgar, force_download=force_reload)
self._update_master_db(list_file_downloaded)
def _update_master_db(self, list_files):
conn = self.conn_master_db
col_names = ['cik', 'company_name', 'form_type', 'date_filed', 'filename']
dfs = dd.read_csv(list_files, sep='|', skiprows=11, header=None)
dfs.columns = col_names
df_load = dfs[dfs['form_type'].isin(self.valid_form_type)].compute()
sql_all = 'select * from master_idx'
df_all = pd.read_sql_query(sql_all, conn)
logger.info('read master_idx db, n={s}'.format(s=df_all.shape[0]))
df_all = pd.concat([df_all, df_load], sort=False).drop_duplicates()
df_all.to_sql('master_idx', conn, if_exists='replace', index=False)
logger.info('write master_idx db, n={s}'.format(s=df_all.shape[0]))
return 0
# def _refresh_master_idx(self, yyyy, q, force=False):
# # yyyy, q = self._year_quarter(date)
# file_master = os.path.join(self._dir_master, "{y}_QTR{q}_master.csv".format(y=yyyy, q=q))
# if not os.path.exists(file_master) or force:
# url_master = self._url_master_idx(yyyy, q)
# logger.info('downloading {f}'.format(f=url_master))
# resp = req.get(url_master)
# if resp.status_code != 200:
# logger.error('error downloading {f}'.format(f=url_master))
# else:
# write_data = '\n'.join(resp.content.decode('latin1').split('\n')[11:])
# logger.info('saving {f}'.format(f=file_master))
# with open(file_master, 'w+', encoding='utf-8') as f:
# f.write("cik|company|form_type|file_date|file_name\n")
# f.write(write_data)
# self._update_master_db([file_master])
# else:
# logger.info('use existing file. {f}'.format(f=file_master))
# return file_master
def filings_between(self, symbol, start_date, end_date=None, form_type='10-K', download=True):
#list_year_quarter = self._yyyyqq_between(start_date, end_date)
#list_master_file = [self._refresh_master_idx(t.split('Q')) for t in list_year_quarter]
# dfs = dd.read_csv(list_master_file, sep='|')
cik = int(ticker2cik(symbol))
# df_res = dfs[(dfs.cik == cik) & (dfs.form_type == form_type)].compute()
sql_filings = "select * from master_idx where cik=={cik} and form_type=='{f}' " \
"and date_filed>='{t0}' ".format(cik=cik, f=form_type, t0=pd.to_datetime(start_date).date())
if end_date:
            sql_filings += "and date_filed<'{t1}'".format(t1=pd.to_datetime(end_date).date())
df_res = pd.read_sql_query(sql_filings, self.conn_master_db)
list_filename = df_res['filename'].tolist()
if download:
list_filename = download_list(list_filename, self._dir_download, force_download=True)
return list_filename
# @staticmethod
# def _url_master_idx(yyyy, q):
# url = "https://www.sec.gov/Archives/edgar/full-index/{yyyy}/QTR{q}/master.idx".format(yyyy=yyyy, q=q)
# return url
# @staticmethod
# def _year_quarter(date=pd.datetime.today()):
# t = pd.to_datetime(date).date()
# return t.year, (t.month - 1) // 3 + 1
@staticmethod
def _yyyyqq(date):
yq = pd.Period(pd.to_datetime(str(date)), freq='Q')
return str(yq)
def _yyyyqq_between(self, start_date, end_date=None):
end_date = pd.datetime.today() if end_date is None else pd.to_datetime(end_date)
end_date += pd.tseries.offsets.QuarterEnd()
start_date = pd.to_datetime(str(start_date))
logger.info('using quarters between {t0} to {t1}'.format(t0=start_date, t1=end_date))
        list_year_quarter = list(set(self._yyyyqq(t) for t in pd.date_range(start_date, end_date, freq='M')))
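        # set() deduplicates the monthly periods into quarters but does not preserve order
        return list_year_quarter  # assumed return: load_master_db above iterates over these 'YYYYQn' strings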
from flask import Flask, render_template
import pandas as pd
from pandas.tseries.offsets import DateOffset
import requests
import numpy as np
import tensorflow.keras.models as tf
import pickle
app = Flask(__name__)
def weekly_cases(select, column_name):
cases = [0, 0, 0, 0, 0, 0]
for x in range(6, len(select)):
weekly_avg = (select.loc[x, column_name] +
select.loc[x-1, column_name] +
select.loc[x-2, column_name] +
select.loc[x-3, column_name] +
select.loc[x-4, column_name] +
select.loc[x-5, column_name] +
select.loc[x-6, column_name])
cases.append(weekly_avg)
return cases
def weekly_ratio(select, column_name):
ratio = [0.0]*13
for x in range(13, len(select)):
if select.loc[x-7, column_name] == 0:
ratio.append(ratio[-1])
else:
avg_ratio = (select.loc[x, column_name])/select.loc[x-7, column_name]
ratio.append(avg_ratio)
return ratio
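# Roughly equivalent vectorized sketch (for clarity only, not what the app uses; edge
# handling differs: the loops above pad the first days with 0 and carry the previous
# ratio forward when the denominator is 0):
#   weekly = df["comm_cases"].rolling(7).sum()      # 7-day totals (NaN for the first 6 days)
#   ratio = weekly / weekly.shift(7)                # week-over-week ratio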
def data_extract():
api_path = 'https://covidsitrep.moh.gov.sg/_dash-layout'
moh = requests.get(api_path).json()
date = moh['props']['children'][1]['props']['children'][2]['props']['children'][0]['props']['figure']['data'][1]['x']
comm_cases = moh['props']['children'][1]['props']['children'][2]['props']['children'][0]['props']['figure']['data'][1]['y']
dorm_cases = moh['props']['children'][1]['props']['children'][2]['props']['children'][0]['props']['figure']['data'][3]['y']
import_cases = moh['props']['children'][1]['props']['children'][2]['props']['children'][0]['props']['figure']['data'][5]['y']
d = {"date": date, "comm_cases": comm_cases,"dorm_cases": dorm_cases, "import_cases": import_cases}
df = pd.DataFrame(data=d)
df["comm_weekly_cases"] = weekly_cases(df, "comm_cases")
df["comm_weekly_ratio"] = weekly_ratio(df, "comm_weekly_cases")
df["dorm_weekly_cases"] = weekly_cases(df, "dorm_cases")
df["dorm_weekly_ratio"] = weekly_ratio(df, "dorm_weekly_cases")
df["import_weekly_cases"] = weekly_cases(df, "import_cases")
df["import_weekly_ratio"] = weekly_ratio(df, "import_weekly_cases")
return df[-14:]
def predicting(df, n_days_for_prediction):
covid_model = tf.load_model('covid_model')
cols = list(df)[4:10]
print(cols)
df_input = df[cols].astype(float)
with open('scaler.pkl', 'rb') as handle:
scaler = pickle.load(handle)
df_scaled = scaler.transform(df_input)
last_days = np.array(df_scaled)
last_days = np.asarray(last_days).reshape(1, 14, 6)
for x in range(n_days_for_prediction):
days_14 = np.asarray(last_days[-1][-14:]).reshape(1, 14, 6)
last_days = np.concatenate([last_days[0], covid_model.predict(days_14)])
last_days = np.asarray(last_days).reshape(1, last_days.shape[0], last_days.shape[1])
prediction = scaler.inverse_transform(last_days[-1])
future = prediction[-n_days_for_prediction-1:]
return future[:, 1].tolist(), future[:, 3].tolist(),future[:, 5].tolist()
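# predicting() performs recursive multi-step forecasting: each iteration feeds the scaled
# last 14 rows (shape (1, 14, 6)) to the Keras model, appends the prediction to the
# window, and repeats n_days_for_prediction times before inverse-transforming with the
# saved scaler. Columns 1, 3 and 5 of the inverse-transformed output are the community,
# dorm and imported weekly-ratio forecasts returned to the caller.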
@app.route('/')
def home():
df = data_extract()
p_comm, p_dorm, p_import = predicting(df, 7)
p_comm2 = [0.0 if i < 0.0 else i for i in p_comm]
p_dorm2 = [0.0 if i < 0.0 else i for i in p_dorm]
p_import2 = [0.0 if i < 0.0 else i for i in p_import]
labels = df["date"].tolist()
df['date'] = pd.to_datetime(df['date'])
df.set_index('date', inplace=True)
    dates = [df.index[-1] + DateOffset(days=x + 1) for x in range(len(p_comm2))]  # assumption: one future date per predicted value
'''
episodestats.py
implements statistics that are used in producing employment statistics for the
lifecycle model
'''
import h5py
import numpy as np
import numpy_financial as npf
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from scipy.stats import norm
#import locale
from tabulate import tabulate
import pandas as pd
import scipy.optimize
from tqdm import tqdm_notebook as tqdm
from . empstats import Empstats
from scipy.stats import gaussian_kde
#locale.setlocale(locale.LC_ALL, 'fi_FI')
def modify_offsettext(ax,text):
'''
For y axis
'''
x_pos = 0.0
y_pos = 1.0
horizontalalignment='left'
verticalalignment='bottom'
offset = ax.yaxis.get_offset_text()
#value=offset.get_text()
# value=float(value)
# if value>=1e12:
# text='biljoonaa'
# elif value>1e9:
# text=str(value/1e9)+' miljardia'
# elif value==1e9:
# text=' miljardia'
# elif value>1e6:
# text=str(value/1e6)+' miljoonaa'
# elif value==1e6:
# text='miljoonaa'
# elif value>1e3:
# text=str(value/1e3)+' tuhatta'
# elif value==1e3:
# text='tuhatta'
offset.set_visible(False)
ax.text(x_pos, y_pos, text, transform=ax.transAxes,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment)
class Labels():
def get_labels(self,language='English'):
labels={}
if language=='English':
labels['osuus tilassa x']='Proportion in state {} [%]'
labels['age']='Age [y]'
labels['ratio']='Proportion [%]'
labels['unemp duration']='Length of unemployment [y]'
labels['scaled freq']='Scaled frequency'
labels['probability']='probability'
labels['telp']='Employee pension premium'
labels['sairausvakuutus']='Health insurance'
labels['työttömyysvakuutusmaksu']='Unemployment insurance'
labels['puolison verot']='Partners taxes'
labels['taxes']='Taxes'
labels['asumistuki']='Housing benefit'
labels['toimeentulotuki']='Supplementary benefit'
labels['tyottomyysturva']='Unemployment benefit'
labels['paivahoito']='Daycare'
labels['elake']='Pension'
labels['tyollisyysaste']='Employment rate'
labels['tyottomien osuus']='Proportion of unemployed'
labels['havainto']='Observation'
labels['tyottomyysaste']='Unemployment rate [%]'
labels['tyottomien osuus']='Proportion of unemployed [%]'
labels['tyollisyysaste %']='Employment rate [%]'
labels['ero osuuksissa']='Difference in proportions [%]'
labels['osuus']='proportion'
labels['havainto, naiset']='data, women'
labels['havainto, miehet']='data, men'
labels['palkkasumma']='Palkkasumma [euroa]'
labels['Verokiila %']='Verokiila [%]'
labels['Työnteko [hlö/htv]']='Työnteko [hlö/htv]'
labels['Työnteko [htv]']='Työnteko [htv]'
labels['Työnteko [hlö]']='Työnteko [hlö]'
labels['Työnteko [miljoonaa hlö/htv]']='Työnteko [miljoonaa hlö/htv]'
labels['Työnteko [miljoonaa htv]']='Työnteko [miljoonaa htv]'
labels['Työnteko [miljoonaa hlö]']='Työnteko [miljoonaa hlö]'
labels['Osatyönteko [%-yks]']='Osa-aikatyössä [%-yks]'
labels['Muut tulot [euroa]']='Muut tulot [euroa]'
labels['Henkilöitä']='Henkilöitä'
labels['Verot [euroa]']='Verot [euroa]'
labels['Verot [[miljardia euroa]']='Verot [[miljardia euroa]'
labels['Verokertymä [euroa]']='Verokertymä [euroa]'
labels['Verokertymä [miljardia euroa]']='Verokertymä [miljardia euroa]'
labels['Muut tarvittavat tulot [euroa]']='Muut tarvittavat tulot [euroa]'
labels['Muut tarvittavat tulot [miljardia euroa]']='Muut tarvittavat tulot [miljardia euroa]'
labels['malli']='Life cycle model'
else:
labels['osuus tilassa x']='Osuus tilassa {} [%]'
labels['age']='Ikä [v]'
labels['ratio']='Osuus tilassa [%]'
labels['unemp duration']='työttömyysjakson pituus [v]'
labels['scaled freq']='skaalattu taajuus'
labels['probability']='todennäköisyys'
labels['telp']='TEL-P'
labels['sairausvakuutus']='Sairausvakuutus'
labels['työttömyysvakuutusmaksu']='Työttömyysvakuutusmaksu'
labels['puolison verot']='puolison verot'
labels['taxes']='Verot'
labels['asumistuki']='Asumistuki'
labels['toimeentulotuki']='Toimeentulotuki'
labels['tyottomyysturva']='Työttömyysturva'
labels['paivahoito']='Päivähoito'
labels['elake']='Eläke'
labels['tyollisyysaste']='työllisyysaste'
labels['tyottomien osuus']='työttömien osuus'
labels['havainto']='havainto'
labels['tyottomyysaste']='Työttömyysaste [%]'
labels['tyottomien osuus']='Työttömien osuus väestöstö [%]'
labels['tyollisyysaste %']='Työllisyysaste [%]'
labels['ero osuuksissa']='Ero osuuksissa [%]'
labels['osuus']='Osuus'
labels['havainto, naiset']='havainto, naiset'
labels['havainto, miehet']='havainto, miehet'
labels['palkkasumma']='Palkkasumma [euroa]'
labels['Verokiila %']='Verokiila [%]'
labels['Työnteko [hlö/htv]']='Työnteko [hlö/htv]'
labels['Työnteko [htv]']='Työnteko [htv]'
labels['Työnteko [hlö]']='Työnteko [hlö]'
labels['Työnteko [miljoonaa hlö/htv]']='Työnteko [miljoonaa hlö/htv]'
labels['Työnteko [miljoonaa htv]']='Työnteko [miljoonaa htv]'
labels['Työnteko [miljoonaa hlö]']='Työnteko [miljoonaa hlö]'
labels['Osatyönteko [%-yks]']='Osa-aikatyössä [%-yks]'
labels['Muut tulot [euroa]']='Muut tulot [euroa]'
labels['Henkilöitä']='Henkilöitä'
labels['Verot [euroa]']='Verot [euroa]'
labels['Verot [[miljardia euroa]']='Verot [[miljardia euroa]'
labels['Verokertymä [euroa]']='Verokertymä [euroa]'
labels['Verokertymä [miljardia euroa]']='Verokertymä [miljardia euroa]'
labels['Muut tarvittavat tulot [euroa]']='Muut tarvittavat tulot [euroa]'
labels['Muut tarvittavat tulot [miljardia euroa]']='Muut tarvittavat tulot [miljardia euroa]'
labels['malli']='elinkaarimalli'
return labels
class EpisodeStats():
def __init__(self,timestep,n_time,n_emps,n_pop,env,minimal,min_age,max_age,min_retirementage,year=2018,version=3,params=None,gamma=0.92,lang='English'):
self.version=version
self.gamma=gamma
self.params=params
self.lab=Labels()
self.reset(timestep,n_time,n_emps,n_pop,env,minimal,min_age,max_age,min_retirementage,year,params=params,lang=lang)
print('version',version)
def reset(self,timestep,n_time,n_emps,n_pop,env,minimal,min_age,max_age,min_retirementage,year,version=None,params=None,lang=None,dynprog=False):
self.min_age=min_age
self.max_age=max_age
self.min_retirementage=min_retirementage
self.minimal=minimal
if params is not None:
self.params=params
if lang is None:
self.language='English'
else:
self.language=lang
if version is not None:
self.version=version
self.setup_labels()
self.n_employment=n_emps
self.n_time=n_time
        self.timestep=timestep # 0.25 = 3-month step
        self.inv_timestep=int(np.round(1/self.timestep)) # must be an integer
self.n_pop=n_pop
self.year=year
self.env=env
self.reaalinen_palkkojenkasvu=0.016
self.palkkakerroin=(0.8*1+0.2*1.0/(1+self.reaalinen_palkkojenkasvu))**self.timestep
self.elakeindeksi=(0.2*1+0.8*1.0/(1+self.reaalinen_palkkojenkasvu))**self.timestep
self.dynprog=dynprog
if self.minimal:
self.version=0
if self.version in set([0,101]):
self.n_groups=1
else:
self.n_groups=6
self.empstats=Empstats(year=self.year,max_age=self.max_age,n_groups=self.n_groups,timestep=self.timestep,n_time=self.n_time,
min_age=self.min_age)
self.init_variables()
def init_variables(self):
n_emps=self.n_employment
self.empstate=np.zeros((self.n_time,n_emps))
self.gempstate=np.zeros((self.n_time,n_emps,self.n_groups))
self.deceiced=np.zeros((self.n_time,1))
self.alive=np.zeros((self.n_time,1))
self.galive=np.zeros((self.n_time,self.n_groups))
self.rewstate=np.zeros((self.n_time,n_emps))
self.poprewstate=np.zeros((self.n_time,self.n_pop))
self.salaries_emp=np.zeros((self.n_time,n_emps))
#self.salaries=np.zeros((self.n_time,self.n_pop))
self.actions=np.zeros((self.n_time,self.n_pop))
self.popempstate=np.zeros((self.n_time,self.n_pop))
self.popunemprightleft=np.zeros((self.n_time,self.n_pop))
self.popunemprightused=np.zeros((self.n_time,self.n_pop))
self.tyoll_distrib_bu=np.zeros((self.n_time,self.n_pop))
self.unemp_distrib_bu=np.zeros((self.n_time,self.n_pop))
self.siirtyneet=np.zeros((self.n_time,n_emps))
self.siirtyneet_det=np.zeros((self.n_time,n_emps,n_emps))
self.pysyneet=np.zeros((self.n_time,n_emps))
self.aveV=np.zeros((self.n_time,self.n_pop))
self.time_in_state=np.zeros((self.n_time,n_emps))
self.stat_tyoura=np.zeros((self.n_time,n_emps))
self.stat_toe=np.zeros((self.n_time,n_emps))
self.stat_pension=np.zeros((self.n_time,n_emps))
self.stat_paidpension=np.zeros((self.n_time,n_emps))
self.out_of_work=np.zeros((self.n_time,n_emps))
self.stat_unemp_len=np.zeros((self.n_time,self.n_pop))
self.stat_wage_reduction=np.zeros((self.n_time,n_emps))
self.stat_wage_reduction_g=np.zeros((self.n_time,n_emps,self.n_groups))
self.infostats_group=np.zeros((self.n_pop,1))
self.infostats_taxes=np.zeros((self.n_time,1))
self.infostats_wagetaxes=np.zeros((self.n_time,1))
self.infostats_taxes_distrib=np.zeros((self.n_time,n_emps))
self.infostats_etuustulo=np.zeros((self.n_time,1))
self.infostats_etuustulo_group=np.zeros((self.n_time,self.n_groups))
self.infostats_perustulo=np.zeros((self.n_time,1))
self.infostats_palkkatulo=np.zeros((self.n_time,1))
self.infostats_palkkatulo_eielakkeella=np.zeros((self.n_time,1))
self.infostats_palkkatulo_group=np.zeros((self.n_time,self.n_groups))
self.infostats_palkkatulo_eielakkeella_group=np.zeros((self.n_time,1))
self.infostats_ansiopvraha=np.zeros((self.n_time,1))
self.infostats_ansiopvraha_group=np.zeros((self.n_time,self.n_groups))
self.infostats_asumistuki=np.zeros((self.n_time,1))
self.infostats_asumistuki_group=np.zeros((self.n_time,self.n_groups))
self.infostats_valtionvero=np.zeros((self.n_time,1))
self.infostats_valtionvero_group=np.zeros((self.n_time,self.n_groups))
self.infostats_kunnallisvero=np.zeros((self.n_time,1))
self.infostats_kunnallisvero_group=np.zeros((self.n_time,self.n_groups))
self.infostats_valtionvero_distrib=np.zeros((self.n_time,n_emps))
self.infostats_kunnallisvero_distrib=np.zeros((self.n_time,n_emps))
self.infostats_ptel=np.zeros((self.n_time,1))
self.infostats_tyotvakmaksu=np.zeros((self.n_time,1))
self.infostats_tyoelake=np.zeros((self.n_time,1))
self.infostats_kokoelake=np.zeros((self.n_time,1))
self.infostats_opintotuki=np.zeros((self.n_time,1))
self.infostats_isyyspaivaraha=np.zeros((self.n_time,1))
self.infostats_aitiyspaivaraha=np.zeros((self.n_time,1))
self.infostats_kotihoidontuki=np.zeros((self.n_time,1))
self.infostats_sairauspaivaraha=np.zeros((self.n_time,1))
self.infostats_toimeentulotuki=np.zeros((self.n_time,1))
self.infostats_tulot_netto=np.zeros((self.n_time,1))
self.infostats_pinkslip=np.zeros((self.n_time,n_emps))
self.infostats_pop_pinkslip=np.zeros((self.n_time,self.n_pop))
self.infostats_chilren18_emp=np.zeros((self.n_time,n_emps))
self.infostats_chilren7_emp=np.zeros((self.n_time,n_emps))
self.infostats_chilren18=np.zeros((self.n_time,1))
self.infostats_chilren7=np.zeros((self.n_time,1))
self.infostats_tyelpremium=np.zeros((self.n_time,self.n_pop))
self.infostats_paid_tyel_pension=np.zeros((self.n_time,self.n_pop))
self.infostats_sairausvakuutus=np.zeros((self.n_time))
self.infostats_pvhoitomaksu=np.zeros((self.n_time,self.n_pop))
self.infostats_ylevero=np.zeros((self.n_time,1))
self.infostats_ylevero_distrib=np.zeros((self.n_time,n_emps))
self.infostats_irr=np.zeros((self.n_pop,1))
self.infostats_npv0=np.zeros((self.n_pop,1))
self.infostats_mother_in_workforce=np.zeros((self.n_time,1))
self.infostats_children_under3=np.zeros((self.n_time,self.n_pop))
self.infostats_children_under7=np.zeros((self.n_time,self.n_pop))
self.infostats_unempwagebasis=np.zeros((self.n_time,self.n_pop))
self.infostats_unempwagebasis_acc=np.zeros((self.n_time,self.n_pop))
self.infostats_toe=np.zeros((self.n_time,self.n_pop))
self.infostats_ove=np.zeros((self.n_time,n_emps))
self.infostats_kassanjasen=np.zeros((self.n_time))
self.infostats_poptulot_netto=np.zeros((self.n_time,self.n_pop))
self.infostats_pop_wage=np.zeros((self.n_time,self.n_pop))
self.infostats_pop_pension=np.zeros((self.n_time,self.n_pop))
self.infostats_equivalent_income=np.zeros(self.n_time)
self.infostats_alv=np.zeros(self.n_time)
self.infostats_puoliso=np.zeros(self.n_time)
self.pop_predrew=np.zeros((self.n_time,self.n_pop))
if self.version==101:
self.infostats_savings=np.zeros((self.n_time,self.n_pop))
self.sav_actions=np.zeros((self.n_time,self.n_pop))
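# add() records one simulated transition: the branches below decode the environment's
# state vector (whose layout depends on the model version) and update the per-state
# counters; when a benefit/tax breakdown q is supplied, the euro-denominated
# info statistics are accumulated as well.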
def add(self,n,act,r,state,newstate,q=None,debug=False,plot=False,aveV=None,pred_r=None):
if self.version==0:
emp,_,_,a,_,_=self.env.state_decode(state) # current employment state
newemp,newpen,newsal,a2,tis,next_wage=self.env.state_decode(newstate)
g=0
bu=0
ove=0
jasen=0
puoliso=0
elif self.version==1:
# v1
emp,_,_,_,a,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,ura,oof,bu,wr,p=self.env.state_decode(newstate)
ove=0
jasen=0
puoliso=0
elif self.version==2:
# v2
emp,_,_,_,a,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,ura,bu,wr,upr,uw,uwr,pr,c3,c7,c18,unemp_left,aa,toe58=self.env.state_decode(newstate)
ove=0
jasen=0
puoliso=0
elif self.version==3:
# v3
emp,_,_,_,a,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,toek,ura,bu,wr,upr,uw,uwr,pr,c3,c7,c18,unemp_left,aa,toe58,ove,jasen=self.env.state_decode(newstate)
puoliso=0
elif self.version==4:
# v4
emp,_,_,_,a,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_=self.env.state_decode(state) # current employment state
newemp,g,newpen,newsal,a2,tis,paidpens,pink,toe,toek,ura,bu,wr,upr,uw,uwr,pr,\
c3,c7,c18,unemp_left,aa,toe58,ove,jasen,puoliso,puoliso_tyossa,puoliso_palkka=self.env.state_decode(newstate)
elif self.version==101:
emp,_,_,a,_,_,_=self.env.state_decode(state) # current employment state
newemp,newpen,newsal,a2,tis,next_wage,savings=self.env.state_decode(newstate)
g=0
bu=0
ove=0
jasen=0
t=int(np.round((a2-self.min_age)*self.inv_timestep))#-1
if a2>a and newemp>=0: # new state is not reset (age2>age)
if a2>self.min_retirementage and newemp==3 and self.version in set([1,2,3,4]):
newemp=2
if self.version in set([1,2,3,4]):
self.empstate[t,newemp]+=1
self.alive[t]+=1
self.rewstate[t,newemp]+=r
self.poprewstate[t,n]=r
self.actions[t,n]=act
self.popempstate[t,n]=newemp
#self.salaries[t,n]=newsal
self.salaries_emp[t,newemp]+=newsal
self.time_in_state[t,newemp]+=tis
if tis<=0.25 and newemp==5:
self.infostats_mother_in_workforce[t]+=1
self.infostats_pinkslip[t,newemp]+=pink
self.infostats_pop_pinkslip[t,n]=pink
self.gempstate[t,newemp,g]+=1
self.stat_wage_reduction[t,newemp]+=wr
self.stat_wage_reduction_g[t,newemp,g]+=wr
self.galive[t,g]+=1
self.stat_tyoura[t,newemp]+=ura
self.stat_toe[t,newemp]+=toe
self.stat_pension[t,newemp]+=newpen
self.stat_paidpension[t,newemp]+=paidpens
self.stat_unemp_len[t,n]=tis
self.popunemprightleft[t,n]=-self.env.unempright_left(newemp,tis,bu,a2,ura)
self.popunemprightused[t,n]=bu
self.infostats_group[n]=int(g)
self.infostats_unempwagebasis[t,n]=uw
self.infostats_unempwagebasis_acc[t,n]=uwr
self.infostats_toe[t,n]=toe
self.infostats_ove[t,newemp]+=ove
self.infostats_kassanjasen[t]+=jasen
self.infostats_pop_wage[t,n]=newsal
self.infostats_pop_pension[t,n]=newpen
self.infostats_puoliso[t]+=puoliso
if q is not None:
#print(newsal,q['palkkatulot'])
self.infostats_taxes[t]+=q['verot']*self.timestep*12
self.infostats_wagetaxes[t]+=q['verot_ilman_etuuksia']*self.timestep*12
self.infostats_taxes_distrib[t,newemp]+=q['verot']*self.timestep*12
self.infostats_etuustulo[t]+=q['etuustulo_brutto']*self.timestep*12
self.infostats_etuustulo_group[t,g]+=q['etuustulo_brutto']*self.timestep*12
self.infostats_perustulo[t]+=q['perustulo']*self.timestep*12
self.infostats_palkkatulo[t]+=q['palkkatulot']*self.timestep*12
self.infostats_palkkatulo_eielakkeella[t]+=q['palkkatulot_eielakkeella']*self.timestep*12
self.infostats_ansiopvraha[t]+=q['ansiopvraha']*self.timestep*12
self.infostats_asumistuki[t]+=q['asumistuki']*self.timestep*12
self.infostats_valtionvero[t]+=q['valtionvero']*self.timestep*12
self.infostats_valtionvero_distrib[t,newemp]+=q['valtionvero']*self.timestep*12
self.infostats_kunnallisvero[t]+=q['kunnallisvero']*self.timestep*12
self.infostats_kunnallisvero_distrib[t,newemp]+=q['kunnallisvero']*self.timestep*12
self.infostats_ptel[t]+=q['ptel']*self.timestep*12
self.infostats_tyotvakmaksu[t]+=q['tyotvakmaksu']*self.timestep*12
self.infostats_tyoelake[t]+=q['elake_maksussa']*self.timestep*12
self.infostats_kokoelake[t]+=q['kokoelake']*self.timestep*12
self.infostats_opintotuki[t]+=q['opintotuki']*self.timestep*12
self.infostats_isyyspaivaraha[t]+=q['isyyspaivaraha']*self.timestep*12
self.infostats_aitiyspaivaraha[t]+=q['aitiyspaivaraha']*self.timestep*12
self.infostats_kotihoidontuki[t]+=q['kotihoidontuki']*self.timestep*12
self.infostats_sairauspaivaraha[t]+=q['sairauspaivaraha']*self.timestep*12
self.infostats_toimeentulotuki[t]+=q['toimtuki']*self.timestep*12
self.infostats_tulot_netto[t]+=q['kateen']*self.timestep*12
self.infostats_tyelpremium[t,n]=q['tyel_kokomaksu']*self.timestep*12
self.infostats_paid_tyel_pension[t,n]=q['puhdas_tyoelake']*self.timestep*12
self.infostats_sairausvakuutus[t]+=q['sairausvakuutus']*self.timestep*12
self.infostats_pvhoitomaksu[t,n]=q['pvhoito']*self.timestep*12
self.infostats_ylevero[t]+=q['ylevero']*self.timestep*12
self.infostats_ylevero_distrib[t,newemp]=q['ylevero']*self.timestep*12
self.infostats_poptulot_netto[t,n]=q['kateen']*self.timestep*12
self.infostats_children_under3[t,n]=c3
self.infostats_children_under7[t,n]=c7
self.infostats_npv0[n]=q['multiplier']
self.infostats_equivalent_income[t]+=q['eq']
if 'alv' in q:
self.infostats_alv[t]+=q['alv']
#self.infostats_kassanjasen[t]+=1
elif self.version in set([0,101]):
self.empstate[t,newemp]+=1
self.alive[t]+=1
self.rewstate[t,newemp]+=r
self.infostats_tulot_netto[t]+=q['netto'] # already at annual level
self.infostats_poptulot_netto[t,n]=q['netto']
self.poprewstate[t,n]=r
self.popempstate[t,n]=newemp
#self.salaries[t,n]=newsal
self.salaries_emp[t,newemp]+=newsal
self.time_in_state[t,newemp]+=tis
self.infostats_equivalent_income[t]+=q['eq']
self.infostats_pop_wage[t,n]=newsal
self.infostats_pop_pension[t,n]=newpen
if self.dynprog and pred_r is not None:
self.pop_predrew[t,n]=pred_r
if self.version==101:
self.infostats_savings[t,n]=savings
self.actions[t,n]=act[0]
self.sav_actions[t,n]=act[1]
else:
self.actions[t,n]=act
# if self.version in set([1,2,3]):
# self.gempstate[t,newemp,g]+=1
# self.stat_wage_reduction[t,newemp]+=wr
# self.galive[t,g]+=1
# self.stat_tyoura[t,newemp]+=ura
# self.stat_toe[t,newemp]+=toe
# self.stat_pension[t,newemp]+=newpen
# self.stat_paidpension[t,newemp]+=paidpens
# self.stat_unemp_len[t,n]=tis
# self.popunemprightleft[t,n]=0
# self.popunemprightused[t,n]=0
if aveV is not None:
self.aveV[t,n]=aveV
if not emp==newemp:
self.siirtyneet[t,emp]+=1
self.siirtyneet_det[t,emp,newemp]+=1
else:
self.pysyneet[t,emp]+=1
elif newemp<0:
self.deceiced[t]+=1
def scale_error(self,x,target=None,averaged=False):
return (target-self.comp_scaled_consumption(x,averaged=averaged))
def comp_employed_ratio_by_age(self,emp=None,grouped=False,g=0):
if emp is None:
if grouped:
emp=np.squeeze(self.gempstate[:,:,g])
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyoll_osuus=(emp[:,1]+emp[:,3])/nn
htv_osuus=(emp[:,1]+0.5*emp[:,3])/nn
tyoll_osuus=np.reshape(tyoll_osuus,(tyoll_osuus.shape[0],1))
htv_osuus=np.reshape(htv_osuus,(htv_osuus.shape[0],1))
else:
# the employed include those in full-time work, part-time work, retirement+work and retirement+part-time work
# those on paternity leave are excluded, even though the leave lasts less than 3 months
tyoll_osuus=(emp[:,1]+emp[:,8]+emp[:,9]+emp[:,10])
htv_osuus=(emp[:,1]+0.5*emp[:,8]+emp[:,9]+0.5*emp[:,10])
tyoll_osuus=np.reshape(tyoll_osuus,(tyoll_osuus.shape[0],1))
htv_osuus=np.reshape(htv_osuus,(htv_osuus.shape[0],1))
return tyoll_osuus,htv_osuus
def comp_employed_aggregate(self,emp=None,start=20,end=63.5,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyoll_osuus=(emp[:,1]+emp[:,3])/nn
htv_osuus=(emp[:,1]+0.5*emp[:,3])/nn
else:
# the employed include those in full-time work, part-time work, retirement+work and retirement+part-time work
# those on paternity leave are excluded, even though the leave lasts less than 3 months
tyoll_osuus=(emp[:,1]+emp[:,8]+emp[:,9]+emp[:,10])/nn
htv_osuus=(emp[:,1]+0.5*emp[:,8]+emp[:,9]+0.5*emp[:,10])/nn
htv_osuus=self.comp_state_stats(htv_osuus,start=start,end=end,ratio=True)
tyoll_osuus=self.comp_state_stats(tyoll_osuus,start=start,end=end,ratio=True)
return tyoll_osuus,htv_osuus
def comp_group_ps(self):
return self.comp_palkkasumma(grouped=True)
def comp_palkkasumma(self,start=19,end=68,grouped=False,scale_time=True):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
min_cage=self.map_age(start)
max_cage=self.map_age(end)+1
if grouped:
scalex=demog2/self.n_pop*self.timestep
ps=np.zeros((self.n_time,6))
ps_norw=np.zeros((self.n_time,6))
a_ps=np.zeros(6)
a_ps_norw=np.zeros(6)
for k in range(self.n_pop):
g=int(self.infostats_group[k,0])
for t in range(min_cage,max_cage):
e=int(self.popempstate[t,k])
if e in set([1,10]):
ps[t,g]+=self.infostats_pop_wage[t,k]
ps_norw[t,g]+=self.infostats_pop_wage[t,k]
elif e in set([8,9]):
ps[t,g]+=self.infostats_pop_wage[t,k]*self.timestep
for g in range(6):
a_ps[g]=np.sum(scalex[min_cage:max_cage]*ps[min_cage:max_cage,g])
a_ps_norw[g]=np.sum(scalex[min_cage:max_cage]*ps_norw[min_cage:max_cage,g])
else:
scalex=demog2/self.n_pop*self.timestep
ps=np.zeros((self.n_time,1))
ps_norw=np.zeros((self.n_time,1))
for k in range(self.n_pop):
for t in range(min_cage,max_cage):
e=int(self.popempstate[t,k])
if e in set([1,10]):
ps[t,0]+=self.infostats_pop_wage[t,k]
ps_norw[t,0]+=self.infostats_pop_wage[t,k]
elif e in set([8,9]):
ps[t,0]+=self.infostats_pop_wage[t,k]
a_ps=np.sum(scalex[min_cage:max_cage]*ps[min_cage:max_cage])
a_ps_norw=np.sum(scalex[min_cage:max_cage]*ps_norw[min_cage:max_cage])
return a_ps,a_ps_norw
def comp_stats_agegroup(self,border=[19,35,50]):
n_groups=len(border)
low=border.copy()
high=border.copy()
high[0:n_groups-1]=border[1:n_groups]
high[-1]=65
employed=np.zeros(n_groups)
unemployed=np.zeros(n_groups)
ahtv=np.zeros(n_groups)
parttimeratio=np.zeros(n_groups)
unempratio=np.zeros(n_groups)
empratio=np.zeros(n_groups)
i_ps=np.zeros(n_groups)
i_ps_norw=np.zeros(n_groups)
for n in range(n_groups):
l=low[n]
h=high[n]
htv,tyollvaikutus,tyollaste,tyotosuus,tyottomat,osatyollaste=\
self.comp_tyollisyys_stats(self.empstate,scale_time=True,start=l,end=h,agegroups=True)
ps,ps_norw=self.comp_palkkasumma(start=l,end=h)
print(f'l {l} h {h}\nhtv {htv}\ntyollaste {tyollaste}\ntyotosuus {tyotosuus}\ntyottomat {tyottomat}\nosatyollaste {osatyollaste}\nps {ps}')
employed[n]=tyollvaikutus
ahtv[n]=htv
unemployed[n]=tyottomat
unempratio[n]=tyotosuus
empratio[n]=tyollaste
parttimeratio[n]=osatyollaste
i_ps[n]=ps
i_ps_norw[n]=ps_norw
return employed,ahtv,unemployed,parttimeratio,i_ps,i_ps_norw,unempratio,empratio
def comp_unemployed_ratio_by_age(self,emp=None,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyot_osuus=emp[:,0]/nn
tyot_osuus=np.reshape(tyot_osuus,(tyot_osuus.shape[0],1))
else:
# the employed include those in full-time work, part-time work, retirement+work and retirement+part-time work
# those on paternity leave are excluded, even though the leave lasts less than 3 months
tyot_osuus=(emp[:,0]+emp[:,4]+emp[:,13])[:,None]
#tyot_osuus=np.reshape(tyot_osuus,(tyot_osuus.shape[0],1))
return tyot_osuus
def comp_unemployed_aggregate(self,emp=None,start=20,end=63.5,scale_time=True,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
tyot_osuus=emp[:,0]/nn
else:
tyot_osuus=(emp[:,0]+emp[:,4]+emp[:,13])/nn
#print(f'tyot_osuus {tyot_osuus}')
unemp=self.comp_state_stats(tyot_osuus,start=start,end=end,ratio=True)
return unemp
def comp_parttime_aggregate(self,emp=None,start=20,end=63.5,scale_time=True,grouped=False,g=0):
'''
Headcount figures (NOT full-time equivalents/htv!)
'''
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if not self.minimal:
tyossa=(emp[:,1]+emp[:,10]+emp[:,8]+emp[:,9])/nn
osatyossa=(emp[:,10]+emp[:,8])/nn
else:
tyossa=emp[:,1]/nn
osatyossa=0*tyossa
osatyo_osuus=osatyossa/tyossa
osatyo_osuus=self.comp_state_stats(osatyo_osuus,start=start,end=end,ratio=True)
kokotyo_osuus=1-osatyo_osuus
return kokotyo_osuus,osatyo_osuus
def comp_parttime_ratio_by_age(self,emp=None,grouped=False,g=0):
if emp is None:
if grouped:
emp=self.gempstate[:,:,g]
else:
emp=self.empstate
nn=np.sum(emp,1)
if self.minimal:
kokotyo_osuus=(emp[:,1])/nn
osatyo_osuus=(emp[:,3])/nn
else:
if grouped:
# emp was already reduced to the requested group above, so it is indexed as a 2D array here
kokotyo_osuus=(emp[:,1]+emp[:,9])/nn
osatyo_osuus=(emp[:,8]+emp[:,10])/nn
else:
kokotyo_osuus=(emp[:,1]+emp[:,9])/nn
osatyo_osuus=(emp[:,8]+emp[:,10])/nn
osatyo_osuus=np.reshape(osatyo_osuus,(osatyo_osuus.shape[0],1))
kokotyo_osuus=np.reshape(kokotyo_osuus,(osatyo_osuus.shape[0],1))
return kokotyo_osuus,osatyo_osuus
def comp_employed_ratio(self,emp):
tyoll_osuus,htv_osuus=self.comp_employed_ratio_by_age(emp)
tyot_osuus=self.comp_unemployed_ratio_by_age(emp)
kokotyo_osuus,osatyo_osuus=self.comp_parttime_ratio_by_age(emp)
return tyoll_osuus,htv_osuus,tyot_osuus,kokotyo_osuus,osatyo_osuus
def comp_unemployed_detailed(self,emp):
if self.minimal:
ansiosid_osuus=emp[:,0]/np.sum(emp,1)
tm_osuus=ansiosid_osuus*0
else:
# the employed include those in full-time work, part-time work, retirement+work and retirement+part-time work
# those on paternity leave are excluded, even though the leave lasts less than 3 months
ansiosid_osuus=(emp[:,0]+emp[:,4])/np.sum(emp,1)
tm_osuus=(emp[:,13])/np.sum(emp,1)
return ansiosid_osuus,tm_osuus
def comp_tyollisyys_stats(self,emp,scale_time=True,start=19,end=68,full=False,tyot_stats=False,agg=False,shapes=False,only_groups=False,g=0,agegroups=False):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
min_cage=self.map_age(start)
max_cage=self.map_age(end)+1
scalex=demog2[min_cage:max_cage]/self.n_pop*scale
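# scalex rescales simulated head counts per age step to population level using the
# demographic age distribution (demog2), the simulated population size and the time step.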
if only_groups:
tyollosuus,htvosuus,tyot_osuus,kokotyo_osuus,osatyo_osuus=self.comp_employed_ratio(emp)
else:
tyollosuus,htvosuus,tyot_osuus,kokotyo_osuus,osatyo_osuus=self.comp_employed_ratio(emp)
htv=np.sum(scalex*htvosuus[min_cage:max_cage])
tyollvaikutus=np.sum(scalex*tyollosuus[min_cage:max_cage])
tyottomat=np.sum(scalex*tyot_osuus[min_cage:max_cage])
osatyollvaikutus=np.sum(scalex*osatyo_osuus[min_cage:max_cage])
kokotyollvaikutus=np.sum(scalex*kokotyo_osuus[min_cage:max_cage])
haj=np.mean(np.std(tyollosuus[min_cage:max_cage]))
tyollaste=tyollvaikutus/(np.sum(scalex)*self.n_pop)
osatyollaste=osatyollvaikutus/(kokotyollvaikutus+osatyollvaikutus)
kokotyollaste=kokotyollvaikutus/(kokotyollvaikutus+osatyollvaikutus)
if tyot_stats:
if agg:
d2=np.squeeze(demog2)
# per-age totals scaled to population level (assumed to mirror the non-aggregated branch below)
tyolliset_ika=np.squeeze(scale*d2*np.squeeze(htvosuus))
tyottomat_ika=np.squeeze(scale*d2*np.squeeze(tyot_osuus))
htv_ika=np.squeeze(scale*d2*np.squeeze(htvosuus))
tyolliset_osuus=np.squeeze(tyollosuus)
tyottomat_osuus=np.squeeze(tyot_osuus)
return tyolliset_ika,tyottomat_ika,htv_ika,tyolliset_osuus,tyottomat_osuus
else:
d2=np.squeeze(demog2)
tyolliset_ika=np.squeeze(scale*d2*np.squeeze(htvosuus))
tyottomat_ika=np.squeeze(scale*d2*np.squeeze(tyot_osuus))
htv_ika=np.squeeze(scale*d2*np.squeeze(htvosuus))
tyolliset_osuus=np.squeeze(tyollosuus)
tyottomat_osuus=np.squeeze(tyot_osuus)
return tyolliset_ika,tyottomat_ika,htv_ika,tyolliset_osuus,tyottomat_osuus
elif full:
return htv,tyollvaikutus,haj,tyollaste,tyollosuus,osatyollvaikutus,kokotyollvaikutus,osatyollaste,kokotyollaste
elif agegroups:
tyot_osuus=self.comp_unemployed_aggregate(start=start,end=end)
return htv,tyollvaikutus,tyollaste,tyot_osuus,tyottomat,osatyollaste
else:
return htv,tyollvaikutus,haj,tyollaste,tyollosuus
def comp_employment_stats(self,scale_time=True,returns=False):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
min_cage=self.map_age(self.min_age)
max_cage=self.map_age(self.max_age)+1
scalex=np.squeeze(demog2/self.n_pop*self.timestep)
d=np.squeeze(demog2[min_cage:max_cage])
self.ratiostates=self.empstate/self.alive
self.demogstates=(self.empstate.T*scalex).T
if self.minimal>0:
self.stats_employed=self.demogstates[:,0]+self.demogstates[:,3]
self.stats_parttime=self.demogstates[:,3]
self.stats_unemployed=self.demogstates[:,0]
self.stats_all=np.sum(self.demogstates,1)
else:
self.stats_employed=self.demogstates[:,0]+self.demogstates[:,10]+self.demogstates[:,8]+self.demogstates[:,9]
self.stats_parttime=self.demogstates[:,10]+self.demogstates[:,8]
self.stats_unemployed=self.demogstates[:,0]+self.demogstates[:,4]+self.demogstates[:,13]
self.stats_all=np.sum(self.demogstates,1)
if returns:
return self.stats_employed,self.stats_parttime,self.stats_unemployed
# def test_emp(self):
# g_emp=0
# g_htv=0
# g_10=0
# g_1=0
# g_8=0
# g_9=0
# g_x=0
# scalex=1
#
# demog2=self.empstats.get_demog()
# scalex=np.squeeze(demog2/self.n_pop*self.timestep)
#
#
# for g in range(6):
# q=self.comp_participants(grouped=True,g=g)
# #g_1+=np.sum(self.gempstate[:,1,g])
# #g_10+=np.sum(self.gempstate[:,10,g])
# #g_8+=np.sum(self.gempstate[:,8,g])
# #g_9+=np.sum(self.gempstate[:,9,g])
# g_emp+=q['palkansaajia']
# g_htv+=q['htv']
# g_x+=np.sum((self.gempstate[:,1,g]+self.gempstate[:,10,g])*scalex)
#
# q=self.comp_participants()
# s_1=np.sum(self.empstate[:,1])
# s_10=np.sum(self.empstate[:,10])
# s_8=np.sum(self.empstate[:,8])
# s_9=np.sum(self.empstate[:,9])
# s_x=np.sum((self.empstate[:,1]+self.empstate[:,10])*scalex)
# emp=q['palkansaajia']
# htv=q['htv']
#
# print(f'htv {htv} vs g_htv {g_htv}')
# print(f'emp {emp} vs g_emp {g_emp}')
# print(f's_x {s_x} vs g_x {g_x}')
# #print(f's_1 {s_1} vs g_1 {g_1}')
# #print(f's_10 {s_10} vs g_10 {g_10}')
# #print(f's_8 {s_8} vs g_8 {g_8}')
# #print(f's_9 {s_9} vs g_9 {g_9}')
def comp_participants(self,scale=True,include_retwork=True,grouped=False,g=0):
'''
Computes the number of participants (headcount) in each state.
scalex assumes that there are equally many women and men; this could be refined.
'''
demog2=self.empstats.get_demog()
scalex=np.squeeze(demog2/self.n_pop*self.timestep)
#print('version',self.version)
q={}
if self.version in set([1,2,3,4]):
if grouped:
#print('group=',g)
emp=np.squeeze(self.gempstate[:,:,g])
q['yhteensä']=np.sum(np.sum(emp,axis=1)*scalex)
if include_retwork:
q['palkansaajia']=np.sum((emp[:,1]+emp[:,10]+emp[:,8]+emp[:,9])*scalex)
q['htv']=np.sum((emp[:,1]+0.5*emp[:,10]+0.5*emp[:,8]+emp[:,9])*scalex)
else:
q['palkansaajia']=np.sum((emp[:,1]+emp[:,10])*scalex)
q['htv']=np.sum((emp[:,1]+0.5*emp[:,10])*scalex)
q['ansiosidonnaisella']=np.sum((emp[:,0]+emp[:,4])*scalex)
q['tmtuella']=np.sum(emp[:,13]*scalex)
q['isyysvapaalla']=np.sum(emp[:,6]*scalex)
q['kotihoidontuella']=np.sum(emp[:,7]*scalex)
q['vanhempainvapaalla']=np.sum(emp[:,5]*scalex)
else:
q['yhteensä']=np.sum(np.sum(self.empstate[:,:],axis=1)*scalex)
if include_retwork:
q['palkansaajia']=np.sum((self.empstate[:,1]+self.empstate[:,10]+self.empstate[:,8]+self.empstate[:,9])*scalex)
q['htv']=np.sum((self.empstate[:,1]+0.5*self.empstate[:,10]+0.5*self.empstate[:,8]+self.empstate[:,9])*scalex)
else:
q['palkansaajia']=np.sum((self.empstate[:,1]+self.empstate[:,10])*scalex)
q['htv']=np.sum((self.empstate[:,1]+0.5*self.empstate[:,10])*scalex)
q['ansiosidonnaisella']=np.sum((self.empstate[:,0]+self.empstate[:,4])*scalex)
q['tmtuella']=np.sum(self.empstate[:,13]*scalex)
q['isyysvapaalla']=np.sum(self.empstate[:,6]*scalex)
q['kotihoidontuella']=np.sum(self.empstate[:,7]*scalex)
q['vanhempainvapaalla']=np.sum(self.empstate[:,5]*scalex)
else:
q['yhteensä']=np.sum(np.sum(self.empstate[:,:],1)*scalex)
q['palkansaajia']=np.sum((self.empstate[:,1])*scalex)
q['htv']=np.sum((self.empstate[:,1])*scalex)
q['ansiosidonnaisella']=np.sum((self.empstate[:,0])*scalex)
q['tmtuella']=np.sum(self.empstate[:,1]*0)
q['isyysvapaalla']=np.sum(self.empstate[:,1]*0)
q['kotihoidontuella']=np.sum(self.empstate[:,1]*0)
q['vanhempainvapaalla']=np.sum(self.empstate[:,1]*0)
return q
def comp_employment_groupstats(self,scale_time=True,g=0,include_retwork=True,grouped=True):
demog2=self.empstats.get_demog()
if scale_time:
scale=self.timestep
else:
scale=1.0
#min_cage=self.map_age(self.min_age)
#max_cage=self.map_age(self.max_age)+1
scalex=np.squeeze(demog2/self.n_pop*scale)
#d=np.squeeze(demog2[min_cage:max_cage])
if grouped:
ratiostates=np.squeeze(self.gempstate[:,:,g])/self.alive
demogstates=np.squeeze(self.gempstate[:,:,g])
else:
ratiostates=self.empstate[:,:]/self.alive
demogstates=self.empstate[:,:]
if self.version in set([1,2,3,4]):
if include_retwork:
stats_employed=np.sum((demogstates[:,1]+demogstates[:,9])*scalex)
stats_parttime=np.sum((demogstates[:,10]+demogstates[:,8])*scalex)
else:
stats_employed=np.sum((demogstates[:,1])*scalex)
stats_parttime=np.sum((demogstates[:,10])*scalex)
stats_unemployed=np.sum((demogstates[:,0]+demogstates[:,4]+demogstates[:,13])*scalex)
else:
stats_employed=np.sum((demogstates[:,0]+demogstates[:,3])*scalex)
stats_parttime=np.sum((demogstates[:,3])*scalex)
stats_unemployed=np.sum((demogstates[:,0])*scalex)
#stats_all=np.sum(demogstates,1)
return stats_employed,stats_parttime,stats_unemployed
def comp_state_stats(self,state,scale_time=True,start=20,end=63.5,ratio=False):
demog2=np.squeeze(self.empstats.get_demog())
#if scale_time:
# scale=self.timestep
#else:
# scale=1.0
min_cage=self.map_age(start)
max_cage=self.map_age(end)+1
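# demography-weighted average of `state` over the age range [start, end]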
#vaikutus=np.round(scale*np.sum(demog2[min_cage:max_cage]*state[min_cage:max_cage]))/np.sum(demog2[min_cage:max_cage])
vaikutus=np.sum(demog2[min_cage:max_cage]*state[min_cage:max_cage])/np.sum(demog2[min_cage:max_cage])
x=np.sum(demog2[min_cage:max_cage]*state[min_cage:max_cage])
y=np.sum(demog2[min_cage:max_cage])
#print(f'vaikutus {vaikutus} x {x} y {y}\n s {state[min_cage:max_cage]} mean {np.mean(state[min_cage:max_cage])}\n d {demog2[min_cage:max_cage]}')
return vaikutus
def get_vanhempainvapaat(self):
'''
Computes the number of people on parental leave for the outsider model (Excel), state 6
'''
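# Gender groups 0-2 are men and 3-5 women; the [::4] slicing at the end picks one
# observation per year from the quarterly grid (assuming timestep=0.25).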
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
ulkopuolella_m=np.sum(self.gempstate[:,7,0:3],axis=1)[:,None]/alive
alive[:,0]=np.sum(self.galive[:,3:6],1)
nn=np.sum(self.gempstate[:,5,3:6]+self.gempstate[:,7,3:6],axis=1)[:,None]-self.infostats_mother_in_workforce
ulkopuolella_n=nn/alive
return ulkopuolella_m[::4],ulkopuolella_n[::4]
def get_vanhempainvapaat_md(self):
'''
Computes the number of people on parental leave for the outsider model (Excel), state 7
'''
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
ulkopuolella_m=np.sum(self.gempstate[:,6,0:3],axis=1)[:,None]/alive
alive[:,0]=np.sum(self.galive[:,3:6],1)
nn=self.infostats_mother_in_workforce
ulkopuolella_n=nn/alive
return ulkopuolella_m[::4],ulkopuolella_n[::4]
def comp_L2error(self):
tyollisyysaste_m,osatyoaste_m,tyottomyysaste_m,ka_tyottomyysaste=self.comp_gempratios(gender='men',unempratio=False)
tyollisyysaste_w,osatyoaste_w,tyottomyysaste_w,ka_tyottomyysaste=self.comp_gempratios(gender='women',unempratio=False)
emp_statsratio_m=self.empstats.emp_stats(g=1)[:-1]*100
emp_statsratio_w=self.empstats.emp_stats(g=2)[:-1]*100
unemp_statsratio_m=self.empstats.unemp_stats(g=1)[:-1]*100
unemp_statsratio_w=self.empstats.unemp_stats(g=2)[:-1]*100
w1=1.0
w2=3.0
L2= w1*np.sum(np.abs(emp_statsratio_m-tyollisyysaste_m[:-1])**2)+\
w1*np.sum(np.abs(emp_statsratio_w-tyollisyysaste_w[:-1])**2)+\
w2*np.sum(np.abs(unemp_statsratio_m-tyottomyysaste_m[:-1])**2)+\
w2*np.sum(np.abs(unemp_statsratio_w-tyottomyysaste_w[:-1])**2)
L2=L2/self.n_pop
#print(L1,emp_statsratio_m,tyollisyysaste_m,tyollisyysaste_w,unemp_statsratio_m,tyottomyysaste_m,tyottomyysaste_w)
print('L2 error {}'.format(L2))
return L2
def comp_budgetL2error(self,ref_muut,scale=1):
q=self.comp_budget()
muut=q['muut tulot']
L2=-((ref_muut-muut)/scale)**2
print(f'L2 error {L2} (muut {muut} muut_ref {ref_muut})')
return L2
def optimize_scale(self,target,averaged=False):
opt=scipy.optimize.least_squares(self.scale_error,0.20,bounds=(-1,1),kwargs={'target':target,'averaged':averaged})
#print(opt)
return opt['x']
def optimize_logutil(self,target,source):
'''
analytical compensated consumption
does not implement final reward, hence duration 110 y
'''
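# gx[t] = gamma**t and gy[t] = sum of gx[0:t] (the discounted horizon weight up to t);
# gf is the mean of gy[1:] divided by ten, and the utility gap (target-source) is then
# converted into a relative compensated-consumption change via exp(gap/gf)-1.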
n_time=110
gy=np.empty(n_time)
g=1
gx=np.empty(n_time)
for t in range(0,n_time):
gx[t]=g
g*=self.gamma
for t in range(1,n_time):
gy[t]=np.sum(gx[0:t])
gf=np.mean(gy[1:])/10
lx=(target-source)
opt=np.exp(lx/gf)-1.0
print(opt)
def min_max(self):
min_wage=np.min(self.infostats_pop_wage)
max_wage=np.max(self.infostats_pop_wage)
max_pension=np.max(self.infostats_pop_pension)
min_pension=np.min(self.infostats_pop_pension)
print(f'min wage {min_wage} max wage {max_wage}')
print(f'min pension {min_pension} max pension {max_pension}')
def setup_labels(self):
self.labels=self.lab.get_labels(self.language)
def map_age(self,age,start_zero=False):
if start_zero:
return int((age)*self.inv_timestep)
else:
return int((age-self.min_age)*self.inv_timestep)
def map_t_to_age(self,t):
return self.min_age+t/self.inv_timestep
def episodestats_exit(self):
plt.close(self.episode_fig)
def comp_gini(self):
'''
Gini coefficient for the population
'''
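# rank-based Gini estimator: incomes sorted in ascending order get weights n, n-1, ..., 1,
# A = sum(weights*income)/sum(income), and G = (n + 1 - 2*A)/n.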
income=np.sort(self.infostats_tulot_netto,axis=None)
n=len(income)
L=np.arange(n,0,-1)
A=np.sum(L*income)/np.sum(income)
G=(n+1-2*A)/n
return G
def comp_annual_irr(self,npv,premium,pension,empstate,doprint=False):
k=0
max_npv=int(np.ceil(npv))
cashflow=-premium+pension
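# x holds the quarterly net cash flow (-premiums + pensions) extended by max_npv
# post-simulation steps in which the last observed pension is carried forward with the
# pension index; y below aggregates quarters into years before computing the IRR.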
x=np.zeros(cashflow.shape[0]+max_npv)
eind=np.zeros(max_npv+1)
el=1
for k in range(max_npv+1):
eind[k]=el
el=el*self.elakeindeksi
x[:cashflow.shape[0]]=cashflow
if npv>0:
x[cashflow.shape[0]-1:]=cashflow[-2]*eind[:max_npv+1]
y=np.zeros(int(np.ceil(x.shape[0]/4)))
for k in range(y.shape[0]):
y[k]=np.sum(x[4*k:4*k+4])
irri=npf.irr(y)*100
#if np.isnan(irri):
# if np.sum(pension)<0.1 and np.sum(empstate[0:self.map_age(63)]==15)>0: # vain maksuja, joista ei saa tuottoja, joten tappio 100%
# irri=-100
if irri<0.01 and doprint:
print('---------\nirri {}\nnpv {}\nx {}\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,pension,empstate))
if irri>100 and doprint:
print('---------\nirri {}\nnpv {}\nx {}\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,pension,empstate))
if np.isnan(irri) and doprint:
print('---------\nirri {}\nnpv {}\nx {}\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,np.sum(pension),empstate))
#print('---------\nirri {}\nnpv {}\n\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,np.sum(pension),np.sum(empstate==15)))
if irri<-50 and doprint:
print('---------\nirri {}\nnpv {}\nx {}\ny {}\nprem {}\npens {}\nemps {}\n---------\n'.format(irri,npv,x,y,premium,pension,empstate))
return irri
def comp_irr(self):
'''
Computes the internal rate of return (IRR)
Indexation is missing for the npv part
Inflation + real wage growth (= nominal wage growth) must be added to the results
'''
for k in range(self.n_pop):
self.infostats_irr[k]=self.reaalinen_palkkojenkasvu*100+self.comp_annual_irr(self.infostats_npv0[k,0],self.infostats_tyelpremium[:,k],self.infostats_paid_tyel_pension[:,k],self.popempstate[:,k])
def comp_aggirr(self):
'''
Computes the aggregate internal rate of return (IRR)
Indexation is missing for the npv part
Inflation + real wage growth (= nominal wage growth) must be added to the results
'''
maxnpv=np.max(self.infostats_npv0)
agg_premium=np.sum(self.infostats_tyelpremium,axis=1)
agg_pensions=np.sum(self.infostats_paid_tyel_pension,axis=1)
agg_irr=self.reaalinen_palkkojenkasvu*100+self.comp_annual_irr(maxnpv,agg_premium,agg_pensions,self.popempstate[:,0])
x=np.zeros(self.infostats_paid_tyel_pension.shape[0]+int(np.ceil(maxnpv)))
max_npv=int(max(np.ceil(self.infostats_npv0[:,0])))
eind=np.zeros(max_npv)
el=1
for k in range(max_npv):
eind[k]=el
el=el*self.elakeindeksi
cfn=self.infostats_tyelpremium.shape[0]
for k in range(self.n_pop):
if np.sum(self.popempstate[0:self.map_age(63),k]==15)<1: # excluding the deceased
n=int(np.ceil(self.infostats_npv0[k,0]))
cashflow=-self.infostats_tyelpremium[:,k]+self.infostats_paid_tyel_pension[:,k]
# indexation missing
x[:cfn]+=cashflow
if n>0:
x[cfn-1:cfn+n-1]+=cashflow[-2]*eind[:n] # not indexed; the guarantee pension should also be taken into account
y=np.zeros(int(np.ceil(x.shape[0]/4)))
for k in range(y.shape[0]):
y[k]=np.sum(x[4*k:4*k+4])
irri=npf.irr(y)*100
print('aggregate irr {}'.format(agg_irr))
def comp_unemp_durations(self,popempstate=None,popunemprightused=None,putki=True,\
tmtuki=False,laaja=False,outsider=False,ansiosid=True,tyott=False,kaikki=False,\
return_q=True,max_age=100):
'''
Cross-sectional unemployment durations
'''
unempset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
if kaikki:
unempset=[0,2,3,4,5,6,7,8,9,11,12,13,14]
unempset=set(unempset)
if popempstate is None:
popempstate=self.popempstate
if popunemprightused is None:
popunemprightused=self.popunemprightused
keskikesto=np.zeros((5,5)) # 20-29, 30-39, 40-49, 50-59, 60-69; matches the TYJ statistics
n=np.zeros(5)
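# keskikesto columns: used benefit entitlement of at most 0.5, 1.0, 1.5 and 2.0 years,
# plus over 2 years; rows are the age groups above.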
for k in range(self.n_pop):
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k] in unempset:
if age<29:
l=0
elif age<39:
l=1
elif age<49:
l=2
elif age<59:
l=3
else:
l=4
n[l]+=1
if popunemprightused[t,k]<=0.51:
keskikesto[l,0]+=1
elif popunemprightused[t,k]<=1.01:
keskikesto[l,1]+=1
elif popunemprightused[t,k]<=1.51:
keskikesto[l,2]+=1
elif popunemprightused[t,k]<=2.01:
keskikesto[l,3]+=1
else:
keskikesto[l,4]+=1
for k in range(5):
keskikesto[k,:] /= n[k]
if return_q:
return self.empdur_to_dict(keskikesto)
else:
return keskikesto
def empdur_to_dict(self,empdur):
q={}
q['20-29']=empdur[0,:]
q['30-39']=empdur[1,:]
q['40-49']=empdur[2,:]
q['50-59']=empdur[3,:]
q['60-65']=empdur[4,:]
return q
def comp_unemp_durations_v2(self,popempstate=None,putki=True,tmtuki=False,laaja=False,\
outsider=False,ansiosid=True,tyott=False,kaikki=False,\
return_q=True,max_age=100):
'''
Cross-sectional unemployment durations
Here the result is computed from the state data, so it refers to the duration of the most recent spell
'''
unempset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
if kaikki:
unempset=[0,2,3,4,5,6,7,8,9,11,12,13,14]
unempset=set(unempset)
if popempstate is None:
popempstate=self.popempstate
keskikesto=np.zeros((5,5)) # 20-29, 30-39, 40-49, 50-59, 60-69; matches the TYJ statistics
n=np.zeros(5)
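# spell durations are measured from the transition into unemployment to the first
# transition out of it, and binned into the same duration classes as above.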
for k in range(self.n_pop):
prev_state=popempstate[0,k]
prev_trans=0
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k]!=prev_state:
if prev_state in unempset and popempstate[t,k] not in unempset:
prev_state=popempstate[t,k]
duration=(t-prev_trans)*self.timestep
prev_trans=t
if age<29:
l=0
elif age<39:
l=1
elif age<49:
l=2
elif age<59:
l=3
else:
l=4
n[l]+=1
if duration<=0.51:
keskikesto[l,0]+=1
elif duration<=1.01:
keskikesto[l,1]+=1
elif duration<=1.51:
keskikesto[l,2]+=1
elif duration<=2.01:
keskikesto[l,3]+=1
else:
keskikesto[l,4]+=1
elif prev_state not in unempset and popempstate[t,k] in unempset:
prev_trans=t
prev_state=popempstate[t,k]
else: # some other state
prev_state=popempstate[t,k]
prev_trans=t
for k in range(5):
keskikesto[k,:] /= n[k]
if return_q:
return self.empdur_to_dict(keskikesto)
else:
return keskikesto
def comp_virrat(self,popempstate=None,putki=True,tmtuki=True,laaja=False,outsider=False,ansiosid=True,tyott=False,kaikki=False,max_age=100):
tyoll_virta=np.zeros((self.n_time,1))
tyot_virta=np.zeros((self.n_time,1))
unempset=[]
empset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
if kaikki:
unempset=[0,2,3,4,5,6,7,8,9,11,12,13,14]
empset=set([1,10])
unempset=set(unempset)
if popempstate is None:
popempstate=self.popempstate
for k in range(self.n_pop):
prev_state=popempstate[0,k]
prev_trans=0
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k]!=prev_state:
if prev_state in unempset and popempstate[t,k] in empset:
tyoll_virta[t]+=1
prev_state=popempstate[t,k]
elif prev_state in empset and popempstate[t,k] in unempset:
tyot_virta[t]+=1
prev_state=popempstate[t,k]
else: # some other state
prev_state=popempstate[t,k]
return tyoll_virta,tyot_virta
def comp_tyollistymisdistribs(self,popempstate=None,popunemprightleft=None,putki=True,tmtuki=True,laaja=False,outsider=False,ansiosid=True,tyott=False,max_age=100):
tyoll_distrib=[]
tyoll_distrib_bu=[]
unempset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
empset=set([1,10])
unempset=set(unempset)
if popempstate is None or popunemprightleft is None:
popempstate=self.popempstate
popunemprightleft=self.popunemprightleft
for k in range(self.n_pop):
prev_state=popempstate[0,k]
prev_trans=0
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k]!=prev_state:
if prev_state in unempset and popempstate[t,k] in empset:
tyoll_distrib.append((t-prev_trans)*self.timestep)
tyoll_distrib_bu.append(popunemprightleft[t,k])
prev_state=popempstate[t,k]
prev_trans=t
else: # some other state
prev_state=popempstate[t,k]
prev_trans=t
return tyoll_distrib,tyoll_distrib_bu
def comp_empdistribs(self,popempstate=None,popunemprightleft=None,putki=True,tmtuki=True,laaja=False,outsider=False,ansiosid=True,tyott=False,max_age=100):
unemp_distrib=[]
unemp_distrib_bu=[]
emp_distrib=[]
unempset=[]
if tmtuki:
unempset.append(13)
if outsider:
unempset.append(11)
if putki:
unempset.append(4)
if ansiosid:
unempset.append(0)
if tyott:
unempset=[0,4,13]
if laaja:
unempset=[0,4,11,13]
if popempstate is None or popunemprightleft is None:
popempstate=self.popempstate
popunemprightleft=self.popunemprightleft
empset=set([1,10])
unempset=set(unempset)
for k in range(self.n_pop):
prev_state=popempstate[0,k]
prev_trans=0
for t in range(1,self.n_time):
age=self.min_age+t*self.timestep
if age<=max_age:
if popempstate[t,k]!=prev_state:
if prev_state in unempset and popempstate[t,k] not in unempset:
unemp_distrib.append((t-prev_trans)*self.timestep)
unemp_distrib_bu.append(popunemprightleft[t,k])
prev_state=popempstate[t,k]
prev_trans=t
elif prev_state in empset and popempstate[t,k] not in unempset:
emp_distrib.append((t-prev_trans)*self.timestep)
prev_state=popempstate[t,k]
prev_trans=t
else: # some other state
prev_state=popempstate[t,k]
prev_trans=t
return unemp_distrib,emp_distrib,unemp_distrib_bu
def empdist_stat(self):
ratio=np.array([1,0.287024901703801,0.115508955875928,0.0681083442551332,0.0339886413280909,0.0339886413280909,0.0114460463084316,0.0114460463084316,0.0114460463084316,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00419397116644823,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00166011358671909,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206,0.00104849279161206])
return ratio
def comp_gempratios(self,unempratio=True,gender='men'):
if gender=='men': # men
gempstate=np.sum(self.gempstate[:,:,0:3],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
mother_in_workforce=0
else: # women
gempstate=np.sum(self.gempstate[:,:,3:6],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,3:6],1)
mother_in_workforce=self.infostats_mother_in_workforce
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(gempstate,alive,unempratio=unempratio,mother_in_workforce=mother_in_workforce)
return tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste
def comp_empratios(self,emp,alive,unempratio=True,mother_in_workforce=0):
employed=emp[:,1]
retired=emp[:,2]
unemployed=emp[:,0]
if self.version in set([1,2,3,4]):
disabled=emp[:,3]
piped=emp[:,4]
mother=emp[:,5]
dad=emp[:,6]
kotihoidontuki=emp[:,7]
vetyo=emp[:,9]
veosatyo=emp[:,8]
osatyo=emp[:,10]
outsider=emp[:,11]
student=emp[:,12]
tyomarkkinatuki=emp[:,13]
tyollisyysaste=100*(employed+osatyo+veosatyo+vetyo+dad+mother_in_workforce)/alive[:,0]
osatyoaste=100*(osatyo+veosatyo)/(employed+osatyo+veosatyo+vetyo)
if unempratio:
tyottomyysaste=100*(unemployed+piped+tyomarkkinatuki)/(tyomarkkinatuki+unemployed+employed+piped+osatyo+veosatyo+vetyo)
ka_tyottomyysaste=100*np.sum(unemployed+tyomarkkinatuki+piped)/np.sum(tyomarkkinatuki+unemployed+employed+piped+osatyo+veosatyo+vetyo)
else:
tyottomyysaste=100*(unemployed+piped+tyomarkkinatuki)/alive[:,0]
ka_tyottomyysaste=100*np.sum(unemployed+tyomarkkinatuki+piped)/np.sum(alive[:,0])
elif self.version in set([0,101]):
if False:
osatyo=emp[:,3]
else:
osatyo=0
tyollisyysaste=100*(employed+osatyo)/alive[:,0]
#osatyoaste=np.zeros(employed.shape)
osatyoaste=100*(osatyo)/(employed+osatyo)
if unempratio:
tyottomyysaste=100*(unemployed)/(unemployed+employed+osatyo)
ka_tyottomyysaste=100*np.sum(unemployed)/np.sum(unemployed+employed+osatyo)
else:
tyottomyysaste=100*(unemployed)/alive[:,0]
ka_tyottomyysaste=100*np.sum(unemployed)/np.sum(alive[:,0])
return tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste
def plot_ratiostats(self,t):
'''
Make plots of the results
'''
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.set_xlabel('palkat')
ax.set_ylabel('freq')
ax.hist(self.infostats_pop_wage[t,:])
plt.show()
fig,ax=plt.subplots()
ax.set_xlabel('aika')
ax.set_ylabel('palkat')
meansal=np.mean(self.infostats_pop_wage,axis=1)
stdsal=np.std(self.infostats_pop_wage,axis=1)
ax.plot(x,meansal)
ax.plot(x,meansal+stdsal)
ax.plot(x,meansal-stdsal)
plt.show()
def plot_empdistribs(self,emp_distrib):
fig,ax=plt.subplots()
ax.set_xlabel('työsuhteen pituus [v]')
ax.set_ylabel('freq')
ax.set_yscale('log')
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
scaled,x2=np.histogram(emp_distrib,x)
scaled=scaled/np.sum(emp_distrib)
#ax.hist(emp_distrib)
ax.bar(x2[1:-1],scaled[1:],align='center')
plt.show()
def plot_compare_empdistribs(self,emp_distrib,emp_distrib2,label2='vaihtoehto',label1=''):
fig,ax=plt.subplots()
ax.set_xlabel('työsuhteen pituus [v]')
ax.set_ylabel(self.labels['probability'])
ax.set_yscale('log')
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
scaled,x2=np.histogram(emp_distrib,x)
scaled=scaled/np.sum(emp_distrib)
x=np.linspace(0,max_time,nn_time)
scaled3,x3=np.histogram(emp_distrib2,x)
scaled3=scaled3/np.sum(emp_distrib2)
ax.plot(x3[:-1],scaled3,label=label1)
ax.plot(x2[:-1],scaled,label=label2)
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
def plot_vlines_unemp(self,point=0):
axvcolor='gray'
lstyle='--'
plt.axvline(x=300/(12*21.5),ls=lstyle,color=axvcolor)
plt.text(310/(12*21.5),point,'300',rotation=90)
plt.axvline(x=400/(12*21.5),ls=lstyle,color=axvcolor)
plt.text(410/(12*21.5),point,'400',rotation=90)
plt.axvline(x=500/(12*21.5),ls=lstyle,color=axvcolor)
plt.text(510/(12*21.5),point,'500',rotation=90)
def plot_tyolldistribs(self,emp_distrib,tyoll_distrib,tyollistyneet=True,max=10,figname=None):
max_time=55
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
scaled0,x0=np.histogram(emp_distrib,x)
if not tyollistyneet:
scaled=scaled0
x2=x0
else:
scaled,x2=np.histogram(tyoll_distrib,x)
jaljella=np.cumsum(scaled0[::-1])[::-1] # cumulative count of spells still remaining
scaled=scaled/jaljella
fig,ax=plt.subplots()
ax.set_xlabel('työttömyysjakson pituus [v]')
if tyollistyneet:
ax.set_ylabel('työllistyneiden osuus')
point=0.5
else:
ax.set_ylabel('pois siirtyneiden osuus')
point=0.9
self.plot_vlines_unemp(point)
ax.plot(x2[1:-1],scaled[1:])
#ax.bar(x2[1:-1],scaled[1:],align='center',width=self.timestep)
plt.xlim(0,max)
if figname is not None:
plt.savefig(figname+'tyollistyneetdistrib.eps', format='eps')
plt.show()
def plot_tyolldistribs_both(self,emp_distrib,tyoll_distrib,max=10,figname=None):
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
scaled0,x0=np.histogram(emp_distrib,x)
scaled=scaled0
scaled_tyoll,x2=np.histogram(tyoll_distrib,x)
jaljella=np.cumsum(scaled0[::-1])[::-1] # cumulative count of spells still remaining
scaled=scaled/jaljella
jaljella_tyoll=np.cumsum(scaled0[::-1])[::-1] # cumulative count of spells still remaining
scaled_tyoll=scaled_tyoll/jaljella_tyoll
fig,ax=plt.subplots()
ax.set_xlabel('työttömyysjakson pituus [v]')
point=0.6
self.plot_vlines_unemp(point)
ax.plot(x2[1:-1],scaled[1:],label='pois siirtyneiden osuus')
ax.plot(x2[1:-1],scaled_tyoll[1:],label='työllistyneiden osuus')
#ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.legend()
ax.set_ylabel('pois siirtyneiden osuus')
plt.xlim(0,max)
plt.ylim(0,0.8)
if figname is not None:
plt.savefig(figname+'tyolldistribs.eps', format='eps')
plt.show()
def plot_tyolldistribs_both_bu(self,emp_distrib,tyoll_distrib,max=2):
max_time=4
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(-max_time,0,nn_time)
scaled0,x0=np.histogram(emp_distrib,x)
scaled=scaled0
scaled_tyoll,x2=np.histogram(tyoll_distrib,x)
jaljella=np.cumsum(scaled0[::-1])[::-1] # cumulative count of spells still remaining
#jaljella=np.cumsum(scaled0)
scaled=scaled/jaljella
jaljella_tyoll=np.cumsum(scaled0[::-1])[::-1] # cumulative count of spells still remaining
#jaljella_tyoll=np.cumsum(scaled0)
scaled_tyoll=scaled_tyoll/jaljella_tyoll
fig,ax=plt.subplots()
ax.set_xlabel('aika ennen ansiopäivärahaoikeuden loppua [v]')
point=0.6
#self.plot_vlines_unemp(point)
ax.plot(x2[1:-1],scaled[1:],label='pois siirtyneiden osuus')
ax.plot(x2[1:-1],scaled_tyoll[1:],label='työllistyneiden osuus')
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.set_ylabel('pois siirtyneiden osuus')
plt.xlim(-max,0)
#plt.ylim(0,0.8)
plt.show()
def plot_compare_tyolldistribs(self,emp_distrib1,tyoll_distrib1,emp_distrib2,
tyoll_distrib2,tyollistyneet=True,max=4,label1='perus',label2='vaihtoehto',
figname=None):
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
# data1
scaled01,x0=np.histogram(emp_distrib1,x)
if not tyollistyneet:
scaled1=scaled01
x1=x0
else:
scaled1,x1=np.histogram(tyoll_distrib1,x)
jaljella1=np.cumsum(scaled01[::-1])[::-1] # cumulative count of spells still remaining
scaled1=scaled1/jaljella1
# data2
scaled02,x0=np.histogram(emp_distrib2,x)
if not tyollistyneet:
scaled2=scaled02
x2=x0
else:
scaled2,x2=np.histogram(tyoll_distrib2,x)
jaljella2=np.cumsum(scaled02[::-1])[::-1] # cumulative count of spells still remaining
scaled2=scaled2/jaljella2
fig,ax=plt.subplots()
ax.set_xlabel('työttömyysjakson pituus [v]')
if tyollistyneet:
ax.set_ylabel('työllistyneiden osuus')
else:
ax.set_ylabel('pois siirtyneiden osuus')
self.plot_vlines_unemp()
ax.plot(x2[1:-1],scaled2[1:],label=label2)
ax.plot(x1[1:-1],scaled1[1:],label=label1)
#ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.legend()
plt.xlim(0,max)
if figname is not None:
plt.savefig(figname+'comp_tyollistyneetdistrib.eps', format='eps')
plt.show()
def plot_unempdistribs(self,unemp_distrib,max=4,figname=None,miny=None,maxy=None):
#fig,ax=plt.subplots()
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(0,max_time,nn_time)
scaled,x2=np.histogram(unemp_distrib,x)
scaled=scaled/np.sum(unemp_distrib)
fig,ax=plt.subplots()
self.plot_vlines_unemp(0.6)
ax.set_xlabel(self.labels['unemp duration'])
ax.set_ylabel(self.labels['probability'])
ax.plot(x[:-1],scaled)
ax.set_yscale('log')
plt.xlim(0,max)
if miny is not None:
plt.ylim(miny,maxy)
if figname is not None:
plt.savefig(figname+'unempdistribs.eps', format='eps')
plt.show()
def plot_saldist(self,t=0,sum=False,all=False,n=10,bins=30):
if all:
fig,ax=plt.subplots()
for t in range(1,self.n_time-1,5):
scaled,x=np.histogram(self.infostats_pop_wage[t,:],bins=bins)
x2=0.5*(x[1:]+x[0:-1])
ax.plot(x2,scaled,label=t)
plt.legend()
plt.show()
else:
if sum:
scaled,x=np.histogram(np.sum(self.infostats_pop_wage,axis=0),bins=bins)
x2=0.5*(x[1:]+x[0:-1])
plt.plot(x2,scaled)
else:
fig,ax=plt.subplots()
for t1 in range(t,t+n,1):
scaled,x=np.histogram(self.infostats_pop_wage[t1,:],bins=bins)
x2=0.5*(x[1:]+x[0:-1])
ax.plot(x2,scaled,label=t1)
plt.legend()
plt.show()
def test_salaries(self):
n=self.n_pop
palkat_ika_miehet=12.5*np.array([2339.01,2489.09,2571.40,2632.58,2718.03,2774.21,2884.89,2987.55,3072.40,3198.48,3283.81,3336.51,3437.30,3483.45,3576.67,3623.00,3731.27,3809.58,3853.66,3995.90,4006.16,4028.60,4104.72,4181.51,4134.13,4157.54,4217.15,4165.21,4141.23,4172.14,4121.26,4127.43,4134.00,4093.10,4065.53,4063.17,4085.31,4071.25,4026.50,4031.17,4047.32,4026.96,4028.39,4163.14,4266.42,4488.40,4201.40,4252.15,4443.96,3316.92,3536.03,3536.03])
palkat_ika_naiset=12.5*np.array([2223.96,2257.10,2284.57,2365.57,2443.64,2548.35,2648.06,2712.89,2768.83,2831.99,2896.76,2946.37,2963.84,2993.79,3040.83,3090.43,3142.91,3159.91,3226.95,3272.29,3270.97,3297.32,3333.42,3362.99,3381.84,3342.78,3345.25,3360.21,3324.67,3322.28,3326.72,3326.06,3314.82,3303.73,3302.65,3246.03,3244.65,3248.04,3223.94,3211.96,3167.00,3156.29,3175.23,3228.67,3388.39,3457.17,3400.23,3293.52,2967.68,2702.05,2528.84,2528.84])
g_r=[0.77,1.0,1.23]
data_range=np.arange(20,72)
sal20=np.zeros((n,1))
sal25=np.zeros((n,1))
sal30=np.zeros((n,1))
sal40=np.zeros((n,1))
sal50=np.zeros((n,1))
sal60=np.zeros((n,1))
sal=np.zeros((n,72))
p=np.arange(700,17500,100)*12.5
palkka20=np.array([10.3,5.6,4.5,14.2,7.1,9.1,22.8,22.1,68.9,160.3,421.6,445.9,501.5,592.2,564.5,531.9,534.4,431.2,373.8,320.3,214.3,151.4,82.3,138.0,55.6,61.5,45.2,19.4,32.9,13.1,9.6,7.4,12.3,12.5,11.5,5.3,2.4,1.6,1.2,1.2,14.1,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
palkka25=np.array([12.4,11.3,30.2,4.3,28.5,20.3,22.5,23.7,83.3,193.0,407.9,535.0,926.5,1177.1,1540.9,1526.4,1670.2,1898.3,1538.8,1431.5,1267.9,1194.8,1096.3,872.6,701.3,619.0,557.2,465.8,284.3,291.4,197.1,194.4,145.0,116.7,88.7,114.0,56.9,57.3,55.0,25.2,24.4,20.1,25.2,37.3,41.4,22.6,14.1,9.4,6.3,7.5,8.1,9.0,4.0,3.4,5.4,4.1,5.2,1.0,2.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
palkka30=np.array([1.0,2.0,3.0,8.5,12.1,22.9,15.8,21.8,52.3,98.2,295.3,392.8,646.7,951.4,1240.5,1364.5,1486.1,1965.2,1908.9,1729.5,1584.8,1460.6,1391.6,1551.9,1287.6,1379.0,1205.6,1003.6,1051.6,769.9,680.5,601.2,552.0,548.3,404.5,371.0,332.7,250.0,278.2,202.2,204.4,149.8,176.7,149.0,119.6,76.8,71.4,56.3,75.9,76.8,58.2,50.2,46.8,48.9,30.1,32.2,28.8,31.1,45.5,41.2,36.5,18.1,11.6,8.5,10.2,4.3,13.5,12.3,4.9,13.9,5.4,5.9,7.4,14.1,9.6,8.4,11.5,0.0,3.3,9.0,5.2,5.0,3.1,7.4,2.0,4.0,4.1,14.0,2.0,3.0,1.0,0.0,6.2,2.0,1.2,2.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
palkka50=np.array([2.0,3.1,2.4,3.9,1.0,1.0,11.4,30.1,29.3,34.3,231.9,341.9,514.4,724.0,1076.8,1345.2,1703.0,1545.8,1704.0,1856.1,1805.4,1608.1,1450.0,1391.4,1338.5,1173.2,1186.3,1024.8,1105.6,963.0,953.0,893.7,899.8,879.5,857.0,681.5,650.5,579.2,676.8,498.0,477.5,444.3,409.1,429.0,340.5,297.2,243.1,322.5,297.5,254.1,213.1,249.3,212.1,212.8,164.4,149.3,158.6,157.4,154.1,112.7,93.4,108.4,87.3,86.7,82.0,115.9,66.9,84.2,61.4,43.7,58.1,40.9,73.9,50.0,51.6,25.7,43.2,48.2,43.0,32.6,21.6,22.4,36.3,28.3,19.4,21.1,21.9,21.5,19.2,15.8,22.6,9.3,14.0,22.4,14.0,13.0,11.9,18.7,7.3,21.6,9.5,11.2,12.0,18.2,12.9,2.2,10.7,6.1,11.7,7.6,1.0,4.7,8.5,6.4,3.3,4.6,1.2,3.7,5.8,1.0,1.0,1.0,1.0,3.2,1.2,3.1,2.2,2.3,2.1,1.1,2.0,2.1,2.2,4.6,2.2,1.0,1.0,1.0,0.0,3.0,1.2,0.0,8.2,3.0,1.0,1.0,2.1,1.2,3.2,1.0,5.2,1.1,5.2,1.0,1.2,2.3,1.0,3.1,1.0,1.0,1.1,1.6,1.1,1.1,1.0,1.0,1.0,1.0])
m20=0
m25=0
m30=0
m40=0
m50=0
m60=0
salx=np.zeros((self.n_time+2,1))
saln=np.zeros((self.n_time+2,1))
salx_m=np.zeros((self.n_time+2,1))
saln_m=np.zeros((self.n_time+2,1))
salx_f=np.zeros((self.n_time+2,1))
saln_f=np.zeros((self.n_time+2,1))
for k in range(self.n_pop):
for t in range(self.n_time-2):
if self.popempstate[t,k] in set([1,10,8,9]):
salx[t]=salx[t]+self.infostats_pop_wage[t,k]
saln[t]=saln[t]+1
if self.infostats_group[k]>2:
salx_f[t]=salx_f[t]+self.infostats_pop_wage[t,k]
saln_f[t]=saln_f[t]+1
else:
salx_m[t]=salx_m[t]+self.infostats_pop_wage[t,k]
saln_m[t]=saln_m[t]+1
if self.popempstate[self.map_age(20),k] in set([1,10]):
sal20[m20]=self.infostats_pop_wage[self.map_age(20),k]
m20=m20+1
if self.popempstate[self.map_age(25),k] in set([1,10]):
sal25[m25]=self.infostats_pop_wage[self.map_age(25),k]
m25=m25+1
if self.popempstate[self.map_age(30),k] in set([1,10]):
sal30[m30]=self.infostats_pop_wage[self.map_age(30),k]
m30=m30+1
if self.popempstate[self.map_age(40),k] in set([1,10]):
sal40[m40]=self.infostats_pop_wage[self.map_age(40),k]
m40=m40+1
if self.popempstate[self.map_age(50),k] in set([1,10]):
sal50[m50]=self.infostats_pop_wage[self.map_age(50),k]
m50=m50+1
if self.popempstate[self.map_age(60),k] in set([1,10]):
sal60[m60]=self.infostats_pop_wage[self.map_age(60),k]
m60=m60+1
salx=salx/np.maximum(1,saln)
salx_f=salx_f/np.maximum(1,saln_f)
salx_m=salx_m/np.maximum(1,saln_m)
#print(sal25,self.infostats_pop_wage)
def kuva(sal,ika,m,p,palkka):
plt.hist(sal[:m],bins=50,density=True)
ave=np.mean(sal[:m])/12
palave=np.sum(palkka*p)/12/np.sum(palkka)
plt.title('{}: ave {} vs {}'.format(ika,ave,palave))
plt.plot(p,palkka/sum(palkka)/2000)
plt.show()
def kuva2(sal,ika,m):
plt.hist(sal[:m],bins=50,density=True)
ave=np.mean(sal[:m])/12
plt.title('{}: ave {}'.format(ika,ave))
plt.show()
kuva(sal20,20,m20,p,palkka20)
kuva(sal25,25,m25,p,palkka25)
kuva(sal30,30,m30,p,palkka30)
kuva2(sal40,40,m40)
kuva(sal50,50,m50,p,palkka50)
kuva2(sal60,60,m60)
data_range=np.arange(21,72)
plt.plot(data_range,np.mean(self.infostats_pop_wage[::4],axis=1),label='malli kaikki')
plt.plot(data_range,salx[::4],label='malli töissä')
data_range=np.arange(20,72)
plt.plot(data_range,0.5*palkat_ika_miehet+0.5*palkat_ika_naiset,label='data')
plt.legend()
plt.show()
data_range=np.arange(21,72)
plt.plot(data_range,salx_m[::4],label='malli töissä miehet')
plt.plot(data_range,salx_f[::4],label='malli töissä naiset')
data_range=np.arange(20,72)
plt.plot(data_range,palkat_ika_miehet,label='data miehet')
plt.plot(data_range,palkat_ika_naiset,label='data naiset')
plt.legend()
plt.show()
def plot_rewdist(self,t=0,sum=False,all=False):
if all:
fig,ax=plt.subplots()
for t in range(1,self.n_time-1,5):
scaled,x=np.histogram(self.poprewstate[t,:])
x2=0.5*(x[1:]+x[0:-1])
ax.plot(x2,scaled,label=t)
plt.legend()
plt.show()
else:
if sum:
scaled,x=np.histogram(np.sum(self.poprewstate,axis=0))
x2=0.5*(x[1:]+x[0:-1])
plt.plot(x2,scaled)
else:
fig,ax=plt.subplots()
for t in range(t,t+10,1):
scaled,x=np.histogram(self.poprewstate[t,:])
x2=0.5*(x[1:]+x[0:-1])
ax.plot(x2,scaled,label=t)
plt.legend()
plt.show()
def plot_unempdistribs_bu(self,unemp_distrib,max=2):
#fig,ax=plt.subplots()
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(-max_time,0,nn_time)
scaled,x2=np.histogram(unemp_distrib,x)
scaled=scaled/np.abs(np.sum(unemp_distrib))
fig,ax=plt.subplots()
#self.plot_vlines_unemp(0.6)
ax.set_xlabel(self.labels['unemp duration'])
ax.set_ylabel(self.labels['probability'])
#x3=np.flip(x[:-1])
#ax.plot(x3,scaled)
ax.plot(x[:-1],scaled)
#ax.set_yscale('log')
plt.xlim(-max,0)
plt.show()
def plot_compare_unempdistribs(self,unemp_distrib1,unemp_distrib2,max=4,
label2='none',label1='none',logy=True,diff=False,figname=None):
#fig,ax=plt.subplots()
max_time=50
nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(self.timestep,max_time,nn_time)
scaled1,x1=np.histogram(unemp_distrib1,x)
print('{} keskikesto {} v {} keskikesto {} v'.format(label1,np.mean(unemp_distrib1),label2,np.mean(unemp_distrib2)))
print('Skaalaamaton {} lkm {} v {} lkm {} v'.format(label1,len(unemp_distrib1),label2,len(unemp_distrib2)))
print('Skaalaamaton {} työtpäiviä yht {} v {} työtpäiviä yht {} v'.format(label1,np.sum(unemp_distrib1),label2,np.sum(unemp_distrib2)))
#scaled=scaled/np.sum(unemp_distrib)
scaled1=scaled1/np.sum(scaled1)
scaled2,x1=np.histogram(unemp_distrib2,x)
scaled2=scaled2/np.sum(scaled2)
fig,ax=plt.subplots()
if not diff:
self.plot_vlines_unemp(0.5)
ax.set_xlabel(self.labels['unemp duration'])
ax.set_ylabel(self.labels['osuus'])
if diff:
ax.plot(x[:-1],scaled1-scaled2,label=label1+'-'+label2)
else:
ax.plot(x[:-1],scaled2,label=label2)
ax.plot(x[:-1],scaled1,label=label1)
if logy and not diff:
ax.set_yscale('log')
if not diff:
plt.ylim(1e-4,1.0)
#ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.legend()
plt.xlim(0,max)
if figname is not None:
plt.savefig(figname+'comp_unempdistrib.eps', format='eps')
plt.show()
def plot_compare_virrat(self,virta1,virta2,min_time=25,max_time=65,label1='perus',label2='vaihtoehto',virta_label='työllisyys',ymin=None,ymax=None):
x=np.linspace(self.min_age,self.max_age,self.n_time)
demog2=self.empstats.get_demog()
scaled1=virta1*demog2/self.n_pop #/self.alive
scaled2=virta2*demog2/self.n_pop #/self.alive
fig,ax=plt.subplots()
plt.xlim(min_time,max_time)
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(virta_label+'virta')
ax.plot(x,scaled1,label=label1)
ax.plot(x,scaled2,label=label2)
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
if ymin is not None and ymax is not None:
plt.ylim(ymin,ymax)
plt.show()
def plot_outsider(self,printtaa=True):
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x,100*(self.empstate[:,11]+self.empstate[:,5]+self.empstate[:,7])/self.alive[:,0],label='työvoiman ulkopuolella, ei opiskelija, sis. vanh.vapaat')
emp_statsratio=100*self.empstats.outsider_stats()
ax.plot(x,emp_statsratio,label='havainto')
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
ax.legend()
plt.show()
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x,100*np.sum(self.gempstate[:,11,3:5]+self.gempstate[:,5,3:5]+self.gempstate[:,7,3:5],1,keepdims=True)/np.sum(self.galive[:,3:5],1,keepdims=True),label='työvoiman ulkopuolella, naiset')
ax.plot(x,100*np.sum(self.gempstate[:,11,0:2]+self.gempstate[:,5,0:2]+self.gempstate[:,7,0:2],1,keepdims=True)/np.sum(self.galive[:,0:2],1,keepdims=True),label='työvoiman ulkopuolella, miehet')
emp_statsratio=100*self.empstats.outsider_stats(g=1)
ax.plot(x,emp_statsratio,label=self.labels['havainto, naiset'])
emp_statsratio=100*self.empstats.outsider_stats(g=2)
ax.plot(x,emp_statsratio,label=self.labels['havainto, miehet'])
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
ax.legend()
plt.show()
if printtaa:
#print('yht',100*(self.empstate[:,11]+self.empstate[:,5]+self.empstate[:,6]+self.empstate[:,7])/self.alive[:,0])
nn=np.sum(self.galive[:,3:5],1,keepdims=True)
n=np.sum(100*(self.gempstate[:,5,3:5]+self.gempstate[:,6,3:5]+self.gempstate[:,7,3:5]),1,keepdims=True)/nn
mn=np.sum(self.galive[:,0:2],1,keepdims=True)
m=np.sum(100*(self.gempstate[:,5,0:2]+self.gempstate[:,6,0:2]+self.gempstate[:,7,0:2]),1,keepdims=True)/mn
#print('naiset vv',n[1::4,0])
#print('miehet vv',m[1::4,0])
def plot_pinkslip(self):
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x,100*self.infostats_pinkslip[:,0]/self.empstate[:,0],label='ansiosidonnaisella')
ax.plot(x,100*self.infostats_pinkslip[:,4]/self.empstate[:,4],label='putkessa')
ax.plot(x,100*self.infostats_pinkslip[:,13]/self.empstate[:,13],label='työmarkkinatuella')
ax.set_xlabel(self.labels['age'])
ax.set_ylabel('Irtisanottujen osuus tilassa [%]')
ax.legend()
plt.show()
def plot_student(self):
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x+self.timestep,100*self.empstate[:,12]/self.alive[:,0],label='opiskelija tai armeijassa')
emp_statsratio=100*self.empstats.student_stats()
ax.plot(x,emp_statsratio,label='havainto')
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
ax.legend()
plt.show()
def plot_kassanjasen(self):
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x+self.timestep,100*self.infostats_kassanjasen[:]/self.alive[:,0],label='työttömyyskassan jäsenien osuus kaikista')
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
ax.legend()
plt.show()
mini=np.nanmin(100*self.infostats_kassanjasen[:]/self.alive[:,0])
maxi=np.nanmax(100*self.infostats_kassanjasen[:]/self.alive[:,0])
print('Kassanjäseniä min {} % max {} %'.format(mini,maxi))
def plot_group_student(self):
fig,ax=plt.subplots()
for gender in range(2):
if gender==0:
leg='Opiskelijat+Armeija Miehet'
opiskelijat=np.sum(self.gempstate[:,12,0:3],axis=1)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
else:
leg='Opiskelijat+Armeija Naiset'
opiskelijat=np.sum(self.gempstate[:,12,3:6],axis=1)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,3:6],1)
opiskelijat=np.reshape(opiskelijat,(self.galive.shape[0],1))
osuus=100*opiskelijat/alive
x=np.linspace(self.min_age,self.max_age,self.n_time)
ax.plot(x,osuus,label=leg)
emp_statsratio=100*self.empstats.student_stats(g=1)
ax.plot(x,emp_statsratio,label=self.labels['havainto, naiset'])
emp_statsratio=100*self.empstats.student_stats(g=2)
ax.plot(x,emp_statsratio,label=self.labels['havainto, miehet'])
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
def plot_group_disab(self):
fig,ax=plt.subplots()
for gender in range(2):
if gender==0:
leg='TK Miehet'
opiskelijat=np.sum(self.gempstate[:,3,0:3],axis=1)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
else:
leg='TK Naiset'
opiskelijat=np.sum(self.gempstate[:,3,3:6],axis=1)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,3:6],1)
opiskelijat=np.reshape(opiskelijat,(self.galive.shape[0],1))
osuus=100*opiskelijat/alive
x=np.linspace(self.min_age,self.max_age,self.n_time)
ax.plot(x,osuus,label=leg)
emp_statsratio=100*self.empstats.disab_stat(g=1)
ax.plot(x,emp_statsratio,label=self.labels['havainto, naiset'])
emp_statsratio=100*self.empstats.disab_stat(g=2)
ax.plot(x,emp_statsratio,label=self.labels['havainto, miehet'])
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
def plot_taxes(self,figname=None):
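# Distribution of state, municipal and total income taxes across employment states:
# stacked share plots plus grouped textual summaries.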
valtionvero_ratio=100*self.infostats_valtionvero_distrib/np.reshape(np.sum(self.infostats_valtionvero_distrib,1),(-1,1))
kunnallisvero_ratio=100*self.infostats_kunnallisvero_distrib/np.reshape(np.sum(self.infostats_kunnallisvero_distrib,1),(-1,1))
vero_ratio=100*(self.infostats_kunnallisvero_distrib+self.infostats_valtionvero_distrib)/(np.reshape(np.sum(self.infostats_valtionvero_distrib,1),(-1,1))+np.reshape(np.sum(self.infostats_kunnallisvero_distrib,1),(-1,1)))
if figname is not None:
self.plot_states(vero_ratio,ylabel='Veronmaksajien osuus tilassa [%]',stack=True,figname=figname+'_stack')
else:
self.plot_states(vero_ratio,ylabel='Veronmaksajien osuus tilassa [%]',stack=True)
if figname is not None:
self.plot_states(valtionvero_ratio,ylabel='Valtionveron maksajien osuus tilassa [%]',stack=True,figname=figname+'_stack')
else:
self.plot_states(valtionvero_ratio,ylabel='Valtionveron maksajien osuus tilassa [%]',stack=True)
if figname is not None:
self.plot_states(kunnallisvero_ratio,ylabel='Kunnallisveron maksajien osuus tilassa [%]',stack=True,figname=figname+'_stack')
else:
self.plot_states(kunnallisvero_ratio,ylabel='Kunnallisveron maksajien osuus tilassa [%]',stack=True)
valtionvero_osuus,kunnallisvero_osuus,vero_osuus=self.comp_taxratios()
print('Valtionveron maksajien osuus\n{}'.format(self.v2_groupstates(valtionvero_osuus)))
print('Kunnallisveron maksajien osuus\n{}'.format(self.v2_groupstates(kunnallisvero_osuus)))
print('Veronmaksajien osuus\n{}'.format(self.v2_groupstates(vero_osuus)))
def group_taxes(self,ratios):
if len(ratios.shape)>1:
vv_osuus=np.zeros((ratios.shape[0],5))
vv_osuus[:,0]=ratios[:,0]+ratios[:,4]+ratios[:,5]+ratios[:,6]+\
ratios[:,7]+ratios[:,8]+ratios[:,9]+ratios[:,11]+\
ratios[:,12]+ratios[:,13]
vv_osuus[:,1]=ratios[:,1]+ratios[:,10]
vv_osuus[:,2]=ratios[:,2]+ratios[:,3]+ratios[:,8]+ratios[:,9]
vv_osuus[:,3]=ratios[:,1]+ratios[:,10]+ratios[:,8]+ratios[:,9]
else:
vv_osuus=np.zeros((4))
vv_osuus[0]=ratios[0]+ratios[4]+ratios[5]+ratios[6]+\
ratios[7]+ratios[8]+ratios[9]+ratios[11]+\
ratios[12]+ratios[13]
vv_osuus[1]=ratios[1]+ratios[10]
vv_osuus[2]=ratios[2]+ratios[3]+ratios[8]+ratios[9]
vv_osuus[3]=ratios[1]+ratios[10]+ratios[8]+ratios[9]
return vv_osuus
def comp_taxratios(self,grouped=False):
valtionvero_osuus=100*np.sum(self.infostats_valtionvero_distrib,0)/np.sum(self.infostats_valtionvero_distrib)
kunnallisvero_osuus=100*np.sum(self.infostats_kunnallisvero_distrib,0)/np.sum(self.infostats_kunnallisvero_distrib)
vero_osuus=100*(np.sum(self.infostats_kunnallisvero_distrib,0)+np.sum(self.infostats_valtionvero_distrib,0))/(np.sum(self.infostats_kunnallisvero_distrib)+np.sum(self.infostats_valtionvero_distrib))
if grouped:
vv_osuus=self.group_taxes(valtionvero_osuus)
kv_osuus=self.group_taxes(kunnallisvero_osuus)
v_osuus=self.group_taxes(vero_osuus)
else:
vv_osuus=valtionvero_osuus
kv_osuus=kunnallisvero_osuus
v_osuus=vero_osuus
return vv_osuus,kv_osuus,v_osuus
def comp_verokiila(self,include_retwork=True,debug=False):
'''
Computes the tax wedge following Lundberg (2017),
but applies the formulas to population averages
'''
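# Tax wedge used below: (income taxes of the employed + employer contributions + consumption tax)
# / (wage sum + employer contributions), where the consumption tax is approximated as a flat
# 24 % rate on wage income net of income taxes.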
if debug:
print('comp_verokiila')
demog2=self.empstats.get_demog()
scalex=demog2/self.n_pop
valtionvero_osuus=np.sum(self.infostats_valtionvero_distrib*scalex,0)
kunnallisvero_osuus=np.sum(self.infostats_kunnallisvero_distrib*scalex,0)
taxes_distrib=np.sum(self.infostats_taxes_distrib*scalex,0)
taxes=self.group_taxes(taxes_distrib)
q=self.comp_budget()
q2=self.comp_participants(scale=True,include_retwork=include_retwork)
#htv=q2['palkansaajia']
#muut_tulot=q['muut tulot']
# consumption taxation
tC=0.24*max(0,q['tyotulosumma']-taxes[3])
# (taxes of the employed + employer contributions + consumption tax) / (wage sum + employer contributions)
kiila=(taxes[3]+q['ta_maksut']+tC)/(q['tyotulosumma']+q['ta_maksut'])
qq={}
qq['tI']=taxes[3]/q['tyotulosumma']
qq['tC']=tC/q['tyotulosumma']
qq['tP']=q['ta_maksut']/q['tyotulosumma']
if debug:
print('qq',qq,'kiila',kiila)
return kiila,qq
def comp_verokiila_kaikki_ansiot(self):
demog2=self.empstats.get_demog()
scalex=demog2/self.n_pop
valtionvero_osuus=np.sum(self.infostats_valtionvero_distrib*scalex,0)
kunnallisvero_osuus=np.sum(self.infostats_kunnallisvero_distrib*scalex,0)
taxes_distrib=np.sum(self.infostats_taxes_distrib*scalex,0)
taxes=self.group_taxes(taxes_distrib)
q=self.comp_budget()
q2=self.comp_participants(scale=True)
htv=q2['palkansaajia']
muut_tulot=q['muut tulot']
# consumption taxation
tC=0.2*max(0,q['tyotulosumma']-taxes[3])
# (taxes of the employed + employer contributions + consumption tax) / (wage sum + employer contributions)
kiila=(taxes[0]+q['ta_maksut']+tC)/(q['tyotulosumma']+q['verotettava etuusmeno']+q['ta_maksut'])
qq={}
qq['tI']=taxes[0]/q['tyotulosumma']
qq['tC']=tC/q['tyotulosumma']
qq['tP']=q['ta_maksut']/q['tyotulosumma']
#print(qq)
return kiila,qq
def v2_states(self,x):
return 'Ansiosidonnaisella {:.2f}\nKokoaikatyössä {:.2f}\nVanhuuseläkeläiset {:.2f}\nTyökyvyttömyyseläkeläiset {:.2f}\n'.format(x[0],x[1],x[2],x[3])+\
'Putkessa {:.2f}\nÄitiysvapaalla {:.2f}\nIsyysvapaalla {:.2f}\nKotihoidontuella {:.2f}\n'.format(x[4],x[5],x[6],x[7])+\
'VE+OA {:.2f}\nVE+kokoaika {:.2f}\nOsa-aikatyö {:.2f}\nTyövoiman ulkopuolella {:.2f}\n'.format(x[8],x[9],x[10],x[11])+\
'Opiskelija/Armeija {:.2f}\nTM-tuki {:.2f}\n'.format(x[12],x[13])
def v2_groupstates(self,xx):
x=self.group_taxes(xx)
return 'Etuudella olevat {:.2f}\nTyössä {:.2f}\nEläkkeellä {:.2f}\n'.format(x[0],x[1],x[2])
def plot_emp(self,figname=None):
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(self.empstate,self.alive,unempratio=False)
age_label=self.labels['age']
ratio_label=self.labels['osuus']
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x,tyollisyysaste,label=self.labels['malli'])
#ax.plot(x,tyottomyysaste,label=self.labels['tyottomien osuus'])
emp_statsratio=100*self.empstats.emp_stats()
ax.plot(x,emp_statsratio,ls='--',label=self.labels['havainto'])
ax.set_xlabel(age_label)
ax.set_ylabel(self.labels['tyollisyysaste %'])
ax.legend()
if figname is not None:
plt.savefig(figname+'tyollisyysaste.eps', format='eps')
plt.show()
#if self.version in set([1,2,3]):
fig,ax=plt.subplots()
ax.stackplot(x,osatyoaste,100-osatyoaste,
labels=('osatyössä','kokoaikaisessa työssä')) #, colors=pal) pal=sns.color_palette("hls", self.n_employment) # hls, husl, cubehelix
ax.legend()
plt.show()
empstate_ratio=100*self.empstate/self.alive
if figname is not None:
self.plot_states(empstate_ratio,ylabel=ratio_label,stack=True,figname=figname+'_stack')
else:
self.plot_states(empstate_ratio,ylabel=ratio_label,stack=True)
if self.version in set([1,2,3,4]):
self.plot_states(empstate_ratio,ylabel=ratio_label,ylimit=20,stack=False)
self.plot_states(empstate_ratio,ylabel=ratio_label,parent=True,stack=False)
self.plot_parents_in_work()
self.plot_states(empstate_ratio,ylabel=ratio_label,unemp=True,stack=False)
if figname is not None:
self.plot_states(empstate_ratio,ylabel=ratio_label,start_from=60,stack=True,figname=figname+'_stack60')
else:
self.plot_states(empstate_ratio,ylabel=ratio_label,start_from=60,stack=True)
def plot_savings(self):
savings_0=np.zeros(self.n_time)
savings_1=np.zeros(self.n_time)
savings_2=np.zeros(self.n_time)
act_savings_0=np.zeros(self.n_time)
act_savings_1=np.zeros(self.n_time)
act_savings_2=np.zeros(self.n_time)
for t in range(self.n_time):
state_0=np.argwhere(self.popempstate[t,:]==0)
savings_0[t]=np.mean(self.infostats_savings[t,state_0[:]])
act_savings_0[t]=np.mean(self.sav_actions[t,state_0[:]])
state_1=np.argwhere(self.popempstate[t,:]==1)
savings_1[t]=np.mean(self.infostats_savings[t,state_1[:]])
act_savings_1[t]=np.mean(self.sav_actions[t,state_1[:]])
state_2=np.argwhere(self.popempstate[t,:]==2)
savings_2[t]=np.mean(self.infostats_savings[t,state_2[:]])
act_savings_2[t]=np.mean(self.sav_actions[t,state_2[:]])
fig,ax=plt.subplots()
x=np.linspace(self.min_age,self.max_age,self.n_time)
savings=np.mean(self.infostats_savings,axis=1)
ax.plot(x,savings,label='savings all')
ax.legend()
plt.title('Savings all')
plt.show()
fig,ax=plt.subplots()
x=np.linspace(self.min_age,self.max_age,self.n_time)
savings=np.mean(self.infostats_savings,axis=1)
ax.plot(x,savings_0,label='unemp')
ax.plot(x,savings_1,label='emp')
ax.plot(x,savings_2,label='retired')
plt.title('Savings by emp state')
ax.legend()
plt.show()
fig,ax=plt.subplots()
x=np.linspace(self.min_age,self.max_age,self.n_time)
savings=np.mean(self.sav_actions-20,axis=1)
ax.plot(x[1:],savings[1:],label='savings action')
ax.legend()
plt.title('Saving action')
plt.show()
fig,ax=plt.subplots()
x=np.linspace(self.min_age,self.max_age,self.n_time)
savings=np.mean(self.infostats_savings,axis=1)
ax.plot(x[1:],act_savings_0[1:]-20,label='unemp')
ax.plot(x[1:],act_savings_1[1:]-20,label='emp')
ax.plot(x[1:],act_savings_2[1:]-20,label='retired')
plt.title('Saving action by emp state')
ax.legend()
plt.show()
def plot_emp_by_gender(self,figname=None):
x=np.linspace(self.min_age,self.max_age,self.n_time)
for gender in range(2):
if gender<1:
empstate_ratio=100*np.sum(self.gempstate[:,:,0:3],axis=2)/(np.sum(self.galive[:,0:3],axis=1)[:,None])
genderlabel='miehet'
else:
empstate_ratio=100*np.sum(self.gempstate[:,:,3:6],axis=2)/(np.sum(self.galive[:,3:6],axis=1)[:,None])
genderlabel='naiset'
if figname is not None:
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),stack=True,figname=figname+'_stack')
else:
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),stack=True)
if self.version in set([1,2,3,4]):
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),ylimit=20,stack=False)
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),parent=True,stack=False)
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),unemp=True,stack=False)
if figname is not None:
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),start_from=60,stack=True,figname=figname+'_stack60')
else:
self.plot_states(empstate_ratio,ylabel=self.labels['osuus tilassa x'].format(genderlabel),start_from=60,stack=True)
def plot_parents_in_work(self):
empstate_ratio=100*self.empstate/self.alive
ml=100*self.infostats_mother_in_workforce/self.alive
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.plot(x,ml,label='äitiysvapaa')
ax.plot(x,empstate_ratio[:,6],label='isyysvapaa')
ax.legend()
plt.show()
def plot_spouse(self,figname=None):
x=np.linspace(self.min_age,self.max_age,self.n_time)
fig,ax=plt.subplots()
ax.set_xlabel(self.labels['age'])
spouseratio=self.infostats_puoliso/self.alive[:,0]
ax.set_ylabel('spouses')
ax.plot(x,spouseratio)
if figname is not None:
plt.savefig(figname+'spouses.eps', format='eps')
plt.show()
def plot_unemp(self,unempratio=True,figname=None,grayscale=False):
'''
Plot the unemployment rate (unempratio=True) or the share of unemployed in the population (unempratio=False)
'''
x=np.linspace(self.min_age,self.max_age,self.n_time)
if unempratio:
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(self.empstate,self.alive,unempratio=True)
unempratio_stat=100*self.empstats.unempratio_stats()
if self.language=='Finnish':
labeli='keskimääräinen työttömyysaste '+str(ka_tyottomyysaste)
ylabeli=self.labels['tyottomyysaste']
labeli2='työttömyysaste'
else:
labeli='average unemployment rate '+str(ka_tyottomyysaste)
ylabeli=self.labels['tyottomyysaste']
labeli2='Unemployment rate'
else:
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(self.empstate,self.alive,unempratio=False)
unempratio_stat=100*self.empstats.unemp_stats()
if self.language=='Finnish':
labeli='keskimääräinen työttömien osuus väestöstö '+str(ka_tyottomyysaste)
ylabeli='Työttömien osuus väestöstö [%]'
labeli2='työttömien osuus väestöstö'
else:
labeli='proportion of unemployed '+str(ka_tyottomyysaste)
ylabeli='Proportion of unemployed [%]'
labeli2='proportion of unemployed'
fig,ax=plt.subplots()
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(ylabeli)
ax.plot(x,tyottomyysaste,label=self.labels['malli'])
ax.plot(x,unempratio_stat,ls='--',label=self.labels['havainto'])
ax.legend()
if figname is not None:
plt.savefig(figname+'tyottomyysaste.eps', format='eps')
plt.show()
fig,ax=plt.subplots()
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(ylabeli)
ax.plot(x,unempratio_stat,label=self.labels['havainto'])
ax.legend()
if grayscale:
pal=sns.light_palette("black", 8, reverse=True)
else:
pal=sns.color_palette("hls", self.n_employment) # hls, husl, cubehelix
ax.stackplot(x,tyottomyysaste,colors=pal) #,label=self.labels['malli'])
#ax.plot(x,tyottomyysaste)
plt.show()
fig,ax=plt.subplots()
for gender in range(2):
if gender==0:
leg='Miehet'
gempstate=np.sum(self.gempstate[:,:,0:3],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
color='darkgray'
else:
gempstate=np.sum(self.gempstate[:,:,3:6],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,3:6],1)
leg='Naiset'
color='black'
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(gempstate,alive,unempratio=unempratio)
ax.plot(x,tyottomyysaste,color=color,label='{} {}'.format(labeli2,leg))
# observed-data line style (dashed in both grayscale and colour mode)
lstyle='--'
if self.version in set([1,2,3,4]):
if unempratio:
ax.plot(x,100*self.empstats.unempratio_stats(g=1),ls=lstyle,label=self.labels['havainto, naiset'])
ax.plot(x,100*self.empstats.unempratio_stats(g=2),ls=lstyle,label=self.labels['havainto, miehet'])
labeli='keskimääräinen työttömyysaste '+str(ka_tyottomyysaste)
ylabeli=self.labels['tyottomyysaste']
else:
ax.plot(x,100*self.empstats.unemp_stats(g=1),ls=lstyle,label=self.labels['havainto, naiset'])
ax.plot(x,100*self.empstats.unemp_stats(g=2),ls=lstyle,label=self.labels['havainto, miehet'])
labeli='keskimääräinen työttömien osuus väestöstö '+str(ka_tyottomyysaste)
ylabeli=self.labels['tyottomien osuus']
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(ylabeli)
if False:
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
ax.legend()
if figname is not None:
plt.savefig(figname+'tyottomyysaste_spk.eps', format='eps')
plt.show()
def plot_parttime_ratio(self,grayscale=True,figname=None):
'''
Plot the share of part-time workers in the population
'''
x=np.linspace(self.min_age,self.max_age,self.n_time)
labeli2='Osatyötä tekevien osuus'
fig,ax=plt.subplots()
for gender in range(2):
if gender==0:
leg='Miehet'
g='men'
pstyle='-'
else:
g='women'
leg='Naiset'
pstyle=''
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_gempratios(gender=g,unempratio=False)
ax.plot(x,osatyoaste,'{}'.format(pstyle),label='{} {}'.format(labeli2,leg))
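# Hard-coded observed part-time shares by age (o_x ages; f_osatyo women, m_osatyo men)
# used as the comparison series.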
o_x=np.array([20,30,40,50,60,70])
f_osatyo=np.array([55,21,16,12,18,71])
m_osatyo=np.array([32,8,5,4,9,65])
if grayscale:
ax.plot(o_x,f_osatyo,ls='--',label=self.labels['havainto, naiset'])
ax.plot(o_x,m_osatyo,ls='--',label=self.labels['havainto, miehet'])
else:
ax.plot(o_x,f_osatyo,label=self.labels['havainto, naiset'])
ax.plot(o_x,m_osatyo,label=self.labels['havainto, miehet'])
labeli='osatyöaste '#+str(ka_tyottomyysaste)
ylabeli='Osatyön osuus työnteosta [%]'
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(ylabeli)
if False:
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
ax.legend()
if figname is not None:
plt.savefig(figname+'osatyoaste_spk.eps', format='eps')
plt.show()
def plot_unemp_shares(self):
empstate_ratio=100*self.empstate/self.alive
self.plot_states(empstate_ratio,ylabel='Osuus tilassa [%]',onlyunemp=True,stack=True)
def plot_group_emp(self,grayscale=False,figname=None):
fig,ax=plt.subplots()
# observed-data line style (dashed in both grayscale and colour mode)
lstyle='--'
for gender in range(2):
if gender==0:
leg='Miehet'
gempstate=np.sum(self.gempstate[:,:,0:3],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,0:3],1)
color='darkgray'
else:
gempstate=np.sum(self.gempstate[:,:,3:6],axis=2)
alive=np.zeros((self.galive.shape[0],1))
alive[:,0]=np.sum(self.galive[:,3:6],1)
leg='Naiset'
color='black'
tyollisyysaste,osatyoaste,tyottomyysaste,ka_tyottomyysaste=self.comp_empratios(gempstate,alive)
x=np.linspace(self.min_age,self.max_age,self.n_time)
ax.plot(x,tyollisyysaste,color=color,label='työllisyysaste {}'.format(leg))
#ax.plot(x,tyottomyysaste,label='työttömyys {}'.format(leg))
emp_statsratio=100*self.empstats.emp_stats(g=2)
ax.plot(x,emp_statsratio,ls=lstyle,color='darkgray',label=self.labels['havainto, miehet'])
emp_statsratio=100*self.empstats.emp_stats(g=1)
ax.plot(x,emp_statsratio,ls=lstyle,color='black',label=self.labels['havainto, naiset'])
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(self.labels['ratio'])
if False:
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
ax.legend()
if figname is not None:
plt.savefig(figname+'tyollisyysaste_spk.eps', format='eps')
plt.show()
def plot_pensions(self):
if self.version in set([1,2,3,4]):
self.plot_ratiostates(self.stat_pension,ylabel='Tuleva eläke [e/v]',stack=False)
def plot_career(self):
if self.version in set([1,2,3,4]):
self.plot_ratiostates(self.stat_tyoura,ylabel='Työuran pituus [v]',stack=False)
def plot_ratiostates(self,statistic,ylabel='',ylimit=None, show_legend=True, parent=False,\
unemp=False,start_from=None,stack=False,no_ve=False,figname=None,emp=False,oa_unemp=False):
self.plot_states(statistic/self.empstate,ylabel=ylabel,ylimit=ylimit,no_ve=no_ve,\
show_legend=show_legend,parent=parent,unemp=unemp,start_from=start_from,\
stack=stack,figname=figname,emp=emp,oa_unemp=oa_unemp)
def count_putki(self,emps=None):
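# Person-years spent in the unemployment pipeline (state 4), scaled to population level with
# the demographic weights; uses the stored empstate unless an emps matrix is given.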
if emps is None:
piped=np.reshape(self.empstate[:,4],(self.empstate[:,4].shape[0],1))
demog2=self.empstats.get_demog()
putkessa=self.timestep*np.nansum(piped[1:]/self.alive[1:]*demog2[1:])
return putkessa
else:
piped=np.reshape(emps[:,4],(emps[:,4].shape[0],1))
demog2=self.empstats.get_demog()
alive=np.sum(emps,axis=1,keepdims=True)
putkessa=self.timestep*np.nansum(piped[1:]/alive[1:]*demog2[1:])
return putkessa
def plot_states(self,statistic,ylabel='',ylimit=None,show_legend=True,parent=False,unemp=False,no_ve=False,
start_from=None,stack=True,figname=None,yminlim=None,ymaxlim=None,
onlyunemp=False,reverse=False,grayscale=False,emp=False,oa_unemp=False):
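# Column layout of `statistic` in versions 1-4: 0 unemployed, 1 employed, 2 old-age pension,
# 3 disability pension, 4 unemployment pipeline, 5 maternity leave, 6 paternity leave,
# 7 home-care allowance, 8 retirement+part-time work, 9 retirement+full-time work,
# 10 part-time work, 11 outside labour force, 12 student/army, 13 labour-market subsidy, 14 army.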
if start_from is None:
x=np.linspace(self.min_age,self.max_age,self.n_time)
else:
x_n = self.max_age-start_from+1
x_t = int(np.round((x_n-1)*self.inv_timestep))+2
x=np.linspace(start_from,self.max_age,x_t)
#x=np.linspace(start_from,self.max_age,self.n_time)
statistic=statistic[self.map_age(start_from):]
ura_emp=statistic[:,1]
ura_ret=statistic[:,2]
ura_unemp=statistic[:,0]
if self.version in set([1,2,3,4]):
ura_disab=statistic[:,3]
ura_pipe=statistic[:,4]
ura_mother=statistic[:,5]
ura_dad=statistic[:,6]
ura_kht=statistic[:,7]
ura_vetyo=statistic[:,9]
ura_veosatyo=statistic[:,8]
ura_osatyo=statistic[:,10]
ura_outsider=statistic[:,11]
ura_student=statistic[:,12]
ura_tyomarkkinatuki=statistic[:,13]
ura_army=statistic[:,14]
else:
ura_osatyo=0 #statistic[:,3]
if no_ve:
ura_ret[-2:-1]=None
fig,ax=plt.subplots()
if stack:
if grayscale:
pal=sns.light_palette("black", 8, reverse=True)
else:
pal=sns.color_palette("hls", self.n_employment) # hls, husl, cubehelix
reverse=True
if parent:
if self.version in set([1,2,3,4]):
ax.stackplot(x,ura_mother,ura_dad,ura_kht,
labels=('äitiysvapaa','isyysvapaa','khtuki'), colors=pal)
elif unemp:
if self.version in set([1,2,3,4]):
ax.stackplot(x,ura_unemp,ura_pipe,ura_student,ura_outsider,ura_tyomarkkinatuki,
labels=('tyött','putki','opiskelija','ulkona','tm-tuki'), colors=pal)
else:
ax.stackplot(x,ura_unemp,labels=('tyött'), colors=pal)
elif onlyunemp:
if self.version in set([1,2,3,4]):
#urasum=np.nansum(statistic[:,[0,4,11,13]],axis=1)/100
urasum=np.nansum(statistic[:,[0,4,13]],axis=1)/100
osuus=(1.0-np.array([0.84,0.68,0.62,0.58,0.57,0.55,0.53,0.50,0.29]))*100
xx=np.array([22.5,27.5,32.5,37.5,42.5,47.5,52.5,57.5,62.5])
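# Observed comparison series (hard-coded by 5-year age group), drawn as a black line
# over the stacked unemployment composition.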
#ax.stackplot(x,ura_unemp/urasum,ura_pipe/urasum,ura_outsider/urasum,ura_tyomarkkinatuki/urasum,
# labels=('ansiosidonnainen','lisäpäivät','työvoiman ulkopuolella','tm-tuki'), colors=pal)
ax.stackplot(x,ura_unemp/urasum,ura_pipe/urasum,ura_tyomarkkinatuki/urasum,
labels=('ansiosidonnainen','lisäpäivät','tm-tuki'), colors=pal)
ax.plot(xx,osuus,color='k')
else:
ax.stackplot(x,ura_unemp,labels=('tyött'), colors=pal)
else:
if self.version in set([1,2,3,4]):
#ax.stackplot(x,ura_emp,ura_osatyo,ura_vetyo,ura_veosatyo,ura_unemp,ura_tyomarkkinatuki,ura_pipe,ura_disab,ura_mother,ura_dad,ura_kht,ura_ret,ura_student,ura_outsider,ura_army,
# labels=('työssä','osatyö','ve+työ','ve+osatyö','työtön','tm-tuki','työttömyysputki','tk-eläke','äitiysvapaa','isyysvapaa','kh-tuki','vanhuuseläke','opiskelija','työvoiman ulkop.','armeijassa'),
# colors=pal)
ax.stackplot(x,ura_emp,ura_osatyo,ura_vetyo,ura_veosatyo,ura_unemp,ura_tyomarkkinatuki,ura_pipe,ura_ret,ura_disab,ura_mother,ura_dad,ura_kht,ura_student,ura_outsider,ura_army,
labels=('työssä','osatyö','ve+työ','ve+osatyö','työtön','tm-tuki','työttömyysputki','vanhuuseläke','tk-eläke','äitiysvapaa','isyysvapaa','kh-tuki','opiskelija','työvoiman ulkop.','armeijassa'),
colors=pal)
else:
#ax.stackplot(x,ura_emp,ura_osatyo,ura_unemp,ura_ret,
# labels=('työssä','osa-aikatyö','työtön','vanhuuseläke'), colors=pal)
ax.stackplot(x,ura_emp,ura_unemp,ura_ret,
labels=('työssä','työtön','vanhuuseläke'), colors=pal)
if start_from is None:
ax.set_xlim(self.min_age,self.max_age)
else:
ax.set_xlim(start_from,self.max_age)
if ymaxlim is None:
ax.set_ylim(0, 100)
else:
ax.set_ylim(yminlim,ymaxlim)
else:
if parent:
if self.version in set([1,2,3,4]):
ax.plot(x,ura_mother,label='äitiysvapaa')
ax.plot(x,ura_dad,label='isyysvapaa')
ax.plot(x,ura_kht,label='khtuki')
elif unemp:
ax.plot(x,ura_unemp,label='tyött')
if self.version in set([1,2,3,4]):
ax.plot(x,ura_tyomarkkinatuki,label='tm-tuki')
ax.plot(x,ura_student,label='student')
ax.plot(x,ura_outsider,label='outsider')
ax.plot(x,ura_pipe,label='putki')
elif oa_unemp:
ax.plot(x,ura_unemp,label='tyött')
if self.version in set([1,2,3,4]):
ax.plot(x,ura_tyomarkkinatuki,label='tm-tuki')
ax.plot(x,ura_student,label='student')
ax.plot(x,ura_outsider,label='outsider')
ax.plot(x,ura_pipe,label='putki')
ax.plot(x,ura_osatyo,label='osa-aika')
elif emp:
ax.plot(x,ura_emp,label='työssä')
#if self.version in set([1,2,3,4]):
ax.plot(x,ura_osatyo,label='osatyö')
else:
ax.plot(x,ura_unemp,label='tyött')
ax.plot(x,ura_ret,label='eläke')
ax.plot(x,ura_emp,label='työ')
if self.version in set([1,2,3,4]):
ax.plot(x,ura_osatyo,label='osatyö')
ax.plot(x,ura_disab,label='tk')
ax.plot(x,ura_pipe,label='putki')
ax.plot(x,ura_tyomarkkinatuki,label='tm-tuki')
ax.plot(x,ura_mother,label='äitiysvapaa')
ax.plot(x,ura_dad,label='isyysvapaa')
ax.plot(x,ura_kht,label='khtuki')
ax.plot(x,ura_vetyo,label='ve+työ')
ax.plot(x,ura_veosatyo,label='ve+osatyö')
ax.plot(x,ura_student,label='student')
ax.plot(x,ura_outsider,label='outsider')
ax.plot(x,ura_army,label='armeijassa')
ax.set_xlabel(self.labels['age'])
ax.set_ylabel(ylabel)
if show_legend:
if not reverse:
lgd=ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
handles, labels = ax.get_legend_handles_labels()
lgd=ax.legend(handles[::-1], labels[::-1], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
if ylimit is not None:
ax.set_ylim([0,ylimit])
#fig.tight_layout()
if figname is not None:
if show_legend:
plt.savefig(figname,bbox_inches='tight',bbox_extra_artists=(lgd,), format='eps')
else:
plt.savefig(figname,bbox_inches='tight', format='eps')
plt.show()
def plot_toe(self):
if self.version in set([1,2,3,4]):
self.plot_ratiostates(self.stat_toe,'työssäolo-ehdon pituus 28 kk aikana [v]',stack=False)
def plot_sal(self):
self.plot_ratiostates(self.salaries_emp,'Keskipalkka [e/v]',stack=False)
def plot_moved(self):
siirtyneet_ratio=self.siirtyneet/self.alive*100
self.plot_states(siirtyneet_ratio,ylabel='Siirtyneet tilasta',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(siirtyneet_ratio,1))))
pysyneet_ratio=self.pysyneet/self.alive*100
self.plot_states(pysyneet_ratio,ylabel='Pysyneet tilassa',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(pysyneet_ratio,1))))
siirtyneet_ratio=self.siirtyneet_det[:,:,1]/self.alive*100
self.plot_states(siirtyneet_ratio,ylabel='Siirtyneet työhön tilasta',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(siirtyneet_ratio,1))))
siirtyneet_ratio=self.siirtyneet_det[:,:,4]/self.alive*100
self.plot_states(siirtyneet_ratio,ylabel='Siirtyneet putkeen tilasta',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(siirtyneet_ratio,1))))
siirtyneet_ratio=self.siirtyneet_det[:,:,0]/self.alive*100
self.plot_states(siirtyneet_ratio,ylabel='Siirtyneet työttömäksi tilasta',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(siirtyneet_ratio,1))))
siirtyneet_ratio=self.siirtyneet_det[:,:,13]/self.alive*100
self.plot_states(siirtyneet_ratio,ylabel='Siirtyneet tm-tuelle tilasta',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(siirtyneet_ratio,1))))
siirtyneet_ratio=self.siirtyneet_det[:,:,10]/self.alive*100
self.plot_states(siirtyneet_ratio,ylabel='Siirtyneet osa-aikatyöhön tilasta',stack=True,
yminlim=0,ymaxlim=min(100,1.1*np.nanmax(np.cumsum(siirtyneet_ratio,1))))
# def plot_army(self):
# x=np.linspace(self.min_age,self.max_age,self.n_time)
# fig,ax=plt.subplots()
# ax.plot(x,100*self.empstate[:,14]/self.alive[:,0],label='armeijassa ja siviilipalveluksessa olevat')
# emp_statsratio=100*self.army_stats()
# ax.plot(x,emp_statsratio,label='havainto')
# ax.set_xlabel(self.labels['age'])
# ax.set_ylabel(self.labels['ratio'])
# ax.legend()
# plt.show()
#
# def plot_group_army(self):
# fig,ax=plt.subplots()
# for gender in range(2):
# if gender==0:
# leg='Armeija Miehet'
# opiskelijat=np.sum(self.gempstate[:,14,0:3],axis=1)
# alive=np.zeros((self.galive.shape[0],1))
# alive[:,0]=np.sum(self.galive[:,0:3],1)
# else:
# leg='Armeija Naiset'
# opiskelijat=np.sum(self.gempstate[:,14,3:6],axis=1)
# alive=np.zeros((self.galive.shape[0],1))
# alive[:,0]=np.sum(self.galive[:,3:6],1)
#
# opiskelijat=np.reshape(opiskelijat,(self.galive.shape[0],1))
# osuus=100*opiskelijat/alive
# x=np.linspace(self.min_age,self.max_age,self.n_time)
# ax.plot(x,osuus,label=leg)
#
# emp_statsratio=100*self.army_stats(g=1)
# ax.plot(x,emp_statsratio,label=self.labels['havainto, naiset'])
# emp_statsratio=100*self.army_stats(g=2)
# ax.plot(x,emp_statsratio,label=self.labels['havainto, miehet'])
# ax.set_xlabel(self.labels['age'])
# ax.set_ylabel(self.labels['ratio'])
# ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# plt.show()
#
def plot_ave_stay(self):
self.plot_ratiostates(self.time_in_state,ylabel='Ka kesto tilassa',stack=False)
fig,ax=plt.subplots()
x=np.linspace(self.min_age,self.max_age,self.n_time)
plt.plot(x,self.time_in_state[:,1]/self.empstate[:,1])
ax.set_xlabel('Aika')
ax.set_ylabel('Ka kesto työssä')
plt.show()
fig,ax=plt.subplots()
ax.set_xlabel('Aika')
ax.set_ylabel('ka kesto työttömänä')
plt.plot(x,self.time_in_state[:,0]/self.empstate[:,0])
plt.show()
def plot_ove(self):
self.plot_ratiostates(self.infostats_ove,ylabel='Ove',stack=False)
def plot_reward(self):
self.plot_ratiostates(self.rewstate,ylabel='Keskireward tilassa',stack=False)
self.plot_ratiostates(self.rewstate,ylabel='Keskireward tilassa',stack=False,no_ve=True)
self.plot_ratiostates(self.rewstate,ylabel='Keskireward tilassa',stack=False,oa_unemp=True)
x=np.linspace(self.min_age,self.max_age,self.n_time)
total_reward=np.sum(self.rewstate,axis=1)
fig,ax=plt.subplots()
ax.plot(x,total_reward)
ax.set_xlabel('Aika')
ax.set_ylabel('Koko reward tilassa')
ax.legend()
plt.show()
def vector_to_array(self,x):
return x[:,None]
def comp_scaled_consumption(self,x0,averaged=False,t0=1):
'''
Computes discounted actual reward at each time point
with a given scaling x
averaged determines whether the value is averaged over time or not
'''
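# Per-capita utility u[t] of (1+x)-scaled net incomes is accumulated over individuals,
# the final period is weighted by poprewstate/v0 as a life-expectancy correction, and the
# result is discounted backwards with gamma.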
x=x0[0]
u=np.zeros(self.n_time)
for k in range(self.n_pop):
#g=self.infostats_group[k]
for t in range(1,self.n_time-1):
age=t+self.min_age
income=self.infostats_poptulot_netto[t,k]
employment_state=self.popempstate[t,k]
v,_=self.env.log_utility((1+x)*income,employment_state,age)
if np.isnan(v):
print('NaN',v,income,employment_state,age)
v=0
u[t]+=v
#print(age,v)
t=self.n_time-1
age=t+self.min_age
income=self.infostats_poptulot_netto[t,k]
employment_state=self.popempstate[t,k]
v0,_=self.env.log_utility(income,employment_state,age)
factor=self.poprewstate[t,k]/v0 # life expectancy
v,_=self.env.log_utility((1+x)*income,employment_state,age)
if np.isnan(v):
print('NaN',v,income,employment_state,age)
if np.isnan(factor):
print('NaN',factor,v0)
#print(age,v*factor,factor)
u[t]+=v*factor
if np.isnan(u[t]):
print('NaN',age,v,v*factor,factor,u[t],income,employment_state)
u=u/self.n_pop
w=np.zeros(self.n_time)
w[-1]=u[-1]
for t in range(self.n_time-2,0,-1):
w[t]=u[t]+self.gamma*w[t+1]
if averaged:
ret=np.mean(w[t0:])
else:
ret=w[t0]
if np.isnan(ret):
print(u,w)
u=np.zeros(self.n_time)
for k in range(self.n_pop):
#g=self.infostats_group[k]
for t in range(1,self.n_time-1):
age=t+self.min_age
income=self.infostats_poptulot_netto[t,k]
employment_state=self.popempstate[t,k]
v,_=self.env.log_utility((1+x)*income,employment_state,age) #,g=g,pinkslip=pinkslip)
#print(t,k,v,income)
u[t]+=v
t=self.n_time-1
age=t-1+self.min_age
income=self.infostats_poptulot_netto[t,k]
employment_state=self.popempstate[t,k]
v0,_=self.env.log_utility(income,employment_state,age) #,g=g,pinkslip=pinkslip)
factor=self.poprewstate[t,k]/v0 # life expectancy
v,_=self.env.log_utility((1+x)*income,employment_state,age) #,g=g,pinkslip=pinkslip)
#print(t,k,v,income)
u[t]+=v*factor
#print(x,ret)
return ret
def comp_presentvalue(self):
'''
Computes the discounted present value of realized rewards for each individual,
evaluated backwards in time as u[t] = r[t] + gamma*u[t+1]
'''
u=np.zeros((self.n_time,self.n_pop))
u[self.n_time-1,:]=self.poprewstate[self.n_time-1,:]
for t in range(self.n_time-2,-1,-1):
u[t,:]=self.poprewstate[t,:]+self.gamma*u[t+1,:]
return u
def comp_realoptimrew(self,discountfactor=None):
'''
Computes discounted actual reward at each time point
'''
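# Backward-discount each individual's realized reward stream, then average over the
# population and over time steps 1..T-1.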
if discountfactor is None:
discountfactor=self.gamma
realrew=np.zeros(self.n_time)
for k in range(self.n_pop):
prew=np.zeros(self.n_time)
prew[-1]=self.poprewstate[-1,k]
for t in range(self.n_time-2,0,-1):
prew[t]=discountfactor*prew[t+1]+self.poprewstate[t,k]
realrew+=prew
realrew/=self.n_pop
realrew=np.mean(realrew[1:])
return realrew
def get_reward(self,discounted=False):
return self.comp_total_reward(output=False,discounted=discounted) #np.sum(self.rewstate)/self.n_pop
def comp_total_reward(self,output=False,discounted=False,discountfactor=None):
if not discounted:
total_reward=np.sum(self.rewstate)
rr=total_reward/self.n_pop
disco='undiscounted'
else:
#discount=discountfactor**np.arange(0,self.n_time*self.timestep,self.timestep)[:,None]
#total_reward=np.sum(self.poprewstate*discount)
rr=self.comp_realoptimrew(discountfactor) #total_reward/self.n_pop
disco='discounted'
#print('total rew1 {} rew2 {}'.format(total_reward,np.sum(self.poprewstate)))
#print('ave rew1 {} rew2 {}'.format(rr,np.mean(np.sum(self.poprewstate,axis=0))))
#print('shape rew2 {} pop {} alive {}'.format(self.poprewstate.shape,self.n_pop,self.alive[0]))
if output:
print(f'Ave {disco} reward {rr}')
return rr
def comp_total_netincome(self,output=True):
rr=np.sum(self.infostats_tulot_netto)/self.n_pop/(self.n_time+21.0) # 21 approximates the time in pension
eq=np.sum(self.infostats_equivalent_income)/self.n_pop/(self.n_time+21.0) # 21 approximates the time in pension
if output:
print('Ave net income {} Ave equivalent net income {}'.format(rr,eq))
return rr,eq
def plot_wage_reduction(self):
self.plot_ratiostates(self.stat_wage_reduction,ylabel='wage-reduction tilassa',stack=False)
self.plot_ratiostates(self.stat_wage_reduction,ylabel='wage-reduction tilassa',stack=False,unemp=True)
self.plot_ratiostates(self.stat_wage_reduction,ylabel='wage-reduction tilassa',stack=False,emp=True)
#self.plot_ratiostates(np.log(1.0+self.stat_wage_reduction),ylabel='log 5wage-reduction tilassa',stack=False)
self.plot_ratiostates(np.sum(self.stat_wage_reduction_g[:,:,0:3],axis=2),ylabel='wage-reduction tilassa naiset',stack=False)
self.plot_ratiostates(np.sum(self.stat_wage_reduction_g[:,:,3:6],axis=2),ylabel='wage-reduction tilassa miehet',stack=False)
self.plot_ratiostates(np.sum(self.stat_wage_reduction_g[:,:,0:3],axis=2),ylabel='wage-reduction tilassa, naiset',stack=False,unemp=True)
self.plot_ratiostates(np.sum(self.stat_wage_reduction_g[:,:,3:6],axis=2),ylabel='wage-reduction tilassa, miehet',stack=False,unemp=True)
self.plot_ratiostates(np.sum(self.stat_wage_reduction_g[:,:,0:3],axis=2),ylabel='wage-reduction tilassa, naiset',stack=False,emp=True)
self.plot_ratiostates(np.sum(self.stat_wage_reduction_g[:,:,3:6],axis=2),ylabel='wage-reduction tilassa, miehet',stack=False,emp=True)
def plot_distrib(self,label='',plot_emp=False,plot_bu=False,ansiosid=False,tmtuki=False,putki=False,outsider=False,max_age=500,laaja=False,max=4,figname=None):
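# Convenience wrapper: build unemployment-spell and re-employment distributions with the
# given filters, then dispatch to the matching histogram plotters.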
unemp_distrib,emp_distrib,unemp_distrib_bu=self.comp_empdistribs(ansiosid=ansiosid,tmtuki=tmtuki,putki=putki,outsider=outsider,max_age=max_age,laaja=laaja)
tyoll_distrib,tyoll_distrib_bu=self.comp_tyollistymisdistribs(ansiosid=ansiosid,tmtuki=tmtuki,putki=putki,outsider=outsider,max_age=max_age,laaja=laaja)
if plot_emp:
self.plot_empdistribs(emp_distrib)
if plot_bu:
self.plot_unempdistribs_bu(unemp_distrib_bu)
else:
self.plot_unempdistribs(unemp_distrib,figname=figname)
#self.plot_tyolldistribs(unemp_distrib,tyoll_distrib,tyollistyneet=False)
if plot_bu:
self.plot_tyolldistribs_both_bu(unemp_distrib_bu,tyoll_distrib_bu,max=max)
else:
self.plot_tyolldistribs_both(unemp_distrib,tyoll_distrib,max=max,figname=figname)
def plot_irr(self,figname=None):
self.comp_aggirr()
self.comp_irr()
self.plot_irrdistrib(self.infostats_irr,figname=figname)
def plot_irrdistrib(self,irr_distrib,grayscale=True,figname=None):
if grayscale:
plt.style.use('grayscale')
plt.rcParams['figure.facecolor'] = 'white' # Or any suitable colour...
print('Nans {} out of {}'.format(np.sum(np.isnan(irr_distrib)),irr_distrib.shape[0]))
fig,ax=plt.subplots()
ax.set_xlabel('Sisäinen tuottoaste [%]')
lbl=ax.set_ylabel('Taajuus')
#ax.set_yscale('log')
#max_time=50
#nn_time = int(np.round((max_time)*self.inv_timestep))+1
x=np.linspace(-5,5,100)
scaled,x2=np.histogram(irr_distrib,x)
#scaled=scaled/np.nansum(np.abs(irr_distrib))
ax.bar(x2[1:-1],scaled[1:],align='center')
if figname is not None:
plt.savefig(figname+'irrdistrib.eps', bbox_inches='tight', format='eps')
plt.show()
fig,ax=plt.subplots()
ax.hist(irr_distrib,bins=40)
plt.show()
print('Keskimääräinen irr {:.3f} %'.format(np.nanmean(irr_distrib)))
print('Mediaani irr {:.3f} %'.format(np.nanmedian(irr_distrib)))
count = (irr_distrib < 0).sum(axis=0)
percent = np.true_divide(count,irr_distrib.shape[0])
print('Osuus irr<0 {} %:lla'.format(100*percent))
count = (irr_distrib <=-50).sum(axis=0)
percent = np.true_divide(count,irr_distrib.shape[0])
print('Osuus irr<-50 {} %:lla'.format(100*percent))
count = (np.sum(self.infostats_paid_tyel_pension,axis=0)<0.1).sum()
percent = np.true_divide(count,irr_distrib.shape[0])
print('Osuus eläke ei maksussa {} %:lla'.format(100*percent))
count1 = np.sum(self.popempstate[0:self.map_age(63),:]==15)
count = (np.sum(self.infostats_paid_tyel_pension,axis=0)<0.1).sum()-count1
percent = np.true_divide(count,irr_distrib.shape[0])
print('Osuus eläke ei maksussa, ei kuollut {} %:lla'.format(100*percent))
count = np.sum(self.popempstate==15)
percent = np.true_divide(count,irr_distrib.shape[0])
print('Osuus kuolleet {} %:lla'.format(100*percent))
def get_initial_reward(self,startage=None):
real=self.comp_presentvalue()
if startage is None:
startage=self.min_age
age=max(1,startage-self.min_age)
realage=max(self.min_age+1,startage)
print('Initial discounted reward at age {}: {}'.format(realage,np.mean(real[age,:])))
return np.mean(real[age,:])
def plot_stats(self,grayscale=False,figname=None):
if grayscale:
plt.style.use('grayscale')
plt.rcParams['figure.facecolor'] = 'white' # Or any suitable colour...
self.comp_total_reward()
self.comp_total_reward(discounted=True)
self.comp_total_netincome()
#self.plot_rewdist()
#self.plot_emp(figname=figname)
if self.version in set([1,2,3,4]):
q=self.comp_budget(scale=True)
q_stat=self.stat_budget()
df1 = pd.DataFrame.from_dict(q,orient='index',columns=['e/v'])
df2 = pd.DataFrame.from_dict(q_stat,orient='index',columns=['toteuma'])
df=df1.copy()
df['toteuma']=df2['toteuma']
df['ero']=df1['e/v']-df2['toteuma']
print('Rahavirrat skaalattuna väestötasolle')
print(tabulate(df, headers='keys', tablefmt='psql', floatfmt=",.2f"))
q=self.comp_participants(scale=True)
q_stat=self.stat_participants_2018()
q_days=self.stat_days_2018()
df1 = pd.DataFrame.from_dict(q,orient='index',columns=['arvio (htv)'])
df2 = pd.DataFrame.from_dict(q_stat,orient='index',columns=['toteuma'])
df3 = pd.DataFrame.from_dict(q_days,orient='index',columns=['htv_tot'])
df=df1.copy()
df['toteuma (kpl)']=df2['toteuma']
df['toteuma (htv)']=df3['htv_tot']
df['ero (htv)']=df['arvio (htv)']-df['toteuma (htv)']
print('Henkilöitä tiloissa skaalattuna väestötasolle')
print(tabulate(df, headers='keys', tablefmt='psql', floatfmt=",.0f"))
else:
q=self.comp_participants(scale=True)
q_stat=self.stat_participants_2018()
q_days=self.stat_days_2018()
df1 = pd.DataFrame.from_dict(q,orient='index',columns=['arvio (htv)'])
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 11:16:04 2017
@author: <NAME> (<EMAIL>)
@brief: MSTD is a generic and efficient method for identifying multi-scale topological domains
from symmetric Hi-C and other high-resolution asymmetric promoter-capture Hi-C datasets
@version 0.0.2
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from colormap import Color, Colormap
#Data=matrix_data; distance=MDHD; win_n=5
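# Domain calling along the Hi-C diagonal: (1) local density per bin, (2) distance to the
# nearest denser bin (density-peak clustering idea), (3) bins that are both dense and far
# from any denser bin become domain centers, remaining bins are attached to them, and
# adjacent domain boundaries are refined at the local density minimum.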
def _domain_only_diagonal(Data,win_n,distance):
Dsize=Data.shape[0]
# Step 1.1: local contact density around each diagonal bin (window of win_n bins)
pdensity=np.zeros(Dsize)
DEN_Dict={}
for ip in range(Dsize):
begin_i=ip-win_n+1
end_i=ip+win_n-1
if (begin_i<=0) | (end_i>=Dsize-1):
if begin_i<0:
begin_i=0
if end_i>Dsize-1:
end_i=Dsize-1
pdensity[ip]=np.mean(Data[begin_i:ip+1,:][:,ip:end_i+1])
DEN_Dict[ip]=pdensity[ip]
else:
pdensity[ip]= pdensity[ip-1] + (-np.sum(Data[begin_i-1:ip,ip-1])-np.sum(Data[begin_i-1,ip:end_i])
+np.sum(Data[ip,ip:end_i+1])+ np.sum(Data[begin_i:ip,end_i]))/(win_n*win_n)
DEN_Dict[ip]=pdensity[ip]+np.random.random(1)/1000
# Step 1.2: for each bin, distance to the nearest bin with a higher density (density-peak style)
Max_step=100
NDP_Dict={}
ASS_Dict={}
for ip in np.arange(0,Dsize):
for step in np.arange(0,max(ip,Dsize-ip)):
if ip-step>=0:
up_point=pdensity[ip-step]
if up_point>pdensity[ip]:
ASS_Dict[ip]=ip-step
break
if ip+step<=Dsize-1:
down_point=pdensity[ip+step]
if down_point>pdensity[ip]:
ASS_Dict[ip]=ip+step
break
if step>Max_step:
ASS_Dict[ip]=ip
break
NDP_Dict[ip]=step
# Step 1.3: select domain centers, attach the remaining bins, and record boundaries in a DataFrame
start={}
end={}
center={}
Thr_den=np.percentile(pdensity,20)
point_assign={}
for temp in DEN_Dict:
point_assign[temp]=0
#class_num=1
join_num=0
#centers=[]
for item in DEN_Dict:
den=DEN_Dict[item]
dist=NDP_Dict[item]
if ((den>Thr_den) & (dist>distance)):
join_num=join_num+1
point_assign[item]=join_num
#class_num=class_num+1
start[join_num]=item
end[join_num]=item
center[join_num]=item
#centers.append(item)
ASS_Dict[item]=item
clures=pd.DataFrame({'Start':start,'End':end,'Cen':center}, columns=['Start','End','Cen'])
old_join_num=0
new_join_num=join_num
while old_join_num!=new_join_num:
old_join_num=join_num
for item in DEN_Dict:
if ((NDP_Dict[item]<=distance)):
if ASS_Dict[item]==item:
continue
fclass=point_assign[ASS_Dict[item]]
if fclass !=0:
mclass= point_assign[item]
if mclass == 0:
temp=center[fclass]
if (DEN_Dict[item]>DEN_Dict[temp]/5):
# check whether this point already lies inside an existing cluster's [Start, End] range
item_class= clures[(item>clures['Start']) & (clures['End']>item)].values
if len(item_class)!=0:
point_assign[item]=point_assign[ASS_Dict[item_class[0][2]]]
else:
#print item
point_assign[item]=point_assign[ASS_Dict[item]]
if item < clures.loc[point_assign[item],'Start']:
clures.loc[point_assign[item],'Start']=item
else:
clures.loc[point_assign[item],'End']=item
join_num=join_num+1
new_join_num=join_num
step=3
for clu in clures.index[:-1:1]:
left=clures.loc[clu,'End']
right=clures.loc[clu+1,'Start']
if (left-step>=0) & (right+step<=Dsize-1):
if left==right-1:
loca=np.argmin(pdensity[left-step:right+step])
newbound=left-step+loca
clures.loc[clu,'End']=newbound
clures.loc[clu+1,'Start']=newbound
return clures
#Data=matrix_data
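# Windowed contact density for every matrix entry (2*win rectangle); entries whose raw value
# exceeds `thr` are stored in DEN_Dict as candidate points. For square (symmetric) matrices
# only entries more than 4*MDHD bins below the diagonal are processed.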
def _generate_density_con(Data,win,thr,MDHD):
Dsize=Data.shape
M_density=np.zeros(Dsize)
DEN_Dict={}
if Dsize[0]==Dsize[1]:
for i in range(Dsize[0]):
for j in range(Dsize[1]):
if i-j>MDHD*4:
begin_i=i-win[0]
begin_j=j-win[1]
end_i=i+win[0]
end_j=j+win[1]
if (begin_i<0)| (begin_j<0)| (end_i>Dsize[0]-1)|(end_j>Dsize[1]-1):
if begin_i<0:
begin_i=0
if begin_j<0:
begin_j=0
if end_i>Dsize[0]-1:
end_i=Dsize[0]-1
if end_j>Dsize[1]-1:
end_j=Dsize[1]-1
M_density[i,j]=np.mean(Data[begin_i:end_i,begin_j:end_j])+np.random.random(1)/1000.0
else:
M_density[i,j]=M_density[i,j-1]+ (-np.sum(Data[begin_i:end_i,begin_j-1])
+np.sum(Data[begin_i:end_i,end_j-1]))/(4*win[0]*win[1])
if Data[i,j]>thr:
DEN_Dict[(i,j)]=M_density[i,j]
else:
for i in range(Dsize[0]):
for j in range(Dsize[1]):
begin_i=i-win[0]
begin_j=j-win[1]
end_i=i+win[0]
end_j=j+win[1]
if (begin_i<0)| (begin_j<0)| (end_i>Dsize[0]-1)|(end_j>Dsize[1]-1):
if begin_i<0:
begin_i=0
if begin_j<0:
begin_j=0
if end_i>Dsize[0]-1:
end_i=Dsize[0]-1
if end_j>Dsize[1]-1:
end_j=Dsize[1]-1
M_density[i,j]=np.mean(Data[begin_i:end_i,begin_j:end_j])+np.random.random(1)/1000.0
else:
M_density[i,j]=M_density[i,j-1]+ (-np.sum(Data[begin_i:end_i,begin_j-1])
+np.sum(Data[begin_i:end_i,end_j-1]))/(4*win[0]*win[1])
if Data[i,j]>thr:
DEN_Dict[(i,j)]=M_density[i,j]
return M_density, DEN_Dict
def _find_highpoints_v2(DEN_Dict,ratio=1):
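# For every candidate point, search outwards ring by ring (up to 50 rings; `ratio` rescales
# the two axes for asymmetric matrices) for the nearest point with a higher density; store
# that distance (NDP_Dict) and the point itself (ASS_Dict).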
Dis=50
NDP_Dict={}
ASS_Dict={}
for item in DEN_Dict:
#item=ASS_Dict[item]; item
NDP_Dict[item]=np.linalg.norm((Dis,Dis*ratio))
ASS_Dict[item]=item
for step in np.arange(1,Dis+1,1):
step_point=[(item[0]+st,item[1]+ra) for st in np.arange(-step,step+1) for ra in np.arange(-step*ratio,step*ratio+1)
if (abs(st)==step or ratio*(step-1)<abs(ra)<=ratio*step)]
step_point=[point for point in step_point if point in DEN_Dict]
distance_index=[(np.linalg.norm(((item[0]-temp[0])*ratio,item[1]-temp[1])),temp) for temp in step_point if DEN_Dict[temp]>DEN_Dict[item]]
distance_index.sort()
for ind in distance_index:
if DEN_Dict[ind[1]]>DEN_Dict[item]:
NDP_Dict[item]=ind[0]
ASS_Dict[item]=ind[1]
break
if len(distance_index)>0:
break
return NDP_Dict, ASS_Dict
def _assign_class(DEN_Dict,NDP_Dict,ASS_Dict,Thr_den,Thr_dis,reso=3):
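# Points that are both dense enough (>= Thr_den) and far enough from any denser point
# (>= Thr_dis) become cluster centers; each center starts a one-bin bounding box
# (upper/bottom/left/right) that is later grown.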
locs=['upper','bottom','left','right','cen_x','cen_y']
point_assign={}
for temp in DEN_Dict:
point_assign[temp]=0
#class_num=1
join_num=0
boundaries=pd.DataFrame()
center=dict()
for item in DEN_Dict:
den=DEN_Dict[item]
dist=NDP_Dict[item]
#value=den*dist
bound=list()
if (den>=Thr_den) and (dist>=Thr_dis):
join_num=join_num+1
point_assign[item]=join_num
center[join_num]=item
#class_num=class_num+1
bound.append(item[0])
bound.append(item[0]+1)
bound.append(item[1])
bound.append(item[1]+1)
bound.append(item[0])
bound.append(item[1])
#for k in range(len(locs)):
# if (k<2) | (k==4):
# bound.append(item[0])
# else:
# bound.append(item[1])
ASS_Dict[item]=item
bound=pd.DataFrame(bound)
boundaries=pd.concat([boundaries,bound.T],axis=0)
boundaries.columns=locs
boundaries.index=np.arange(1,len(boundaries)+1)
Thr_den1=np.percentile(pd.Series(DEN_Dict)
import pytest
import pandas as pd
from pandas.testing import assert_frame_equal
import pypipegraph as ppg
from pathlib import Path
from mbf_genomics import DelayedDataFrame
from mbf_genomics.annotator import Annotator
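# Test doubles: DummyAnnotatable builds a small DelayedDataFrame, and the annotator classes
# below add deterministic columns so that annotation, filtering and copy semantics can be checked.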
def DummyAnnotatable(name):
return DelayedDataFrame(
name,
lambda: pd.DataFrame(
{
"a": ["a", "b", "c", "d"],
"b": [1, 2, 3, 4],
"c": [200.1, 100.2, 400.3, 300.4],
}
),
)
def force_load(ddf):
ppg.JobGeneratingJob("shu", lambda: 55).depends_on(ddf.annotate())
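# Attaching a dummy downstream job to annotate() forces the annotators to actually run
# when the pipegraph executes.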
class SequenceAnnotator(Annotator):
columns = ["sequence"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: range(0, len(df))})
class SequenceAnnotatorDuo(Annotator):
columns = ["sequenceDuo", "rev_sequenceDuo"]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: range(0, len(df)), self.columns[1]: range(len(df), 0, -1)}
)
class SequenceAnnotatorDuoCollision(Annotator):
columns = ["shu", "rev_sequenceDuo"]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: range(0, len(df)), self.columns[1]: range(len(df), 0, -1)}
)
class FixedAnnotator(Annotator):
def __init__(self, column_name, values):
self.columns = [column_name]
self.values = values
def deps(self, ddf):
return ppg.ParameterInvariant(
ddf.name + "_" + self.columns[0], str(self.values)
)
def calc(self, df):
op = open("dummy.txt", "ab")
op.write(b"A")
op.close()
return pd.DataFrame({self.columns[0]: self.values[: len(df)]})
class FixedAnnotator2(Annotator): # used for conflict of annotator class tests
def __init__(self, column_name, values):
self.columns = [column_name]
self.values = values
def deps(self, ddf):
return ppg.ParameterInvariant(
ddf.name + "_" + self.column_name, str(self.values)
)
def annotate(self, annotat):
op = open("dummy.txt", "ab")
op.write(b"A")
op.close()
return pd.DataFrame({self.columns[0]: self.values[: len(annotat)]})
class BrokenAnnoDoesntCallAnnotatorInit(Annotator):
columns = ["shu"]
def calc(self, df):
return pd.DataFrame({self.column_name: range(0, len(df))})
class FakeAnnotator(object):
columns = ["shu"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: range(0, len(df))})
@pytest.mark.usefixtures("new_pipegraph")
class Test_FromOldGenomics:
def test_add_annotator_takes_only_annotators(self):
a = DummyAnnotatable("A")
with pytest.raises(TypeError):
a += 123
def test_non_anno_raises(self):
a = DummyAnnotatable("A")
with pytest.raises(TypeError):
a += FakeAnnotator()
def test_one_column_annotator(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
force_load(a)
ppg.run_pipegraph()
assert (a.df["sequence"] == [0, 1, 2, 3]).all()
def test_two_column_annotator(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotatorDuo()
a.add_annotator(anno)
force_load(a)
ppg.run_pipegraph()
assert (a.df["sequenceDuo"] == [0, 1, 2, 3]).all()
assert (a.df["rev_sequenceDuo"] == [4, 3, 2, 1]).all()
def test_two_differenct_annotators_with_identical_column_names_raise_on_adding(
self
):
a = DummyAnnotatable("A")
anno = SequenceAnnotatorDuo()
a.add_annotator(anno)
anno2 = SequenceAnnotatorDuoCollision()
a.add_annotator(anno2)
force_load(a)
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
def test_annotator_copying_on_filter(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
force_load(even)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [2, 4]).all()
assert (even.df["sequence"] == [1, 3]).all()
def test_annotator_copying_on_filter_two_deep(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
even = a.filter("event", lambda df: df["b"] % 2 == 0)
force_load(even)
second = even.filter("event2", lambda df: df["b"] == 4)
a.add_annotator(anno)
force_load(second)
ppg.run_pipegraph()
assert (second.df["b"] == [4]).all()
assert (second.df["sequence"] == [3]).all()
def test_annotator_copying_on_filter_with_anno(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
even = a.filter("event", lambda df: df["sequence"] % 2 == 0, annotators=[anno])
force_load(even)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [1, 3]).all()
assert (even.df["sequence"] == [0, 2]).all()
def test_no_anno_data_copying_if_no_annotate_dependency(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
def write():
op = open("dummy.txt", "wb")
op.write(b"SHU")
op.close()
ppg.FileGeneratingJob("dummy.txt", write).depends_on(even.load())
ppg.run_pipegraph()
assert (even.df["b"] == [2, 4]).all()
assert "sequence" not in even.df.columns
def test_anno_data_copying_if_add_annotator_dependency(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
def wf():
op = open("dummy.txt", "wb")
op.write(b"SHU")
op.close()
fg = ppg.FileGeneratingJob("dummy.txt", wf)
even.add_annotator(anno)
fg.depends_on(even.add_annotator(anno))
ppg.run_pipegraph()
assert (even.df["b"] == [2, 4]).all()
assert (even.df["sequence"] == [1, 3]).all()
def test_annotator_copying_on_sort_and_top(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
even = a.filter(
"event", lambda df: df.sort_values("b", ascending=False)[:2].index
)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [4, 3]).all()
assert (even.df["sequence"] == [3, 2]).all()
def test_annotator_just_added_to_child(self):
a = DummyAnnotatable("A")
even = a.filter("event", lambda df: df["b"] % 2 == 0)
anno = SequenceAnnotator()
even.add_annotator(anno)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [2, 4]).all()
# after all, we add it anew.
assert (even.df["sequence"] == [0, 1]).all()
assert "sequence" not in a.df.columns
def test_annotator_first_added_to_parent_then_to_child(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
even.add_annotator(anno)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [2, 4]).all()
assert (even.df["sequence"] == [1, 3]).all()
assert (a.df["sequence"] == [0, 1, 2, 3]).all()
def test_annotator_first_added_to_parent_then_to_second_child(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0).filter(
"shu", lambda df: df["b"] == 2
)
even.add_annotator(anno)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [2]).all()
assert (even.df["sequence"] == [1]).all()
assert (a.df["sequence"] == [0, 1, 2, 3]).all()
def test_annotator_first_added_to_child_then_to_parent(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
even = a.filter("event", lambda df: df["b"] % 2 == 0)
even.add_annotator(anno)
force_load(even)
a.add_annotator(anno)
force_load(a)
ppg.run_pipegraph()
assert "sequence" in even.df
assert "sequence" in a.df
def test_annotator_added_after_filtering(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
even = a.filter("event", lambda df: df["b"] % 2 == 0)
a.add_annotator(anno)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [2, 4]).all()
assert (even.df["sequence"] == [1, 3]).all()
assert (a.df["sequence"] == [0, 1, 2, 3]).all()
def test_non_hashable_init__args(self):
a = FixedAnnotator("shu", ["h", "i", "j", "k"])
b = FixedAnnotator("shu", ["h", "i", "j", "k"])
assert a is b
def test_annotator_copying_parent_changed(self, new_pipegraph):
# first run
a = DummyAnnotatable("A")
anno = FixedAnnotator("shu", ("h", "i", "j", "k"))
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
force_load(even)
ppg.run_pipegraph()
assert (even.df["shu"] == ["i", "k"]).all()
assert Path("dummy.txt").read_text() == "A" # so it ran once...
new_pipegraph.new_pipegraph()
a = DummyAnnotatable("A")
anno = FixedAnnotator("shu", ("h", "i", "j", "k"))
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
force_load(even)
ppg.run_pipegraph()
assert (even.df["shu"] == ["i", "k"]).all()
assert Path("dummy.txt").read_text() == "A" # so it was not rerun
new_pipegraph.new_pipegraph()
a = DummyAnnotatable("A")
anno = FixedAnnotator("shu", ("h", "i", "j", "z"))
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
force_load(even)
ppg.run_pipegraph()
assert (even.df["shu"] == ["i", "z"]).all()
assert Path("dummy.txt").read_text() == "AA" # so it was rerun
def test_filter_annotator_copy_nested(self):
# first run
a = DummyAnnotatable("A")
a.write()
anno = FixedAnnotator("shu", ("h", "i", "j", "k"))
anno2 = FixedAnnotator("shaw", ("a1", "b2", "c3", "d4"))
a.add_annotator(anno)
first = a.filter("first", lambda df: (df["a"] == "b") | (df["a"] == "d"))
second = first.filter("second", lambda df: ([True, True]))
third = second.filter("third", lambda df: (df["shu"] == "i"), annotators=[anno])
fourth = first.filter("fourth", lambda df: ([False, True]))
second.write()
fn_4 = fourth.write()[1]
a.add_annotator(anno2)
fourth.add_annotator(anno2)
force_load(first)
force_load(second)
force_load(third)
force_load(fourth)
ppg.run_pipegraph()
assert (first.df["shu"] == ["i", "k"]).all()
assert (first.df["parent_row"] == [1, 3]).all()
assert (first.df["shaw"] == ["b2", "d4"]).all()
assert (second.df["shu"] == ["i", "k"]).all()
assert (second.df["parent_row"] == [1, 3]).all()
assert (second.df["shaw"] == ["b2", "d4"]).all()
assert (third.df["shu"] == ["i"]).all()
assert (third.df["shaw"] == ["b2"]).all()
assert (third.df["parent_row"] == [1]).all()
assert (fourth.df["shu"] == ["k"]).all()
assert (fourth.df["parent_row"] == [3]).all()
assert (fourth.df["shaw"] == ["d4"]).all()
df = pd.read_csv(fn_4, sep="\t")
print(df)
assert (df["shaw"] == ["d4"]).all()
assert_frame_equal(df, fourth.df.reset_index(drop=True), check_less_precise=2)
def test_changing_anno_that_filtering_doesnt_care_about_does_not_retrigger_child_rebuild(
self, new_pipegraph
):
def count():
op = open("dummyZZ.txt", "ab")
op.write(b"A")
op.close()
fg = ppg.FileGeneratingJob("dummyZZ.txt", count)
a = DummyAnnotatable("A")
anno = FixedAnnotator("shu", ("h", "i", "j", "k"))
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
fg.depends_on(even.load())
ppg.run_pipegraph()
Path("dummyZZ.txt").read_text() == "A" # so it ran once...
new_pipegraph.new_pipegraph()
fg = ppg.FileGeneratingJob("dummyZZ.txt", count)
a = DummyAnnotatable("A")
anno = FixedAnnotator("shu", ("h", "i", "j", "z"))
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
fg.depends_on(even.load())
ppg.run_pipegraph()
Path("dummyZZ.txt").read_text() == "A" # so it was not rerun!
pass
def test_same_annotator_call_returns_same_object(self):
anno = FixedAnnotator("shu", ("h", "i", "j", "k"))
anno2 = FixedAnnotator("shu", ("h", "i", "j", "k"))
assert anno is anno2
def test_new_pipeline_invalidates_annotator_cache(self, new_pipegraph):
anno = FixedAnnotator("shu", ("h", "i", "j", "k"))
new_pipegraph.new_pipegraph()
anno2 = FixedAnnotator("shu", ("h", "i", "j", "k"))
assert anno is not anno2
def test_raises_on_same_column_name_differing_parameters(self):
a = DummyAnnotatable("A")
a += FixedAnnotator("shu", ("h", "i", "j", "k"))
with pytest.raises(ValueError):
a += FixedAnnotator("shu", ("h", "i", "j", "h"))
def test_raises_on_same_column_name_different_annotators(self):
a = DummyAnnotatable("A")
a += FixedAnnotator("shu", ("h", "i", "j", "k"))
with pytest.raises(ValueError):
a += FixedAnnotator2("shu", ("h", "i", "j", "k"))
def test_write(self):
a = DummyAnnotatable("A")
anno = FixedAnnotator("shu", ("h", "i", "j", "z"))
a.add_annotator(anno)
a.write(Path("shu.xls").absolute())
ppg.run_pipegraph()
df = pd.read_excel("shu.xls")
assert_frame_equal(df, a.df, check_less_precise=2, check_dtype=False)
@pytest.mark.usefixtures("new_pipegraph")
class TestDynamicAnnotators:
def test_basic(self):
class DA(Annotator):
@property
def columns(self):
return ["DA1-A"]
def deps(self, annotatable):
return ppg.ParameterInvariant(self.columns[0], "hello")
def calc(self, df):
ll = len(df)
return pd.DataFrame({"DA1-A": [0] * ll})
a = DummyAnnotatable("A")
anno = DA()
a.add_annotator(anno)
force_load(a)
ppg.run_pipegraph()
print(a.df)
assert "DA1-A" in a.df.columns
assert (a.df["DA1-A"] == 0).all()
def test_multiple_columns(self):
class DA(Annotator):
@property
def columns(self):
return ["DA2-A", "DA2-B"]
def deps(self, annotatable):
return ppg.ParameterInvariant(self.columns[0], "hello")
def calc(self, df):
ll = len(df)
return pd.DataFrame({"DA2-A": [0] * ll, "DA2-B": [1] * ll})
a = DummyAnnotatable("A")
anno = DA()
a.add_annotator(anno)
force_load(a)
ppg.run_pipegraph()
assert "DA2-A" in a.df.columns
assert (a.df["DA2-A"] == 0).all()
assert "DA2-B" in a.df.columns
assert (a.df["DA2-B"] == 1).all()
assert "DA2-C" not in a.df.columns
def test_two_different_annotators_with_identical_column_names_raise_on_creation(
self
):
a = DummyAnnotatable("A")
columns_called = [False]
class DA(Annotator):
def __init__(self, prefix):
self.prefix = prefix
self.cache_name = prefix
@property
def columns(self):
raise ValueError()
columns_called[0] = True
return ["%s-A" % self.prefix]
def calc(self, df):
ll = len(df)
return pd.DataFrame({"DA1-A": [0] * ll})
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
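# comparisons at the originally-NaN positions yield False in `result`, so the expected
# values built from the filled series are overridden to False at those positions below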
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
import datetime
import pandas as pd
from airflow.models import Variable
from airflow.operators.bash import BashOperator
from airflow.operators.python_operator import PythonOperator
from minio import Minio
from airflow import DAG
DEFAULT_ARGS = {
"owner": "Airflow",
"depends_on_past": False,
"start_date": datetime.datetime(2021, 1, 13),
}
dag = DAG(
"etl_mean_work_last_3_months_att",
default_args=DEFAULT_ARGS,
schedule_interval="@once",
)
data_lake_server = Variable.get("data_lake_server")
data_lake_login = Variable.get("data_lake_login")
data_lake_password = Variable.get("data_lake_password")
client = Minio(
data_lake_server,
access_key=data_lake_login,
secret_key=data_lake_password,
secure=False,
)
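# MinIO connection settings are pulled from Airflow Variables above, keeping credentials
# out of the DAG source itself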
def extract():
# create the structure for the temporary dataframe.
df_working_hours = pd.DataFrame(
data=None, columns=["emp_id", "data", "hora"]
)
# list objects
objects = client.list_objects(
"landing", prefix="working-hours", recursive=True
)
for obj in objects:
print("Downloading file...")
print(obj.bucket_name, obj.object_name.encode("utf-8"))
obj = client.get_object(
obj.bucket_name,
obj.object_name.encode("utf-8"),
)
data = obj.read()
df_ = pd.read_excel(data)
from typing import Iterable
import pandas as pd
from numpy import nan
from pandas._libs.tslibs.timestamps import Timestamp
from pandas._testing import assert_frame_equal
from datacode.portfolio.cumret import cumulate_buy_and_hold_portfolios
from tests.test_data import DataFrameTest
class PortfolioTest(DataFrameTest):
id_var: str = "PERMNO"
date_var: str = "Date"
ret_var: str = "RET"
port_var: str = "Portfolio"
port_date_var: str = "Portfolio Date"
weight_var: str = "Weight"
cum_days: Iterable[int] = (0, 1, 5)
hourly_cum_days: Iterable[float] = (0, 1 / 24, 5 / 24)
daily_port_df = pd.DataFrame(
[
(10516, "1/1/2000", 0.01, 1, "1/1/2000", 2),
(10516, "1/2/2000", 0.02, 1, "1/1/2000", 2),
(10516, "1/3/2000", 0.03, 1, "1/1/2000", 2),
(10516, "1/4/2000", 0.04, 1, "1/1/2000", 2),
(10516, "1/5/2000", 0.01, 2, "1/5/2000", 2),
(10516, "1/6/2000", 0.02, 2, "1/5/2000", 2),
(10516, "1/7/2000", 0.03, 2, "1/5/2000", 2),
(10516, "1/8/2000", 0.04, 2, "1/5/2000", 2),
(10517, "1/1/2000", 0.05, 2, "1/1/2000", 2),
(10517, "1/2/2000", 0.06, 2, "1/1/2000", 2),
(10517, "1/3/2000", 0.07, 2, "1/1/2000", 2),
(10517, "1/4/2000", 0.08, 2, "1/1/2000", 2),
(10517, "1/5/2000", 0.05, 1, "1/5/2000", 2),
(10517, "1/6/2000", 0.06, 1, "1/5/2000", 2),
(10517, "1/7/2000", 0.07, 1, "1/5/2000", 2),
(10517, "1/8/2000", 0.08, 1, "1/5/2000", 2),
(10518, "1/1/2000", 0.11, 1, "1/1/2000", 1),
(10518, "1/2/2000", 0.12, 1, "1/1/2000", 1),
(10518, "1/3/2000", 0.13, 1, "1/1/2000", 1),
(10518, "1/4/2000", 0.14, 1, "1/1/2000", 1),
(10518, "1/5/2000", 0.11, 2, "1/5/2000", 1),
(10518, "1/6/2000", 0.12, 2, "1/5/2000", 1),
(10518, "1/7/2000", 0.13, 2, "1/5/2000", 1),
(10518, "1/8/2000", 0.14, 2, "1/5/2000", 1),
(10519, "1/1/2000", 0.15, 2, "1/1/2000", 1),
(10519, "1/2/2000", 0.16, 2, "1/1/2000", 1),
(10519, "1/3/2000", 0.17, 2, "1/1/2000", 1),
(10519, "1/4/2000", 0.18, 2, "1/1/2000", 1),
(10519, "1/5/2000", 0.15, 1, "1/5/2000", 1),
(10519, "1/6/2000", 0.16, 1, "1/5/2000", 1),
(10519, "1/7/2000", 0.17, 1, "1/5/2000", 1),
(10519, "1/8/2000", 0.18, 1, "1/5/2000", 1),
],
columns=[id_var, date_var, ret_var, port_var, port_date_var, weight_var],
)
daily_port_df[date_var] = pd.to_datetime(daily_port_df[date_var])
daily_port_df[port_date_var] = pd.to_datetime(daily_port_df[port_date_var])
hourly_port_df = pd.DataFrame(
[
(10516, "1/1/2000 01:00:00", 0.01, 1, "1/1/2000 01:00:00", 2),
(10516, "1/1/2000 02:00:00", 0.02, 1, "1/1/2000 01:00:00", 2),
(10516, "1/1/2000 03:00:00", 0.03, 1, "1/1/2000 01:00:00", 2),
(10516, "1/1/2000 04:00:00", 0.04, 1, "1/1/2000 01:00:00", 2),
(10516, "1/1/2000 05:00:00", 0.01, 2, "1/1/2000 05:00:00", 2),
(10516, "1/1/2000 06:00:00", 0.02, 2, "1/1/2000 05:00:00", 2),
(10516, "1/1/2000 07:00:00", 0.03, 2, "1/1/2000 05:00:00", 2),
(10516, "1/1/2000 08:00:00", 0.04, 2, "1/1/2000 05:00:00", 2),
(10517, "1/1/2000 01:00:00", 0.05, 2, "1/1/2000 01:00:00", 2),
(10517, "1/1/2000 02:00:00", 0.06, 2, "1/1/2000 01:00:00", 2),
(10517, "1/1/2000 03:00:00", 0.07, 2, "1/1/2000 01:00:00", 2),
(10517, "1/1/2000 04:00:00", 0.08, 2, "1/1/2000 01:00:00", 2),
(10517, "1/1/2000 05:00:00", 0.05, 1, "1/1/2000 05:00:00", 2),
(10517, "1/1/2000 06:00:00", 0.06, 1, "1/1/2000 05:00:00", 2),
(10517, "1/1/2000 07:00:00", 0.07, 1, "1/1/2000 05:00:00", 2),
(10517, "1/1/2000 08:00:00", 0.08, 1, "1/1/2000 05:00:00", 2),
(10518, "1/1/2000 01:00:00", 0.11, 1, "1/1/2000 01:00:00", 1),
(10518, "1/1/2000 02:00:00", 0.12, 1, "1/1/2000 01:00:00", 1),
(10518, "1/1/2000 03:00:00", 0.13, 1, "1/1/2000 01:00:00", 1),
(10518, "1/1/2000 04:00:00", 0.14, 1, "1/1/2000 01:00:00", 1),
(10518, "1/1/2000 05:00:00", 0.11, 2, "1/1/2000 05:00:00", 1),
(10518, "1/1/2000 06:00:00", 0.12, 2, "1/1/2000 05:00:00", 1),
(10518, "1/1/2000 07:00:00", 0.13, 2, "1/1/2000 05:00:00", 1),
(10518, "1/1/2000 08:00:00", 0.14, 2, "1/1/2000 05:00:00", 1),
(10519, "1/1/2000 01:00:00", 0.15, 2, "1/1/2000 01:00:00", 1),
(10519, "1/1/2000 02:00:00", 0.16, 2, "1/1/2000 01:00:00", 1),
(10519, "1/1/2000 03:00:00", 0.17, 2, "1/1/2000 01:00:00", 1),
(10519, "1/1/2000 04:00:00", 0.18, 2, "1/1/2000 01:00:00", 1),
(10519, "1/1/2000 05:00:00", 0.15, 1, "1/1/2000 05:00:00", 1),
(10519, "1/1/2000 06:00:00", 0.16, 1, "1/1/2000 05:00:00", 1),
(10519, "1/1/2000 07:00:00", 0.17, 1, "1/1/2000 05:00:00", 1),
(10519, "1/1/2000 08:00:00", 0.18, 1, "1/1/2000 05:00:00", 1),
],
columns=[id_var, date_var, ret_var, port_var, port_date_var, weight_var],
)
hourly_port_df[date_var] = pd.to_datetime(hourly_port_df[date_var])
hourly_port_df[port_date_var] = pd.to_datetime(hourly_port_df[port_date_var])
@classmethod
def col_sort_key(cls, col: str) -> int:
col_type_order = {
'EW': 0,
'VW': 1,
'Stderr': 2,
'Count': 3,
}
if col == cls.port_var:
return 0
if col == cls.port_date_var:
return 1
col_parts = col.split()
cum_period = int(col_parts[-1])
sort_key = (cum_period + 1) * 10
col_type = col_parts[0]
sort_key += col_type_order[col_type]
return sort_key
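# illustrative (assumed) usage of the sort key:
# sorted(["EW RET 1", "VW RET 0", "Portfolio"], key=PortfolioTest.col_sort_key)
# -> ["Portfolio", "VW RET 0", "EW RET 1"], i.e. id columns first, then columns grouped by cumulation period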
def ew_ret_name(self, cum_period: int) -> str:
return f"EW {self.ret_var} {cum_period}"
def vw_ret_name(self, cum_period: int) -> str:
return f"VW {self.ret_var} {cum_period}"
def stderr_name(self, cum_period: int) -> str:
return f"Stderr {cum_period}"
def count_name(self, cum_period: int) -> str:
return f"Count {cum_period}"
def expect_cum_df(self, freq: str = 'd', weighted: bool = True,
include_stderr: bool = False, include_count: bool = False) -> pd.DataFrame:
if freq == 'd':
early_ts = Timestamp("2000-01-01 00:00:00")
late_ts = Timestamp("2000-01-05 00:00:00")
elif freq == 'h':
early_ts = Timestamp("2000-01-01 01:00:00")
late_ts = Timestamp("2000-01-01 05:00:00")
else:
raise ValueError(f'unsupported freq {freq}')
df = pd.DataFrame(
data=[
(
1,
early_ts,
0.06000000000000005,
0.04333333333333337,
0.07,
0.0533333333333333,
0.35252024000000026,
0.2695302400000002,
),
(
1,
late_ts,
0.09999999999999998,
0.08333333333333333,
0.11,
0.09333333333333334,
nan,
nan,
),
(
2,
early_ts,
0.09999999999999998,
0.08333333333333333,
0.11,
0.09333333333333334,
0.5639515999999999,
0.47136200000000006,
),
(
2,
late_ts,
0.06000000000000005,
0.04333333333333337,
0.07,
0.0533333333333333,
nan,
nan,
),
],
columns=[
self.port_var,
self.port_date_var,
self.ew_ret_name(0),
self.vw_ret_name(0),
self.ew_ret_name(1),
self.vw_ret_name(1),
self.ew_ret_name(5),
self.vw_ret_name(5),
],
)
if not weighted:
weight_cols = [col for col in df.columns if 'VW' in col]
df.drop(weight_cols, axis=1, inplace=True)
if include_stderr:
df_len = len(df)
df[self.stderr_name(0)] = pd.Series([0.035355] * df_len)
df[self.stderr_name(1)] = pd.Series([0.01450574598794102] * df_len)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2015-09-16 11:45:16
# @Email: <EMAIL>
# @Last modified by: etrott
# @Last Modified time: 2016-01-19 13:30:40
import os
import sys
import re
from string import ascii_uppercase
import gspread
import pandas as pd
import numpy as np
from .utils import get_credentials
from .gfiles import get_file_id, get_worksheet
# FIXME: clarify scopes
SCOPES = ('https://www.googleapis.com/auth/drive.metadata.readonly '
'https://www.googleapis.com/auth/drive '
'https://spreadsheets.google.com/feeds '
'https://docs.google.com/feeds')
def download(gfile, wks_name=None, col_names=False, row_names=False,
credentials=None, start_cell='A1'):
"""
Download Google Spreadsheet and convert it to Pandas DataFrame
:param gfile: path to Google Spreadsheet or gspread ID
:param wks_name: worksheet name
:param col_names: assign top row to column names for Pandas DataFrame
:param row_names: assign left column to row names for Pandas DataFrame
:param credentials: provide own credentials
:param start_cell: specify where to start capturing the DataFrame; default is A1
:type gfile: str
:type wks_name: str
:type col_names: bool
:type row_names: bool
:type credentials: class 'oauth2client.client.OAuth2Credentials'
:type start_cell: str
:returns: Pandas DataFrame
:rtype: class 'pandas.core.frame.DataFrame'
:Example:
>>> from df2gspread import gspread2df as g2d
>>> df = g2d.download(gfile="1U-kSDyeD-...", col_names=True, row_names=True)
>>> df
col1 col2
field1 1 2
field2 3 4
"""
# access credentials
credentials = get_credentials(credentials)
# auth for gspread
gc = gspread.authorize(credentials)
try:
# if gfile is file_id
gc.open_by_key(gfile).__repr__()
gfile_id = gfile
except:
# else look for file_id in drive
gfile_id = get_file_id(credentials, gfile)
if gfile_id is None:
raise RuntimeError(
"Trying to open non-existent or inaccessible spreadsheet")
wks = get_worksheet(gc, gfile_id, wks_name)
if wks is None:
raise RuntimeError(
"Trying to open non-existent or inaccessible worksheet")
raw_data = wks.get_all_values()
if not raw_data:
sys.exit()
start_row_int, start_col_int = gspread.utils.a1_to_rowcol(start_cell)
rows, cols = np.shape(raw_data)
if start_col_int > cols or (row_names and start_col_int + 1 > cols):
raise RuntimeError(
"Start col (%s) out of the table columns(%s)" % (start_col_int +
row_names, cols))
if start_row_int > rows or (col_names and start_row_int + 1 > rows):
raise RuntimeError(
"Start row (%s) out of the table rows(%s)" % (start_row_int +
col_names, rows))
raw_data = [row[start_col_int-1:] for row in raw_data[start_row_int-1:]]
if row_names and col_names:
row_names = [row[0] for row in raw_data[1:]]
col_names = raw_data[0][1:]
raw_data = [row[1:] for row in raw_data[1:]]
elif row_names:
row_names = [row[0] for row in raw_data]
col_names = np.arange(len(raw_data[0]) - 1)
raw_data = [row[1:] for row in raw_data]
elif col_names:
row_names = np.arange(len(raw_data) - 1)
col_names = raw_data[0]
raw_data = raw_data[1:]
else:
row_names = np.arange(len(raw_data))
col_names = np.arange(len(raw_data[0]))
df = pd.DataFrame([pd.Series(row) for row in raw_data])
df.index = row_names
df.columns = col_names
return df
import numpy as np
import pandas as pd
import pytest
from evalml.data_checks import (
DataCheckAction,
DataCheckActionCode,
DataCheckMessageCode,
DataCheckWarning,
HighlyNullDataCheck,
)
highly_null_data_check_name = HighlyNullDataCheck.name
def get_dataframe():
return pd.DataFrame(
{
"lots_of_null": [None, None, None, None, 5],
"all_null": [None, None, None, None, None],
"no_null": [1, 2, 3, 4, 5],
}
)
@pytest.fixture
def highly_null_dataframe():
return get_dataframe()
@pytest.fixture
def highly_null_dataframe_nullable_types(highly_null_dataframe):
df = get_dataframe()
df.ww.init(
logical_types={"lots_of_null": "IntegerNullable", "all_null": "IntegerNullable"}
)
return df
class SeriesWrap:
def __init__(self, series):
self.series = series
def __eq__(self, series_2):
return all(self.series.eq(series_2.series))
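# SeriesWrap is needed because pd.Series.__eq__ is elementwise (it returns a boolean
# Series), so validate() results that embed a Series cannot be compared with a plain ==;
# the wrapper supplies a scalar __eq__ that is True only when every element matches.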
def test_highly_null_data_check_init():
highly_null_check = HighlyNullDataCheck()
assert highly_null_check.pct_null_col_threshold == 0.95
assert highly_null_check.pct_null_row_threshold == 0.95
highly_null_check = HighlyNullDataCheck(pct_null_col_threshold=0.0)
assert highly_null_check.pct_null_col_threshold == 0
assert highly_null_check.pct_null_row_threshold == 0.95
highly_null_check = HighlyNullDataCheck(pct_null_row_threshold=0.5)
assert highly_null_check.pct_null_col_threshold == 0.95
assert highly_null_check.pct_null_row_threshold == 0.5
highly_null_check = HighlyNullDataCheck(
pct_null_col_threshold=1.0, pct_null_row_threshold=1.0
)
assert highly_null_check.pct_null_col_threshold == 1.0
assert highly_null_check.pct_null_row_threshold == 1.0
with pytest.raises(
ValueError,
match="pct null column threshold must be a float between 0 and 1, inclusive.",
):
HighlyNullDataCheck(pct_null_col_threshold=-0.1)
with pytest.raises(
ValueError,
match="pct null column threshold must be a float between 0 and 1, inclusive.",
):
HighlyNullDataCheck(pct_null_col_threshold=1.1)
with pytest.raises(
ValueError,
match="pct null row threshold must be a float between 0 and 1, inclusive.",
):
HighlyNullDataCheck(pct_null_row_threshold=-0.5)
with pytest.raises(
ValueError,
match="pct null row threshold must be a float between 0 and 1, inclusive.",
):
HighlyNullDataCheck(pct_null_row_threshold=2.1)
@pytest.mark.parametrize("nullable_type", [True, False])
def test_highly_null_data_check_warnings(
nullable_type, highly_null_dataframe_nullable_types, highly_null_dataframe
):
# Test the data check with nullable types being used.
if nullable_type:
df = highly_null_dataframe_nullable_types
else:
df = highly_null_dataframe
no_null_check = HighlyNullDataCheck(
pct_null_col_threshold=0.0, pct_null_row_threshold=0.0
)
highly_null_rows = SeriesWrap(pd.Series([2 / 3, 2 / 3, 2 / 3, 2 / 3, 1 / 3]))
validate_results = no_null_check.validate(df)
validate_results["warnings"][0]["details"]["pct_null_cols"] = SeriesWrap(
validate_results["warnings"][0]["details"]["pct_null_cols"]
)
assert validate_results == {
"warnings": [
DataCheckWarning(
message="5 out of 5 rows are 0.0% or more null",
data_check_name=highly_null_data_check_name,
message_code=DataCheckMessageCode.HIGHLY_NULL_ROWS,
details={
"pct_null_cols": highly_null_rows,
"rows": highly_null_rows.series.index.tolist(),
},
).to_dict(),
DataCheckWarning(
message="Columns 'lots_of_null', 'all_null' are 0.0% or more null",
data_check_name=highly_null_data_check_name,
message_code=DataCheckMessageCode.HIGHLY_NULL_COLS,
details={
"columns": ["lots_of_null", "all_null"],
"pct_null_rows": {"all_null": 1.0, "lots_of_null": 0.8},
},
).to_dict(),
],
"errors": [],
"actions": [
DataCheckAction(
DataCheckActionCode.DROP_ROWS,
data_check_name=highly_null_data_check_name,
metadata={"rows": [0, 1, 2, 3, 4]},
).to_dict(),
DataCheckAction(
DataCheckActionCode.DROP_COL,
data_check_name=highly_null_data_check_name,
metadata={"columns": ["lots_of_null", "all_null"]},
).to_dict(),
],
}
some_null_check = HighlyNullDataCheck(
pct_null_col_threshold=0.5, pct_null_row_threshold=0.5
)
highly_null_rows = SeriesWrap(pd.Series([2 / 3, 2 / 3, 2 / 3, 2 / 3]))
validate_results = some_null_check.validate(df)
validate_results["warnings"][0]["details"]["pct_null_cols"] = SeriesWrap(
validate_results["warnings"][0]["details"]["pct_null_cols"]
)
assert validate_results == {
"warnings": [
DataCheckWarning(
message="4 out of 5 rows are 50.0% or more null",
data_check_name=highly_null_data_check_name,
message_code=DataCheckMessageCode.HIGHLY_NULL_ROWS,
details={"pct_null_cols": highly_null_rows, "rows": [0, 1, 2, 3]},
).to_dict(),
DataCheckWarning(
message="Columns 'lots_of_null', 'all_null' are 50.0% or more null",
data_check_name=highly_null_data_check_name,
message_code=DataCheckMessageCode.HIGHLY_NULL_COLS,
details={
"columns": ["lots_of_null", "all_null"],
"pct_null_rows": {"all_null": 1.0, "lots_of_null": 0.8},
},
).to_dict(),
],
"errors": [],
"actions": [
DataCheckAction(
DataCheckActionCode.DROP_ROWS,
data_check_name=highly_null_data_check_name,
metadata={"rows": [0, 1, 2, 3]},
).to_dict(),
DataCheckAction(
DataCheckActionCode.DROP_COL,
data_check_name=highly_null_data_check_name,
metadata={"columns": ["lots_of_null", "all_null"]},
).to_dict(),
],
}
all_null_check = HighlyNullDataCheck(
pct_null_col_threshold=1.0, pct_null_row_threshold=1.0
)
assert all_null_check.validate(df) == {
"warnings": [
DataCheckWarning(
message="Columns 'all_null' are 100.0% or more null",
data_check_name=highly_null_data_check_name,
message_code=DataCheckMessageCode.HIGHLY_NULL_COLS,
details={
"columns": ["all_null"],
"pct_null_rows": {"all_null": 1.0},
},
).to_dict()
],
"errors": [],
"actions": [
DataCheckAction(
DataCheckActionCode.DROP_COL,
data_check_name=highly_null_data_check_name,
metadata={"columns": ["all_null"]},
).to_dict()
],
}
def test_highly_null_data_check_separate_rows_cols(highly_null_dataframe):
row_null_check = HighlyNullDataCheck(
pct_null_col_threshold=0.9, pct_null_row_threshold=0.0
)
highly_null_rows = SeriesWrap(pd.Series([2 / 3, 2 / 3, 2 / 3, 2 / 3, 1 / 3]))
validate_results = row_null_check.validate(highly_null_dataframe)
validate_results["warnings"][0]["details"]["pct_null_cols"] = SeriesWrap(
validate_results["warnings"][0]["details"]["pct_null_cols"]
)
assert validate_results == {
"warnings": [
DataCheckWarning(
message="5 out of 5 rows are 0.0% or more null",
data_check_name=highly_null_data_check_name,
message_code=DataCheckMessageCode.HIGHLY_NULL_ROWS,
details={"pct_null_cols": highly_null_rows, "rows": [0, 1, 2, 3, 4]},
).to_dict(),
DataCheckWarning(
message="Columns 'all_null' are 90.0% or more null",
data_check_name=highly_null_data_check_name,
message_code=DataCheckMessageCode.HIGHLY_NULL_COLS,
details={
"columns": ["all_null"],
"pct_null_rows": {"all_null": 1.0},
},
).to_dict(),
],
"errors": [],
"actions": [
DataCheckAction(
DataCheckActionCode.DROP_ROWS,
data_check_name=highly_null_data_check_name,
metadata={"rows": [0, 1, 2, 3, 4]},
).to_dict(),
DataCheckAction(
DataCheckActionCode.DROP_COL,
data_check_name=highly_null_data_check_name,
metadata={"columns": ["all_null"]},
).to_dict(),
],
}
col_null_check = HighlyNullDataCheck(
pct_null_col_threshold=0.0, pct_null_row_threshold=0.9
)
validate_results = col_null_check.validate(highly_null_dataframe)
assert validate_results == {
"warnings": [
DataCheckWarning(
message="Columns 'lots_of_null', 'all_null' are 0.0% or more null",
data_check_name=highly_null_data_check_name,
message_code=DataCheckMessageCode.HIGHLY_NULL_COLS,
details={
"columns": ["lots_of_null", "all_null"],
"pct_null_rows": {"lots_of_null": 0.8, "all_null": 1.0},
},
).to_dict(),
],
"errors": [],
"actions": [
DataCheckAction(
DataCheckActionCode.DROP_COL,
data_check_name=highly_null_data_check_name,
metadata={"columns": ["lots_of_null", "all_null"]},
).to_dict(),
],
}
def test_highly_null_data_check_input_formats():
highly_null_check = HighlyNullDataCheck(
pct_null_col_threshold=0.8, pct_null_row_threshold=0.8
)
# test empty pd.DataFrame
assert highly_null_check.validate(pd.DataFrame()) == {
"warnings": [],
"errors": [],
"actions": [],
}
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy.spatial import distance
from scipy.optimize import differential_evolution
class IntracellAnalysisV2:
# IA constants
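# voltage windows (in volts) for the full cell (FC), negative electrode (NE), and
# positive electrode (PE) reference curves; THRESHOLD is defined as 4.84 * 0.0,
# i.e. currently zeroed out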
FC_UPPER_VOLTAGE = 4.20
FC_LOWER_VOLTAGE = 2.70
NE_UPPER_VOLTAGE = 0.01
NE_LOWER_VOLTAGE = 1.50
PE_UPPER_VOLTAGE = 4.30
PE_LOWER_VOLTAGE = 2.86
THRESHOLD = 4.84 * 0.0
def __init__(self,
pe_pristine_file,
ne_pristine_file,
cycle_type='rpt_0.2C',
step_type=0,
error_type='V-Q',
ne_2pos_file=None,
ne_2neg_file=None
):
"""
Invokes the cell electrode analysis class. This is a class designed to fit the cell and electrode
parameters in order to determine changes of electrodes within the full cell from only full cell cycling data.
Args:
pe_pristine_file (str): file name for the half cell data of the pristine (uncycled) positive
electrode
ne_pristine_file (str): file name for the half cell data of the pristine (uncycled) negative
electrode
cycle_type (str): type of diagnostic cycle for the fitting
step_type (int): charge or discharge (0 for charge, 1 for discharge)
error_type (str): defines which error metric is to be used
ne_2neg_file (str): file name of the data for the negative component of the anode
ne_2pos_file (str): file name of the data for the positive component of the anode
"""
self.pe_pristine = pd.read_csv(pe_pristine_file, usecols=['SOC_aligned', 'Voltage_aligned'])
self.ne_1_pristine = pd.read_csv(ne_pristine_file, usecols=['SOC_aligned', 'Voltage_aligned'])
if ne_2neg_file and ne_2pos_file:
self.ne_2_pristine_pos = pd.read_csv(ne_2pos_file)
self.ne_2_pristine_neg = pd.read_csv(ne_2neg_file)
else:
self.ne_2_pristine_pos = pd.DataFrame()
self.ne_2_pristine_neg = pd.DataFrame()
if step_type == 0:
self.capacity_col = 'charge_capacity'
else:
self.capacity_col = 'discharge_capacity'
self.cycle_type = cycle_type
self.step_type = step_type
self.error_type = error_type
def process_beep_cycle_data_for_candidate_halfcell_analysis_ah(self,
cell_struct,
cycle_index):
"""
Ingests BEEP structured cycling data and cycle_index and returns
a Dataframe of evenly spaced capacity with corresponding voltage.
Inputs:
cell_struct (MaccorDatapath): BEEP structured cycling data
cycle_index (int): cycle number at which to evaluate
Outputs:
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned (evenly spaced)
and Voltage_aligned
"""
# filter the data down to the diagnostic type of interest
diag_type_cycles = cell_struct.diagnostic_data.loc[cell_struct.diagnostic_data['cycle_type'] == self.cycle_type]
real_cell_candidate_charge_profile = diag_type_cycles.loc[
(diag_type_cycles.cycle_index == cycle_index)
& (diag_type_cycles.step_type == 0) # step_type = 0 is charge, 1 is discharge
& (diag_type_cycles.voltage < self.FC_UPPER_VOLTAGE)
& (diag_type_cycles[self.capacity_col] > 0)][['voltage', 'charge_capacity']]
# rename the capacity and voltage columns
real_cell_candidate_charge_profile['Q'] = real_cell_candidate_charge_profile['charge_capacity']
real_cell_candidate_charge_profile['Voltage'] = real_cell_candidate_charge_profile['voltage']
real_cell_candidate_charge_profile.drop('voltage', axis=1, inplace=True)
# interpolate voltage along evenly spaced capacity axis
q_vec = np.linspace(0, np.max(real_cell_candidate_charge_profile['Q']), 1001)
real_cell_candidate_charge_profile_aligned = pd.DataFrame()
real_cell_candidate_charge_profile_interper = interp1d(real_cell_candidate_charge_profile['Q'],
real_cell_candidate_charge_profile['Voltage'],
bounds_error=False,
fill_value=(
self.FC_LOWER_VOLTAGE, self.FC_UPPER_VOLTAGE))
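# with bounds_error=False, capacities outside the measured range are clamped to the
# lower/upper full-cell voltage limits instead of raising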
real_cell_candidate_charge_profile_aligned['Voltage_aligned'] = real_cell_candidate_charge_profile_interper(
q_vec)
real_cell_candidate_charge_profile_aligned['Q_aligned'] = q_vec
return real_cell_candidate_charge_profile_aligned
def _impose_electrode_scale(self,
pe_pristine=pd.DataFrame(),
ne_1_pristine=pd.DataFrame(),
ne_2_pristine_pos=pd.DataFrame(),
ne_2_pristine_neg=pd.DataFrame(),
lli=0.0, q_pe=0.0, q_ne=0.0, x_ne_2=0.0):
"""
Scales the reference electrodes according to specified capacities and
offsets their capacities according to lli. Blends negative electrode materials.
Inputs:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
lli (float): Loss of Lithium Inventory - capacity offset (misalignment) between the
cathode and anode zero-capacity points
q_pe (float): capacity of the positive electrode (cathode)
q_ne (float): capacity of the negative electrode (anode)
x_ne_2 (float): fraction of ne_2_pristine_pos or ne_2_pristine_neg (for a positive or
negative value, respectively) blended with ne_1_pristine
Outputs:
pe_degraded (Dataframe): positive electrode with imposed capacity
scale to emulate degradation
ne_degraded (Dataframe): negative electrode with imposed capacity
scale and capacity offset to emulate degradation
"""
# Blend negative electrodes
ne_pristine = blend_electrodes(ne_1_pristine, ne_2_pristine_pos, ne_2_pristine_neg, x_ne_2)
# rescaling pristine electrodes to q_pe and q_ne
pe_q_scaled = pe_pristine.copy()
pe_q_scaled['Q_aligned'] = (pe_q_scaled['SOC_aligned'] / 100) * q_pe
ne_q_scaled = ne_pristine.copy()
ne_q_scaled['Q_aligned'] = (ne_q_scaled['SOC_aligned'] / 100) * q_ne
# translate pristine ne electrode with lli
ne_q_scaled['Q_aligned'] = ne_q_scaled['Q_aligned'] + lli
# Re-interpolate to align dataframes for differencing
lower_q = np.min((np.min(pe_q_scaled['Q_aligned']),
np.min(ne_q_scaled['Q_aligned'])))
upper_q = np.max((np.max(pe_q_scaled['Q_aligned']),
np.max(ne_q_scaled['Q_aligned'])))
q_vec = np.linspace(lower_q, upper_q, 1001)
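# re-interpolating both electrodes onto this shared capacity grid allows point-by-point
# differencing later; capacities outside an electrode's own range become NaN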
# Actually aligning the electrode Q's
pe_pristine_interper = interp1d(pe_q_scaled['Q_aligned'],
pe_q_scaled['Voltage_aligned'], bounds_error=False)
pe_degraded = pe_q_scaled.copy()
pe_degraded['Q_aligned'] = q_vec
pe_degraded['Voltage_aligned'] = pe_pristine_interper(q_vec)
ne_pristine_interper = interp1d(ne_q_scaled['Q_aligned'],
ne_q_scaled['Voltage_aligned'], bounds_error=False)
ne_degraded = ne_q_scaled.copy()
ne_degraded['Q_aligned'] = q_vec
ne_degraded['Voltage_aligned'] = ne_pristine_interper(q_vec)
# Returning pe and ne degraded on an Ah basis
return pe_degraded, ne_degraded
def halfcell_degradation_matching_ah(self, x, *params):
"""
Calls underlying functions to impose degradation through electrode
capacity scale and alignment through LLI. Modifies emulated full cell
data to be within full cell voltage range and calibrates (zeros) capacity
at the lowest permissible voltage. Interpolates real and emulated data onto
a common capacity axis.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
df_real_aligned (Dataframe): capacity and voltage interpolated evenly across
capacity for the real cell data
emulated_full_cell_aligned (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
lli = x[0]
q_pe = x[1]
q_ne = x[2]
x_ne_2 = x[3]
(pe_pristine,
ne_1_pristine,
ne_2_pristine_pos,
ne_2_pristine_neg,
real_cell_candidate_charge_profile_aligned) = params
# output degraded ne and pe (on an Ah basis, with electrode alignment;
# NaNs for voltage where there is no capacity at the corresponding capacity index)
pe_out, ne_out = self._impose_electrode_scale(pe_pristine, ne_1_pristine,
ne_2_pristine_pos, ne_2_pristine_neg,
lli, q_pe,
q_ne, x_ne_2)
# PE - NE = full cell voltage
emulated_full_cell_with_degradation = pd.DataFrame()
emulated_full_cell_with_degradation['Q_aligned'] = pe_out['Q_aligned'].copy()
emulated_full_cell_with_degradation['Voltage_aligned'] = pe_out['Voltage_aligned'] - ne_out['Voltage_aligned']
# Replace emulated full cell values outside of voltage range with NaN
emulated_full_cell_with_degradation['Voltage_aligned'].loc[
emulated_full_cell_with_degradation['Voltage_aligned'] < self.FC_LOWER_VOLTAGE] = np.nan
emulated_full_cell_with_degradation['Voltage_aligned'].loc[
emulated_full_cell_with_degradation['Voltage_aligned'] > self.FC_UPPER_VOLTAGE] = np.nan
# Center the emulated full cell and half cell curves onto the same Q at which the real (degraded)
# capacity measurement started (self.FC_LOWER_VOLTAGE)
emulated_full_cell_with_degradation_zeroed = pd.DataFrame()
emulated_full_cell_with_degradation_zeroed['Voltage_aligned'] = emulated_full_cell_with_degradation[
'Voltage_aligned'].copy()
zeroing_value = emulated_full_cell_with_degradation['Q_aligned'].loc[
np.nanargmin(emulated_full_cell_with_degradation['Voltage_aligned'])
]
emulated_full_cell_with_degradation_zeroed['Q_aligned'] = \
(emulated_full_cell_with_degradation['Q_aligned'].copy() - zeroing_value)
pe_out_zeroed = pe_out.copy()
pe_out_zeroed['Q_aligned'] = pe_out['Q_aligned'] - zeroing_value
ne_out_zeroed = ne_out.copy()
ne_out_zeroed['Q_aligned'] = ne_out['Q_aligned'] - zeroing_value
# Interpolate full cell profiles across same Q range
min_q = np.min(
real_cell_candidate_charge_profile_aligned['Q_aligned'].loc[
~real_cell_candidate_charge_profile_aligned['Voltage_aligned'].isna()])
max_q = np.max(
real_cell_candidate_charge_profile_aligned['Q_aligned'].loc[
~real_cell_candidate_charge_profile_aligned['Voltage_aligned'].isna()])
emulated_interper = interp1d(emulated_full_cell_with_degradation_zeroed['Q_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()],
emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()],
bounds_error=False)
real_interper = interp1d(
real_cell_candidate_charge_profile_aligned['Q_aligned'].loc[
~real_cell_candidate_charge_profile_aligned['Voltage_aligned'].isna()],
real_cell_candidate_charge_profile_aligned['Voltage_aligned'].loc[
~real_cell_candidate_charge_profile_aligned['Voltage_aligned'].isna()],
bounds_error=False)
q_vec = np.linspace(min_q, max_q, 1001)
emulated_aligned = pd.DataFrame()
emulated_aligned['Q_aligned'] = q_vec
emulated_aligned['Voltage_aligned'] = emulated_interper(q_vec)
real_aligned = pd.DataFrame()
real_aligned['Q_aligned'] = q_vec
real_aligned['Voltage_aligned'] = real_interper(q_vec)
return pe_out_zeroed, ne_out_zeroed, real_aligned, emulated_aligned
def get_dqdv_over_v_from_degradation_matching_ah(self, x, *params):
"""
This function imposes degradation scaling, then outputs the dQdV representation of the emulated cell data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
dq_dv_over_v_real (Dataframe): dqdv across voltage for the real cell data
dq_dv_over_v_emulated (Dataframe): dqdv across voltage for the emulated cell data
df_real_interped (Dataframe): capacity and voltage interpolated evenly across
capacity for the real cell data
emulated_full_cell_interped (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
pe_out_zeroed, ne_out_zeroed, df_real_interped, emulated_full_cell_interped = \
self.halfcell_degradation_matching_ah(x, *params)
# Calculate dqdv from full cell profiles
dq_dv_real = pd.DataFrame(np.gradient(df_real_interped['Q_aligned'], df_real_interped['Voltage_aligned']),
columns=['dQdV']).ewm(0.1).mean()
dq_dv_emulated = pd.DataFrame(
np.gradient(emulated_full_cell_interped['Q_aligned'], emulated_full_cell_interped['Voltage_aligned']),
columns=['dQdV']).ewm(0.1).mean()
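# np.gradient(Q, V) gives a numerical dQ/dV; the exponentially weighted mean (ewm)
# lightly smooths the derivative noise before the curves are compared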
# Include original data
dq_dv_real['Q_aligned'] = df_real_interped['Q_aligned']
dq_dv_real['Voltage_aligned'] = df_real_interped['Voltage_aligned']
dq_dv_emulated['Q_aligned'] = emulated_full_cell_interped['Q_aligned']
dq_dv_emulated['Voltage_aligned'] = emulated_full_cell_interped['Voltage_aligned']
# Interpolate dQdV and Q over V, aligns real and emulated over V
voltage_vec = np.linspace(self.FC_LOWER_VOLTAGE, self.FC_UPPER_VOLTAGE, 1001)
v_dq_dv_interper_real = interp1d(dq_dv_real['Voltage_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
dq_dv_real['dQdV'].loc[~dq_dv_real['Voltage_aligned'].isna()],
bounds_error=False, fill_value=0)
v_q_interper_real = interp1d(dq_dv_real['Voltage_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
dq_dv_real['Q_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
bounds_error=False, fill_value=(0, np.max(df_real_interped['Q_aligned'])))
v_dq_dv_interper_emulated = interp1d(dq_dv_emulated['Voltage_aligned'].loc[
~dq_dv_emulated['Voltage_aligned'].isna()],
dq_dv_emulated['dQdV'].loc[~dq_dv_emulated['Voltage_aligned'].isna()],
bounds_error=False, fill_value=0)
v_q_interper_emulated = interp1d(dq_dv_emulated['Voltage_aligned'].loc[
~dq_dv_emulated['Voltage_aligned'].isna()],
dq_dv_emulated['Q_aligned'].loc[~dq_dv_emulated['Voltage_aligned'].isna()],
bounds_error=False, fill_value=(0, np.max(df_real_interped['Q_aligned'])))
dq_dv_over_v_real = pd.DataFrame(v_dq_dv_interper_real(voltage_vec), columns=['dQdV']).fillna(0)
dq_dv_over_v_real['Q_aligned'] = v_q_interper_real(voltage_vec)
dq_dv_over_v_real['Voltage_aligned'] = voltage_vec
dq_dv_over_v_emulated = pd.DataFrame(v_dq_dv_interper_emulated(voltage_vec), columns=['dQdV']).fillna(0)
dq_dv_over_v_emulated['Q_aligned'] = v_q_interper_emulated(voltage_vec)
dq_dv_over_v_emulated['Voltage_aligned'] = voltage_vec
return (pe_out_zeroed,
ne_out_zeroed,
dq_dv_over_v_real,
dq_dv_over_v_emulated,
df_real_interped,
emulated_full_cell_interped)
def get_dvdq_over_q_from_degradation_matching_ah(self, x, *params):
"""
This function imposes degradation scaling, then outputs the dVdQ representation of the emulated cell data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
dv_dq_real (Dataframe): dVdQ across capacity for the real cell data
dv_dq_emulated (Dataframe): dVdQ across capacity for the emulated cell data
df_real_interped (Dataframe): capacity and voltage interpolated evenly across
capacity for the real cell data
emulated_full_cell_interped (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
pe_out_zeroed, ne_out_zeroed, df_real_interped, emulated_full_cell_interped = \
self.halfcell_degradation_matching_ah(x, *params)
# Calculate dVdQ from full cell profiles
dv_dq_real = pd.DataFrame(np.gradient(df_real_interped['Voltage_aligned'], df_real_interped['Q_aligned']),
columns=['dVdQ']).ewm(0.1).mean()
dv_dq_emulated = pd.DataFrame(
np.gradient(emulated_full_cell_interped['Voltage_aligned'], emulated_full_cell_interped['Q_aligned']),
columns=['dVdQ']).ewm(0.1).mean()
# Include original data
dv_dq_real['Q_aligned'] = df_real_interped['Q_aligned']
dv_dq_real['Voltage_aligned'] = df_real_interped['Voltage_aligned']
dv_dq_emulated['Q_aligned'] = emulated_full_cell_interped['Q_aligned']
dv_dq_emulated['Voltage_aligned'] = emulated_full_cell_interped['Voltage_aligned']
# Q interpolation not needed, as interpolated over Q by default
return (pe_out_zeroed,
ne_out_zeroed,
dv_dq_real,
dv_dq_emulated,
df_real_interped,
emulated_full_cell_interped)
def get_v_over_q_from_degradation_matching_ah(self, x, *params):
"""
This function imposes degradation scaling, then outputs the V-Q representation of the emulated cell data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
df_real_interped (Dataframe): capacity and voltage interpolated evenly across
capacity for the real cell data
emulated_full_cell_interped (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
(pe_out_zeroed, ne_out_zeroed, real_aligned, emulated_aligned) = \
self.halfcell_degradation_matching_ah(x, *params)
min_soc_full_cell = np.min(real_aligned.loc[~real_aligned.Voltage_aligned.isna()].Q_aligned)
max_soc_full_cell = np.max(real_aligned.loc[~real_aligned.Voltage_aligned.isna()].Q_aligned)
soc_vec_full_cell = np.linspace(min_soc_full_cell, max_soc_full_cell, 1001)
emulated_full_cell_interper = interp1d(
emulated_aligned.Q_aligned.loc[~real_aligned.Voltage_aligned.isna()],
emulated_aligned.Voltage_aligned.loc[~real_aligned.Voltage_aligned.isna()],
bounds_error=False)
real_full_cell_interper = interp1d(real_aligned.Q_aligned.loc[~real_aligned.Voltage_aligned.isna()],
real_aligned.Voltage_aligned.loc[~real_aligned.Voltage_aligned.isna()],
bounds_error=False)
# Interpolate the emulated full-cell profile
emulated_full_cell_interped = pd.DataFrame()
emulated_full_cell_interped['Q_aligned'] = soc_vec_full_cell
emulated_full_cell_interped['Voltage_aligned'] = emulated_full_cell_interper(soc_vec_full_cell)
# Interpolate the true full-cell profile
df_real_interped = emulated_full_cell_interped.copy()
df_real_interped['Q_aligned'] = soc_vec_full_cell
df_real_interped['Voltage_aligned'] = real_full_cell_interper(soc_vec_full_cell)
return pe_out_zeroed, ne_out_zeroed, df_real_interped, emulated_full_cell_interped
def get_v_over_q_from_degradation_matching_ah_no_real(self, x, *params):
"""
This function imposes degradation scaling, then outputs the V-Q representation of the
emulated cell data, in the absence of real cell data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
emulated_full_cell_interped (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
(pe_out_zeroed, ne_out_zeroed, emulated_aligned) = \
self.halfcell_degradation_matching_ah_no_real(x, *params)
min_q_full_cell = np.min(emulated_aligned.loc[~emulated_aligned.Voltage_aligned.isna()].Q_aligned)
max_q_full_cell = np.max(emulated_aligned.loc[~emulated_aligned.Voltage_aligned.isna()].Q_aligned)
q_vec_full_cell = np.linspace(min_q_full_cell, max_q_full_cell, 1001)
emulated_full_cell_interper = interp1d(
emulated_aligned.Q_aligned.loc[~emulated_aligned.Voltage_aligned.isna()],
emulated_aligned.Voltage_aligned.loc[~emulated_aligned.Voltage_aligned.isna()],
bounds_error=False)
# Interpolate the emulated full-cell profile
emulated_full_cell_interped = pd.DataFrame()
emulated_full_cell_interped['Q_aligned'] = q_vec_full_cell
emulated_full_cell_interped['Voltage_aligned'] = emulated_full_cell_interper(q_vec_full_cell)
return pe_out_zeroed, ne_out_zeroed, emulated_full_cell_interped
def halfcell_degradation_matching_ah_no_real(self, x, *params):
"""
Calls underlying functions to impose degradation through electrode
capacity scale and alignment through LLI. Modifies emulated full cell
data to be within full cell voltage range and calibrates (zeros) capacity
at the lowest permissible voltage.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
emulated_aligned (Dataframe): full cell data corresponding to the imposed degradation
"""
lli = x[0]
q_pe = x[1]
q_ne = x[2]
x_ne_2 = x[3]
pe_pristine, ne_1_pristine, ne_2_pristine_pos, ne_2_pristine_neg = params
pe_out, ne_out = self._impose_electrode_scale(pe_pristine, ne_1_pristine,
ne_2_pristine_pos, ne_2_pristine_neg,
lli, q_pe,
q_ne,
x_ne_2)
# outputs degraded ne and pe (on an Ah basis, with electrode alignment; NaNs for voltage where there is no overlap)
emulated_full_cell_with_degradation = pd.DataFrame()
emulated_full_cell_with_degradation['Q_aligned'] = pe_out['Q_aligned'].copy()
emulated_full_cell_with_degradation['Voltage_aligned'] = pe_out['Voltage_aligned'] - ne_out['Voltage_aligned']
# Replace emulated full cell values outside of voltage range with NaN
emulated_full_cell_with_degradation['Voltage_aligned'].loc[
emulated_full_cell_with_degradation['Voltage_aligned'] < self.FC_LOWER_VOLTAGE] = np.nan
emulated_full_cell_with_degradation['Voltage_aligned'].loc[
emulated_full_cell_with_degradation['Voltage_aligned'] > self.FC_UPPER_VOLTAGE] = np.nan
# Center the emulated full cell and half cell curves onto the same Q at which the real (degraded)
# capacity measurement started (self.FC_LOWER_VOLTAGE)
emulated_full_cell_with_degradation_zeroed = pd.DataFrame()
emulated_full_cell_with_degradation_zeroed['Voltage_aligned'] = emulated_full_cell_with_degradation[
'Voltage_aligned']
zeroing_value = emulated_full_cell_with_degradation['Q_aligned'].loc[
np.nanargmin(emulated_full_cell_with_degradation['Voltage_aligned'])
]
emulated_full_cell_with_degradation_zeroed['Q_aligned'] = \
(emulated_full_cell_with_degradation['Q_aligned'] - zeroing_value)
pe_out_zeroed = pe_out.copy()
pe_out_zeroed['Q_aligned'] = pe_out['Q_aligned'] - zeroing_value
ne_out_zeroed = ne_out.copy()
ne_out_zeroed['Q_aligned'] = ne_out['Q_aligned'] - zeroing_value
# Interpolate full profiles across same Q range
min_q = np.min(
emulated_full_cell_with_degradation_zeroed['Q_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()])
max_q = np.max(
emulated_full_cell_with_degradation_zeroed['Q_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()])
emulated_interper = interp1d(emulated_full_cell_with_degradation_zeroed['Q_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()],
emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()],
bounds_error=False)
q_vec = np.linspace(min_q, max_q, 1001)
emulated_aligned = pd.DataFrame()
emulated_aligned['Q_aligned'] = q_vec
emulated_aligned['Voltage_aligned'] = emulated_interper(q_vec)
return pe_out_zeroed, ne_out_zeroed, emulated_aligned
def _get_error_from_degradation_matching_ah(self, x, *params):
"""
Wrapper function which selects the correct error subroutine and returns its error value.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
error value (float) - output of the specified error subfunction
"""
error_type = self.error_type
if error_type == 'V-Q':
return self._get_error_from_degradation_matching_v_q(x, *params)[0]
elif error_type == 'dVdQ':
return self._get_error_from_degradation_matching_dvdq(x, *params)[0]
elif error_type == 'dQdV':
return self._get_error_from_degradation_matching_dqdv(x, *params)[0]
else:
return self._get_error_from_degradation_matching_v_q(x, *params)[0]
def _get_error_from_degradation_matching_v_q(self, x, *params):
"""
Error function returning the mean standardized Euclidean distance of each point of the real curve to
the closest value on the emulated curve in the V-Q representation.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
error (float): output of the specified error subfunction
error_vector (array): vector containing the Euclidean distance of each point of the real curve to
the closest value on the emulated curve in the V-Q representation
xa (Dataframe): real full cell data used for error analysis
xb (Dataframe): emulated full cell data used for error analysis
"""
try:
(pe_out_zeroed, ne_out_zeroed, real_aligned, emulated_aligned
) = self.get_v_over_q_from_degradation_matching_ah(x, *params)
xa = real_aligned.dropna()
xb = emulated_aligned.dropna()
error_matrix = distance.cdist(xa, xb, 'seuclidean')
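# 'seuclidean' standardizes each dimension by its variance across both curves, so
# capacity (Ah) and voltage (V) contribute comparably despite their different units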
error_vector = error_matrix.min(axis=1)
error = error_vector.mean()
except ValueError:
error = 100
return error, None, None, None
return error, error_vector, xa, xb
# Pairwise euclidean from premade dQdV
def _get_error_from_degradation_matching_dqdv(self, x, *params):
"""
Error function returning the mean standardized Euclidean distance of each point of the real curve to
the closest value on the emulated curve in the dQdV representation.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
error (float): output of the specified error sub function
error_vector (array): vector containing Euclidean distance of each point of the real curve to
the closest value on the emulated curve in the dQdV representation
xa (Dataframe): real full cell data used for error analysis
xb (Dataframe): emulated full cell data used for error analysis
"""
try:
# Call dQdV generating function
(pe_out_zeroed,
ne_out_zeroed,
dqdv_over_v_real,
dqdv_over_v_emulated,
df_real_interped,
emulated_full_cell_interped) = self.get_dqdv_over_v_from_degradation_matching_ah(x, *params)
xa = dqdv_over_v_real[['Voltage_aligned', 'dQdV']].dropna()
xb = dqdv_over_v_emulated[['Voltage_aligned', 'dQdV']].dropna()
error_matrix = distance.cdist(xa, xb, 'seuclidean')
error_vector = error_matrix.min(axis=1)
error = error_vector.mean()
except ValueError:
error = 100
return error, None, None, None
return error, error_vector, xa, xb
def _get_error_from_degradation_matching_dvdq(self, x, *params):
"""
Error function returning the mean standardized Euclidean distance of each point of the real curve to
the closest value on the emulated curve in the dVdQ representation.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
error (float): output of the specified error sub function
error_vector (array): vector containing Euclidean distance of each point of the real curve to
the closest value on the emulated curve in the dVdQ representation
xa (Dataframe): real full cell data used for error analysis
xb (Dataframe): emulated full cell data used for error analysis
"""
try:
(pe_out_zeroed,
ne_out_zeroed,
dvdq_over_q_real,
dvdq_over_q_emulated,
df_real_interped,
emulated_full_cell_interped) = self.get_dvdq_over_q_from_degradation_matching_ah(x, *params)
xa = dvdq_over_q_real[['Q_aligned', 'dVdQ']].dropna()
xb = dvdq_over_q_emulated[['Q_aligned', 'dVdQ']].dropna()
# down-select to values with capacity more than 0.5 Ahr to eliminate high-slope region of dVdQ
xa = xa.loc[(xa.Q_aligned > 0.5)]
xb = xb.loc[(xb.Q_aligned > 0.5)]
error_matrix = distance.cdist(xa, xb, 'seuclidean')
error_vector = error_matrix.min(axis=1)
error = error_vector.mean()
except ValueError:
error = 100
return error, None, None, None
return error, error_vector, xa, xb
def _get_error_from_synthetic_fitting_ah(self, x, *params):
"""
Wrapper function which selects the correct error sub routine and returns its error value.
This function is specific to fitting synthetic data rather than real cycling data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
error value (float) - output of the specified error sub function
"""
error_type = self.error_type
try:
if error_type == 'V-Q':
return self._get_error_from_degradation_matching_v_q(x, *params)[0]
elif error_type == 'dVdQ':
return self._get_error_from_degradation_matching_dvdq(x, *params)[0]
elif error_type == 'dQdV':
return self._get_error_from_degradation_matching_dqdv(x, *params)[0]
else:
return self._get_error_from_degradation_matching_v_q(x, *params)[0]
except RuntimeError:
print("Can't return error")
return 100
def intracell_values_wrapper_ah(self,
cycle_index,
cell_struct,
degradation_bounds=None
):
"""
Wrapper function to solve capacity sizing and offset of reference electrodes to real full cell cycle data.
Inputs:
cycle_index (int): the index of the cycle of interest of the structured real cycling data
cell_struct (MaccorDatapath): BEEP structured cycling data
Outputs:
loss_dict (dict): dictionary with key of cycle index and entry of a list of
error, lli_opt, q_pe_opt, q_ne_opt, x_ne_2, Q_li
profiles_dict (dict): dictionary with key of cycle index and entry of a dictionary
containing various key/entry pairs resulting from the fitting
"""
if degradation_bounds is None:
degradation_bounds = ((0, 3), # LLI
(2.5, 6.5), # q_pe
(2.5, 6.5), # q_ne
(1, 1), # (-1,1) x_ne_2
)
real_cell_candidate_charge_profile_aligned = self.process_beep_cycle_data_for_candidate_halfcell_analysis_ah(
cell_struct,
cycle_index)
degradation_optimization_result = differential_evolution(self._get_error_from_degradation_matching_ah,
degradation_bounds,
args=(self.pe_pristine,
self.ne_1_pristine,
self.ne_2_pristine_pos,
self.ne_2_pristine_neg,
real_cell_candidate_charge_profile_aligned
),
strategy='best1bin', maxiter=100000,
popsize=15, tol=0.001, mutation=0.5,
recombination=0.7,
seed=1,
callback=None, disp=False, polish=True,
init='latinhypercube',
atol=0, updating='deferred', workers=-1,
constraints=()
)
# print(degradation_optimization_result.x) #BVV
(pe_out_zeroed,
ne_out_zeroed,
dqdv_over_v_real,
dqdv_over_v_emulated,
df_real_interped,
emulated_full_cell_interped) = self.get_dqdv_over_v_from_degradation_matching_ah(
degradation_optimization_result.x,
self.pe_pristine,
self.ne_1_pristine,
self.ne_2_pristine_pos,
self.ne_2_pristine_neg,
real_cell_candidate_charge_profile_aligned)
#
electrode_info_df = get_electrode_info_ah(pe_out_zeroed, ne_out_zeroed)
#
error = degradation_optimization_result.fun
lli_opt = degradation_optimization_result.x[0]
q_pe_opt = degradation_optimization_result.x[1]
q_ne_opt = degradation_optimization_result.x[2]
x_ne_2 = degradation_optimization_result.x[3]
loss_dict = {cycle_index: np.append([error, lli_opt, q_pe_opt, q_ne_opt,
x_ne_2],
electrode_info_df.iloc[-1].values)
}
profiles_per_cycle_dict = {
'NE_zeroed': ne_out_zeroed,
'PE_zeroed': pe_out_zeroed,
'dQdV_over_v_real': dqdv_over_v_real,
'dQdV_over_v_emulated': dqdv_over_v_emulated,
'df_real_interped': df_real_interped,
'emulated_full_cell_interped': emulated_full_cell_interped,
'real_cell_candidate_charge_profile_aligned': real_cell_candidate_charge_profile_aligned
}
profiles_dict = {cycle_index: profiles_per_cycle_dict}
return loss_dict, profiles_dict
def solve_emulated_degradation(self,
forward_simulated_profile,
degradation_bounds=None
):
"""
"""
if degradation_bounds is None:
degradation_bounds = ((0, 3), # LLI
(2.5, 6.5), # q_pe
(2.5, 6.5), # q_ne
(1, 1), # (-1,1) x_ne_2
)
degradation_optimization_result = differential_evolution(self._get_error_from_synthetic_fitting_ah,
degradation_bounds,
args=(self.pe_pristine,
self.ne_1_pristine,
self.ne_2_pristine_pos,
self.ne_2_pristine_neg,
forward_simulated_profile,
),
strategy='best1bin', maxiter=100000,
popsize=15, tol=0.001, mutation=0.5,
recombination=0.7,
seed=1,
callback=None, disp=False, polish=True,
init='latinhypercube',
atol=0, updating='deferred', workers=-1,
constraints=()
)
return degradation_optimization_result
# TODO revisit this function
def blend_electrodes(electrode_1, electrode_2_pos, electrode_2_neg, x_2):
"""
Blends two electrode materials from their SOC-V profiles to form a blended electrode.
Inputs:
electrode_1: Primary material in the electrode, typically graphite (Gr). DataFrame supplied with evenly
spaced SOC and voltage columns.
electrode_2_pos, electrode_2_neg: Secondary material in the electrode, typically Si, split into its
positive and negative components. DataFrames supplied with evenly spaced SOC and voltage columns.
x_2: Fraction of electrode_2 material's capacity (not mass). Supplied as a scalar value.
Outputs:
df_blended_soc_mod (Dataframe): blended electrode with SOC_aligned and Voltage_aligned columns
"""
if electrode_2_pos.empty:
df_blended = electrode_1
return df_blended
if electrode_2_neg.empty:
electrode_2 = electrode_2_pos
x_2 = np.abs(x_2)
elif x_2 > 0:
electrode_2 = electrode_2_pos
else:
electrode_2 = electrode_2_neg
x_2 = np.abs(x_2)
electrode_1_interper = interp1d(electrode_1['Voltage_aligned'], electrode_1['SOC_aligned'], bounds_error=False,
fill_value='extrapolate')
electrode_2_interper = interp1d(electrode_2['Voltage_aligned'], electrode_2['SOC_aligned'], bounds_error=False,
fill_value='extrapolate')
voltage_vec = np.linspace(np.min((np.min(electrode_1['Voltage_aligned']),
np.min(electrode_2['Voltage_aligned']))),
np.max((np.max(electrode_1['Voltage_aligned']),
np.max(electrode_2['Voltage_aligned']))),
1001)
electrode_1_voltage_aligned = pd.DataFrame(electrode_1_interper(voltage_vec), columns=['SOC'])
electrode_2_voltage_aligned = pd.DataFrame(electrode_2_interper(voltage_vec), columns=['SOC'])
electrode_1_voltage_aligned['Voltage'] = voltage_vec
electrode_2_voltage_aligned['Voltage'] = voltage_vec
df_blend_voltage_aligned = pd.DataFrame(
(1 - x_2) * electrode_1_voltage_aligned['SOC'] + x_2 * electrode_2_voltage_aligned['SOC'], columns=['SOC'])
df_blend_voltage_aligned['Voltage'] = electrode_1_voltage_aligned.merge(electrode_2_voltage_aligned,
on='Voltage')['Voltage']
df_blended_interper = interp1d(df_blend_voltage_aligned['SOC'], df_blend_voltage_aligned['Voltage'],
bounds_error=False)
soc_vec = np.linspace(0, 100, 1001)
df_blended = pd.DataFrame(df_blended_interper(soc_vec), columns=['Voltage_aligned'])
df_blended['SOC_aligned'] = soc_vec
# Modify NE to fully span 100% SOC within its valid voltage window
df_blended_soc_mod_interper = interp1d(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()],
df_blended['Voltage_aligned'].loc[~df_blended['Voltage_aligned'].isna()],
bounds_error=False)
soc_vec = np.linspace(np.min(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()]),
np.max(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()]),
1001)
df_blended_soc_mod = pd.DataFrame(df_blended_soc_mod_interper(soc_vec), columns=['Voltage_aligned'])
df_blended_soc_mod['SOC_aligned'] = soc_vec / np.max(soc_vec) * 100
return df_blended_soc_mod
def get_electrode_info_ah(pe_out_zeroed, ne_out_zeroed):
"""
Calculates a variety of half-cell metrics at various positions in the full-cell profile.
Inputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
Outputs:
electrode_info_df (Dataframe): dataframe containing a variety of half-cell metrics
at various positions in the emulated full-cell profile.
pe_voltage_FC4p2V: voltage of the positive electrode (cathode) corresponding
to the full cell at 4.2V
...
pe_voltage_FC2p7V: voltage of the positive electrode (cathode) corresponding
to the full cell at 2.7V
pe_soc_FC4p2V: state of charge of the positive electrode corresponding
to the full cell at 4.2V
...
pe_soc_FC2p7V: state of charge of the positive electrode corresponding
to the full cell at 2.7V
ne_voltage_FC4p2V: voltage of the negative electrode (anode) corresponding
to the full cell at 4.2V
...
ne_voltage_FC2p7V: voltage of the negative electrode (anode) corresponding
to the full cell at 2.7V
ne_soc_FC4p2V: state of charge of the anode electrode corresponding
to the full cell at 4.2V
...
ne_soc_FC2p7V: state of charge of the anode electrode corresponding
to the full cell at 2.7V
Q_fc: capacity of the full cell within the full cell voltage limits
q_pe: capacity of the cathode
q_ne: capacity of the anode [Ahr]
Q_li
"""
pe_minus_ne_zeroed = pd.DataFrame(pe_out_zeroed['Voltage_aligned'] - ne_out_zeroed['Voltage_aligned'],
columns=['Voltage_aligned'])
pe_minus_ne_zeroed['Q_aligned'] = pe_out_zeroed['Q_aligned']
electrode_info_df = | pd.DataFrame(index=[0]) | pandas.DataFrame |
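# --- Illustrative sketch (not part of the original module): the curve-matching error metric ---
# The V-Q, dQdV, and dVdQ error functions above all do the same thing: for every point on the
# real curve, find the closest point on the emulated curve under a standardized Euclidean
# metric, then average those minimum distances. A minimal, self-contained version with
# made-up toy curves (hypothetical values, not real half-cell data) looks like this:
import numpy as np
import pandas as pd
from scipy.spatial import distance

q = np.linspace(0.0, 1.0, 101)
real = pd.DataFrame({'Q_aligned': q, 'Voltage_aligned': 3.00 + 1.20 * q})
emulated = pd.DataFrame({'Q_aligned': q, 'Voltage_aligned': 3.01 + 1.18 * q})

xa = real.dropna()
xb = emulated.dropna()
error_matrix = distance.cdist(xa, xb, 'seuclidean')  # pairwise standardized distances
error_vector = error_matrix.min(axis=1)              # closest emulated point per real point
error = error_vector.mean()                          # scalar loss minimized by differential_evolution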
from datetime import datetime, timedelta
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestSeriesDtypes:
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
def test_astype(self, dtype):
s = Series(np.random.randn(5), name="foo")
as_typed = s.astype(dtype)
assert as_typed.dtype == dtype
assert as_typed.name == s.name
def test_dtype(self, datetime_series):
assert datetime_series.dtype == np.dtype("float64")
assert datetime_series.dtypes == np.dtype("float64")
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
# gh-14265: check NaN and inf raise error when converting to int
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
s = Series([value])
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
@pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
def test_astype_cast_object_int_fail(self, dtype):
arr = Series(["car", "house", "tree", "1"])
msg = r"invalid literal for int\(\) with base 10: 'car'"
with pytest.raises(ValueError, match=msg):
arr.astype(dtype)
def test_astype_cast_object_int(self):
arr = Series(["1", "2", "3", "4"], dtype=object)
result = arr.astype(int)
tm.assert_series_equal(result, Series(np.arange(1, 5)))
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see gh-4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see gh-9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series([str("2010-01-04")])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series([str("2010-01-04 00:00:00-05:00")])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see gh-9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series([str("1 days")])
tm.assert_series_equal(ser, expected)
def test_astype_unicode(self):
# see gh-7758: A bit of magic is required to set
# default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series(["データーサイエンス、お前はもう死んでいる"]),
]
former_encoding = None
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series(["野菜食べないとやばい".encode("utf-8")]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(str)
tm.assert_series_equal(res, expec)
# Restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys)
sys.setdefaultencoding(former_encoding)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see gh-7271
s = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = s.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna( | pd.Timestamp('2011-01-02 10:00', tz=tz) | pandas.Timestamp |
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
dense = arr.to_dense()
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype('float32'))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0., 2.],
dtype=dtype.subtype),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0, 2], dtype=np.int64),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with pytest.raises(ValueError, match='NA'):
arr.astype('Sparse[i8]')
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True],
dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray([True, False, False, True],
dtype=SparseDtype(bool, False))
| tm.assert_sp_array_equal(result, expected) | pandas.util.testing.assert_sp_array_equal |
#!/usr/bin/env python
__author__ = "<NAME>, <NAME>, <NAME>"
__license__ = "Apache-2.0 License"
# Import libraries
import numpy as np
import pandas as pd
import scipy
from scipy.stats import norm, lognorm
import matplotlib.pyplot as plt
def calculate_cumulative_conf(areaP90: float=1., areaP10: float=10., pdP90: float=10., pdP10: float=24):
"""Calculate cumulative confidence level for expected development size in MW
Args:
areaP90 (float): pessimistic area in sqkm
areaP10 (float): optimistic area in sqkm
pdP90 (float): pessimistic power density in MWe/sqkm
pdP10 (float): optimistic power density in MWe/sqkm
Returns:
prob_df (pandas Dataframe): cumulative confidence curve in Reservoir Size
"""
# calculate area > 250 °C
area_mu = ((np.log(areaP90)+np.log(areaP10))/2)
area_sigma = (np.log(areaP10)-np.log(areaP90))/((norm.ppf(0.9)-(norm.ppf(0.1))))
# calculate powerdensity mean and standard dev
powerdens_mu = ((np.log(pdP90)+np.log(pdP10))/2)
powerdens_sigma = (np.log(pdP10)-np.log(pdP90))/((norm.ppf(0.9)-(norm.ppf(0.1))))
capacity_mu = area_mu + powerdens_mu
capacity_sigma = ((area_sigma**2)+(powerdens_sigma**2))**0.5
eds = [lognorm.ppf(x/100, capacity_sigma, loc=0, scale=np.exp(capacity_mu)) for x in range(0,100)]
indx = list(np.arange(1,101)[::-1])
edsepc_tups = list(zip(indx,eds))
prob_df = | pd.DataFrame(edsepc_tups, columns = ['Cumulative confidence (%)', 'expected development size (MW)']) | pandas.DataFrame |
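# --- Hypothetical usage sketch (assumes calculate_cumulative_conf returns the prob_df built above) ---
# The returned table maps cumulative confidence to expected capacity, so the P50 (median)
# development size is simply the row at 50% confidence.
def _demo_cumulative_conf():
    prob_df = calculate_cumulative_conf(areaP90=1., areaP10=10., pdP90=10., pdP10=24.)
    p50 = prob_df.loc[prob_df['Cumulative confidence (%)'] == 50,
                      'expected development size (MW)'].iloc[0]
    return p50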
import logging
import boto3
import os
import pandas as pd
import argparse
from datetime import datetime
from dataactcore.models.domainModels import DUNS
from dataactcore.utils.parentDuns import sam_config_is_valid
from dataactcore.utils.duns import load_duns_by_row
from dataactvalidator.scripts.loader_utils import clean_data
from dataactvalidator.health_check import create_app
from dataactcore.interfaces.db import GlobalDB
from dataactcore.logging import configure_logging
from dataactcore.config import CONFIG_BROKER
import dataactcore.utils.parentDuns
logger = logging.getLogger(__name__)
# CSV column header name in DUNS file
column_headers = [
"awardee_or_recipient_uniqu", # DUNS Field
"registration_date", # Registration_Date
"expiration_date", # Expiration_Date
"last_sam_mod_date", # Last_Update_Date
"activation_date", # Activation_Date
"legal_business_name" # Legal_Business_Name
]
props_columns = {
'address_line_1': None,
'address_line_2': None,
'city': None,
'state': None,
'zip': None,
'zip4': None,
'country_code': None,
'congressional_district': None,
'business_types_codes': []
}
column_mappings = {x: x for x in column_headers + list(props_columns.keys())}
def remove_existing_duns(data, sess):
""" Remove rows from file that already have a entry in broker database. We should only update missing DUNS
Args:
data: dataframe representing a list of duns
sess: the database session
Returns:
a new dataframe with the DUNS removed that already exist in the database
"""
duns_in_file = ",".join(list(data['awardee_or_recipient_uniqu'].unique()))
sql_query = "SELECT awardee_or_recipient_uniqu " +\
"FROM duns where awardee_or_recipient_uniqu = ANY('{" + \
duns_in_file +\
"}')"
db_duns = pd.read_sql(sql_query, sess.bind)
missing_duns = data[~data['awardee_or_recipient_uniqu'].isin(db_duns['awardee_or_recipient_uniqu'])]
return missing_duns
def clean_duns_csv_data(data):
""" Simple wrapper around clean_data applied just for duns
Args:
data: dataframe representing the data to be cleaned
Returns:
a dataframe cleaned and to be imported to the database
"""
return clean_data(data, DUNS, column_mappings, {})
def batch(iterable, n=1):
""" Simple function to create batches from a list
Args:
iterable: the list to be batched
n: the size of the batches
Yields:
the same list (iterable) in batches depending on the size of N
"""
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
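# --- Illustrative sketch (not part of the loader): how `batch` is used below ---
# The SAM service accepts at most 100 DUNS numbers per request, so callers iterate over
# fixed-size chunks. Wrapped in a throwaway helper so it never runs on import.
def _demo_batch():
    all_duns = [str(i).zfill(9) for i in range(250)]
    chunks = list(batch(all_duns, 100))
    assert [len(c) for c in chunks] == [100, 100, 50]
    return chunks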
def update_duns_props(df, client):
""" Returns same dataframe with address data updated"
Args:
df: the dataframe containing the duns data
client: the connection to the SAM service
Returns:
a merged dataframe with the duns updated with location info from SAM
"""
all_duns = df['awardee_or_recipient_uniqu'].tolist()
columns = ['awardee_or_recipient_uniqu'] + list(props_columns.keys())
duns_props_df = pd.DataFrame(columns=columns)
# SAM service only takes in batches of 100
for duns_list in batch(all_duns, 100):
duns_props_batch = dataactcore.utils.parentDuns.get_location_business_from_sam(client, duns_list)
# Adding in blank rows for DUNS where location data was not found
added_duns_list = []
if not duns_props_batch.empty:
added_duns_list = [str(duns) for duns in duns_props_batch['awardee_or_recipient_uniqu'].tolist()]
empty_duns_rows = []
for duns in (set(added_duns_list) ^ set(duns_list)):
empty_duns_row = props_columns.copy()
empty_duns_row['awardee_or_recipient_uniqu'] = duns
empty_duns_rows.append(empty_duns_row)
duns_props_batch = duns_props_batch.append( | pd.DataFrame(empty_duns_rows) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # MFRpred
#
# This is the MFRpred code runnning as a jupyter notebook, about the prediction of flux rope magnetic fields
#
# Authors: <NAME>, <NAME>, M. Reiss Space Research Institute IWF Graz, Austria
# Last update: July 2020
#
# How to predict the rest of the MFR if first 10, 20, 30, 40, 50% are seen?
# Everything should be done automatically with a deep learning method or ML fitting methods
#
# ---
# MIT LICENSE
#
# Copyright 2020, <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# In[2]:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import cm
from scipy import stats
import scipy.io
import sunpy.time
import numpy as np
import time
import pickle
import seaborn as sns
import pandas as pd
import os
import sys
from sunpy.time import parse_time
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error, median_absolute_error, r2_score
from sklearn.feature_selection import SelectKBest, SelectPercentile, f_classif
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import HuberRegressor
from sklearn.linear_model import Lars
from sklearn.linear_model import LassoLars
from sklearn.linear_model import PassiveAggressiveRegressor
from sklearn.linear_model import RANSACRegressor
from sklearn.linear_model import SGDRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from pandas.plotting import scatter_matrix
import warnings
warnings.filterwarnings('ignore')
#get all variables from the input.py file:
from input import feature_hours
#make new directory if it not exists
mfrdir='mfr_predict'
if os.path.isdir(mfrdir) == False: os.mkdir(mfrdir)
plotdir='plots'
if os.path.isdir(plotdir) == False: os.mkdir(plotdir)
os.system('jupyter nbconvert --to script mfrpred.ipynb')
# # 1 feature selection
#
#
# In[5]:
# sns.set_context("talk")
# sns.set_style("darkgrid")
sns.set_context("notebook", font_scale=0.4, rc={"lines.linewidth": 2.5})
#usage of script: python mfr_featureSelection.py wind_features.p sta_features.p stb_features.p --features
# if --features is set, then code will produce pickle-file with features and labels
# if --features is not set, then code will read from already existing pickle-file
# you only have to set features at the first run of the code, or if you changed something in the corresponding parts of the code
# pass --features again whenever the features need to be recomputed
# and --mfr if only MFR features (no sheath features) should be determined
features = True
if features: print("get features")
mfr = False
if mfr: print("only mfr")
# first three arguments need to be file names to save features into -
argv0='wind_features.p'
argv1='sta_features.p'
argv2='stb_features.p'
# ####################### functions ###############################################
def get_feature(sc_time, start_time, end_time, sc_ind, sc_feature, feature_hours, *VarArgs):
feature_mean = np.zeros(np.size(sc_ind))
feature_max = np.zeros(np.size(sc_ind))
feature_std = np.zeros(np.size(sc_ind))
for Arg in VarArgs:
if Arg == 'mean':
for p in np.arange(0, np.size(sc_ind)):
# extract values from MFR data
feature_temp = sc_feature[np.where(np.logical_and(sc_time > start_time[sc_ind[p]], sc_time < end_time[sc_ind[p]] + feature_hours / 24.0))]
# print(feature_temp)
feature_mean[p] = np.nanmean(feature_temp)
# print('mean')
elif Arg == 'std':
for p in np.arange(0, np.size(sc_ind)):
# extract values from MFR data
feature_temp = sc_feature[np.where(np.logical_and(sc_time > start_time[sc_ind[p]], sc_time < end_time[sc_ind[p]] + feature_hours / 24.0))]
# print(feature_temp)
feature_std[p] = np.nanstd(feature_temp)
elif Arg == 'max':
for p in np.arange(0, np.size(sc_ind)):
# extract values from MFR data
feature_temp = sc_feature[np.where(np.logical_and(sc_time > start_time[sc_ind[p]], sc_time < end_time[sc_ind[p]] + feature_hours / 24.0))]
# print(feature_temp)
feature_temp = feature_temp[np.isfinite(feature_temp)]
try:
feature_max[p] = np.max(feature_temp)
except ValueError: # raised if `y` is empty.
pass
# print('max')
if np.any(feature_mean) and np.any(feature_max) and np.any(feature_std):
# print('mean and std and max')
return feature_mean, feature_max, feature_std
elif np.any(feature_mean) and np.any(feature_max) and (not np.any(feature_std)):
# print('mean and max')
return feature_mean, feature_max
elif np.any(feature_mean) and (not np.any(feature_max)) and (not np.any(feature_std)):
# print('only mean')
return feature_mean
elif (not np.any(feature_mean)) and np.any(feature_max) and np.any(feature_std):
# print('max and std')
return feature_max, feature_std
elif (not np.any(feature_mean)) and (not np.any(feature_max)) and np.any(feature_std):
# print('only std')
return feature_std
elif (not np.any(feature_mean)) and np.any(feature_max) and (not np.any(feature_std)):
# print('only max')
return feature_max
elif np.any(feature_mean) and (not np.any(feature_max)) and np.any(feature_std):
# print('mean and std')
return feature_mean, feature_std
def get_label(sc_time, start_time, end_time, sc_ind, sc_label, feature_hours, *VarArgs):
label_mean = np.zeros(np.size(sc_ind))
label_max = np.zeros(np.size(sc_ind))
for Arg in VarArgs:
if Arg == 'mean':
for p in np.arange(0, np.size(sc_ind)):
label_temp = sc_label[np.where(np.logical_and(sc_time > start_time[sc_ind[p]] + feature_hours / 24.0, sc_time < end_time[sc_ind[p]]))]
label_mean[p] = np.nanmean(label_temp)
elif Arg == 'max':
for p in np.arange(0, np.size(sc_ind)):
label_temp = sc_label[np.where(np.logical_and(sc_time > start_time[sc_ind[p]] + feature_hours / 24.0, sc_time < end_time[sc_ind[p]]))]
label_max[p] = np.nanmax(label_temp)
if np.any(label_mean) and (not np.any(label_max)):
# print('only mean')
return label_mean
elif (not np.any(label_mean)) and np.any(label_max):
# print('only mean')
return label_max
# In[6]:
# ------------------------ READ ICMECAT
filename_icmecat = 'data/HELCATS_ICMECAT_v20_pandas.p'
[ic,header,parameters] = pickle.load(open(filename_icmecat, "rb" ))
print()
print()
print('load icmecat')
#ic is the pandas dataframe with the ICMECAT
#print(ic.keys())
# ------------------------ get all parameters from ICMECAT for easier handling
# id for each event
iid = ic.loc[:,'icmecat_id']
# observing spacecraft
isc = ic.loc[:,'sc_insitu']
icme_start_time = ic.loc[:,'icme_start_time']
icme_start_time_num = parse_time(icme_start_time).plot_date
mo_start_time = ic.loc[:,'mo_start_time']
mo_start_time_num = parse_time(mo_start_time).plot_date
mo_end_time = ic.loc[:,'mo_end_time']
mo_end_time_num = parse_time(mo_end_time).plot_date
sc_heliodistance = ic.loc[:,'mo_sc_heliodistance']
sc_long_heeq = ic.loc[:,'mo_sc_long_heeq']
sc_lat_heeq = ic.loc[:,'mo_sc_lat_heeq']
mo_bmax = ic.loc[:,'mo_bmax']
mo_bmean = ic.loc[:,'mo_bmean']
mo_bstd = ic.loc[:,'mo_bstd']
mo_duration = ic.loc[:,'mo_duration']
# get indices of events by different spacecraft
istaind = np.where(isc == 'STEREO-A')[0]
istbind = np.where(isc == 'STEREO-B')[0]
iwinind = np.where(isc == 'Wind')[0]
# ############################# load spacecraft data ################################
print('load Wind data')
[win,winheader] = pickle.load(open("data/wind_2007_2019_heeq_ndarray.p", "rb"))
print('load STEREO-A data')
[sta,att, staheader] = pickle.load(open("data/stereoa_2007_2019_sceq_ndarray.p", "rb"))
print('load STEREO-B data')
[stb,att, stbheader] = pickle.load(open("data/stereob_2007_2014_sceq_ndarray.p", "rb"))
# ### Version (1.1) - prediction of scalar labels with a linear model, start with Btot
# In[ ]:
# ################################# spacecraft #####################################
# wind data: win.time win['bx'] win['by'] ... win['vt'] win.vy etc.
# sheath time: icme_start_time_num[iwinind] mo_start_time[iwinind]
# mfr time: mo_start_time[iwinind] mo_end_time[iwinind]
# Stereo-A data: sta.time sta['bx'] sta.by ... sta['vt'] sta.vy etc.
# sheath time: icme_start_time_num[istaind] mo_start_time[istaind]
# mfr time: mo_start_time[istaind] mo_end_time[istaind]
# Stereo-B data: stb.time stb['bx'] stb['by'] ... stb['vt'] stb.vy etc.
# sheath time: icme_start_time_num[istbind] mo_start_time[istbind]
# mfr time: mo_start_time[istbind] mo_end_time[istbind]
# use some hours of MFR for feature
# only sheath for features: feature_hours = 0
# only take events where there is a sheath, so where the start of the ICME is NOT equal to the start of the flux rope
n_iwinind = np.where(icme_start_time_num[iwinind] != mo_start_time_num[iwinind])[0]
n_istaind = np.where(icme_start_time_num[istaind] != mo_start_time_num[istaind])[0]
n_istbind = np.where(icme_start_time_num[istbind] != mo_start_time_num[istbind])[0]
if features:
# List of features - go through each ICME and extract values characterising them
# only features of the sheath
# syntax: get_features(spacecraft time, start time of intervall for values, end time of intervall for values, event index of spacecraft, value to be extracted, "mean", "std", "max")
################################ WIND #############################
feature_bzmean, feature_bzstd = get_feature(win['time'], icme_start_time_num, mo_start_time_num, n_iwinind, win['bz'], feature_hours, "mean", "std")
feature_bymean, feature_bystd = get_feature(win['time'], icme_start_time_num, mo_start_time_num, n_iwinind, win['by'], feature_hours, "mean", "std")
feature_bxmean, feature_bxstd = get_feature(win['time'], icme_start_time_num, mo_start_time_num, n_iwinind, win['bx'], feature_hours, "mean", "std")
feature_btotmean, feature_btotstd = get_feature(win['time'], icme_start_time_num, mo_start_time_num, n_iwinind, win['bt'], feature_hours, "mean", "std")
feature_btotmean, feature_btotmax, feature_btotstd = get_feature(win['time'], icme_start_time_num, mo_start_time_num, n_iwinind, win['bt'], feature_hours, "mean", "max", "std")
feature_vtotmean, feature_vtotmax, feature_vtotstd = get_feature(win['time'], icme_start_time_num, mo_start_time_num, n_iwinind, win['vt'], feature_hours, "mean", "std", "max")
if mfr:
feature_bzmean, feature_bzstd = get_feature(win['time'], mo_start_time_num, mo_start_time_num, n_iwinind, win['bz'], feature_hours, "mean", "std")
feature_bymean, feature_bystd = get_feature(win['time'], mo_start_time_num, mo_start_time_num, n_iwinind, win['by'], feature_hours, "mean", "std")
feature_bxmean, feature_bxstd = get_feature(win['time'], mo_start_time_num, mo_start_time_num, n_iwinind, win['bx'], feature_hours, "mean", "std")
feature_btotmean, feature_btotstd = get_feature(win['time'], mo_start_time_num, mo_start_time_num, n_iwinind, win['bt'], feature_hours, "mean", "std")
feature_btotmean, feature_btotmax, feature_btotstd = get_feature(win['time'], mo_start_time_num, mo_start_time_num, n_iwinind, win['bt'], feature_hours, "mean", "max", "std")
feature_vtotmean, feature_vtotmax, feature_vtotstd = get_feature(win['time'], mo_start_time_num, mo_start_time_num, n_iwinind, win['vt'], feature_hours, "mean", "std", "max")
# ------------------
# label
label_btotmean = get_label(win['time'], mo_start_time_num, mo_end_time_num, n_iwinind, win['bt'], feature_hours, "mean")
# ------------------
dwin = {'$<B_{tot}>$': feature_btotmean, 'btot_std': feature_btotstd, '$max(B_{tot})$': feature_btotmax, '$<B_{x}>$': feature_bxmean, 'bx_std': feature_bxstd, '$<B_{y}>$': feature_bymean, 'by_std': feature_bystd, '$<B_{z}>$': feature_bzmean, 'bz_std': feature_bzstd, '$<v_{tot}>$': feature_vtotmean, '$max(v_{tot})$': feature_vtotmax, 'vtot_std': feature_vtotstd, '<B> label': label_btotmean}
dfwin = | pd.DataFrame(data=dwin) | pandas.DataFrame |
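# --- Minimal sketch (synthetic numbers, not part of the notebook): what get_feature()/get_label() do ---
# Both helpers boil down to masking the in-situ time series to a time window around each event
# and then taking the mean/std/max of the masked values.
import numpy as np

t = np.linspace(0.0, 10.0, 1001)            # decimal-day time axis (toy example)
btot = 8.0 + 2.0 * np.sin(t)                # synthetic |B| series
start, end, fh = 2.0, 4.0, 6.0              # event window plus feature_hours
mask = (t > start) & (t < end + fh / 24.0)
feature_mean = np.nanmean(btot[mask])
feature_max = np.nanmax(btot[mask])
feature_std = np.nanstd(btot[mask])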
import os
import pickle
import numpy as np
import pandas as pd
import torch
import torch.utils.data
import torch.nn as nn
from multiprocessing import Process
from subprocess import call
DIR_FLOW_LOG = 'flow_creation_logs'
DIR_FLOW_PROCESS = 'flow_process_semaphores'
DIR_CSV = 'csv'
DIR_MODELS = 'models'
DIR_CLASSIFIED_FLOWS = os.path.join(DIR_CSV, 'classified_flows')
DIR_CLASSIFIED_FLOWS_RFC = os.path.join(DIR_CLASSIFIED_FLOWS, 'rfc')
DIR_CLASSIFIED_FLOWS_DNN = os.path.join(DIR_CLASSIFIED_FLOWS, 'dnn')
DIR_UNCLASSIFIED_FLOWS = os.path.join(DIR_CSV, 'unclassified_flows')
def rfc_classification(data, pcap_file_name):
"""
Args:
data: pd.DataFrame
"""
print('Binning data for Random Forest Classifier...')
bins = 5
# binning columns
for feature in data.columns[7:]:
        data[feature] = pd.cut(data[feature], bins, labels=False)
import os
import sys
import argparse
import pandas as pd
import numpy as np
import ast
import logging.config
# .. other safe imports
try:
# z test
from statsmodels.stats.proportion import proportions_ztest
# bayesian bootstrap and vis
import matplotlib.pyplot as plt
import seaborn as sns
import bayesian_bootstrap.bootstrap as bb
from astropy.utils import NumpyRNGContext
# progress bar
from tqdm import tqdm
# are these needed?
from scipy import stats
from collections import Counter
except ImportError:
logging.error("Missing niche library")
sys.exit()
logging.debug("other modules loaded")
# instantiate progress bar goodness
tqdm.pandas()
# cols for related links A/B tests
REQUIRED_COLUMNS = ["Occurrences", "ABVariant", "Page_Event_List",
"Page_List", "Event_cat_act_agg"
]
def is_a_b(variant, variant_dict):
"""
    Is the variant one of the expected group labels in variant_dict? Filters out junk data.
    :param variant: the ABVariant value of a journey
    :param variant_dict: dict with CONTROL_GROUP and INTERVENTION_GROUP labels
    :return: True or False
"""
return any([variant == x for x in list(variant_dict.values())])
def get_number_of_events_rl(event):
"""Counts events with category 'relatedLinkClicked' and action'Related content'."""
if event[0][0] == 'relatedLinkClicked' and 'Related content' in event[0][1]:
return event[1]
return 0
def sum_related_click_events(event_list):
return sum([get_number_of_events_rl(event) for event in event_list])
def is_related(x):
"""Compute whether a journey includes at least one related link click."""
return x > 0
def is_nav_event(event):
"""
Determine whether an event is navigation related.
"""
return any(
['breadcrumbClicked' in event, 'homeLinkClicked' in event,
all(cond in event for cond in [
'relatedLinkClicked', 'Explore the topic'])])
def count_nav_events(page_event_list):
"""
Counts the number of nav events from a content page in a Page Event List.
Helper function dependent on thing_page_paths instantiated in analyse_sampled_processed_journey.
"""
content_page_nav_events = 0
for pair in page_event_list:
if is_nav_event(pair[1]):
if pair[0] in thing_page_paths:
content_page_nav_events += 1
return content_page_nav_events
def count_search_from_content(page_list):
"""
Counts the number of GOV.UK searches from a content page,
as specified by the list of content pages, `thing_page_paths`.
Helper function dependent on thing_page_paths instantiated in analyse_sampled_processed_journey.
"""
search_from_content = 0
for i, page in enumerate(page_list):
if i > 0:
if '/search?q=' in page:
if page_list[i-1] in thing_page_paths:
search_from_content += 1
return search_from_content
def count_total_searches(df, group):
searches = df[df.ABVariant == group].groupby(
'Content_Nav_or_Search_Count').sum().iloc[:, 0].reset_index(0)
total_searches = searches['Content_Nav_or_Search_Count']*searches['Occurrences']
return sum(total_searches)
def compare_total_searches(df, variant_dict):
control = count_total_searches(df, variant_dict['CONTROL_GROUP'])
intervention = count_total_searches(df, variant_dict['INTERVENTION_GROUP'])
print("total searches in control group = {}".format(control))
print("total searches in intervention group = {}".format(intervention))
percent_diff = abs((intervention - control)/(control + intervention))*100
if control>intervention:
print("intervention has {} fewer navigation or searches than control;".format(control-intervention))
if intervention>control:
print("intervention has {} more navigation or searches than control;".format(intervention-control))
print("a {0:.2f}% overall difference".format(percent_diff))
print("The relative change was {0:.2f}% from control to intervention".format(
((intervention - control)/control)*100
))
def z_prop(df, col_name, variant_dict):
"""
Conduct z_prop test and generate confidence interval.
    Using Bernoulli trial terminology, where X (or x)
    is the number of successes and n is the number of trials
    (total occurrences), we compare ABVariant A and B.
    p is x/n. We use a z proportion test between variants.
"""
# A & B
n = df.Occurrences.sum()
# prop of journeys with at least one related link, occurrences summed for those rows gives X
p = df[df[col_name] == 1].Occurrences.sum() / n
assert (p >= 0), "Prop less than zero!"
assert (p <= 1), "Prop greater than one!"
# A
# number of trials for page A
n_a = df[df.ABVariant == variant_dict['CONTROL_GROUP']].Occurrences.sum()
    # number of successes (occurrences) for page A, i.e. journeys with at least one related link clicked
x_a = df[(df['ABVariant'] == variant_dict['CONTROL_GROUP']) & (df[col_name] == 1)].Occurrences.sum()
# prop of journeys where one related link was clicked, on A
p_a = x_a / n_a
# B
# number of trials for page B
n_b = df[df.ABVariant == variant_dict['INTERVENTION_GROUP']].Occurrences.sum()
# number of successes for page B, at least one related link clicked
x_b = df[(df['ABVariant'] == variant_dict['INTERVENTION_GROUP']) & (df[col_name] == 1)].Occurrences.sum()
# prop of journeys where one related link was clicked, on B
p_b = x_b / n_b
assert (n == n_a + n_b), "Error in filtering by ABVariant!"
# validate assumptions
# The formula of z-statistic is valid only when sample size (n) is large enough.
# nAp, nAq, nBp and nBq should be ≥ 5.
# where p is probability of success (we can use current baseline)
# q = 1 - p
# tried a helper function here but it didn't work hence not DRY
assert (n_a * p) >= 5, "Assumptions for z prop test invalid!"
assert (n_a * (1 - p)) >= 5, "Assumptions for z prop test invalid!"
assert (n_b * p) >= 5, "Assumptions for z prop test invalid!"
assert (n_b * (1 - p)) >= 5, "Assumptions for z prop test invalid!"
# using statsmodels
# successes
count = np.array([x_a, x_b])
# number of trials
nobs = np.array([n_a, n_b])
# z prop test
z, p_value = proportions_ztest(count, nobs, value=0, alternative='two-sided')
# print(' z-stat = {z} \n p-value = {p_value}'.format(z=z,p_value=p_value))
statsdict = {'metric_name': col_name, 'stats_method': 'z_prop_test',
'x_ab': x_a + x_b, 'n_ab': n, 'p': p,
'x_a': x_a, 'n_a': n_a, 'p_a': p_a,
'x_b': x_b, 'n_b': n_b, 'p_b': p_b,
'test_statistic': z, 'p-value': p_value}
return statsdict
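# Illustrative sketch (not part of the original analysis; the toy counts below are
# assumptions, not real journey data) of the statsmodels call that z_prop() wraps.
def _example_z_prop_counts():
    # 480 of 10,000 control journeys vs 520 of 10,000 intervention journeys
    # had at least one related link click
    count = np.array([480, 520])
    nobs = np.array([10000, 10000])
    z, p_value = proportions_ztest(count, nobs, value=0, alternative='two-sided')
    return z, p_value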
def compute_standard_error_prop_two_samples(x_a, n_a, x_b, n_b):
"""
The standard error of the difference between two proportions
is given by the square root of the sum of the variances.
The variance of the difference between two independent proportions is
equal to the sum of the variances of the proportions of each sample,
because each sample contributes to sampling error in the distribution of differences.
var(A-B) = var(A) + ((-1)^2)*var(B)
"""
p1 = x_a / n_a
p2 = x_b / n_b
se = p1 * (1 - p1) / n_a + p2 * (1 - p2) / n_b
return np.sqrt(se)
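# Illustrative sketch (toy counts are assumptions): with 480/10,000 vs 520/10,000
# successes the standard error of the difference is roughly 0.003.
def _example_standard_error_two_samples():
    return compute_standard_error_prop_two_samples(480, 10000, 520, 10000)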
def zconf_interval_two_samples(x_a, n_a, x_b, n_b, alpha=0.05):
"""
Gives two points, the lower and upper bound of a (1-alpha)% confidence interval.
To calculate the confidence interval we need to know the standard error of the difference between two proportions.
    The standard error of the difference between two proportions is the combination of the standard
    errors of the two independent distributions, SE(p_a) and SE(p_b).
    If the CI includes zero then we fail to reject the null hypothesis at the defined alpha.
"""
p1 = x_a / n_a
p2 = x_b / n_b
se = compute_standard_error_prop_two_samples(x_a, n_a, x_b, n_b)
z_critical = stats.norm.ppf(1 - 0.5 * alpha)
return p2 - p1 - z_critical * se, p2 - p1 + z_critical * se
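# Illustrative sketch (same toy counts as above, still assumptions): the resulting
# 95% interval for p_b - p_a straddles zero, i.e. no evidence of a difference.
def _example_zconf_interval_two_samples():
    return zconf_interval_two_samples(480, 10000, 520, 10000, alpha=0.05)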
def mean_bb(counter_X_keys, counter_X_vals, n_replications):
"""Simulate the posterior distribution of the mean.
Parameter X: The observed data (array like)
Parameter n_replications: The number of bootstrap replications to perform (positive integer)
Returns: Samples from the posterior
"""
samples = []
weights = np.random.dirichlet(counter_X_vals, n_replications)
for w in weights:
samples.append(np.dot(counter_X_keys, w))
return samples
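# Illustrative sketch (toy counts are assumptions): Dirichlet weights over the
# observed journey lengths give posterior draws of the mean journey length.
def _example_mean_bb():
    observed_lengths = [1, 2, 3, 4]
    occurrences = [50, 30, 15, 5]
    return mean_bb(observed_lengths, occurrences, n_replications=1000)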
def bayesian_bootstrap_analysis(df, col_name=None, boot_reps=10000, seed=1337, variant_dict=None):
"""Run bayesian bootstrap on the mean of a variable of interest between Page Variants.
Args:
        df: A rl_sampled_processed pandas DataFrame.
col_name: A string of the column of interest.
boot_reps: An int of number of resamples with replacement.
seed: A int random seed for reproducibility.
variant_dict:dictionary containing letter codes for CONTROL_GROUP and INTERVENTION_GROUP
Returns:
a_bootstrap: a vector of boot_reps n resampled means from A.
b_bootstrap: a vector of boot_reps n resampled means from B.
"""
if variant_dict is None:
variant_dict = {
'CONTROL_GROUP':'B',
'INTERVENTION_GROUP':'C'
}
        logging.info('assigning defaults for variants: control group = "B" and intervention = "C"')
with NumpyRNGContext(seed):
A_grouped_by_length = df[df.ABVariant == variant_dict['CONTROL_GROUP']].groupby(
col_name).sum().reset_index()
B_grouped_by_length = df[df.ABVariant == variant_dict['INTERVENTION_GROUP']].groupby(
col_name).sum().reset_index()
a_bootstrap = mean_bb(A_grouped_by_length[col_name],
A_grouped_by_length['Occurrences'],
boot_reps)
b_bootstrap = mean_bb(B_grouped_by_length[col_name],
B_grouped_by_length['Occurrences'],
boot_reps)
return a_bootstrap, b_bootstrap
def bb_hdi(a_bootstrap, b_bootstrap, alpha=0.05):
"""Calculate a 1-alpha high density interval
Args:
a_bootstrap: a list of resampled means from page A journeys.
b_bootstrap: a list of resampled means from page B journeys.
alpha: false positive rate.
Returns:
a_ci_low: the lower point of the 1-alpha% highest density interval for A.
a_ci_hi: the higher point of the 1-alpha% highest density interval for A.
b_ci_low: the lower point of the 1-alpha% highest density interval for B.
b_ci_hi: the higher point of the 1-alpha% highest density interval for B.
ypa_diff_mean: the mean difference for the posterior between A's and B's distributions.
ypa_diff_ci_low: lower hdi for posterior of the difference.
ypa_diff_ci_hi: upper hdi for posterior of the difference.
prob_b_>_a: number of values greater than 0 divided by num of obs for mean diff posterior. Or
the probability that B's mean metric was greater than A's mean metric.
"""
# Calculate a 95% HDI
a_ci_low, a_ci_hi = bb.highest_density_interval(a_bootstrap, alpha=alpha)
# Calculate a 95% HDI
b_ci_low, b_ci_hi = bb.highest_density_interval(b_bootstrap, alpha=alpha)
# calculate the posterior for the difference between A's and B's mean of resampled means
# ypa prefix is vestigial from blog post
ypa_diff = np.array(b_bootstrap) - np.array(a_bootstrap)
ypa_diff_mean = ypa_diff.mean()
# get the hdi
ypa_diff_ci_low, ypa_diff_ci_hi = bb.highest_density_interval(ypa_diff, alpha=alpha)
# We count the number of values greater than 0 and divide by the total number
# of observations
# which returns us the the proportion of values in the distribution that are
# greater than 0
p_value = (ypa_diff > 0).sum() / ypa_diff.shape[0]
return {'a_ci_low': a_ci_low, 'a_ci_hi': a_ci_hi,
'b_ci_low': b_ci_low, 'b_ci_hi': b_ci_hi,
'diff_mean': ypa_diff_mean,
'diff_ci_low': ypa_diff_ci_low, 'diff_ci_hi': ypa_diff_ci_hi,
'prob_b_>_a': p_value}
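# Illustrative sketch (toy counts are assumptions) tying mean_bb() and bb_hdi()
# together the same way analyse_sampled_processed_journey() does further down.
def _example_bb_hdi():
    a_bootstrap = mean_bb([1, 2, 3], [60, 30, 10], 1000)
    b_bootstrap = mean_bb([1, 2, 3], [50, 35, 15], 1000)
    return bb_hdi(a_bootstrap, b_bootstrap, alpha=0.05)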
# main
def analyse_sampled_processed_journey(data_dir, filename, alpha, boot_reps, variants):
"""
Conducts various A/B tests on one sampled processed journey file.
This function is dependent on document_types.csv.gz existing in data/metadata dir.
As this takes some time to run ~ 1 hour, we output an additional dataframe as .csv.gz
to the rl_sampled_processed dir as a side effect.
This can allow the user to revisit the metrics
at a later date without having to rerun the analysis.
Parameters:
data_dir: The directory processed_journey and sampled_journey can be
found in.
filename (str): The filename of the sampled processed journey, please include
any .csv.gz etc extensions.
alpha: The corrected false positive rate.
boot_reps: int of number of statistics generated from resampling to create distribution.
variants: list containing two str elements defining the control and intervention group labels
Returns:
pandas.core.frame.DataFrame: A data frame containing statistics of the A/B tests on various metrics.
"""
variant_dict = {
'CONTROL_GROUP': variants[0],
'INTERVENTION_GROUP': variants[1]
}
logger.info(f"Analysing {filename} - calculating A/B test statistics...")
in_path = os.path.join(data_dir, "sampled_journey", filename)
logger.info("Reading in file...")
df = pd.read_csv(in_path, sep='\t', usecols=REQUIRED_COLUMNS)
logger.debug(f'{filename} DataFrame shape {df.shape}')
logger.info("Finished reading, defensively removing any non A or B variants,"
" in-case the user did not sample...")
# filter out any weird values like Object object
df.query("ABVariant in @variants", inplace=True)
logger.debug(f'Cleaned DataFrame shape {df.shape}')
logger.info('Preparing variables / cols for analysis...')
logger.debug('Convert three variables from str to list...')
df['Event_cat_act_agg'] = df['Event_cat_act_agg'].progress_apply(ast.literal_eval)
df['Page_Event_List'] = df['Page_Event_List'].progress_apply(ast.literal_eval)
df['Page_List'] = df['Page_List'].progress_apply(ast.literal_eval)
logger.debug('Create Page_Length_List col...')
df['Page_List_Length'] = df['Page_List'].progress_apply(len)
logger.info('Related link preparation...')
logger.debug('Get the number of related links clicks per Sequence')
df['Related Links Clicks per seq'] = df['Event_cat_act_agg'].progress_map(sum_related_click_events)
logger.debug('Calculate number of related links per experimental unit.')
df["Has_Related"] = df["Related Links Clicks per seq"].progress_map(is_related)
df['Related Links Clicks row total'] = df['Related Links Clicks per seq'] * df['Occurrences']
# needs finding_thing_df read in from document_types.csv.gz
logger.info('Navigation events preparation...')
df['Content_Page_Nav_Event_Count'] = df['Page_Event_List'].progress_map(count_nav_events)
logger.info('Search events preparation...')
df['Content_Search_Event_Count'] = df['Page_List'].progress_map(count_search_from_content)
logger.debug('Summing Nav and Search Events')
df['Content_Nav_or_Search_Count'] = df['Content_Page_Nav_Event_Count'] + df['Content_Search_Event_Count']
logger.debug('Sum content page nav event and search events, then multiply by occurrences for row total.')
df['Content_Nav_Search_Event_Sum_row_total'] = df['Content_Nav_or_Search_Count'] * df['Occurrences']
logger.debug('Calculating the ratio of clicks on navigation elements vs. clicks on related links')
# avoid NaN with +1
df['Ratio_Nav_Search_to_Rel'] = (df['Content_Nav_Search_Event_Sum_row_total'] + 1) / \
(df['Related Links Clicks row total'] + 1)
# if (Content_Nav_Search_Event_Sum == 0) that's our success
# Has_No_Nav_Or_Search will equal 1, that's our success, works with z_prop function
df['Has_No_Nav_Or_Search'] = df['Content_Nav_Search_Event_Sum_row_total'] == 0
logger.info('All necessary variables derived for pending statistical tests...')
logger.debug('Performing z_prop test on prop with at least one related link.')
rl_stats = z_prop(df, 'Has_Related', variant_dict)
# as it's one row needs to be a Series
df_ab = pd.Series(rl_stats).to_frame().T
logger.debug(df_ab)
ci_low, ci_upp = zconf_interval_two_samples(rl_stats['x_a'], rl_stats['n_a'],
rl_stats['x_b'], rl_stats['n_b'], alpha=alpha)
logger.debug(' 95% Confidence Interval = ( {0:.2f}% , {1:.2f}% )'
.format(100 * ci_low, 100 * ci_upp))
df_ab['ci_low'] = ci_low
df_ab['ci_upp'] = ci_upp
logger.debug('Performing z_prop test on prop with content page nav event.')
nav_stats = z_prop(df, 'Has_No_Nav_Or_Search', variant_dict)
# concat rows
df_ab_nav = pd.Series(nav_stats).to_frame().T
logger.debug(df_ab_nav)
ci_low, ci_upp = zconf_interval_two_samples(nav_stats['x_a'], nav_stats['n_a'],
nav_stats['x_b'], nav_stats['n_b'], alpha=alpha)
logger.debug(' 1-alpha % Confidence Interval = ( {0:.2f}% , {1:.2f}% )'
.format(100 * ci_low, 100 * ci_upp))
# assign a dict to row of dataframe
df_ab_nav['ci_low'] = ci_low
df_ab_nav['ci_upp'] = ci_upp
logger.debug('Joining z_prop dataframes.')
df_ab = pd.concat([df_ab, df_ab_nav])
logger.info('Saving df with related links derived variables to rl_sampled_processed_journey dir')
out_path = os.path.join(DATA_DIR, "rl_sampled_processed_journey", ("zprop_" + f"{filename}"))
logger.info(f"Saving to {out_path}")
df_ab.to_csv(out_path, compression="gzip", index=False)
logger.info('Performing Bayesian bootstrap on count of nav or search.')
a_bootstrap, b_bootstrap = bayesian_bootstrap_analysis(df, col_name='Content_Nav_or_Search_Count',
boot_reps=boot_reps,
variant_dict=variant_dict)
# high density interval of page variants and difference posteriors
# ratio is vestigial name
ratio_nav_stats = bb_hdi(a_bootstrap, b_bootstrap, alpha=alpha)
df_ab_ratio = pd.Series(ratio_nav_stats).to_frame().T
logger.debug(df_ab_ratio)
logger.info('Performing Bayesian bootstrap on Page_List_Length')
    a_bootstrap, b_bootstrap = bayesian_bootstrap_analysis(df, col_name='Page_List_Length', boot_reps=boot_reps, variant_dict=variant_dict)
# high density interval of page variants and difference posteriors
length_stats = bb_hdi(a_bootstrap, b_bootstrap, alpha=alpha)
df_ab_length = pd.Series(length_stats).to_frame().T
logger.debug(df_ab_length)
logger.debug('Joining bayesian boot dataframes.')
    df_bayes = pd.concat([df_ab_ratio, df_ab_length])
import honeycomb_io
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_table
from flask_caching import Cache
import pandas as pd
import datetime
import dateutil
import uuid
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = dash.Dash(__name__)
cache = Cache(app.server, config={
    # 'CACHE_TYPE': 'redis',  # alternative backend; the filesystem setting below takes effect
# Note that filesystem cache doesn't work on systems with ephemeral
# filesystems like Heroku.
'CACHE_TYPE': 'filesystem',
'CACHE_DIR': 'cache-directory',
# should be equal to maximum number of users on the app at a single time
# higher numbers will store more data in the filesystem / redis cache
'CACHE_THRESHOLD': 200
})
def serve_layout():
session_id = str(uuid.uuid4())
return html.Div([
html.Div(session_id, id='session-id', style={'display': 'none'}),
html.Div([
html.Div(
[
html.H4('Date range'),
dcc.DatePickerRange(
id='my-date-picker-range',
min_date_allowed=datetime.date(2020, 8, 1),
max_date_allowed=datetime.date(2021, 7, 31),
initial_visible_month=datetime.date.today()
)
],
style={'width': '32%', 'display': 'inline-block'}
),
html.Div(
[
html.H4('Students'),
dcc.Dropdown(
id='students-dropdown',
# options=[
# {'label': 'Alpha', 'value': 'a'},
# {'label': 'Beta', 'value': 'b'},
# {'label': 'Gamma', 'value': 'c'}
# ],
# value=['a', 'c'],
multi=True
)
],
style={'width': '32%', 'display': 'inline-block'}
),
html.Div(
[
html.H4('Materials'),
dcc.Dropdown(
id='materials-dropdown',
# options=[
# {'label': 'One', 'value': '1'},
# {'label': 'Two', 'value': '2'},
# {'label': 'Three', 'value': '3'}
# ],
# value=[1],
multi=True
)
],
style={'width': '32%', 'display': 'inline-block'}
)
]),
html.Div(
dash_table.DataTable(
id='table',
columns=[
{"name": 'Student', "id": 'Student'},
{"name": 'Material', "id": 'Material'},
{"name": 'Day', "id": 'Day'},
{"name": 'Start', "id": 'Start'},
{"name": 'End', "id": 'End'}
],
# filter_action='native',
# filter_query='{Material} contains Bells && {Student} contains Flower Arranging',
# fill_width=False,
fixed_rows={'headers': True},
page_action='none',
style_table={'height': '500px', 'overflowY': 'auto'},
style_as_list_view=True,
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}
],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'
},
style_cell={
'whiteSpace': 'normal',
'height': 'auto',
'textAlign': 'left'
}
)
)
])
app.layout = serve_layout
@app.callback(
Output('table', "data"),
Output('students-dropdown', "options"),
Output('materials-dropdown', "options"),
Input('session-id', "children"),
Input('my-date-picker-range', "start_date"),
Input('my-date-picker-range', "end_date"),
Input('students-dropdown', "value"),
Input('materials-dropdown', "value")
)
def update_data(
session_id,
start_date_string,
end_date_string,
selected_students,
selected_materials
):
if pd.isnull(start_date_string) or pd.isnull(end_date_string):
return [], [], []
material_interactions_display_df = fetch_dataframe(
session_id,
start_date_string,
end_date_string
)
student_options = [
{'label': option, 'value': option}
for option in list(material_interactions_display_df['Student'].unique())
]
material_options = [
{'label': option, 'value': option}
for option in list(material_interactions_display_df['Material'].unique())
]
logger.info('Selected students: \'{}\''.format(selected_students))
logger.info('Selected materials: \'{}\''.format(selected_materials))
if selected_students is not None and len(selected_students) > 0:
material_interactions_display_df = material_interactions_display_df.loc[
material_interactions_display_df['Student'].isin(selected_students)
]
if selected_materials is not None and len(selected_materials) > 0:
material_interactions_display_df = material_interactions_display_df.loc[
material_interactions_display_df['Material'].isin(selected_materials)
]
table_data = material_interactions_display_df.to_dict('records')
return table_data, student_options, material_options
def fetch_dataframe(
session_id,
start_date_string,
end_date_string
):
@cache.memoize()
def fetch_and_serialize_data(
session_id,
start_date_string,
end_date_string
):
logger.info('Fetching new data from Honeycomb for start date {} and end date {}'.format(
start_date_string,
end_date_string
))
if pd.isnull(start_date_string) or pd.isnull(end_date_string):
return pd.DataFrame()
        start_date = pd.to_datetime(start_date_string)
import click
import pysam
import pandas as pd
from os import environ
from plotnine import *
from .svs import (
tabulate_split_read_signatures,
)
from .api import (
download_genomes,
download_kraken2,
make_index,
search_reads,
condense_alignment,
make_report,
kraken2_search_reads,
)
@click.group()
def covid():
pass
@covid.group('sv')
def cli_sv():
pass
@cli_sv.command('split-reads')
@click.option('-g', '--min-gap', default=100)
@click.option('-p/-a', '--primary/--all', default=False)
@click.option('-o', '--outfile', default='-', type=click.File('w'))
@click.argument('bams', nargs=-1)
def cli_split_reads(min_gap, primary, outfile, bams):
tbls = []
for bam in bams:
click.echo(bam, err=True)
samfile = pysam.AlignmentFile(bam, "rb")
tbl = tabulate_split_read_signatures(samfile, min_gap=min_gap, primary_only=primary)
tbl['sample_name'] = bam.split('/')[-1].split('.')[0]
tbls.append(tbl)
tbl = pd.concat(tbls)
click.echo(tbl.shape, err=True)
tbl.to_csv(outfile)
@cli_sv.command('plot-split')
@click.option('-o', '--outfile', default='-', type=click.File('wb'))
@click.argument('tbl')
def cli_plot_split(outfile, tbl):
tbl = pd.read_csv(tbl, index_col=0)
# tbl = tbl.groupby('sample_name').apply(lambda t: t.sample(min(1000, t.shape[0])))
plot = (
ggplot(tbl, aes(x='position', y='split_position', color='strand')) +
geom_point(size=2, alpha=1) +
geom_density_2d() +
ylab('Position') +
xlab('Split Position') +
ggtitle('Split Signature') +
scale_color_brewer(type='qualitative', palette=6) +
labs(color='Strand') +
theme(
text=element_text(size=20),
legend_position='right',
figure_size=(8, 8),
panel_border=element_rect(colour="black", fill='none', size=1),
)
)
plot.save(outfile)
@cli_sv.command('plot-split-2')
@click.option('-o', '--outfile', default='-', type=click.File('wb'))
@click.argument('tbl')
def cli_plot_split_2(outfile, tbl):
    tbl = pd.read_csv(tbl, index_col=0)
import string
from functools import reduce
from typing import List, Dict
import pandas as pd
from pandas import DataFrame
def alphabets() -> List[str]:
return list(string.ascii_lowercase)
def positional_alphabet_columns(word_length: int = 5) -> List[str]:
return [f'{letter}_{position}' for letter in alphabets() for position in range(1, word_length + 1)]
def letter_positions_in_word(word: str) -> List[str]:
return [f'{letter}_{position + 1}' for position, letter in enumerate(word)]
def word_position_count_as_row(word: str) -> Dict[str, int]:
letter_positions = letter_positions_in_word(word)
return {
column_name: 1 if column_name in letter_positions else 0
for column_name in positional_alphabet_columns(len(word))
}
def word_frame(word: str) -> DataFrame:
return pd.DataFrame(word_position_count_as_row(word), index=[word])
def words_frame(word_frames: List[DataFrame]) -> DataFrame:
    return pd.concat(word_frames)
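# Illustrative sketch (the example words are assumptions): one row per word and
# one indicator column per letter/position pair.
def _example_words_frame():
    return words_frame([word_frame("crane"), word_frame("slate")])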
import urllib.request as request
import re
import os
import pandas as pd
import pkg_resources
from urllib.error import URLError
from ._logging import _logger
from .exceptions import DownloadNotAllowedError
def get_noaa_isd_lite_file(wmo_index:int, year:int, *, output_dir:str = None, allow_downloads:bool = False) -> str:
"""
Given a WMO index and a year, retrieve the corresponding NOAA ISD Lite AMY file
:param wmo_index:
:param year:
:param output_dir: Optional output directory - if not specified, the file will be saved to a package directory.
If the directory already contains a NOAA ISD Lite file matching the requested WMO Index and year, then a new
file will not be downloaded from NOAA and that file's path will be returned
:param allow_downloads: Pass True to permit NOAA ISD Lite files and related information to be downloaded from
ncdc.noaa.gov if they are not already present in output_dir.
:return: The path to the NOAA ISD Lite file
"""
if output_dir is None: # pragma: no cover
output_dir = pkg_resources.resource_filename('diyepw', 'data/noaa_isd_lite_files')
_logger.info(f"get_noaa_isd_lite_file() - output_dir was not defined, will use {output_dir}")
if not os.path.exists(output_dir): # pragma: no cover
os.mkdir(output_dir)
_logger.info(f"get_noaa_isd_lite_file() - {output_dir} did not exist, so has been created")
# On the NOAA website, the ISD Lite files are named with a third number between WMO and year, but
# since we don't use that third number for anything and it complicates identifying a file for a
# WMO/Year combination, we simplify the name to only contain the values we care about
file_name = f"{wmo_index}-{year}.gz"
file_path = os.path.join(output_dir, file_name)
# Download the ISD Lite file if it's not already in the output directory
if not os.path.exists(file_path):
url = _get_noaa_isd_lite_file_url(year, wmo_index, allow_downloads)
if not allow_downloads:
raise DownloadNotAllowedError(
f"The ISD Lite file {file_path} is not present. Pass allow_downloads=True to allow the "
f"missing data to be automatically downloaded from {url}"
)
try:
with request.urlopen(url) as response:
with open(file_path, 'wb') as downloaded_file:
downloaded_file.write(response.read())
except URLError as e: # pragma: no cover
raise Exception(f'Failed to download {url} - are you connected to the internet?')
except Exception as e: # pragma: no cover
raise Exception(f"Error downloading from {url}: {e}")
return file_path
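# Illustrative sketch (the WMO index and year are assumptions, not a recommendation):
# the file is only downloaded when it is not already cached and allow_downloads=True.
def _example_get_noaa_isd_lite_file():
    return get_noaa_isd_lite_file(725300, 2019, allow_downloads=True)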
def _get_noaa_isd_lite_file_url(year:int, wmo_index:int, allow_downloads:bool) -> str:
catalog = _get_noaa_isd_lite_file_catalog(year, allow_downloads=allow_downloads)
wmo_index_row = catalog.loc[catalog['wmo_index'] == wmo_index]
if len(wmo_index_row) == 0:
raise Exception(f"Invalid WMO Inex: The NOAA ISD Lite catalog does not contain an entry for WMO Index {wmo_index}")
file_name = wmo_index_row['file_name'].iloc[0]
return f"https://www1.ncdc.noaa.gov/pub/data/noaa/isd-lite/{year}/{file_name}"
def _get_noaa_isd_lite_file_catalog(year:int, *, catalog_dir=None, allow_downloads:bool = False) -> pd.DataFrame:
"""
Retrieve the list of all NOAA ISD Lite files for North America (WMO indices starting with 7) for a given year.
If the file is not already present, one will be downloaded. Files are named after the year whose files they
describe.
:param year:
:param catalog_dir: The directory in which to look for the file, and into which the file will be written if
downloaded
:param allow_downloads: Pass True to permit the catalog of available NOAA ISD Lite files for North America to
be downloaded if it is not already present in catalog_dir
:return: A Pandas Dataframe containing a set of file names. The file names can be
appended to the URL https://www1.ncdc.noaa.gov/pub/data/noaa/isd-lite/{year}/ to download the files from
NOAA
"""
if catalog_dir is None:
catalog_dir = pkg_resources.resource_filename('diyepw', 'data/noaa_isd_lite_catalogs')
_logger.info(f"catalog_dir was not defined, using {catalog_dir}")
if not os.path.exists(catalog_dir): # pragma: no cover
raise Exception(f"Directory {catalog_dir} does not exist")
file_path = os.path.join(catalog_dir, str(year))
# If the catalog file already exists, we'll read it. If it doesn't, we'll download it, import it into a
# dataframe, and then save that so that it exists the next time we need it.
if os.path.exists(file_path):
_logger.info(f"Catalog file exists at {file_path}, using it instead of downloading it from NOAA")
        catalog = pd.read_csv(file_path)
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split
# load the datasets
def read_data(base_info_path,
annual_report_info_path,
tax_info_path,
change_info_path,
news_info_path,
other_info_path,
entprise_info_path,
):
    base_info = pd.read_csv(base_info_path)  # basic company information
    annual_report_info = pd.read_csv(annual_report_info_path)
    tax_info = pd.read_csv(tax_info_path)
    change_info = pd.read_csv(change_info_path)
    news_info = pd.read_csv(news_info_path)
    other_info = pd.read_csv(other_info_path)
    entprise_info = pd.read_csv(entprise_info_path)
    tax_info['START_DATE'] = pd.to_datetime(tax_info['START_DATE'], format="%Y-%m-%d")
    return base_info, annual_report_info, tax_info, change_info, news_info, other_info, entprise_info
df_x = pd.DataFrame(entprise_info['id'])
df_y = pd.DataFrame(entprise_info['label'])
x_train, x_test, y_train, y_test = train_test_split(df_x, df_y, test_size = 0.3, random_state = 2021)
data = pd.concat([x_train, x_test]).reset_index(drop=True)
def get_base_info_feature(df, base_info):
off_data = base_info.copy()
off_data_isnull_rate=off_data.isnull().sum()/len(off_data)
big_null_name=off_data_isnull_rate[off_data_isnull_rate.values>=0.95].index
base_info.drop(big_null_name,axis=1,inplace=True)
base_info.fillna(-1, downcast = 'infer', inplace = True)
    # handle the datetime columns
    base_info['opfrom']=pd.to_datetime(base_info['opfrom'],format="%Y-%m-%d") # convert to datetime
    base_info['pre_opfrom']=base_info['opfrom'].map(lambda x:x.timestamp() if x!=-1 else 0) # convert datetime to a timestamp
base_info['opto']=pd.to_datetime(base_info['opto'],format='%Y-%m-%d')
base_info['pre_opto']=base_info['opto'].map(lambda x:x.timestamp() if x!=-1 else 0)
le=LabelEncoder()
base_info['industryphy']=le.fit_transform(base_info['industryphy'].map(str))
base_info['opscope']=le.fit_transform(base_info['opscope'].map(str))
base_info['opform']=le.fit_transform(base_info['opform'].map(str))
data = df.copy()
data=pd.merge(data, base_info, on='id', how='left')
    # basic features for the industry category (industryphy)
key=['industryphy']
prefixs = ''.join(key) + '_'
    # number of distinct companies operating in this industry
pivot=pd.pivot_table(data,index=key,values='id',aggfunc=lambda x:len(set(x)))
pivot=pd.DataFrame(pivot).rename(columns={'id': prefixs+'different_id'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # advertising-related features by detailed industry
key=['industryco','adbusign']
    # mean registered capital in this industry for advertising vs non-advertising companies
pivot=pd.pivot_table(data,index=key,values='regcap',aggfunc=np.mean)
pivot=pd.DataFrame(pivot).rename(columns={'regcap': prefixs+'mean_regcap'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # features for the detailed industry code (industryco)
key=['industryco']
prefixs = ''.join(key) + '_'
    # number of distinct companies operating in this detailed industry
pivot=pd.pivot_table(data,index=key,values='id',aggfunc=lambda x:len(set(x)))
pivot=pd.DataFrame(pivot).rename(columns={'id': prefixs+'different_id'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # mean number of employees in this detailed industry
pivot=pd.pivot_table(data,index=key,values='empnum',aggfunc=np.mean)
pivot=pd.DataFrame(pivot).rename(columns={'empnum': prefixs+'mean_empnum'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # maximum number of employees in this detailed industry
pivot=pd.pivot_table(data,index=key,values='empnum',aggfunc=np.max)
pivot=pd.DataFrame(pivot).rename(columns={'empnum': prefixs+'max_empnum'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # total headcount of the company
data['all_people']=list(map(lambda x,y,z : x+y+z ,data['exenum'],data['empnum'],data['parnum']))
    # share of the registered capital that has actually been paid in
data['rec/reg']=list(map(lambda x,y : x/y if y!=0 else 0,data['reccap'],data['regcap']))
data.fillna(-1, downcast = 'infer', inplace = True)
    # average contribution per person in the company
data['mean_hand']=list(map(lambda x,y : x/y if y!=0 else 0,data['regcap'],data['all_people']))
data.fillna(-1, downcast = 'infer', inplace = True)
    # business scope features (e.g. sports, materials)
key=['opscope']
prefixs = ''.join(key) + '_'
    # number of companies sharing the same business scope
pivot=pd.pivot_table(data,index=key,values='id',aggfunc=lambda x: len(set(x)))
pivot=pd.DataFrame(pivot).rename(columns={'id': prefixs+'many_id'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # mean number of employees for companies with this business scope
pivot=pd.pivot_table(data,index=key,values='empnum',aggfunc=np.mean)
pivot=pd.DataFrame(pivot).rename(columns={'empnum': prefixs+'mean_empnum'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # total number of partners for companies with this business scope
pivot=pd.pivot_table(data,index=key,values='parnum',aggfunc=np.sum)
pivot=pd.DataFrame(pivot).rename(columns={'parnum': prefixs+'sum_parnum'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # mean number of partners for companies with this business scope
pivot=pd.pivot_table(data,index=key,values='parnum',aggfunc=np.mean)
pivot=pd.DataFrame(pivot).rename(columns={'parnum': prefixs+'mean_parnum'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # mean registered capital for this business scope
pivot=pd.pivot_table(data[data['regcap'].map(lambda x : x!=-1)],index=key,values='regcap',aggfunc=np.mean)
pivot=pd.DataFrame(pivot).rename(columns={'regcap': prefixs+'mean_ragcap'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # maximum registered capital for this business scope
pivot=pd.pivot_table(data[data['regcap'].map(lambda x : x!=-1)],index=key,values='regcap',aggfunc=np.max)
pivot=pd.DataFrame(pivot).rename(columns={'regcap': prefixs+'max_ragcap'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # mean paid-in capital for this business scope
pivot=pd.pivot_table(data[data['reccap'].map(lambda x : x!=-1)],index=key,values='reccap',aggfunc=np.mean)
pivot=pd.DataFrame(pivot).rename(columns={'reccap': prefixs+'mean_raccap'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # maximum paid-in capital for this business scope
pivot=pd.pivot_table(data[data['reccap'].map(lambda x : x!=-1)],index=key,values='reccap',aggfunc=np.max)
pivot=pd.DataFrame(pivot).rename(columns={'reccap':prefixs+'max_raccap'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # company type features (enttype)
key=['enttype']
prefixs = ''.join(key) + '_'
    # number of distinct sub-types under this company type
pivot=pd.pivot_table(data,index=key,values='enttypeitem',aggfunc=lambda x:len(set(x)))
pivot=pd.DataFrame(pivot).rename(columns={'enttypeitem':
prefixs+'different_item'}).reset_index()
data = pd.merge(data, pivot, on=key, how='left')
data.fillna(-1, downcast = 'infer', inplace = True)
    # rank features
key=['sort']
prefixs = ''.join(key) + '_'
    # ascending/descending rank of registered capital within the industry category
data[prefixs+'industryphy_regcap_postive']=data.groupby('industryphy')['regcap'].rank(ascending=True)
data[prefixs+'industryphy_regcap_nagative']=data.groupby('industryphy')['regcap'].rank(ascending=False)
    # ascending/descending rank of paid-in capital within the industry category
data[prefixs+'industryphy_reccap_postive']=data.groupby('industryphy')['reccap'].rank(ascending=True)
data[prefixs+'industryphy_reccap_nagative']=data.groupby('industryphy')['reccap'].rank(ascending=False)
    # ascending/descending rank of registered capital within the company type
data[prefixs+'enttype_regcap_postive']=data.groupby('enttype')['regcap'].rank(ascending=True)
data[prefixs+'enttype_regcap_nagative']=data.groupby('enttype')['regcap'].rank(ascending=False)
    # ascending/descending rank of paid-in capital within the company type
data[prefixs+'enttype_reccap_postive']=data.groupby('enttype')['reccap'].rank(ascending=True)
data[prefixs+'enttype_reccap_nagative']=data.groupby('enttype')['reccap'].rank(ascending=False)
    # ascending/descending rank of registered capital within the operating start date
data[prefixs+'opfrom_regcap_postive']=data.groupby('pre_opfrom')['regcap'].rank(ascending=True)
data[prefixs+'opfrom_regcap_negative']=data.groupby('pre_opfrom')['regcap'].rank(ascending=False)
    # ascending/descending rank of paid-in capital within the operating start date
data[prefixs+'opfrom_recap_postive']=data.groupby('pre_opfrom')['reccap'].rank(ascending=True)
data[prefixs+'opfrom_reccap_negative']=data.groupby('pre_opfrom')['reccap'].rank(ascending=False)
    # ascending/descending rank of registered capital within the operating end date
data[prefixs+'opto_regcap_postive']=data.groupby('pre_opto')['regcap'].rank(ascending=True)
data[prefixs+'opto_regcap_negative']=data.groupby('pre_opto')['regcap'].rank(ascending=False)
    # # ascending/descending rank of paid-in capital within the operating end date
# data[prefixs+'opto_recap_postive']=data.groupby('pre_opto')['reccap'].rank(ascending=True)
data[prefixs+'opto_reccap_negative']=data.groupby('pre_opto')['reccap'].rank(ascending=False)
    # ascending/descending rank of registered capital within enttypegb
data[prefixs+'enttypegb_regcap_postive']=data.groupby('enttypegb')['regcap'].rank(ascending=True)
data[prefixs+'enttypegb_regcap_negative']=data.groupby('enttypegb')['regcap'].rank(ascending=False)
    # ascending/descending rank of paid-in capital within enttypegb
data[prefixs+'enttypegb_recap_postive']=data.groupby('enttypegb')['reccap'].rank(ascending=True)
data[prefixs+'enttypegb_reccap_negative']=data.groupby('enttypegb')['reccap'].rank(ascending=False)
    # # ascending/descending rank of registered capital within adbusign
# data[prefixs+'adbusign_regcap_postive']=data.groupby('adbusign')['regcap'].rank(ascending=True)
# data[prefixs+'adbusign_regcap_negative']=data.groupby('adbusign')['regcap'].rank(ascending=False)
    # # ascending/descending rank of paid-in capital within adbusign
data[prefixs+'adbusign_recap_postive']=data.groupby('adbusign')['reccap'].rank(ascending=True)
# data[prefixs+'adbusign_reccap_negative']=data.groupby('adbusign')['reccap'].rank(ascending=False)
return data
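# Illustrative sketch of the recurring pivot-and-merge pattern above (the toy frame
# is an assumption): count distinct ids per industry and merge back as a feature.
def _example_pivot_count_feature():
    toy = pd.DataFrame({'industryphy': ['A', 'A', 'B'], 'id': [1, 2, 3]})
    pivot = pd.pivot_table(toy, index=['industryphy'], values='id',
                           aggfunc=lambda x: len(set(x)))
    pivot = pd.DataFrame(pivot).rename(columns={'id': 'industryphy_different_id'}).reset_index()
    return pd.merge(toy, pivot, on='industryphy', how='left')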
data = get_base_info_feature(data, base_info)
# x_train = get_base_info_feature(x_train, base_info)
# x_test = get_base_info_feature(x_test, base_info)
def get_annual_report_info_feature(df, feat):
off_data=feat.copy()
off_data_isnull_rate=off_data.isnull().sum()/len(off_data)
big_null_name=off_data_isnull_rate[off_data_isnull_rate.values>=0.9].index
feat.drop(big_null_name,axis=1,inplace=True)
feat.fillna(-1,downcast = 'infer', inplace = True)
    # annual report features
    # per-company (id-level) aggregates
data = df.copy()
key=['id']
prefixs = ''.join(key) + '_'
    # number of distinct reported states for the company across years
pivot=pd.pivot_table(feat,index=key,values='STATE',aggfunc=lambda x:len(set(x)))
pivot=pd.DataFrame(pivot).rename(columns={'STATE':prefixs+'many_STATE'}).reset_index()
data=pd.merge(data, pivot, on=key, how='left')
data.fillna(-1,downcast = 'infer', inplace = True)
    # total funds of the company
pivot=pd.pivot_table(feat,index=key,values='FUNDAM',aggfunc=np.sum)
pivot=pd.DataFrame(pivot).rename(columns={'FUNDAM':prefixs+'sum_FUNDAM'}).reset_index()
data=pd.merge(data, pivot, on=key, how='left')
data.fillna(-1,downcast = 'infer', inplace = True)
    # total number of employees of the company
pivot=pd.pivot_table(feat,index=key,values='EMPNUM',aggfunc=np.sum)
pivot=pd.DataFrame(pivot).rename(columns={'EMPNUM':prefixs+'sum_EMPNUM'}).reset_index()
data=pd.merge(data, pivot, on=key, how='left')
data.fillna(-1,downcast = 'infer', inplace = True)
    # number of years the company publicly disclosed its employee count
pivot=pd.pivot_table(feat[feat['EMPNUMSIGN'].map(lambda x: x==1)],index=key,values='EMPNUM',aggfunc=len)
pivot=pd.DataFrame(pivot).rename(columns={'EMPNUM':prefixs+'gongshi_many_EMPNUM '}).reset_index()
data=pd.merge(data, pivot, on=key, how='left')
data.fillna(-1,downcast = 'infer', inplace = True)
    # number of years the company was reported as open for business
pivot=pd.pivot_table(feat[feat['BUSSTNAME'].map(lambda x: x=='开业')],index=key,values='BUSSTNAME',aggfunc=len)
pivot=pd.DataFrame(pivot).rename(columns={'BUSSTNAME':prefixs+'开业_many_year '}).reset_index()
data=pd.merge(data, pivot, on=key, how='left')
data.fillna(-1,downcast = 'infer', inplace = True)
return data
data = get_annual_report_info_feature(data, annual_report_info)
train = data[:x_train.shape[0]].reset_index(drop=True)
test = data[x_train.shape[0]:].reset_index(drop=True)
def get_model(train_x,train_y,valid_x,valid_y, my_type='lgb'):
if my_type == 'lgb':
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_error',
'num_leaves': 64,
'max_depth':7,
'learning_rate': 0.02,
'feature_fraction': 0.85,
'feature_fraction_seed':2021,
'bagging_fraction': 0.85,
'bagging_freq': 5,
'bagging_seed':2021,
'min_data_in_leaf': 20,
'lambda_l1': 0.5,
'lambda_l2': 1.2,
'verbose': -1
}
dtrain = lgb.Dataset(train_x, label=train_y)
dvalid = lgb.Dataset(valid_x, label=valid_y)
model = lgb.train(
params,
train_set = dtrain,
num_boost_round=10000,
valid_sets = [dtrain, dvalid],
verbose_eval=100,
early_stopping_rounds=600,
# categorical_feature=cat_cols,
)
elif my_type == 'xgb':
        params = {'booster':'gbtree', # the linear booster performs worse than the tree booster here
                  'objective':'binary:logistic',
                  'eval_metric':'auc',
                  'silent':1, # set to 0 to print verbose training information
                  'eta':0.01, # learning rate, typical values 0.01-0.2
                  'max_depth':7, # maximum tree depth, typical values 3-10, used to avoid overfitting
                  'min_child_weight':5, # default 1; helps avoid overfitting, too large causes underfitting
                  'gamma':0.2, # default 0; minimum loss reduction required to split a node
                  'lambda':1, # default 1; L2 regularisation term on weights
                  'colsample_bylevel':0.7,
                  'colsample_bytree':0.8, # default 1, typical 0.5-1; fraction of columns sampled per tree
                  'subsample':0.8, # default 1, typical 0.5-1; fraction of rows sampled per tree
                  'scale_pos_weight':1 # set to a positive value when classes are highly imbalanced to help convergence
                  }
dtrain = xgb.DMatrix(train_x, label = train_y)
        # watchlist = [(dtrain,'train')] # would list the evaluation results for each iteration
model = xgb.train(params,dtrain,num_boost_round = 1200)
elif my_type == 'cat':
model = CatBoostClassifier(
iterations=5000,
max_depth=10,
learning_rate=0.07,
l2_leaf_reg=9,
random_seed=2018,
fold_len_multiplier=1.1,
early_stopping_rounds=100,
use_best_model=True,
loss_function='Logloss',
eval_metric='AUC',
verbose=100)
model.fit(train_x,train_y,eval_set=[(train_x, train_y),(valid_x, valid_y)], plot=True)
return model
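# Illustrative sketch (assumes feature frames shaped like train[features] below):
# train one LightGBM model on a fold and score the held-out rows.
def _example_get_model(train_x, train_y, valid_x, valid_y):
    model = get_model(train_x, train_y, valid_x, valid_y, my_type='lgb')
    return model.predict(valid_x)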
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score, fbeta_score, precision_score, recall_score, roc_auc_score
result = pd.DataFrame()
model_type = ['cat', 'lgb', 'xgb']
for my_type in model_type:
KF = StratifiedKFold(n_splits=5, random_state=2020, shuffle=True)
features = [i for i in train.columns if i not in ['id','dom', 'opfrom', 'opto', 'oploc']]
oof = np.zeros(len(train))
predictions = np.zeros((len(test)))
    # feature importance
    feat_imp_df = pd.DataFrame({'feat': features, 'imp': 0})
"""
Helper functions for get_module_progress.py
***
All Canvas LMS REST API calls made using canvasapi python API wrapper:
https://github.com/ucfopen/canvasapi
***
@authors: <NAME>, <NAME>, <NAME>
"""
from ast import literal_eval
import datetime
import re
import os
import shutil
from tqdm import tqdm
import pandas as pd
import settings
from pathlib import Path
def create_dict_from_object(theobj, list_of_attributes):
"""given an object and list of attributes return a dictionary
Args:
theobj (a Canvas object)
list_of_attributes (list of strings)
Returns:
mydict
"""
def get_attribute_if_available(theobj, attrname):
if hasattr(theobj, attrname):
return {attrname: getattr(theobj, attrname)}
else:
return {attrname: None}
mydict = {}
for i in list_of_attributes:
mydict.update(get_attribute_if_available(theobj, i))
return mydict
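# Illustrative sketch (the dummy object below is an assumption, not a Canvas type):
# attributes missing from the object come back as None.
def _example_create_dict_from_object():
    class _Dummy:
        id = 1
        name = "demo module"
    return create_dict_from_object(_Dummy(), ["id", "name", "published"])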
def get_modules(course):
"""Returns all modules from specified course
Makes a call to the CanvasAPI through Python API wrapper.
Calls make_modules_dataframe() to convert response to properly formatted
Pandas dataframe. Returns it.
Args:
course (canvasapi.course.Course): The course obj.
from Canvas Python API wrapper
Returns:
DataFrame: Table with modules info for specified course
Raises:
KeyError: if request through canvasapi is unsuccessful or if dataframe creation and
handling results in errors
"""
try:
modules = course.get_modules(include=["items"], per_page=50)
attrs = [
"id",
"name",
"position",
"unlock_at",
"require_sequential_progress",
"another item",
"publish_final_grade",
"prerequisite_module_ids",
"published",
"items_count",
"items_url",
"items",
"course_id",
]
modules_dict = [create_dict_from_object(m, attrs) for m in modules]
modules_df = pd.DataFrame(modules_dict)
modules_df = modules_df.rename(
columns={
"id": "module_id",
"name": "module_name",
"position": "module_position",
}
)
except Exception:
raise KeyError("Unable to get modules for course: " + course.name)
else:
return modules_df
def get_items(modules_df, cname):
"""Returns expanded modules data
Given a modules dataframe, expand table data so that fields with a list get
broken up into indiviaul rows per list item & dictionaries are broken up
into separate columns.
Args:
module_df (DataFrame): modules DataFrame
Returns:
DataFrame: Table with all module info, a single row per item
and all item dict. attributes in a single column
Raises:
KeyError: if there is any issue expanding modules table or if module does not have items
"""
try:
expanded_items = _list_to_df(
modules_df[["module_id", "module_name", "course_id", "items"]], "items"
)
items_df = _dict_to_cols(expanded_items, "items", "items_")
items_df = _dict_to_cols(
items_df.reset_index(drop=True),
"items_completion_requirement",
"items_completion_req_",
)
except KeyError:
raise KeyError(
f'Unable to expand module items for "{cname}." Please ensure all modules have items'
)
else:
return items_df
def get_student_module_status(course):
"""Returns DataFrame with students' module progress
Given a course object, gets students registered in that course (API Request)
For each student, gets module info pertaining to that student (API Request)
Returns info in Pandas DataFrame table format.
Args:
course (canvasapi.course.Course): The course obj.
from Canvas Python API wrapper
Returns:
DataFrame: Table containing module progress data for each student.
Each student has a single entry per module in specified
course. EX.
row 0: student0, module0
row 1: student0, module1
row 2: student1, module0
row 3: student1, module1
"""
students_df = _get_students(course)
print("Getting student module info for " + course.name)
student_module_status = pd.DataFrame()
num_rows = len(list(students_df.iterrows()))
with tqdm(total=num_rows) as pbar:
for i, row in students_df.iterrows():
pbar.update(1)
sid = row["id"]
student_data = course.get_modules(
student_id=sid, include=["items"], per_page=50
)
attrs = [
"id",
"name",
"position",
"unlock_at",
"require_sequential_progress",
"publish_final_grade",
"prerequisite_module_ids",
"state",
"completed_at",
"items_count",
"items_url",
"items",
"course_id",
]
# make student data into dictionary
student_rows_dict = [
create_dict_from_object(m, attrs) for m in student_data
]
# make dictionary into df
student_rows = pd.DataFrame(student_rows_dict)
student_rows["student_id"] = str(sid)
student_rows["student_name"] = row["name"]
student_rows["sortable_student_name"] = row["sortable_name"]
student_module_status = student_module_status.append(
student_rows, ignore_index=True, sort=False
)
# note, kept getting sort error future warning
# might want to check this in future that Sort should be false
student_module_status = student_module_status.rename(
columns={
"id": "module_id",
"name": "module_name",
"position": "module_position",
}
)
return student_module_status
def get_student_items_status(course, module_status):
"""Returns expanded student module status data table
Args:
course (canvasapi.course.Course): The course obj.
from Canvas Python API wrapper.
module_status (DataFrame): student module status DataFrame
Returns:
DataFrame: Expanded table with same information as module_status DF.
Items list exapanded -> single row per item
Items dict. expanded -> single col per attribute
"""
try:
expanded_items = _list_to_df(module_status, "items")
except KeyError as e:
raise KeyError("Corse has no items completd by students")
expanded_items = _dict_to_cols(expanded_items, "items", "items_")
student_items_status = _dict_to_cols(
expanded_items, "items_completion_requirement", "item_cp_req_"
).reset_index(drop=True)
student_items_status["course_id"] = course.id
student_items_status["course_name"] = course.name
# pull out completed_at column as list
items_status_list = student_items_status["completed_at"].values.tolist()
# clean/format the datetime string (to be more interpretable in Tableau)
cleaned = map(__clean_datetime_value, items_status_list)
# put cleaned values back into dataframe
student_items_status["completed_at"] = list(cleaned)
student_items_status = student_items_status[
[
"completed_at",
"course_id",
"module_id",
"items_count",
"module_name",
"module_position",
"state",
"unlock_at",
"student_id",
"student_name",
"items_id",
"items_title",
"items_position",
"items_indent",
"items_type",
"items_module_id",
"item_cp_req_type",
"item_cp_req_completed",
"course_name",
]
]
return student_items_status
def __clean_datetime_value(datetime_string):
"""Given"""
if datetime_string is None:
return datetime_string
if isinstance(datetime_string, str):
x = datetime_string.replace("T", " ")
return x.replace("Z", "")
raise TypeError("Expected datetime_string to be of type string (or None)")
def write_data_directory(dataframes, cid):
"""Writes dataframes to directory titled by value of cid and items dataframe to
tableau directory
Iterates through dataframes dictionary and writes each one to disk (<key>.csv)
Makes *Course output* directory in data folder named <cid> (or writes to existing
if one already exists with that name)
Makes *Tableau* output directory called "Tableau" where all student_items dataframes will
be put for ease of import and union in tableau
Args:
dataframes (dictionary): dictionary of DataFrames
Format -> { name, DataFrame,... }
dir_name (string): directory name
"""
course_path = _make_output_dir(cid)
for name, dataframe in dataframes.items():
path = Path(f"{course_path}/{name}.csv")
dataframe.to_csv(path, index=False)
def clear_data_directory():
"""
Clears entire data directory except for Tableau folder
Directory path : module_progress/data
"""
root = os.path.dirname(os.path.abspath(__file__))[:-4]
data_path = Path(f"{root}/data")
for subdir in os.listdir(data_path):
path = data_path / subdir
if subdir != "Tableau" and subdir != ".gitkeep" and subdir != ".DS_Store":
shutil.rmtree(path, ignore_errors=False, onerror=None)
def write_tableau_directory(list_of_dfs):
"""Creates a directory titled Tableau containing 3 items:
course_entitlements.csv --> permissions table for Tableau server
module_data.csv --> unioned data for Tableau
status.csv --> details the success of the most recent run
Also creates a .zip with the contents of the Tableau folder in the 'archive' directory
"""
tableau_path = _make_output_dir("Tableau")
union = pd.concat(list_of_dfs, axis=0, ignore_index=True)
module_data_output_path = tableau_path / "module_data.csv"
union.to_csv(module_data_output_path, index=False)
root = os.path.dirname(os.path.abspath(__file__))[:-4]
# Copy the course_entitlements.csv into the Tableau folder
src = Path(f"{root}/course_entitlements.csv")
dst = Path(f"{root}/data/Tableau/course_entitlements.csv")
shutil.copyfile(src, dst)
current_dt = datetime.datetime.now()
dir_name = str(current_dt.strftime("%Y-%m-%d--%H-%M-%S"))
src = tableau_path
dst = Path(f"{root}/archive/{dir_name}")
shutil.make_archive(dst, "zip", src)
_output_status_table(tableau_path)
def _output_status_table(tableau_path):
"""
Creates .csv file for log folder that specifies run status for each course.
Log is titled by date time and table status info reflects most recent run.
"""
current_dt = datetime.datetime.now()
cols = ["Course Id", "Course Name", "Status", "Message", "Data Updated On"]
data = []
for cid, info in settings.status.items():
row = [cid, info["cname"], info["status"], info["message"], current_dt]
data.append(row)
dataframe = pd.DataFrame(data, columns=cols)
file_name = str(current_dt.strftime("%Y-%m-%d--%H-%M-%S")) + ".csv"
status_log_path = Path(f"{settings.ROOT_DIR}/status_log/{file_name}")
dataframe.to_csv(status_log_path, index=False)
status_path = tableau_path / "status.csv"
dataframe.to_csv(status_path, index=False)
def log_failure(cid, msg):
"""Adds failure log to global status object
Args:
cid (Integer): course id who's status has changed - used to create log entry
msg (String): description of the failure
"""
settings.status[str(cid)]["status"] = "Failed"
settings.status[str(cid)]["message"] = msg
def log_success(cid):
"""Adds success log to glbal status object
Args:
cid (Integer): course id who's status has changed - used to create log entry
"""
settings.status[str(cid)]["status"] = "Success"
settings.status[str(cid)][
"message"
] = "Course folder has been created in data directory"
def _get_students(course):
"""Returns DataFrame table with students enrolled in specified course
Makes a request to Canvas LMS REST API through Canvas Python API Wrapper
Calls make_dataframe to convert response to Pandas DataFrame. Returns
DataFrame.
Args:
course (canvasapi.course.Course): The course obj.
from Canvas Python API wrapper
Returns:
DataFrame: Students table
"""
# print("Getting student list")
students = course.get_users(
include=["test_student", "email"], enrollment_type=["student"], per_page=50
)
attrs = [
"id",
"name",
"created_at",
"sortable_name",
"short_name",
"sis_user_id",
"integration_id",
"login_id",
"pronouns",
]
students_data = [create_dict_from_object(s, attrs) for s in students]
    students_df = pd.DataFrame(students_data)
import json
import re
import numpy as np
import pandas as pd
from pathlib import Path
from fastprogress import progress_bar
from src.dataset import NAME2CODE, BIRD_CODE, SCINAME2CODE
def create_ground_truth(train: pd.DataFrame):
labels = np.zeros((len(train), 264), dtype=int)
for i, row in progress_bar(train.iterrows(), total=len(train)):
ebird_code = BIRD_CODE[row.ebird_code]
labels[i, ebird_code] = 1
secondary_labels = eval(row.secondary_labels)
for sl in secondary_labels:
if NAME2CODE.get(sl) is not None:
second_code = NAME2CODE[sl]
labels[i, BIRD_CODE[second_code]] = 1
background = row["background"]
if isinstance(background, str):
academic_names = re.findall("\((.*>)\)", background)
academic_names = list(
filter(
lambda x: x is not None,
map(
lambda x: SCINAME2CODE.get(x),
academic_names
)
)
)
for bl in academic_names:
labels[i, BIRD_CODE[bl]] = 1
columns = list(BIRD_CODE.keys())
index = train["filename"].map(lambda x: x.replace(".mp3", ".wav")).values
    labels_df = pd.DataFrame(labels, index=index, columns=columns)
import argparse
import glob
import math
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numba import jit, prange
from sklearn import metrics
from utils import *
@jit(nopython=True, nogil=True, cache=True, parallel=True, fastmath=True)
def compute_tp_tn_fp_fn(y_true, y_pred):
tp = 0
tn = 0
fp = 0
fn = 0
for i in prange(y_pred.size):
tp += y_true[i] * y_pred[i]
tn += (1-y_true[i]) * (1-y_pred[i])
fp += (1-y_true[i]) * y_pred[i]
fn += y_true[i] * (1-y_pred[i])
return tp, tn, fp, fn
def compute_precision(tp, fp):
return tp / (tp + fp)
def compute_recall(tp, fn):
return tp / (tp + fn)
def compute_f1_score(precision, recall):
try:
return (2*precision*recall) / (precision + recall)
except:
return 0
def compute_fbeta_score(precision, recall, beta):
try:
return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
except:
return 0
def compute_accuracy(tp,tn,fp,fn):
return (tp + tn)/(tp + tn + fp + fn)
def compute_auc(GT, pred):
return metrics.roc_auc_score(GT, pred)
def compute_auprc(GT, pred):
prec, rec, thresholds = metrics.precision_recall_curve(GT, pred)
# print(prec, rec, thresholds)
plt.plot(prec, rec)
plt.show()
# return metrics.auc(prec, rec)
def compute_average_precision(GT, pred):
ratio = sum(GT)/np.size(GT)
return metrics.average_precision_score(GT, pred), ratio
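# Illustrative sketch (toy labels and predictions are assumptions): chains the
# numba counting kernel with the precision/recall/F1 helpers above.
def _example_confusion_metrics():
    y_true = np.array([1, 0, 1, 1, 0], dtype=np.uint8)
    y_pred = np.array([1, 0, 0, 1, 1], dtype=np.uint8)
    tp, tn, fp, fn = compute_tp_tn_fp_fn(y_true, y_pred)
    precision = compute_precision(tp, fp)
    recall = compute_recall(tp, fn)
    return compute_f1_score(precision, recall)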
def main(args):
#====== Numba compilation ======
# The 2 lines are important
compute_tp_tn_fp_fn(np.array([0,0,0], dtype=np.uint8), np.array([0,1,0], dtype=np.uint8))
compute_tp_tn_fp_fn(np.array([0,0,0], dtype=np.float32), np.array([0,1,0], dtype=np.float32))
#===============================
out = args.out
if not os.path.exists(os.path.dirname(out)):
os.makedirs(os.path.dirname(out))
model_name = args.model_name
number_epochs = args.epochs
batch_size = args.batch_size
NumberFilters = args.number_filters
lr = args.learning_rate
cv_fold = args.cv_fold
model_params = ['Number Epochs', 'Batch Size', 'Number Filters', 'Learning Rate', 'Empty col', 'Empty col2', 'Empty col3', 'CV']
param_values = [number_epochs, batch_size, NumberFilters, lr, '', '', '', '']
Params = pd.Series(param_values, index=model_params, name='Params values')
metrics_names = ['AUPRC','AUPRC - Baseline','F1_Score','Fbeta_Score','Accuracy','Recall','Precision','CV fold']
Metrics = pd.Series(metrics_names, index=model_params, name='Model\Metrics')
if not os.path.exists(out):
Folder_Metrics = pd.DataFrame(columns = model_params)
Image_Metrics = pd.DataFrame(columns = model_params)
else:
Metrics_file = pd.ExcelFile(out)
Folder_Metrics = | pd.read_excel(Metrics_file, 'Sheet1', index_col=0, header=None) | pandas.read_excel |
#!/usr/bin/env python
# coding: utf-8
import os
import copy
import pandas
from os.path import join
from pandas.core.frame import DataFrame
from MyPythonDocx import *
def cal_va(df):
# df = DataFrame(page[1:], columns=page[0])
severity = ['嚴重', '高', '中', '低', '無']
vas = []
for idx in range(5):
mask = df['嚴重程度'] == severity[idx]
tmp = df[mask][['弱點名稱', '弱點描述']].values.tolist()
vas.append([])
for name in tmp:
if name and name not in vas[idx]:
vas[idx].append(name)
# print(vas)
return vas
def cal_risk_cnt(page):
try:
df = DataFrame(page[1:], columns=page[0])
except:
df = page
severity = ['嚴重', '高', '中', '低', '無']
cnts = []
for idx in range(5):
mask = df['嚴重程度'] == severity[idx]
cnts.append(df[mask].shape[0])
# total = set(tuple(x) for x in df[mask]['弱點名稱'])
# print(len(total))
# print(cnts)
return cnts
def check_va(data, data2):
# IP位址, Service Port, 弱點ID
total = ((row[0], row[3], row[4]) for row in data)
total2 = ((row[0], row[3], row[4]) for row in data2)
set1, set2 = set(total), set(total2)
not_repair_va = set1 & set2
new_va = set2 - not_repair_va
repaired_va = set1 - not_repair_va
elem = ('IP位址', 'Service Port', '弱點ID')
new_va.add(elem)
repaired_va.add(elem)
return list(new_va), list(not_repair_va), list(repaired_va)
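# Hedged mini-example (added; the rows are made-up stubs exposing only the columns
# check_va reads: index 0 = IP, 3 = service port, 4 = vulnerability id).
def _example_check_va():
    old_scan = [["10.0.0.1", "", "", "443", "VULN-1"], ["10.0.0.1", "", "", "80", "VULN-2"]]
    new_scan = [["10.0.0.1", "", "", "443", "VULN-1"], ["10.0.0.2", "", "", "22", "VULN-3"]]
    new_va, not_repaired, repaired = check_va(old_scan, new_scan)
    # VULN-1 is still open, VULN-2 was repaired, VULN-3 is newly found
    return new_va, not_repaired, repaired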
def va_to_row(va, data):
res = []
tmp_va = copy.deepcopy(va)
for row in data:
for cod in tmp_va:
if row[0] == cod[0] and row[3] == cod[1] and row[4] == cod[2]:
res.append(row)
tmp_va.remove(cod)
break
return res
def doc_va_table(doc, data, idx):
title = ['風險等級', '風險名稱', '風險簡述']
severity = ['嚴重', '高', '中', '低', '無']
vas = cal_va(data)
tb = doc.tables[idx]
for i, row in enumerate(tb.rows):
if not i:
continue
Table.remove_row(tb, row)
for i in range(6):
if not i:
for j in range(3):
cell = tb.cell(i, j)
cell.text = title[j]
Table.set_cell_color(cell, PURPLE)
Parag.set_font(cell.paragraphs, size=Pt(12), name=u'標楷體')
else:
for x in vas[i-1]:
row_cells = tb.add_row().cells
row_cells[0].text = severity[i-1]
Parag.set_font(row_cells[0].paragraphs, size=Pt(12), name=u'標楷體')
row_cells[1].text = x[0]
row_cells[2].text = x[1]
for p in row_cells[2].paragraphs:
p.paragraph_format.alignment = 0
# print(x[0], ' finished!')
Table.set_content_font(tb, size=Pt(12), name=u'標楷體')
Table.col_widths(tb, 1.5, 2.7, 5)
def doc_risk_cnt_copare(doc, data, data2):
cnts = cal_risk_cnt(data)
cnts2 = cal_risk_cnt(data2)
# cnts3 = cal_risk_cnt(data_decrease)
tbl = doc.tables[2]
sums = [0, 0, 0]
for i in range(5):
tbl.cell(i+1, 1).text = str(cnts[i])
tbl.cell(i+1, 2).text = str(cnts2[i])
sub = cnts[i] - cnts2[i]
tbl.cell(i+1, 3).text = str(sub)
# res = cnts[i]-cnts2[i] if cnts[i]>cnts2[i] else 0
# tbl.cell(i+1, 3).text = str(res)
sums[0] = sums[0] + cnts[i]
sums[1] = sums[1] + cnts2[i]
sums[2] = sums[2] + sub
for i in range(3):
tbl.cell(6, i+1).text = str(sums[i])
Table.set_content_font(tbl, size=Pt(12), name=u'標楷體')
for p in doc.paragraphs:
if 'decrease_risk_cnt' in p.text:
p.text = p.text.replace('decrease_risk_cnt', str(sums[2]))
def ExcelToWord_second(word, excel, excel2, sheet, sheet2, date, consultant):
ip = word.split('/')[-1].replace('.docx','')
print(ip)
month, day = date.split('/')
doc = Document(word)
df_excel = pandas.read_excel(excel, sheet_name=None)
data = df_excel[sheet]
mask = data['IP位址'] == ip
new_data = data[mask]
if not new_data.empty:
df_excel2 = pandas.read_excel(excel2, sheet_name=None)
data2 = df_excel2[sheet2]
li_data = data.values.tolist()
li_data.insert(0, data.columns)
li_data2 = data2.values.tolist()
li_data2.insert(0, data2.columns)
_, not_repair_va, _ = check_va(li_data, li_data2)
li_new_data2 = va_to_row(not_repair_va, li_data)
df_new_data2 = DataFrame(li_new_data2[1:], columns=li_new_data2[0])
new_data2 = df_new_data2[df_new_data2['IP位址'] == ip]
doc_risk_cnt_copare(doc, new_data, new_data2)
if new_data2.empty:
new_data2 = | DataFrame(columns=('嚴重程度', '弱點名稱', '弱點描述')) | pandas.core.frame.DataFrame |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import itertools
import numpy as np
import pandas as pd
class SubnetOversizeException(Exception):
'''An :py:exc:`Exception` to be raised when the sub-nets are too big
to be efficiently linked. If you get this then either reduce your search range
or increase :py:attr:`Linker.MAX_SUB_NET_SIZE`'''
pass
class UnknownLinkingError(Exception):
pass
def points_to_arr(level):
""" Convert a list of Points to an ndarray of coordinates """
return np.array([p.pos for p in level])
def points_from_arr(coords, frame_no, extra_data=None):
""" Convert an ndarray of coordinates to a list of PointFindLink """
if extra_data is None:
return [Point(frame_no, pos) for pos in coords]
else:
return [Point(frame_no, pos, extra_data={key: extra_data[key][i]
for key in extra_data})
for i, pos in enumerate(coords)]
def coords_from_df(df, pos_columns, t_column):
"""A generator that returns ndarrays of coords from a DataFrame. Assumes
t_column to be of integer type. Float-typed integers are also accepted.
Empty frames will be returned as empty arrays of shape (0, ndim)."""
ndim = len(pos_columns)
grouped = iter(df.groupby(t_column)) # groupby sorts by default
# get the first frame to learn first frame number
cur_frame, frame = next(grouped)
cur_frame = int(cur_frame)
yield cur_frame, frame[pos_columns].values
cur_frame += 1
for frame_no, frame in grouped:
frame_no = int(frame_no)
while cur_frame < frame_no:
yield cur_frame, np.empty((0, ndim))
cur_frame += 1
yield cur_frame, frame[pos_columns].values
cur_frame += 1
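# Usage sketch (added; the toy frame and column names are assumptions): frames 0 and 2
# contain features, frame 1 is missing and is yielded as an empty (0, ndim) array.
def _example_coords_from_df():
    df = pd.DataFrame({'x': [0.0, 1.0, 2.0], 'y': [0.0, 1.0, 2.0], 'frame': [0, 0, 2]})
    return [(t, arr.shape) for t, arr in coords_from_df(df, ['x', 'y'], 'frame')]
    # -> [(0, (2, 2)), (1, (0, 2)), (2, (1, 2))]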
def coords_from_df_iter(df_iter, pos_columns, t_column):
"""A generator that returns ndarrays of coords from a generator of
DataFrames. Also returns the first value of the t_column."""
ndim = len(pos_columns)
for df in df_iter:
if len(df) == 0:
yield None, np.empty((0, ndim))
else:
yield df[t_column].iloc[0], df[pos_columns].values
def verify_integrity(df):
"""Verifies that particle labels are unique for each frame, and that every
particle is labeled."""
is_labeled = df['particle'] >= 0
if not np.all(is_labeled):
frames = df.loc[~is_labeled, 'frame'].unique()
raise UnknownLinkingError("Some particles were not labeled "
"in frames {}.".format(frames))
grouped = df.groupby('frame')['particle']
try:
not_equal = grouped.nunique() != grouped.count()
except AttributeError: # for older pandas versions
not_equal = grouped.apply(lambda x: len( | pd.unique(x) | pandas.unique |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
### Read data from strip theory reference dataset
### Folder must contain List*.txt and Data*.*.bin files
from array import array
import pandas as pd
import matplotlib.pyplot as mpl
import os.path as path # to check whether the .csv output file already exists on disk
Ncfd = 2 # No. of CFD strips; same as the number of modes = 1 or 2
plot_data = True # plot data
data = {'nc':[],'md':[],'U':[],'d':[],'m':[],'L':[],'H':[],'Nt':[],'Dt':[],'tf':[],'ymax':[],'time':[],'y/d':[]} # dict object to create dataframe
# open List*.txt file
ftxt = open("List%d.txt" % Ncfd, "r")
line = ftxt.readline()
print(line[:-1])
for n in range(1000):
line = ftxt.readline()
if len(line) == 0:
break
print(line[:-1])
tmp = line.split()
nc = int(tmp[0]) # case number in List file
md = int(tmp[1]) # mode number
U = float(tmp[2]) # wind/air velocity [m/s]
d = float(tmp[3]) # cable diameter [m]
m = float(tmp[4]) # cable mass per unit length [kg/m]
L = float(tmp[5]) # cable length [m]
H = float(tmp[6]) # cable tension [N]
Nt = int(tmp[7]) # number of timesteps
Dt = float(tmp[8]) # timestep length [s]
tf = float(tmp[9]) # total time [s]
ymax = float(tmp[10]) # max(y) value [m]
filename = tmp[11] # = "Data%d.%d.bin" % (Ncfd, nc) # data file name
# open Data*.*.bin file
fdat=open(filename,"rb")
float_array = array('d')
float_array.fromfile(fdat, Nt)
time = float_array.tolist()
Fx = [[] for ncfd in range(Ncfd)]
Fy = [[] for ncfd in range(Ncfd)]
y = [[] for ncfd in range(Ncfd)]
for ncfd in range(Ncfd):
float_array = array('d')
float_array.fromfile(fdat, Nt)
Fx[ncfd] = float_array.tolist()
float_array = array('d')
float_array.fromfile(fdat, Nt)
Fy[ncfd] = float_array.tolist()
float_array = array('d')
float_array.fromfile(fdat, Nt)
y[ncfd] = float_array.tolist()
fdat.close()
# plot data
if plot_data:
fig3, axs = mpl.subplots(1)
for ncfd in range(Ncfd):
axs.plot(time, [y[ncfd][nt] / d for nt in range(Nt)])
for i,j in zip(time, [y[ncfd][nt] / d for nt in range(Nt)]):
# appending required data into dict
data['time'].append(i)
data['y/d'].append(j)
data['nc'].append(tmp[0])
data['md'].append(tmp[1])
data['U'].append(tmp[2])
data['d'].append(tmp[3])
data['m'].append(tmp[4])
data['L'].append(tmp[5])
data['H'].append(tmp[6])
data['Nt'].append(tmp[7])
data['Dt'].append(tmp[8])
data['tf'].append(tmp[9])
data['ymax'].append(tmp[10])
# can plot Fx and Fy in the same way
axs.set_xlabel('time [s]')
axs.set_ylabel('y / d')
axs.set_title('md = %d, H = %gN, U = %gm/s' % (md, H, U))
mpl.show()
ftxt.close()
| pd.DataFrame(data) | pandas.DataFrame |
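# Added sketch (the output file name is an assumption): os.path was imported above to
# check whether a .csv already exists, so the assembled dict is presumably exported, e.g.:
df_out = pd.DataFrame(data)
if not path.exists("Data%d_reference.csv" % Ncfd):
    df_out.to_csv("Data%d_reference.csv" % Ncfd, index=False)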
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.automl.feature.time_sequence import TimeSequenceFeatureTransformer
import tensorflow as tf
import pandas as pd
from zoo.zouwu.model.forecast import LSTMForecaster
from zoo.zouwu.model.forecast import MTNetForecaster
class TestZouwuModelForecast(ZooTestCase):
def setup_method(self, method):
tf.keras.backend.clear_session()
# super(TestZouwuModelForecast, self).setup_method(method)
self.ft = TimeSequenceFeatureTransformer()
self.create_data()
def teardown_method(self, method):
pass
def create_data(self):
def gen_train_sample(data, past_seq_len, future_seq_len):
data = pd.DataFrame(data)
x, y = self.ft._roll_train(data,
past_seq_len=past_seq_len,
future_seq_len=future_seq_len
)
return x, y
def gen_test_sample(data, past_seq_len):
test_data = pd.DataFrame(data)
x = self.ft._roll_test(test_data, past_seq_len=past_seq_len)
return x
self.long_num = 6
self.time_step = 2
look_back = (self.long_num + 1) * self.time_step
look_forward = 1
self.x_train, self.y_train = gen_train_sample(data=np.random.randn(
64, 4), past_seq_len=look_back, future_seq_len=look_forward)
self.x_val, self.y_val = gen_train_sample(data=np.random.randn(16, 4),
past_seq_len=look_back,
future_seq_len=look_forward)
self.x_test = gen_test_sample(data=np.random.randn(16, 4),
past_seq_len=look_back)
def test_forecast_lstm(self):
# TODO hacking to fix a bug
model = LSTMForecaster(target_dim=1, feature_dim=self.x_train.shape[-1])
model.fit(self.x_train,
self.y_train,
validation_data=(self.x_val, self.y_val),
batch_size=8,
distributed=False)
model.evaluate(self.x_val, self.y_val)
model.predict(self.x_test)
def test_forecast_mtnet(self):
# TODO hacking to fix a bug
model = MTNetForecaster(target_dim=1,
feature_dim=self.x_train.shape[-1],
long_series_num=self.long_num,
series_length=self.time_step
)
x_train_long, x_train_short = model.preprocess_input(self.x_train)
x_val_long, x_val_short = model.preprocess_input(self.x_val)
x_test_long, x_test_short = model.preprocess_input(self.x_test)
model.fit([x_train_long, x_train_short],
self.y_train,
validation_data=([x_val_long, x_val_short], self.y_val),
batch_size=32,
distributed=False)
model.evaluate([x_val_long, x_val_short], self.y_val)
model.predict([x_test_long, x_test_short])
def test_forecast_tcmf(self):
from zoo.zouwu.model.forecast import TCMFForecaster
import tempfile
model = TCMFForecaster(max_y_iterations=1,
init_FX_epoch=1,
max_FX_epoch=1,
max_TCN_epoch=1,
alt_iters=2)
horizon = np.random.randint(1, 50)
# construct data
id = np.arange(300)
data = np.random.rand(300, 480)
input = dict({'data': data})
with self.assertRaises(Exception) as context:
model.fit(input)
self.assertTrue("key `y` doesn't exist in x" in str(context.exception))
input = dict({'id': id, 'y': data})
with self.assertRaises(Exception) as context:
model.is_distributed()
self.assertTrue('You should run fit before calling is_distributed()'
in str(context.exception))
model.fit(input)
assert not model.is_distributed()
with self.assertRaises(Exception) as context:
model.fit(input)
self.assertTrue('This model has already been fully trained' in str(context.exception))
with self.assertRaises(Exception) as context:
model.fit(input, incremental=True)
self.assertTrue('NotImplementedError' in context.exception.__class__.__name__)
with tempfile.TemporaryDirectory() as tempdirname:
model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, distributed=False)
yhat = model.predict(x=None, horizon=horizon)
yhat_loaded = loaded_model.predict(x=None, horizon=horizon)
yhat_id = yhat_loaded["id"]
assert (yhat_id == id).all()
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (300, horizon)
assert (yhat == yhat_loaded).all()
target_value = np.random.rand(300, horizon)
target_value = dict({"y": target_value})
model.evaluate(x=None, target_value=target_value, metric=['mse'])
def test_forecast_tcmf_without_id(self):
from zoo.zouwu.model.forecast import TCMFForecaster
import tempfile
model = TCMFForecaster(max_y_iterations=1,
init_FX_epoch=1,
max_FX_epoch=1,
max_TCN_epoch=1,
alt_iters=2)
horizon = np.random.randint(1, 50)
# construct data
id = np.arange(200)
data = np.random.rand(300, 480)
input = dict({'y': "abc"})
with self.assertRaises(Exception) as context:
model.fit(input)
self.assertTrue("the value of y should be an ndarray" in str(context.exception))
input = dict({'id': id, 'y': data})
with self.assertRaises(Exception) as context:
model.fit(input)
self.assertTrue("the length of the id array should be equal to the number of"
in str(context.exception))
input = dict({'y': data})
model.fit(input)
assert not model.is_distributed()
with self.assertRaises(Exception) as context:
model.fit(input)
self.assertTrue('This model has already been fully trained' in str(context.exception))
with tempfile.TemporaryDirectory() as tempdirname:
model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, distributed=False)
yhat = model.predict(x=None, horizon=horizon)
yhat_loaded = loaded_model.predict(x=None, horizon=horizon)
assert "id" not in yhat_loaded
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (300, horizon)
assert (yhat == yhat_loaded).all()
target_value = np.random.rand(300, horizon)
target_value_fake = dict({"data": target_value})
with self.assertRaises(Exception) as context:
model.evaluate(x=None, target_value=target_value_fake, metric=['mse'])
self.assertTrue("key y doesn't exist in y" in str(context.exception))
target_value = dict({"y": target_value})
model.evaluate(x=None, target_value=target_value, metric=['mse'])
def test_forecast_tcmf_xshards(self):
from zoo.zouwu.model.forecast import TCMFForecaster
from zoo.orca import OrcaContext
import zoo.orca.data.pandas
import tempfile
OrcaContext.pandas_read_backend = "pandas"
def preprocessing(df, id_name, y_name):
id = df.index
data = df.to_numpy()
result = dict({id_name: id, y_name: data})
return result
def postprocessing(pred_results, output_dt_col_name):
id_arr = pred_results["id"]
pred_results = pred_results["prediction"]
pred_results = np.concatenate((np.expand_dims(id_arr, axis=1), pred_results), axis=1)
final_df = | pd.DataFrame(pred_results, columns=["id"] + output_dt_col_name) | pandas.DataFrame |
import numpy as np
import pandas as pd
from matplotlib import *
# .........................Series.......................#
x1 = np.array([1, 2, 3, 4])
s = pd.Series(x1, index=[1, 2, 3, 4])
print(s)
# .......................DataFrame......................#
x2 = np.array([1, 2, 3, 4, 5, 6])
s = pd.DataFrame(x2)
print(s)
x3 = np.array([['Alex', 10], ['Nishit', 21], ['Aman', 22]])
s = pd.DataFrame(x3, columns=['Name', 'Age'])
print(s)
data = {'Name': ['Tom', 'Jack', 'Steve', 'Ricky'], 'Age': [28, 34, 29, 42]}
df = pd.DataFrame(data, index=['rank1', 'rank2', 'rank3', 'rank4'])
print(df)
data = [{'a': 1, 'b': 2}, {'a': 3, 'b': 4, 'c': 5}]
df = pd.DataFrame(data)
print(df)
d = {'one': pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two': pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
print(df)
# ....Adding New column......#
data = {'one': pd.Series([1, 2, 3, 4], index=[1, 2, 3, 4]),
'two': pd.Series([1, 2, 3], index=[1, 2, 3])}
df = pd.DataFrame(data)
print(df)
df['three'] = pd.Series([1, 2], index=[1, 2])
print(df)
# ......Deleting a column......#
data = {'one': pd.Series([1, 2, 3, 4], index=[1, 2, 3, 4]),
'two': pd.Series([1, 2, 3], index=[1, 2, 3]),
'three': pd.Series([1, 1], index=[1, 2])
}
df = pd.DataFrame(data)
print(df)
del df['one']
print(df)
df.pop('two')
print(df)
# ......Selecting a particular Row............#
data = {'one': pd.Series([1, 2, 3, 4], index=[1, 2, 3, 4]),
'two': pd.Series([1, 2, 3], index=[1, 2, 3]),
'three': pd.Series([1, 1], index=[1, 2])
}
df = pd.DataFrame(data)
print(df.loc[2])
print(df[1:4])
# .........Addition of Row.................#
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['a', 'b'])
df = df.append(df2)
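# Note (added): DataFrame.append was removed in pandas 2.0; the equivalent there is
# df = pd.concat([df, df2]).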
print(df.head())
# ........Deleting a Row..................#
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['a', 'b'])
df = df.append(df2)
# Drop rows with label 0
df = df.drop(0)
print(df)
# ..........................Functions.....................................#
d = {'Name': pd.Series(['Tom', 'James', 'Ricky', 'Vin', 'Steve', 'Smith', 'Jack']),
'Age': pd.Series([25, 26, 25, 23, 30, 29, 23]),
'Rating': pd.Series([4.23, 3.24, 3.98, 2.56, 3.20, 4.6, 3.8])}
df = pd.DataFrame(d)
print("The transpose of the data series is:")
print(df.T)
print(df.shape)
print(df.size)
print(df.values)
# .........................Statistics.......................................#
d = {'Name': pd.Series(['Tom', 'James', 'Ricky', 'Vin', 'Steve', 'Smith', 'Jack',
'Lee', 'David', 'Gasper', 'Betina', 'Andres']),
'Age': pd.Series([25, 26, 25, 23, 30, 29, 23, 34, 40, 30, 51, 46]),
'Rating': pd.Series([4.23, 3.24, 3.98, 2.56, 3.20, 4.6, 3.8, 3.78, 2.98, 4.80, 4.10, 3.65])
}
df = | pd.DataFrame(d) | pandas.DataFrame |
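# Added illustration (an assumed continuation of the statistics example): describe()
# summarises the numeric columns (count, mean, std, min, quartiles, max) in one call.
print(df.describe())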
import pandas as pd
import warnings
import numpy as np
from matplotlib import pyplot as plt
warnings.simplefilter("ignore")
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
online = False # if True: download xml files from github URL
# be careful: online version will not work if requirements from requirements.txt are not satisfied!
if online:
url_link_302_19 = 'https://github.com/Hidancloud/risk_management_debt_forecast/' \
'blob/main/data_folder/302-19.xlsx?raw=true'
url_link_01_13_F_Debt_sme_subj = 'https://github.com/Hidancloud/risk_management_debt_forecast/' \
'blob/main/data_folder/01_13_F_Debt_sme_subj.xlsx?raw=true'
url_link_Interpolationexp2 = 'https://github.com/Hidancloud/risk_management_debt_forecast/' \
'blob/main/data_folder/Interpolationexp2.xlsx?raw=true'
def extract_data_before_2019y():
"""
Extracts data from the 302-19.xlsx file
:return: pandas dataframe with columns 'Дата', 'Задолженность', 'Просроченная задолженность'
"""
if online:
return pd.read_excel(url_link_302_19, usecols=[0, 5, 11], skiprows=list(range(7)),
names=['Дата', 'Задолженность', 'Просроченная задолженность'])
return pd.read_excel('data_folder/302-19.xlsx', usecols=[0, 5, 11], skiprows=list(range(7)),
names=['Дата', 'Задолженность', 'Просроченная задолженность'])
def extract_data_after_2018():
"""
Extracts data from the 01_13_F_Debt_sme_subj.xlsx file
:return: pandas dataframe with columns 'Дата', 'Задолженность', 'Просроченная задолженность'
"""
# read Задолженность from the page МСП Итого
# .T to make rows for entities and columns for properties
if online:
after_19y_debt = pd.read_excel(url_link_01_13_F_Debt_sme_subj, skiprows=1, nrows=1,
sheet_name='МСП Итого ').T
else:
after_19y_debt = pd.read_excel('data_folder/01_13_F_Debt_sme_subj.xlsx',
skiprows=1, nrows=1, sheet_name='МСП Итого ').T
after_19y_debt.reset_index(inplace=True)
# remove an odd row after transpose
after_19y_debt.drop(labels=0, axis=0, inplace=True)
after_19y_debt.columns = before_19y.columns[:2]
# change types of the columns for convenience
after_19y_debt[after_19y_debt.columns[0]] = pd.to_datetime(after_19y_debt[after_19y_debt.columns[0]])
after_19y_debt = after_19y_debt.astype({after_19y_debt.columns[1]: 'int32'}, copy=False)
# read Просроченная задолженность from the page МСП в т.ч. просроч.
if online:
after_19y_prosro4eno = pd.read_excel(url_link_01_13_F_Debt_sme_subj, skiprows=2, nrows=0,
sheet_name='МСП в т.ч. просроч.').T
else:
after_19y_prosro4eno = pd.read_excel('data_folder/01_13_F_Debt_sme_subj.xlsx', skiprows=2, nrows=0,
sheet_name='МСП в т.ч. просроч.').T
after_19y_prosro4eno.reset_index(inplace=True)
# remove an odd row after the transpose
after_19y_prosro4eno.drop(labels=0, axis=0, inplace=True)
# name the column
after_19y_prosro4eno.columns = ['Просроченная задолженность']
# concatenate Задолженность and Просроченная задолженность in one table and return it
return pd.concat([after_19y_debt, after_19y_prosro4eno], axis=1)
def extract_macro_parameters():
if online:
return pd.read_excel(url_link_Interpolationexp2, index_col=0, parse_dates=True)
return pd.read_excel('data_folder/Interpolationexp2.xlsx', index_col=0, parse_dates=True)
def transform_to_quarters_format(custom_table, date_column_name='Дата',
already_3month_correct_step=False):
"""
Transforms table from month format to quarters taking the last month element for each quarter
:param custom_table: Pandas dataframe
:param date_column_name: name of a column with dates
:param already_3month_correct_step: if the time step between custom_table rows is a 3 month instead of month
and correspond to 3, 6, 9, 12 months
:return: table in correct quarter format with averaged values in columns
"""
if not already_3month_correct_step:
# quarter of the first month in the data
first_quarter = (custom_table[date_column_name].dt.month[0] - 1) // 3 + 1
# creates array [1, 1, 1, 2, 2, 2, 3, 3, 3, ...], so i-th month will be from corresponding quarter
# in case when each row corresponds to a month
correct_quarters = np.ones((custom_table.shape[0] // 3 + 3, 3), dtype=int).cumsum(axis=0).flatten()
# assumption: the data is not missing a single month
# then quarters are from correct_quarters continuous part
custom_table['Квартал'] = correct_quarters[3*(first_quarter-1): custom_table.shape[0] + 3*(first_quarter-1)]
else:
# in case when each row corresponds to either 3, 6, 9 or 12 month (file with macro data)
debt_table_quarters = custom_table.copy()
debt_table_quarters.reset_index(inplace=True)
debt_table_quarters['Квартал'] = custom_table.index.month // 3
return debt_table_quarters
# take the last value (last month value) inside each quarter and assign those values to the resulting table
group = custom_table.groupby('Квартал')
debt_table_quaters_features = dict()
for feature in custom_table.columns:
if feature != date_column_name and feature != 'Квартал':
debt_table_quaters_features[feature] = group[feature].nth(2)
debt_table_quarters = pd.concat(debt_table_quaters_features, axis=1)
debt_table_quarters.reset_index(inplace=True)
return debt_table_quarters
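# Illustrative call (added; the toy frame is an assumption): six monthly debt rows
# collapse to two quarterly rows, keeping the last month of each quarter.
def _example_quarters():
    toy = pd.DataFrame({'Дата': pd.date_range('2020-01-31', periods=6, freq='M'),
                        'Задолженность': [10, 11, 12, 13, 14, 15],
                        'Просроченная задолженность': [1, 1, 2, 2, 3, 3]})
    return transform_to_quarters_format(toy, date_column_name='Дата')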
if __name__ == '__main__':
# read the files
before_19y = extract_data_before_2019y()
after_19y = extract_data_after_2018()
new_features = extract_macro_parameters()
# concatenates old and new data
debt_table_total = pd.concat([before_19y, after_19y])
debt_table_total.reset_index(inplace=True)
debt_table_total.drop('index', 1, inplace=True)
debt_table_quarters_format = transform_to_quarters_format(debt_table_total, date_column_name='Дата')
debt_table_quarters_format['Уровень просроченной задолженности'] = \
debt_table_quarters_format['Просроченная задолженность'] / debt_table_quarters_format['Задолженность']
# plot data before quarters averaging
debt_table_total.plot(x='Дата', y=['Задолженность', 'Просроченная задолженность'])
plt.show()
# ... and after
debt_table_quarters_format.plot(x=['Квартал', 'Квартал'], y=['Задолженность', 'Просроченная задолженность'],
kind='scatter')
plt.show()
# add macro features:
interpolated_new_features = new_features.interpolate(method='time', limit_direction='both', downcast='infer')
interpolated_new_features_quarter_format = \
transform_to_quarters_format(interpolated_new_features, date_column_name='Отчетная дата (по кварталам)',
already_3month_correct_step=True)
all_features = | pd.concat([debt_table_quarters_format, interpolated_new_features_quarter_format], axis=1) | pandas.concat |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/01_data_provider.ipynb (unless otherwise specified).
__all__ = ['DataProvider', 'get_efficiently']
# Cell
from bs4 import BeautifulSoup as bs
import numpy as np
import os
import pandas as pd
from fastcore.foundation import patch
# Cell
class DataProvider():
def __init__(self, data_folder_path):
self.data_folder_path = data_folder_path
self.raw = os.path.join(data_folder_path, 'raw')
self.external = os.path.join(data_folder_path, 'external')
self.interim = os.path.join(data_folder_path, 'interim')
self.processed = os.path.join(data_folder_path, 'processed')
# Checking if folder paths exist
assert os.path.isdir(self.external), "External data folder not found."
assert os.path.isdir(self.raw), "Raw data folder not found."
assert os.path.isdir(self.interim), "Interim data folder not found."
assert os.path.isdir(self.processed), "Processed data folder not found."
# Phone screening files
self.phonescreening_data_path = os.path.join(self.raw, "phonescreening.csv")
self.phone_codebook_path = os.path.join(self.external, "phone_codebook.html")
# Basic assessment files
self.ba_codebook_path = os.path.join(self.external, "ba_codebook.html")
self.ba_data_path = os.path.join(self.raw, "ba.csv")
self.b07_participants_path = os.path.join(self.external, "b7_participants.xlsx")
# Movisense data
self.mov_berlin_path = os.path.join(self.raw, "mov_data_b.csv")
self.mov_dresden_path = os.path.join(self.raw, "mov_data_d.csv")
self.mov_mannheim_path = os.path.join(self.raw, "mov_data_m.csv")
self.mov_berlin_starting_dates_path = os.path.join(self.raw, "starting_dates_b.html")
self.mov_dresden_starting_dates_path = os.path.join(self.raw, "starting_dates_d.html")
self.mov_mannheim_starting_dates_path = os.path.join(self.raw, "starting_dates_m.html")
self.alcohol_per_drink_path = os.path.join(self.external,'alcohol_per_drink.csv')
#export
def get_efficiently(func):
"""
This decorator wraps around functions that get data and handles data storage.
If the output from the function hasn't been stored yet, it stores it in "[path_to_interim]/[function_name_without_get].parquet"
If the output from the function has been stored already, it loads the stored file instead of running the function (unless update is specified as True)
"""
def w(*args, update = False, columns = None, path = None, **kw):
_self = args[0] # Getting self to grab interim path from DataProvider
var_name = func.__name__.replace('__get_','').replace('get_','')
file_path = os.path.join(_self.interim, "%s.parquet"%var_name)
if os.path.exists(file_path) and (update == False):
result = pd.read_parquet(file_path, columns = columns)
else:
print("Preparing %s"%var_name)
result = func(_self)
result.to_parquet(file_path)
return result
w.__wrapped__ = func # Specifying the wrapped function for inspection
w.__doc__ = func.__doc__
w.__name__ = func.__name__
w.__annotations__ = {'cls':DataProvider, 'as_prop':False} # Adding parameters to make this work with @patch
return w
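# Minimal illustration (added, not part of the pipeline): any `get_<name>` method
# patched onto DataProvider and wrapped with @get_efficiently is cached at
# "<interim>/<name>.parquet"; calling it with update=True forces recomputation.
# The toy table below is an assumption, mirroring the pattern used for the real
# getters further down.
@patch
@get_efficiently
def get_example_table(self:DataProvider):
    return pd.DataFrame({'a': [1, 2, 3]})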
# Cell
@patch
def store_interim(self:DataProvider, df, filename):
path = os.path.join(self.interim,"%s.parquet"%filename)
df.to_parquet(path)
# Cell
@patch
def load_interim(self:DataProvider, filename):
return pd.read_parquet(os.path.join(self.interim,"%s.parquet"%filename))
# Cell
@patch
@get_efficiently
def get_phone_codebook(self:DataProvider):
tables = pd.read_html(open(self.phone_codebook_path,'r').read())
df = tables[1]
# Note that str.contains fills NaN values with nan, which can lead to strange results during filtering
df = df[df.LabelHinweistext.str.contains('Fragebogen:',na=False)==False]
df = df.set_index('#')
# Parsing variable name
df['variable'] = df["Variable / Feldname"].apply(lambda x: x.split(' ')[0])
# Parsing condition under which variable is displayed
df['condition'] = df["Variable / Feldname"].apply(lambda x: ' '.join(x.split(' ')[1:]).strip() if len(x.split(' '))>1 else '')
df['condition'] = df.condition.apply(lambda x: x.replace('Zeige das Feld nur wenn: ',''))
# Parsing labels for numerical data
df['labels'] = np.nan
labels = tables[2:-1]
try:
labels = [dict(zip(l[0],l[1])) for l in labels]
except:
display(table)
searchfor = ["radio","dropdown","yesno","checkbox"]
with_table = df['Feld Attribute (Feld-Typ, Prüfung, Auswahlen, Verzweigungslogik, Berechnungen, usw.)'].str.contains('|'.join(searchfor))
df.loc[with_table,'labels'] = labels
df = df.astype(str)
return df
# Cell
@patch
def determine_phone_b07(self:DataProvider, df):
# Some initial fixes
df.loc[df.center=='d','screen_caller'] = df.loc[df.center=='d','screen_caller'].str.lower().str.strip().replace('leo','<NAME>').replace('<NAME>','<NAME>').replace('<NAME>','<NAME>').replace('<NAME>','<NAME>').replace('dorothee','<NAME>')
# Cleaning screener list
dd_screeners = df[(df.center=='d')&(df.screen_caller.isna()==False)].screen_caller.unique()
def clean_screeners(dd_screeners):
dd_screeners = [y for x in dd_screeners for y in x.split('+')]
dd_screeners = [y for x in dd_screeners for y in x.split(',')]
dd_screeners = [y for x in dd_screeners for y in x.split('und')]
dd_screeners = [y.replace('(15.02.21)','') for x in dd_screeners for y in x.split('/')]
dd_screeners = [y.replace(')','').strip().lower() for x in dd_screeners for y in x.split('(')]
dd_screeners = sorted(list(set(dd_screeners)))
return dd_screeners
dd_screeners = clean_screeners(dd_screeners)
b07_screeners = ['<NAME>','<NAME>','<NAME>','<NAME>','borchardt','<NAME>','<NAME>','<NAME>','<NAME>']
s01_screeners = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', 'alice','<NAME> <NAME>', '<NAME>', '22.10.2021', 'sascha', '03.08.2021', '<NAME>', '<NAME>', '04.08.2021', '<NAME>', 'sacsha', '09.08.2021', 'ml', 'charlotte', '<NAME>', 'shereen', 'test', "<NAME>", 'benedikt']
known_dd_screeners = list(b07_screeners+s01_screeners)
dd_screeners = df[(df.center=='d')&(df.screen_caller.isna()==False)].screen_caller.unique()
# Checking if all Dresden phone screeners are accounted for
assert df[(df.center=='d')&(df.screen_caller)].screen_caller.str.contains('|'.join(known_dd_screeners)).mean()==1, "Unknown Dresden phone screener: %s"%', '.join(set(clean_screeners(dd_screeners))-set(known_dd_screeners))
# In general, if a screener from a project was involved, it was screened for that project
df['screened_for_b07'] = (df.center=='d') & (df.screen_caller.str.contains('|'.join(b07_screeners)))
df['screened_for_s01'] = (df.center!='d') | (df.screen_caller.str.contains('|'.join(s01_screeners)))
# We also exclude participants screened for C02 in Berlin
df.loc[(df.screen_purpose == 4) & (df.center=='b'), 'screened_for_s01'] = False
# Additionally, we also set it to true if it was specifically set
df.loc[df.screen_site_dd == 1, 'screened_for_s01'] = True
df.loc[df.screen_site_dd == 3, 'screened_for_s01'] = True
df.loc[df.screen_site_dd == 2, 'screened_for_b07'] = True
df.loc[df.screen_site_dd == 3, 'screened_for_b07'] = True
return df
# Cell
@patch
def check_participant_id(self:DataProvider,x):
'''This function checks whether a participant ID is numerical and lower than 20000.'''
if str(x) == x:
if x.isnumeric():
x = float(x)
else:
return False
if x > 20000:
return False
return True
# Cell
@patch
def test_check_participant_id(self:DataProvider):
failed = dp.check_participant_id('test10') == False # Example of a bad participant ID
passed = dp.check_participant_id('100') == True # Example of a good participant ID
return failed and passed
# Cell
@patch
def set_dtypes(self:DataProvider, data, codebook):
def number_or_nan(x):
try:
float(x)
return x
except:
return np.nan
'''This function automatically adjust data types of redcap data based on the redcap codebooks'''
# Parsing type
codebook['type'] = codebook["Feld Attribute (Feld-Typ, Prüfung, Auswahlen, Verzweigungslogik, Berechnungen, usw.)"].apply(lambda x: x.split(',')[0])
# Descriptives (not in data)
desc_columns = list(codebook[codebook.type.str.contains('descriptive')].variable)
# Datetime
dt_columns = codebook[(codebook.type.isin(['text (datetime_dmy)','text (date_dmy)']))].variable
dt_columns = list(set(data.columns).intersection(dt_columns))
# Numerical
num_columns = []
num_columns += list(codebook[codebook.type.str.contains('calc')].variable)
num_columns += list(codebook[codebook.type.str.contains('checkbox')].variable)
num_columns += list(codebook[codebook.type.str.contains('radio')].variable)
num_columns += list(codebook[codebook.type.str.contains('text \(number')].variable)
num_columns += list(codebook[codebook.type.str.contains('yesno')].variable)
num_columns += list(codebook[codebook.type.str.contains('dropdown')].variable)
num_columns += list(codebook[codebook.type.str.contains('slider')].variable)
num_columns = list(set(data.columns).intersection(num_columns))
# Text
text_columns = []
text_columns += list(codebook[(codebook.type.str.contains('text')) & (~codebook.type.str.contains('date_dmy|datetime_dmy'))].variable)
text_columns += list(codebook[(codebook.type.str.contains('notes'))].variable)
text_columns += list(codebook[(codebook.type.str.contains('file'))].variable)
text_columns = list(set(data.columns).intersection(text_columns))
assert len(set(num_columns).intersection(set(dt_columns)))==0, set(num_columns).intersection(set(dt_columns))
assert len(set(text_columns).intersection(set(dt_columns)))==0, set(text_columns).intersection(set(dt_columns))
for c in num_columns:
data[c].replace("A 'MySQL server has gone away' error was detected. It is possible that there was an actual database issue, but it is more likely that REDCap detected this request as a duplicate and killed it.", np.nan, inplace = True)
try:
data[c] = data[c].astype(float)
except:
data[c] = data[c].apply(number_or_nan).astype(float)
print("Values with wrong dtype in %s"%c)
data[text_columns] = data[text_columns].astype(str).replace('nan',np.nan)
for c in dt_columns:
data[c] = pd.to_datetime(data[c])
return data
# Cell
@patch
@get_efficiently
def get_phone_data(self:DataProvider):
df = pd.read_csv(self.phonescreening_data_path,
na_values = ["A 'MySQL server has gone away' error was detected. It is possible that there was an actual database issue, but it is more likely that REDCap detected this request as a duplicate and killed it."]
)
remove = ['050571', '307493', '345678', '715736', 'Ihloff', 'test',
'test002', 'test003', 'test004', 'test005', 'test01', 'test02',
'test03', 'test0722', 'test1', 'test34', 'test999', 'test2020',
'test20201', 'test345345', 'testt', 'test_10', 'test_11_26',
'test_neu', 'xx956','050262', '050335', '050402', '050416', '051005', '294932', '891752080', '898922719', '898922899', '917702419', '01627712983', 'meow', 'test0022', 'test246', 'test5647', 'test22222', 'test41514', 'testtt', 'test_057', 'tets','898923271', 'test001', 'test006', 'test007', 'test008', 'test11', 'test_23_12', 'test_n','50744', 'test0001a', 'test004', 'test03', 'tets']
df = df[~df.participant_id.astype(str).isin(remove)]
bad_ids = df[~df.participant_id.apply(self.check_participant_id)].participant_id.unique()
assert len(bad_ids)==0, "Bad participant IDs (should be added to remove): %s"%', '.join(["'%s'"%b for b in bad_ids])
self.get_phone_codebook()
df = self.set_dtypes(df, self.get_phone_codebook())
df['participant_id'] = df.participant_id.astype(int)
df['center'] = df.screen_site.replace({1:'b',2:'d',3:'m'})
df['screen_date'] = pd.to_datetime(df['screen_date'], errors = 'coerce')
#display(df[df.screen_caller.isna()])
df = self.determine_phone_b07(df)
return df
# Cell
@patch
@get_efficiently
def get_ba_codebook(self:DataProvider):
tables = pd.read_html(open(self.ba_codebook_path,"r").read())
df = tables[1]
# Note that str.contains fills NaN values with nan, which can lead to strange results during filtering
df = df[df.LabelHinweistext.str.contains('Fragebogen:',na=False)==False]
df = df.set_index('#')
# Parsing variable name
df['variable'] = df["Variable / Feldname"].apply(lambda x: x.split(' ')[0])
# Parsing condition under which variable is displayed
df['condition'] = df["Variable / Feldname"].apply(lambda x: ' '.join(x.split(' ')[1:]).strip() if len(x.split(' '))>1 else '')
df['condition'] = df.condition.apply(lambda x: x.replace('Zeige das Feld nur wenn: ',''))
# Parsing labels for numerical data
df['labels'] = np.nan
labels = tables[2:-1]
try:
labels = [dict(zip(l[0],l[1])) for l in labels]
except:
display(table)
searchfor = ["radio","dropdown","yesno","checkbox"]
with_table = df['Feld Attribute (Feld-Typ, Prüfung, Auswahlen, Verzweigungslogik, Berechnungen, usw.)'].str.contains('|'.join(searchfor))
df.loc[with_table,'labels'] = labels
df = df.astype(str)
return df
# Cell
@patch
@get_efficiently
def get_ba_data(self:DataProvider):
'''This function reads in baseline data from redcap, filters out pilot data, and creates movisens IDs.'''
df = pd.read_csv(self.ba_data_path)
df['center'] = df.groupby('participant_id').bx_center.transform(lambda x: x.ffill().bfill())
df['center'] = df.center.replace({1:'b',2:'d',3:'m'})
# Creating new movisense IDs (adding center prefix to movisense IDs)
for old_id in ['bx_movisens','bx_movisens_old','bx_movisens_old_2']:
new_id = old_id.replace('bx_','').replace('movisens','mov_id')
df[new_id] = df.groupby('participant_id')[old_id].transform(lambda x: x.ffill().bfill())
df[new_id] = df.center + df[new_id].astype('str').str.strip('0').str.strip('.').apply(lambda x: x.zfill(3))
df[new_id].fillna('nan',inplace = True)
df.loc[df[new_id].str.contains('nan'),new_id] = np.nan
# Removing test participants
remove = ['050744', 'hdfghadgfh', 'LindaEngel', 'test', 'Test001', 'Test001a', 'test0011', 'test0012', 'test0013', 'test0014', 'test0015', 'test002', 'test00229', 'test007', 'test01', 'test012', 'test013', 'test1', 'test2', 'test4', 'test12', 'test999', 'test2021', 'test345345', 'testneu', 'testtest', 'test_0720', 'test_10', 'test_GA', 'Test_JH','test0016','891752080', 'pipingTest', 'test0001', 'test00012', 'test0012a', 'test0015a', 'test0017', 'test10', 'test20212', 'testJohn01', 'test_00213', 'test_00233', 'test_00271', 'test_003', 'test_004', 'test_11_26', 'Test_MS','898922899', 'tesst', 'test0002', 'test0908', 'test092384750398475', 'test43', 'test123', 'test1233', 'test3425', 'test123456', 'test1234567', 'testfu3', 'test_888', 'test_999', 'test_98375983745', 'Test_Übung','050335', 'test003', 'test02', 'test111', 'test1111', 'test1234','test0000', 'test_CH','50744', 'test0001a', 'test004', 'test03', 'tets']
df = df[~df.participant_id.astype(str).isin(remove)]
# Checking participant ids (to find new test participants)
bad_ids = df[~df.participant_id.apply(self.check_participant_id)].participant_id.unique()
assert len(bad_ids)==0, "Bad participant IDs (should be added to remove): %s"%', '.join(["'%s'"%b for b in bad_ids])
# labeling B07 participant
b07_pps = pd.read_excel(self.b07_participants_path)['Participant ID'].astype(str)
df['is_b07'] = False
df.loc[df.participant_id.isin(b07_pps),'is_b07'] = True
# Setting dtypes based on codebook
df = self.set_dtypes(df, self.get_ba_codebook())
# Creating convenience variables
df['is_female'] = df.screen_gender.replace({1:0,2:1,3:np.nan})
# Filling in missings from baseline
df['is_female'].fillna(df.bx_sozio_gender.replace({1:0,2:1,3:np.nan}), inplace = True)
df['is_female'] = df.groupby('participant_id')['is_female'].transform(lambda x: x.ffill().bfill())
df['is_female'] = df['is_female'].astype(float)
return df
# Cell
@patch
def get_baseline_drinking_data(self:DataProvider):
# Getting relevant data
ba = self.get_ba_data(columns = ['participant_id','redcap_event_name','mov_id','bx_qf_alc_01','bx_qf_alc_02','bx_qf1_sum']).query("redcap_event_name=='erhebungszeitpunkt_arm_1'")
# Correct one variable for one participant. This participant reported drinking per three months but the data as logged as drinking per week
ba.loc[(ba.participant_id=='11303') & (ba.bx_qf_alc_02==2),'bx_qf_alc_02'] = 1
ba['drinking_days_last_three_month'] = ba['bx_qf_alc_01'].astype(float) * ba['bx_qf_alc_02'].replace({2:12})
ba['drinks_per_drinking_day_last_three_month'] = ba['bx_qf1_sum']
ba['drinks_per_day_last_three_month'] = (ba['drinking_days_last_three_month'] * ba['bx_qf1_sum'])/90
standard_last_three = ba[~ba.drinks_per_day_last_three_month.isnull()][['mov_id','drinks_per_day_last_three_month','drinks_per_drinking_day_last_three_month','drinking_days_last_three_month']]
standard_last_three.columns = ['participant','last_three_month','drinks_per_drinking_day_last_three_month','drinking_days_last_three_month']
standard_last_three = standard_last_three.groupby('participant').first()
return standard_last_three
# Cell
@patch
def get_duplicate_mov_ids(self:DataProvider):
'''This function creates a dictionary mapping old to new movisens IDs.'''
df = self.get_ba_data()
replace_dict_1 = dict(zip(df.mov_id_old, df.mov_id))
replace_dict_2 = dict(zip(df.mov_id_old_2, df.mov_id))
replace_dict = {**replace_dict_1, **replace_dict_2}
try:
del replace_dict[np.nan]
except:
pass
del replace_dict[None]
replace_dict['d033'] = 'd092' # This participant's data is currently missing in redcap, but they did change ID from 33 to 92
return replace_dict
# Cell
@patch
@get_efficiently
def get_mov_data(self:DataProvider):
"""
This function gets Movisense data
1) We create unique participnat IDs (e.g. "b001"; this is necessary as sites use overapping IDs)
2) We merge double IDs, so participants with two IDs only have one (for this duplicate_ids.csv has to be updated)
3) We remove pilot participants
4) We get starting dates (from the participant overviews in movisense; downloaded as html)
5) We calculate sampling days and end dates based on the starting dates
"""
# Loading raw data
mov_berlin = pd.read_csv(self.mov_berlin_path, sep = ';')
mov_dresden = pd.read_csv(self.mov_dresden_path, sep = ';')
mov_mannheim = pd.read_csv(self.mov_mannheim_path, sep = ';')
# Merging (participant numbers repeat so we add the first letter of location)
mov_berlin['location'] = 'berlin'
mov_dresden['location'] = 'dresden'
mov_mannheim['location'] = 'mannheim'
df = pd.concat([mov_berlin,mov_dresden,mov_mannheim])
df['participant'] = df['location'].str[0] + df.Participant.apply(lambda x: '%03d'%int(x))
df.drop(columns = 'Participant', inplace = True) # Dropping old participant column to avoid mistakes
df['trigger_date'] = pd.to_datetime(df.Trigger_date + ' ' + df.Trigger_time)
# Merging double IDs (for participants with several movisense IDs)
df['participant'] = df.participant.replace(self.get_duplicate_mov_ids())
# Removing pilot participants
df = df[~df.participant.astype(str).str.contains('test')]
df = df[~df.participant.isin(['m157', 'b010', 'b006', 'd001', 'd002', 'd042', 'm024', 'm028', 'm071', 'm079', 'm107'])]
# Adding starting dates to get sampling days
def get_starting_dates(path, pp_prefix = ''):
soup = bs(open(path).read())
ids = [int(x.text) for x in soup.find_all("td", class_ = 'simpleId')]
c_dates = [x.find_all("span")[0]['title'] for x in soup.find_all("td", class_ = 'coupleDate')]
s_dates = [x['value'] for x in soup.find_all("input", class_ = 'dp startDate')]
df = pd.DataFrame({'participant':ids,'coupling_date':c_dates,'starting_date':s_dates})
df['coupling_date'] = | pd.to_datetime(df.coupling_date) | pandas.to_datetime |
import itertools
from sklearn.model_selection import train_test_split
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
import matplotlib.pyplot as plt
from sklearn import metrics
import numpy as np
import pandas as pd
import re
PATTERN = re.compile(r"((?P<days1>[1-9]\d*)D(?P<amount1>[1-9]\d*[NP])_)?((?P<days2>[1-9]\d*)D(?P<amount2>[1-9]\d*[NP])_)?(?P<noshow>[1-9]\d*[NP])?")
def cancel_parser(policy: str, nights_num):
if nights_num <= 0:
nights_num = 1
match = PATTERN.match(policy)
if match is None:
return policy
else:
noshow = match.group("noshow")
noshow = 1 if noshow is None else int(noshow[:-1])/100 if noshow[-1] == 'P' else int(noshow[:-1]) / nights_num
days1 = match.group("days1")
if days1 is None:
days1 = 0
amount1 = noshow
else:
days1 = int(days1)
amount1 = match.group("amount1")
amount1 = int(amount1[:-1])/100 if amount1[-1] == 'P' else int(amount1[:-1])/nights_num
days2 = match.group("days2")
if days2 is None:
days2 = 0
amount2 = amount1
else:
days2 = int(days2)
amount2 = match.group("amount2")
amount2 = int(amount2[:-1])/100 if amount2[-1] == 'P' else int(amount2[:-1])/nights_num
return days1, amount1, days2, amount2, noshow
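# Worked example (added; the policy codes are illustrative): for a 3-night booking,
# "7D1N_3D100P_100P" parses to (7, 1/3, 3, 1.0, 1.0), i.e. night-based penalties are
# converted to fractions of the stay and percentage penalties to fractions of 1.
def _example_cancel_parser():
    return [cancel_parser(code, 3) for code in ("7D1N_3D100P_100P", "1D1N_1N")]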
def agoda_preprocessor(full_data: np.ndarray):
# fill cancellation datetime which doesn't exist as 0
full_data.loc[full_data["cancellation_datetime"].isnull(), "cancellation_datetime"] = full_data["checkin_date"]
full_data['cancellation_datetime'] = pd.to_datetime(full_data["cancellation_datetime"])
features = data_preprocessor(full_data)
full_data["cancel_warning_days"] = (full_data['checkin_date'] - full_data['cancellation_datetime']).dt.days
full_data["days_cancelled_after_booking"] = (full_data["cancellation_datetime"] - full_data["booking_datetime"]).dt.days
labels = (7 <= full_data["days_cancelled_after_booking"]) & (full_data["days_cancelled_after_booking"] <= 43)
return features, np.asarray(labels).astype(int)
def load_agoda_dataset():
"""
Load Agoda booking cancellation dataset
Returns
-------
Design matrix and response vector in the following format:
- Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
# clean data for unrealistic shit
full_data = pd.read_csv("../datasets/agoda_cancellation_train.csv").drop_duplicates()
features, labels = agoda_preprocessor(full_data)
return features, labels
def data_preprocessor(full_data):
# starting with the numerical and boolean columns
features = full_data[["hotel_star_rating",
"guest_is_not_the_customer",
"original_selling_amount",
"is_user_logged_in",
"is_first_booking",
"cancellation_policy_code",
]].fillna(0)
# how much the customer cares about his order, sums all it's requests
features["num_requests"] = (full_data["request_nonesmoke"].fillna(0) +
full_data["request_latecheckin"].fillna(0) +
full_data["request_highfloor"].fillna(0) +
full_data["request_largebed"].fillna(0) +
full_data["request_twinbeds"].fillna(0) +
full_data["request_airport"].fillna(0) +
full_data["request_earlycheckin"].fillna(0))
features["charge_option"] = full_data["charge_option"].apply(lambda x: 1 if x == "Pay Later" else 0)
# accom = {"":}
# features["accommadation_type_name"] = full_data["accommadation_type_name"].apply(lambda x: accom[x])
full_data['booking_datetime'] = pd.to_datetime(full_data['booking_datetime'])
full_data['checkin_date'] = pd.to_datetime(full_data['checkin_date'])
full_data['checkout_date'] = pd.to_datetime(full_data['checkout_date'])
# add date connected numerical columns
features["days_to_checkin"] = (full_data["checkin_date"] - full_data["booking_datetime"]).dt.days
features["num_nights"] = (full_data['checkout_date'] - full_data['checkin_date']).dt.days - 1
# deal with cancellation policy code
features['parsed_cancellation'] = features.apply(lambda x: cancel_parser(x['cancellation_policy_code'], x['num_nights']), axis=1)
features[['cd1', 'cp1', 'cd2', 'cp2', 'ns']] = pd.DataFrame(features['parsed_cancellation'].tolist(), index=features.index)
del features["cancellation_policy_code"]
del features['parsed_cancellation']
return features
def cross_validate(estimator, X: np.ndarray, y: np.ndarray, cv):
"""
Evaluate metric by cross-validation for given estimator
Parameters
----------
estimator: BaseEstimator
Initialized estimator to use for fitting the data
X: ndarray of shape (n_samples, n_features)
Input data to fit
y: ndarray of shape (n_samples, )
Responses of input data to fit to
cv: int
Specify the number of folds.
Returns
-------
validation_score: float
Average validation score over folds
"""
validation_scores = []
split_X, split_y = np.array_split(X, cv), np.array_split(y, cv)
for i in range(cv):
# create S\Si & Si
train_x, train_y = np.concatenate(np.delete(split_X, i, axis=0)), np.concatenate(np.delete(split_y, i, axis=0))
test_x, test_y = split_X[i], split_y[i]
# fit the estimator to the current folds
A = estimator.fit(train_x, train_y)
# predict over the validation fold and over the hole train set
validation_scores.append(metrics.f1_score(A.predict(test_x), test_y, average='macro'))
return np.array(validation_scores).mean()
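# Hedged usage sketch (added): this mirrors how cross_validate is called in
# evaluate_and_export below, for a single (true, false) weight pair.
def _example_cross_validate(X, y):
    return cross_validate(AgodaCancellationEstimator(0.6, 0.07), X, y, cv=6)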
def training_playground(X, y):
"""
Evaluate current model performances over previous weeks datasets.
Parameters
----------
X: the previous weeks unite dataset
y: the previous weeks unite labels
"""
# f1_scores = []
# for true, false in itertools.product(list(np.arange(0.6, 1, 0.05)), list(np.arange(0.03, 0.1, 0.01))):
# print(true, false)
# estimator = AgodaCancellationEstimator(true, false)
# f1_scores.append(cross_validate(estimator, X, y, cv=6))
#
# print(f1_scores)
# define train & test sets.
train_X, test_X, train_y, test_y = train_test_split(X.to_numpy(), y.to_numpy(), test_size=1/6)
# Fit model over data
prev_estimator = AgodaCancellationEstimator(0.6, 0.07).fit(train_X, train_y)
# Predict for test_X
y_pred = pd.DataFrame(prev_estimator.predict(test_X), columns=["predicted_values"])
# confusion matrix
cm = metrics.ConfusionMatrixDisplay(metrics.confusion_matrix(test_y, y_pred))
cm.plot()
plt.show()
# Performances:
print("Area Under Curve: ", metrics.roc_auc_score(test_y, y_pred))
print("Accuracy: ", metrics.accuracy_score(test_y, y_pred))
print("Recall: ", metrics.recall_score(test_y, y_pred))
print("Precision: ", metrics.precision_score(test_y, y_pred))
print("F1 Macro Score: ", metrics.f1_score(test_y, y_pred, average='macro'))
def evaluate_and_export(X, y, test_csv_filename):
"""
Export to specified file the prediction results of given estimator on given testset.
File saved is in csv format with a single column named 'predicted_values' and n_samples rows containing
predicted values.
Parameters
----------
X: the previous weeks unite dataset
y: the previous weeks unite labels
test_csv_filename: path to the current week test-set csv file
"""
f1_scores = []
range_of_weights = list(itertools.product(list(np.arange(0.6, 1, 0.05)), list(np.arange(0.03, 0.1, 0.01))))
for true, false in range_of_weights:
estimator = AgodaCancellationEstimator(true, false)
f1_scores.append(cross_validate(estimator, X, y, cv=6))
print(np.max(f1_scores))
true_weight, false_weight = range_of_weights[np.argmax(f1_scores)]
# Fit model over data
prev_estimator = AgodaCancellationEstimator(true_weight, false_weight).fit(X, y)
# Store model predictions over test set
test_set = pd.read_csv(test_csv_filename).drop_duplicates()
# predict over current-week test-set
X = data_preprocessor(test_set)
y_pred = pd.DataFrame(prev_estimator.predict(X), columns=["predicted_values"])
# export the current-week predicted labels
pd.DataFrame(y_pred, columns=["predicted_values"]).to_csv("342473642_206200552_316457340.csv", index=False)
def load_previous():
"""
Load Previous-weeks test-sets and labels
Returns
-------
Design matrix and response vector in the following format:
- Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
data_set = | pd.read_csv(f'testsets//t1.csv') | pandas.read_csv |
import requests
from bs4 import BeautifulSoup
import pandas as pd
import re
def main():
URL = "https://en.wikipedia.org/wiki/{}_Copa_Am%C3%A9rica"
# Format [year, number of teams, normal page]
years = [[1993, 12, True], [1995, 12, True], [1997, 12, True], [1999, 12, True], [2001, 12, True], [2004, 12, True], [2007, 12, True], [2011, 12, False], [2015, 12, False], [2016, 16, True]]
for year in years:
print("Parsing year: {}".format(year[0]))
r = requests.get(URL.format(year[0]))
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, "html.parser")
# Get group data
games = soup.find_all("div", {"class": "footballbox"})
if year[2]: # Some pages differ from others and are harder to parse
num_of_group_games = year[1] * 6 / 4
else:
num_of_group_games = 0
data = []
for i, game in enumerate(games):
if i < num_of_group_games:
stage = "Group"
elif i < num_of_group_games + 4:
stage = "Quarter-finals"
elif i < num_of_group_games + 4 + 2:
stage = "Semi-finals"
elif i < num_of_group_games + 4 + 2 + 1:
stage = "Match for third place"
else:
stage = "Final"
home_team = game.find('th', {'class': 'fhome'}).findNext('a').get_text()
away_team = game.find('th', {'class': 'faway'}).findNext('a').get_text()
goals = game.find('th', {'class': 'fscore'}).get_text().split('–')
home_team_goals = re.findall('\d+', goals[0])[0]
away_team_goals = re.findall('\d+', goals[1])[0]
ftr = "D" if home_team_goals == away_team_goals else ("H" if home_team_goals > away_team_goals else "A")
data.append([stage, home_team, away_team, home_team_goals, away_team_goals, ftr])
# Save current year
df = | pd.DataFrame(data, columns = ["Stage","HomeTeam","AwayTeam","FTHG","FTAG","FTR"]) | pandas.DataFrame |
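        # Added sketch (the file name is an assumption): persist the year's matches, as
        # the "Save current year" comment above indicates.
        df.to_csv("copa_america_{}.csv".format(year[0]), index=False)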
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import os
import io
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
#from sqlalchemy import create_engine
import pandas as pd
import numpy as np
#import io
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#from flask import Flask, render_template, request, redirect, url_for, session, g
from sklearn.linear_model import LogisticRegression
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
#from sqlalchemy import create_engine
from collections import defaultdict
from sklearn import linear_model
import statsmodels.api as sm
import scipy.stats as st
import pandas as pd
import numpy as np
from pulp import *
import pymysql
import math
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
            changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
        x = spq['Product_Price']
        y = spq['Product_Qty']
        num_bins = 5
        # np.histogram yields the same bin edges as the commented-out plt.hist calls did,
        # without opening a matplotlib figure inside the request handler
        _, pint = np.histogram(x, num_bins)
        _, dint = np.histogram(y, num_bins)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
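# /elasticity: fits an OLS model of quantity on (own price - competitor price), the two
# promotion flags and log(week), keeps the fitted coefficients in globals, then sweeps
# candidate prices per week to build the demand and revenue curves for the heatmap view.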
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
                # demand estimate from the fitted coefficients; the week term uses week_param
                # (the log-week coefficient), not the raw week index
                function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
                (diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
        def demand(p, a=a, b=b, d=d, promo1=promo1, promo2=promo2, comp=comp, t=np.linspace(1,time_period,time_period)):
            """ Return demand given an array of prices p for times t
            (see equation 5 above)"""
            return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
        opt_demand=demand(opt_results['x'], a, b, d, promo1, promo2, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible to due Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
            # join_axes was removed from pd.concat in newer pandas; reindex to dear.index instead
            result = pd.concat([deer, dear], axis=1).reindex(dear.index)
            resu=result[['Group1','Wagon-No','Allocated']]
            result1=resu.fillna(0)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
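# Facility location: /imprt uploads customer and factory CSVs (Lat/Long, Demand, Capacity,
# FixedCost); /gmap builds a haversine-distance cost matrix and solves a PuLP MILP that
# decides which factories to open and how to allocate shipments, with a slack penalty for
# unmet demand.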
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
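    # MILP built below: minimise transport cost + fixed opening cost + a large penalty on
    # cap_slack for unmet demand, subject to each customer's demand being met (or slacked)
    # and production at a factory only when its binary factory_status variable is 1.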
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
    factory_status = pulp.LpVariable.dicts("factory_status",
                                           (factory for factory in Capacity),
                                           cat='Binary')
    cap_slack = pulp.LpVariable.dicts("capslack",
                                      (cust for cust in Demand),
                                      lowBound=0,
                                      cat='Integer')
    # objective: transport cost + fixed cost of opened factories + penalty per unit of unmet demand
    model += (
        pulp.lpSum([DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand])
        + pulp.lpSum([FixedCost.loc[factory][0] * factory_status[factory] for factory in Capacity])
        + pulp.lpSum([5000000 * cap_slack[cust] for cust in Demand]))
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
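# Demand forecasting workflow: upload a demand CSV, choose monthly/quarterly/yearly
# aggregation, then fit the selected techniques (moving average via ARIMA(0,0,1),
# ARIMA(1,0,0), linear regression on a time index, simple exponential smoothing),
# store per-model forecasts in MySQL and report ME/MAE/MAPE on a 70/30 train/validation split.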
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
        #if value=='weekly': ##weekly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
                return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
                        ratio_inc.append(int(((bs-As)/As)*100))
                    return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
                            # compare the smoothed fit against the actual series (not its length)
                            meanerr=ME(data[data.columns.tolist()[i]],pred)
                            meanaverr=MAE(data[data.columns.tolist()[i]],pred)
                            mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
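# /quarterlyforecast mirrors /monthlyforecast, operating on the quarter-aggregated input
# and using a '3M' frequency for the regression and exponential-smoothing date ranges.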
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
                return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
                        ratio_inc.append(int(((bs-As)/As)*100))
                    return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r=r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
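# Simple exponential smoothing: s_t = alpha*x_t + (1-alpha)*s_(t-1); future periods are rolled forward by repeatedly blending the last actual and last smoothed value with the same alpha.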
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf=skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum=dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
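# ME is the mean (signed) error, MAE the mean absolute error and MAPE the mean absolute percentage error; note that MAPE below divides by the predicted values rather than the actuals.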
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
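# The "moving average" forecast is implemented as an ARIMA(0,0,1) (i.e. MA(1)) fit per demand column; the trailing attribute columns (GDP, exports, etc.) are skipped via range(0, m-5).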
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs=tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results=Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='A', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r=r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf=skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum=dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputy',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputy`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('yearly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('yearly.html',sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
#############################Dashboard#######################################
#yearly
@app.route('/youtgraph', methods = ['GET','POST'])
def youtgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputy` GROUP BY `Model`")
sfile=cur.fetchall()
global yqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
yqst=qlst.values
con.close()
return render_template('ydashboard.html',qulist=yqst)
@app.route('/youtgraph1', methods = ['GET', 'POST'])
def youtgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputy` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
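# Pad the predicted series with "null" placeholders for every historical date before the first forecast date, so the actual and predicted lines share a single x-axis.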
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date']
index=np.concatenate((indx,edata['Date'].values),axis=0)
yindx=[]
for var in index:
var1 = var[:4]
yindx.append(var1)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungry
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('ydashboard.html',mon=value,qulist=yqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=yindx,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#monthly
@app.route('/moutgraph', methods = ['GET','POST'])
def moutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutput` GROUP BY `Model`")
sfile=cur.fetchall()
global mqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
mqst=qlst.values
con.close()
return render_template('mdashboard.html',qulist=mqst)
@app.route('/moutgraph1', methods = ['GET', 'POST'])
def moutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutput` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungry
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('mdashboard.html',mon=value,qulist=mqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#quarterly
@app.route('/qoutgraph', methods = ['GET','POST'])
def qoutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputq` GROUP BY `Model`")
sfile=cur.fetchall()
global qst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
qst=qlst.values
con.close()
return render_template('qdashboard.html',qulist=qst)
@app.route('/qoutgraph1', methods = ['GET', 'POST'])
def qoutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputq` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungry
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('qdashboard.html',mon=value,qulist=qst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
@app.route("/yearlysimulation",methods = ['GET','POST'])
def yearlysimulation():
if request.method == 'POST':
gdp=0
pi=0
ms=0
adv=0
gdp_dis=request.form.get('gdp_dis')
pi_dis=request.form.get('pi_dis')
ms_dis=request.form.get('ms_dis')
adv_dis=request.form.get('adv_dis')
min=request.form.get('min')
max=request.form.get('max')
mue=request.form.get('mue')
sig=request.form.get('sig')
cval=request.form.get('cval')
min1=request.form.get('min1')
max1=request.form.get('max1')
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
cval1=request.form.get('cval1')
min2=request.form.get('min2')
max2=request.form.get('max2')
mue2=request.form.get('mue2')
sig2=request.form.get('sig2')
cval2=request.form.get('cval2')
min3=request.form.get('min3')
max3=request.form.get('max3')
mue3=request.form.get('mue3')
sig3=request.form.get('sig3')
cval3=request.form.get('cval3')
itr= int(request.form.get('itr'))
frm = request.form.get('from')
sfrm=int(frm[:4])
to = request.form.get('to')
sto=int(to[:4])
kwargs={}
atrtable=[]
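# Build the simulation spec: atrtable lists the selected demand drivers, and kwargs carries each driver's distribution type plus its parameters (min/max, mean/sigma, or a constant).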
if request.form.get('gdp'):
gdp=1
atrtable.append('Gdp')
if gdp_dis == 'gdp_dis1':
min=request.form.get('min')
max=request.form.get('max')
kwargs['Gdp_dis']='Uniform'
kwargs['gdpvalues']=[min,max]
if gdp_dis == 'gdp_dis2':
mue=request.form.get('mue')
sig=request.form.get('sig')
kwargs['Gdp_dis']='Normal'
kwargs['gdpvalues']=[mue,sig]
if gdp_dis == 'gdp_dis3':
kwargs['Gdp_dis']='Random'
pass
if gdp_dis == 'gdp_dis4':
cval=request.form.get('cval')
kwargs['Gdp_dis']='Constant'
kwargs['gdpvalues']=[cval]
if request.form.get('pi'):
pi=1
atrtable.append('Pi')
if pi_dis == 'pi_dis1':
min1=request.form.get('min1')
max1=request.form.get('max1')
kwargs['Pi_dis']='Uniform'
kwargs['pivalues']=[min1,max1]
if pi_dis == 'pi_dis2':
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
kwargs['Pi_dis']='Normal'
kwargs['pivalues']=[mue1,sig1]
if pi_dis == 'pi_dis3':
kwargs['Pi_dis']='Random'
pass
if pi_dis == 'pi_dis4':
cval1=request.form.get('cval1')
kwargs['Pi_dis']='Constant'
kwargs['pivalues']=[cval1]
if request.form.get('ms'):
ms=1
atrtable.append('Ms')
if ms_dis == 'ms_dis1':
min2=request.form.get('min2')
max2=request.form.get('max2')
kwargs['Ms_dis']='Uniform'
kwargs['msvalues']=[min2,max2]
if ms_dis == 'ms_dis2':
mue2=request.form.get('mue2')
sig2=request.form.get('sig2')
kwargs['Ms_dis']='Normal'
kwargs['msvalues']=[mue2,sig2]
if ms_dis == 'ms_dis3':
kwargs['Ms_dis']='Random'
pass
if ms_dis == 'ms_dis4':
cval2=request.form.get('cval2')
kwargs['Ms_dis']='Constant'
kwargs['msvalues']=[cval2]
if request.form.get('adv'):
adv=1
atrtable.append('Adv')
if adv_dis == 'adv_dis1':
min3=request.form.get('min3')
max3=request.form.get('max3')
kwargs['Adv_dis']='Uniform'
kwargs['advvalues']=[min3,max3]
if adv_dis == 'adv_dis2':
mue3=request.form.get('mue3')
sig3=request.form.get('sig3')
kwargs['Adv_dis']='Normal'
kwargs['advvalues']=[mue3,sig3]
if adv_dis == 'adv_dis3':
kwargs['Adv_dis']='Random'
pass
if adv_dis == 'adv_dis4':
cval3=request.form.get('cval3')
kwargs['Adv_dis']='Constant'
kwargs['advvalues']=[cval3]
#print(kwargs)
#print(atrtable)
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `stech` (`gdp` VARCHAR(1),`pi` VARCHAR(1), `ms` VARCHAR(1),`adv` VARCHAR(1),`itr` VARCHAR(5),`sfrm` VARCHAR(10),`sto` VARCHAR(10))")
cur.execute("DELETE FROM `stech`")
con.commit()
cur.execute("INSERT INTO `stech` VALUES('"+str(gdp)+"','"+str(pi)+"','"+str(ms)+"','"+str(adv)+"','"+str(itr)+"','"+str(sfrm)+"','"+str(sto)+"')")
con.commit()
data = pd.DataFrame(Yeardata)
#print(data)
data.columns
xvar=pd.concat([data['GDP'],data['Pi_Exports'],data['Market_Share'],data['Advertisement_Expense']],axis=1)
yvar=pd.DataFrame(data['TotalDemand'])
regr = linear_model.LinearRegression()
regr.fit(xvar,yvar)
# predict=regr.predict(xvar)
#Error Measures
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
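# Monte Carlo simulation: for each iteration, draw GDP/Pi/Adv/Ms values for every simulated year from the chosen distributions (zeros for unselected drivers), predict TotalDemand with the fitted linear regression, and score the overlapping historical years with ME/MAE/MAPE.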
def sim(iteration,data,startyear,endyear,atrtable,Gdp_dis=None,gdpvalues=None,Adv_dis=None,advvalues=None,Ms_dis=None,msvalues=None,Pi_dis=None,pivalues=None):
preddata=pd.DataFrame()
simdata=pd.DataFrame()
#Errordf=pd.DataFrame()
Errormsr=pd.DataFrame()
date=pd.date_range(start=pd.datetime(startyear, 1, 1), end=pd.datetime(endyear+1, 1, 1),freq='A')
date=pd.DataFrame(date.strftime("%Y"))
#Fetching The Orignal Data Of Available Years of the Orignal Data That We Have Actually
m=len(date)
Arrayofdates=data['Date']
vari=[]
for var in Arrayofdates:
vari.append(var[:4])
Arrayofdates=pd.DataFrame(vari)
dates=[]
Fetchdata=[]
for i in range(0,m):
years=date.loc[i]
for j in range(0,len(Arrayofdates)):
if int(Arrayofdates.loc[j])==int(years):
da=data['TotalDemand'].loc[j]
Fetchdata.append(da) #Gives Data In the Given Range That we have actually
dates.extend(years) #Gives Years that we have data
for i in range(0,iteration):
df=pd.DataFrame()
#for The Gdp
S='flag'
for row in atrtable:
if row=='Gdp':
S='Gdp'
if S=='Gdp':
for row in Gdp_dis:
if row=='Normal':
gdpdf=pd.DataFrame(np.random.normal(gdpvalues[0],gdpvalues[1],m))
elif row=='Uniform':
gdpdf=pd.DataFrame(np.random.uniform(gdpvalues[0],gdpvalues[1],m))
elif row=='Constant':
gdpdf=pd.DataFrame(np.random.choice([gdpvalues[0]],m))
else:
gdpdf=pd.DataFrame(np.random.uniform(-4,4,m))
else:
gdpdf=pd.DataFrame(np.random.uniform(0,0,m))
# for the pi dataframe
O='flag'
for row in atrtable:
if row=='Pi':
O='Pi'
if O=='Pi':
for row in Pi_dis:
if row=='Normal':
pidf=pd.DataFrame(np.random.normal(pivalues[0],pivalues[1],m))
elif row=='Uniform':
pidf=pd.DataFrame(np.random.uniform(pivalues[0],pivalues[1],m))
elif row=='Constant':
pidf=pd.DataFrame(np.random.choice([pivalues[0]],m))
else:
pidf=pd.DataFrame(np.random.random_integers(80,120,m))
else:
pidf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Adv Dataframe
N='flag'
for row in atrtable:
if row=='Adv':
N='Adv'
if N=='Adv':
for row in Adv_dis:
if row=='Normal':
advdf=pd.DataFrame(np.random.normal(advvalues[0],advvalues[1],m))
elif row=='Uniform':
advdf=pd.DataFrame(np.random.uniform(advvalues[0],advvalues[1],m))
elif row=='Constant':
advdf=pd.DataFrame(np.random.choice([advvalues[0]],m))
else:
advdf=pd.DataFrame(np.random.random_integers(500000,1000000,m))
else:
advdf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Ms dataframe
U='flag'
for row in atrtable:
if row=='Ms':
U='Ms'
if U=='Ms':
for row in Ms_dis:
if row=='Normal':
msdf=pd.DataFrame(np.random.normal(msvalues[0],msvalues[1],m))
elif row=='Uniform':
msdf=pd.DataFrame(np.random.uniform(msvalues[0],msvalues[1],m))
elif row=='Constant':
msdf=pd.DataFrame(np.random.choice([msvalues[0]],m))
else:
msdf=pd.DataFrame(np.random.uniform(0.1,0.5,m))
else:
msdf=pd.DataFrame(np.random.uniform(0,0,m))
#Concatenating All the dataframes for Simulation Data
df=pd.concat([gdpdf,pidf,msdf,advdf],axis=1)
simid=pd.DataFrame(np.random.choice([i+1],m))
dd=pd.concat([simid,gdpdf,pidf,advdf,msdf],axis=1)
dd.columns=['Year','Gdp','Pi','Adv','Ms']
simdata=pd.concat([simdata,dd],axis=0)
#Predicting the Data And store in pred data through onhand Regression Method
dfs=pd.DataFrame(regr.predict(df))
datatable=pd.concat([simid,date,dfs],axis=1)
datatable.columns=['simid','Year','Total_Demand(Tonnes)']
preddata=pd.concat([datatable,preddata],axis=0)
datas=list()
#Geting Data With Respective Dates
# print(datatable)
for row in dates:
# print(dates)
datas.extend(datatable.loc[datatable['Year'] ==row, 'Total_Demand(Tonnes)'])
kkk=pd.DataFrame(datas)
me=ME(Fetchdata,kkk)
mae=MAE(Fetchdata,kkk)
mape=MAPE(Fetchdata,kkk)
dfe=pd.DataFrame([me,mae,mape],index=['ME','MAE','MAPE']).T
Errormsr=pd.concat([Errormsr,dfe],axis=0).reset_index(drop=True)
return preddata,simdata,Errormsr
preddata,simdata,Errormsr=sim(itr,data,sfrm,sto,atrtable,**kwargs)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
preddata.to_sql(con=engine, name='predicteddata',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
simdata.to_sql(con=engine2, name='simulationdata',index=False, if_exists='replace')
con.commit()
engine3 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Errormsr.to_sql(con=engine3, name='simerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `simerror`")
sdata = cnr.fetchall()
simerror = pd.DataFrame(sdata)
con.close()
return render_template('ysimulation.html',sayy=1,simerror=simerror.to_html(index=False))
return render_template('ysimulation.html')
##PROCURMENT PLANNING
@app.route('/procurementplanning')
def procurementplanning():
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerate", methods=['GET','POST'])
def criteriagenerate():
if request.method == 'POST':
global cnmbr
global vnmbr
cnmbr = int(request.form['cnmbr'])
vnmbr = int(request.form['vnmbr'])
if cnmbr == 0 or vnmbr==0:
return render_template('vendorselection_criterianumberask.html',warning='Data Invalid')
cmainlist=[]
global cnames
cnames = []
for i in range (1,cnmbr+1):
lst=[]
name='cname'+str(i)
lst.append(i)
lst.append(name)
cmainlist.append(lst)
cnames.append(name)
vmainlist=[]
global vnames
vnames = []
for i in range (1,vnmbr+1):
lst=[]
name='vname'+str(i)
lst.append(i)
lst.append(name)
vmainlist.append(lst)
vnames.append(name)
return render_template('vendorselection_criteriagenerate.html',cmainlist=cmainlist,vmainlist=vmainlist)
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerated", methods=['GET','POST'])
def criteriagenerated():
if request.method == 'POST':
global criterianames
criterianames=[]
for name in cnames:
criterianame = request.form[name]
criterianames.append(criterianame)
global vendornames
vendornames=[]
for name in vnames:
vendorname = request.form[name]
vendornames.append(vendorname)
mcrlst=[]
cn=len(criterianames)
k=1
global maincriteriaoption
maincriteriaoption=[]
global maincritriacri
maincritriacri=[]
for i in range(cn-1):
for j in range (i+1,cn):
cri='criteriaorder'+str(k)
opt='coption'+str(k)
crlst=[k,cri,criterianames[i],criterianames[j],opt]
mcrlst.append(crlst)
k=k+1
maincriteriaoption.append(opt)
maincritriacri.append(cri)
mvrlst=[]
vn=len(vendornames)
k=1
global mainvendoroption
mainvendoroption=[]
global mainvendorcri
mainvendorcri=[]
for z in criterianames:
mvrlst1=[]
vcri=[]
vopt=[]
for i in range(vn-1):
for j in range (i+1,vn):
cri='vendororder'+z+str(k)
opt='voption'+z+str(k)
vrlst=[k,cri,vendornames[i],vendornames[j],opt]
mvrlst1.append(vrlst)
k=k+1
vcri.append(cri)
vopt.append(opt)
mvrlst.append(mvrlst1)
mainvendorcri.append(vcri)
mainvendoroption.append(vopt)
return render_template('vendorselection_maincriteria.html',mcrlst=mcrlst,mvrlst=mvrlst)
return render_template('vendorselection_criteriagenerated.html')
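# Builds a reciprocal pairwise-comparison matrix: 1s on the diagonal, the given judgements on one side of it and their reciprocals mirrored on the other; an importance flag of 1 inverts the corresponding judgement first.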
def tablecreator(imp,val,crit):
n=len(imp)
for i in range(n):
if imp[i]==1:
val[i]=float(1/val[i])
fdata=pd.DataFrame(columns=[crit],index=[crit])
i=0
k=0
for index in fdata.index:
j=0
for columns in fdata.columns:
if i==j:
fdata[index][columns]=1
if i<j:
fdata[index][columns]=round((float(val[k])),2)
fdata[columns][index]=round((1/val[k]),2)
k=k+1
j=j+1
i=i+1
return fdata
@app.route("/criteriaread", methods=['GET','POST'])
def criteriaread():
if request.method == 'POST':
importances = []
values = []
for name1 in maincritriacri:
imp = int(request.form[name1])
importances.append(imp)
for name2 in maincriteriaoption:
val = int(request.form[name2])
values.append(val)
#global maincriteriadata
maincriteriadata=tablecreator(importances,values,criterianames)
mainimportances=[]
for crioption in mainvendorcri:
importance=[]
for option1 in crioption:
impc = int(request.form[option1])
importance.append(impc)
mainimportances.append(importance)
mainvalues=[]
for vendoroption in mainvendoroption:
vvalues=[]
for option2 in vendoroption:
valuev = int(request.form[option2])
vvalues.append(valuev)
mainvalues.append(vvalues)
maindf=[]
for z in range(len(criterianames)):
df=tablecreator(mainimportances[z],mainvalues[z],vendornames)
maindf.append(df)
dictmain={'crit':maincriteriadata}
names=criterianames
dfs=maindf
dictionary=dict((n,d) for (n,d) in zip(names,dfs))
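# AHP scoring: each pairwise-comparison matrix is squared and its normalised row sums approximate the priority (weight) vector; the per-criterion vendor weights are then multiplied by the criterion weights to give the overall vendor scores.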
def ahpmain(dictmain):
global wt_Crit
wt_Crit=[]
key=[]
key=list(dictmain.keys())
for i in key:
Crit=np.dot(dictmain[i],dictmain[i])
row_sum=[]
for j in range(len(Crit)):
row_sum.append(sum(Crit[j]))
wt_Crit.append([s/sum(row_sum) for s in row_sum])
Crit=[]
return wt_Crit
def ahp(dictmain,dictionary):
global output
main= ahpmain(dictmain)
submain= ahpmain(dictionary)
dd=pd.DataFrame(submain).T
df=pd.DataFrame(main).T
output=np.dot(dd,df)
return output,dd
yaxis,dd=ahp(dictmain,dictionary)
yax=pd.DataFrame(yaxis,index=vendornames,columns=['Score']).sort_values('Score',ascending=False).T
ynames=yax.columns
yval=yax.T.values
dd.index=vendornames
dd.columns=names
dd=dd.T
opq23=[]
for column in dd.columns:
opq21=[]
opq22=[]
opq21.append(column)
for val in dd[column]:
opq22.append(val)
opq21.append(opq22)
opq23.append(opq21)
return render_template('vendorselection_ahp_final_output.html',ynames=ynames,yval=yval,dd=opq23,names=names)
return render_template('vendorselection_criteriagenerated.html')
#DETERMINISTIC STARTS
@app.route("/spt")
def spt():
return render_template('SinglePeriod.html')
@app.route("/ppbreak")
def ppbreak():
return render_template('pbreak.html')
@app.route('/pbrk', methods=['GET','POST'])
def pbrk():
return render_template('pbrk.html')
@app.route('/eoq', methods=['GET','POST'])
def eoq():
##setUpCost::setting up cost prior(>>setUpCost;<<moving rate)
AnnulaUnitsDemand=100##purchase demand of product per year
FixedCost=500 ##cost fixed for the product
AnnHoldingcost=0.25 ##remaining goods cost
UnitCost=445 ##purchasing cost
LeadTime=10 ##time b/w initiation and completion of a production process.
SafetyStock=100##extra stock
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
FixedCost=float(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=float(UnitCost)
LeadTime=float(LeadTime)
SafetyStock=float(SafetyStock)
sgap=1
pgap=1
HoldingCost=AnnHoldingcost*UnitCost
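# Classic EOQ: Q* = sqrt(2*D*K/h) with D the annual demand, K the fixed ordering cost and h = AnnHoldingcost*UnitCost (sgap and pgap are both 1 in this basic model); reorder point = daily demand * lead time + safety stock.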
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=round((EOQ*.75),0)
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totHoldCost+totOrderCost)
while (count < (EOQ*2)):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
# sstock=int(math.sqrt((LeadTime^2)+(int(ReorderPoint)^2)))
return render_template('eoq.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock)
########################EEEEppppppppppQQQQQQ############
########################EEEEppppppppppQQQQQQ############
@app.route('/eproduction', methods=['GET','POST'])
def eproduction():
AnnulaUnitsDemand=100
Prodrate=125
FixedCost=500
AnnHoldingcost=0.1
UnitCost=25000
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
Prodrate=request.form['Prodrate']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
Prodrate=int(Prodrate)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
if(Prodrate<=AnnulaUnitsDemand):
return render_template('eproduction.html',warning='Production date should not be least than Annual Demand',
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
pgap=round((1-(AnnulaUnitsDemand/Prodrate)),2)
HoldingCost=float(AnnHoldingcost*UnitCost)
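# Economic production quantity: Q* = sqrt(2*D*K/(h*(1 - D/P))) where P is the production rate and pgap = 1 - D/P.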
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totOrderCost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eproduction.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
######################EEEEppppppppppQQQQQQ############
######################EEEEppppppppppQQQQQQ############
@app.route('/eoq_backorders', methods=['GET','POST'])
def eoq_backorders():
AnnulaUnitsDemand=12000
shortcost=1.1
FixedCost=8000
AnnHoldingcost=0.3
UnitCost=1
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
shortcost=request.form['shortcost']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
shortcost=int(shortcost)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
HoldingCost=float(AnnHoldingcost*UnitCost)
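# EOQ with planned backorders: Q* = sqrt(2*D*K/h)*sqrt((p+h)/p) with p the unit shortage cost; the optimal backorder level per cycle is Q*h/(p+h).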
sgap=(shortcost+HoldingCost)/shortcost
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/HoldingCost))*(math.sqrt(sgap)),2)
REOQ=round(math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost)*sgap),0)
totbackorder=EOQ*(HoldingCost/(shortcost+HoldingCost))
totOrderCost=round(((FixedCost*AnnulaUnitsDemand)/EOQ),2)
totHoldCost=round(((HoldingCost*((EOQ-totbackorder)**2))/(2*EOQ)),2)
totshortcost=round((shortcost*(totbackorder**2)/(2*EOQ)),2)
TotalCost=round((totOrderCost+totHoldCost+totshortcost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count= EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
shlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
shlist.append(totshortcost)
tclist.append(totOrderCost+totshortcost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eoq_backorders.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
shlist=shlist,sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,shortcost=shortcost,
LeadTime=LeadTime,SafetyStock=SafetyStock)
#################pbreak######################
@app.route("/pbreak_insert", methods=['GET','POST'])
def pbreak_insert():
if request.method == 'POST':
quantity = request.form.getlist("quantity[]")
price = request.form.getlist("price[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("CREATE TABLE IF NOT EXISTS `pbreaktable` (quantity int(8),price int(8))")
curr.execute("DELETE FROM `pbreaktable`")
conn.commit()
say=1
for i in range(len(quantity)):
quantity_clean = quantity[i]
price_clean = price[i]
if quantity_clean and price_clean:
curr.execute("INSERT INTO `pbreaktable`(`quantity`,`price`) VALUES('"+quantity_clean+"','"+price_clean+"')")
conn.commit()
else:
say=0
if say==0:
message="Some values were not inserted!"
else:
message="All values were inserted!"
return(message)
@app.route('/view', methods=['GET','POST'])
def view():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
return render_template('pbrk.html',username=username,ress =ress.to_html())
@app.route('/pbreakcalculate', methods=['GET','POST'])
def pbreakcalculate():
AnnulaUnitsDemand=10
FixedCost=1
AnnHoldingcost=0.1
UnitCost=445
LeadTime=10
SafetyStock=100
if request.method == 'POST':
if request.form['AnnulaUnitsDemand']:
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
if request.form['FixedCost']:
FixedCost=request.form['FixedCost']
FixedCost=float(FixedCost)
if request.form['AnnHoldingcost']:
AnnHoldingcost=request.form['AnnHoldingcost']
AnnHoldingcost=float(AnnHoldingcost)
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
conn.close()
datatable=pd.DataFrame(columns=['Quantity','Price','EOQ','TotalCost'])
mainlist=[]
Qu=ress['quantity']
Qm=0
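# All-units price-break procedure: compute the EOQ and total cost at each quoted price, keep the EOQs that fall inside their own quantity bracket, and order the feasible quantity with the lowest total cost.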
for index, i in ress.iterrows():
tcl=[]
quantity = i['quantity']
price = i['price']
HoldingCost1=AnnHoldingcost*price
eoq1=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost1))),2)
REOQ=round(eoq1,0)
totOrderCost1=round((FixedCost*AnnulaUnitsDemand/eoq1),2)
totHoldCost1=round(((HoldingCost1*eoq1)/2),2)
totalcost1=float(round((totOrderCost1+totHoldCost1),2))
lst=[quantity,price,eoq1,totalcost1]
a=pd.DataFrame(lst).T
a.columns=['Quantity','Price','EOQ','TotalCost']
datatable=pd.concat([datatable,a],ignore_index=True)
name='TotalCost (Price='+str(a['Price'][0])+')'
tcl.append(name)
Qmin=1
Qmax=quantity
qtylist2=[]
tclist1=[]
while (Qmin < Qmax):
qtylist2.append(Qmin)
tclist1.append(round((Qmin/2*HoldingCost1+AnnulaUnitsDemand/Qmin*FixedCost),2))
Qmin +=2
Qmin=Qmax+1
qtylist2.append(eoq1)
tclist1.append(totalcost1)
tcl.append(tclist1)
mainlist.append(tcl)
Eu=datatable['EOQ']
Qu=datatable['Quantity']
Tu=datatable['TotalCost']
minlst=[]
for i in range(len(Eu)):
if i ==0:
if Eu[i]<=Qu[i]:
minlst.append(i)
else:
if Eu[i]<=Qu[i] and Eu[i]>Qu[i-1]:
minlst.append(i)
if len(minlst)==0:
minnval='Solution not feasible'
else:
minval=Tu[minlst[0]]
minnval=Eu[minlst[0]]
for j in minlst:
if Tu[j]<minval:
minval=Tu[j]
minnval=Eu[j]
val1=0
for i in range(len(tclist1)):
if (round(minnval))==qtylist2[i]:
val1=i
minival=round(minval)
minnival=round(minnval)
NumOrders=round((AnnulaUnitsDemand/minnval),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
return render_template('pbreak.html',
NumOrders=NumOrders,OrderTime=OrderTime,REOQ=REOQ,ReorderPoint=ReorderPoint,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock,minnval=minnval,minval=minval,minival=minival,minnival=minnival,
datatable=datatable.to_html(index=False),mainlist=mainlist,
val1=val1,tclist1=tclist1,qtylist2=qtylist2)
#################Demand problalstic######################
@app.route('/demand', methods=['GET', 'POST'])
def demand():
cost=10
price=12
salvage=2
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
cost=int(cost)
price=int(price)
salvage=int(salvage)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
cdf=[]
sum=0
for row in data['Prob']:
sum=sum+row
cdf.append(sum)
cumm_freq=(pd.DataFrame(cdf)).values##y-axis
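    # Newsvendor logic with a discrete demand distribution: the critical service
    # level CSl = Cu / (Cu + Co), where Cu = price - cost (underage cost) and
    # Co = cost - salvage (overage cost). The chosen demand level w is the first
    # demand whose cumulative probability reaches CSl.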
overcost=cost-salvage
undercost=price-cost
CSl=undercost/(undercost+overcost)
k=[row>CSl for row in cumm_freq]
count=1
for row in k:
if row==False:
count=count+1
demand=(data['Demand']).values
w=data['Demand'].loc[count]##line across x-axis
val=0
for i in range(len(cumm_freq)):
if(w==demand[i]):
val=i
return render_template('demand.html',cost=cost,price=price,salvage=salvage,
cumm_freq=cumm_freq,demand=demand,val=val)
@app.route('/normal', methods=['GET', 'POST'])
def normal():
cost=10
price=12
salvage=9
sd=2
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
cost=int(cost)
price=int(price)
salvage=int(salvage)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
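    # Newsvendor with normally distributed demand: the same critical ratio CSl is
    # mapped to a z-value through the inverse standard normal CDF (st.norm.ppf);
    # the standard normal pdf generated below is only used for the chart.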
overcost1=cost-salvage
undercost1=price-cost
CSl=undercost1/(undercost1+overcost1)
zz=st.norm.ppf(CSl)##x-line
z=float(format(zz, '.2f'))
# Expecteddemand=round(mea+(z*sd))
mean = 0; sd = 1; variance = np.square(sd)
x = np.arange(-4,4,.01)##x-axis
    f =(np.exp(-np.square(x-mean)/(2*variance))/(np.sqrt(2*np.pi*variance)))##y-axis
val=0
for i in range(len(f)):
if(z==round((x[i]),2)):
val=i
return render_template('normal.html',x=x,f=f,val=val,cost=cost,price=price,salvage=salvage)
@app.route('/utype', methods=['GET','POST'])
def utype():
cost=10
price=12
salvage=2
mini=1
maxi=10
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
mini=request.form['mini']
maxi=request.form['maxi']
cost=int(cost)
price=int(price)
salvage=int(salvage)
mini=int(mini)
maxi=int(maxi)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
overcost=cost-salvage
undercost=price-cost
CSl=undercost/(undercost+overcost)
expdemand1=round(mini+((maxi-mini)*CSl))
# a=[mini,0]
# b=[mini,100]
# c=[maxi,0]
# d=[maxi,100]
# width = c[0] - b[0]
# height = d[1] - a[1]
lims = np.arange(0,maxi,1)
val=0
for i in range(len(lims)):
if(expdemand1==lims[i]):
val=i
return render_template('utype.html',x=lims,f=lims,val=val,cost=cost,price=price,salvage=salvage,mini=mini,maxi=maxi)
@app.route('/outputx', methods=['GET', 'POST'])
def outputx():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM `abc`")
all_data = cur.fetchall()
all_data = pd.DataFrame(all_data)
A_ccat=.8
B_ccat=.95
A_ucat=.1
B_ucat=.25
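    # ABC classification: items are sorted by their share of total cost, then the
    # cumulative cost share (cum_cperc) and cumulative usage share (cum_uperc)
    # are compared against the A/B cut-offs above; everything else falls in C.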
tot_cost=all_data['Cost'].sum()
tot_usage=all_data['Annual Usage'].sum()
all_data['perc_cost']=all_data['Cost']/tot_cost
all_data['perc_usage']=all_data['Annual Usage']/tot_usage
all_data.sort_values(by=['perc_cost'], inplace=True, ascending=False)
sort_data=all_data.reset_index()
sort_data['cum_cperc']=np.nan
sort_data['cum_uperc']=np.nan
sort_data['Class']=''
    for i in range(len(sort_data)):
        if(i==0):
            sort_data.at[i, 'cum_cperc'] = sort_data['perc_cost'][i]
            sort_data.at[i, 'cum_uperc'] = sort_data['perc_usage'][i]
            # cperc_data.append(all_data['perc_cost'][i])
            sort_data.at[i, 'Class'] = 'A'
        else:
            sort_data.at[i, 'cum_cperc'] = sort_data['perc_cost'][i]+sort_data['cum_cperc'][i-1]
            sort_data.at[i, 'cum_uperc'] = sort_data['perc_usage'][i]+sort_data['cum_uperc'][i-1]
            if(sort_data['cum_cperc'][i]<=A_ccat and sort_data['cum_uperc'][i]<=A_ucat):
                sort_data.at[i, 'Class'] = 'A'
            elif(sort_data['cum_cperc'][i]<=B_ccat and sort_data['cum_uperc'][i]<=B_ucat):
                sort_data.at[i, 'Class'] = 'B'
            else:
                sort_data.at[i, 'Class'] = 'C'
x7=sort_data[['cum_cperc']]
x1=x7*100
x3=np.round(x1)
x2=np.array([])
x5 = np.append(x2,x3)
y7= sort_data[['cum_uperc']]
y1=y7*100
y3=np.round(y1)
y2=np.array([])
y5 = np.append(y2,y3)
###############% of Total cost//
a= sort_data[(sort_data['Class']=='A')][['perc_cost']]
j=a.sum()
k=j*100
pd.DataFrame(k)
kf=k[0]
b= sort_data[(sort_data['Class']=='B')][['perc_cost']]
n=b.sum()
m=n*100
pd.DataFrame(m)
mf=m[0]
c= sort_data[(sort_data['Class']=='C')][['perc_cost']]
o=c.sum()
p=o*100
pd.DataFrame(p)
pf=p[0]
tes=k,m,p
t2 = np.array([])
te2 = np.append(t2,tes)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
f= sort_data[(sort_data['Class']=='A')][['Product number']]
v=f.sum()
pd.DataFrame(v)
vif=v[0]
f1= sort_data[(sort_data['Class']=='B')][['Product number']]
u=f1.sum()
pd.DataFrame(u)
uif=u[0]
f2= sort_data[(sort_data['Class']=='C')][['Product number']]
vf=f2.sum()
pd.DataFrame(vf)
kif=vf[0]
#################% of Total units // Annual Usage
t= sort_data[(sort_data['Class']=='A')][['perc_usage']]
i=t.sum()
p1=i*100
pd.DataFrame(p1)
nf=p1[0]
l= sort_data[(sort_data['Class']=='B')][['perc_usage']]
t=l.sum()
q1=t*100
pd.DataFrame(q1)
qf=q1[0]
u= sort_data[(sort_data['Class']=='C')][['perc_usage']]
w=u.sum()
s1=w*100
pd.DataFrame(s1)
sf=s1[0]
test=p1,q1,s1
tt2 = np.array([])
tte2 = np.append(tt2,test)
#############values//Cost*Annual Usage
sort_data['Value'] = sort_data['Cost'] * sort_data['Annual Usage']
fz= sort_data[(sort_data['Class']=='A')][['Value']]
vz=fz.sum()
pd.DataFrame(vz)
vzz=vz[0]
fz1= sort_data[(sort_data['Class']=='B')][['Value']]
uz=fz1.sum()
pd.DataFrame(uz)
uzf=uz[0]
fz2= sort_data[(sort_data['Class']=='C')][['Value']]
vzf=fz2.sum()
pd.DataFrame(vzf)
kzf=vzf[0]
h=[{'Scenario':'A','Values':vzz,'product number':vif,'perc_usage':nf,'perc_cost ':kf},
{'Scenario':'B','Values':uzf,'product number':uif,'perc_usage':qf,'perc_cost ':mf},
{'Scenario':'C','Values':kzf,'product number':kif,'perc_usage':sf,'perc_cost ':pf}]
df = pd.DataFrame(h)
lo=sort_data[['Product Description','Product number','Cost','Annual Usage','Class']]
cur = conn.cursor()
cur.execute("SELECT * FROM `abc1`")
all_data4 = cur.fetchall()
all_data4 = pd.DataFrame(all_data4)
lolz=all_data4[['Product number','Product Description','Cost','Annual Usage','Average Stay','Average Consumption','Criticality']]
######################FFFFFFFFSSSSSSSSSNNNNNNNNNNNN#########################
######################FFFFFFFFSSSSSSSSSNNNNNNNNNNNN#########################
######################FFFFFFFFSSSSSSSSSNNNNNNNNNNNN#########################
curr = conn.cursor()
curr.execute("SELECT * FROM `fsn`")
all_data1 = curr.fetchall()
all_data1 = pd.DataFrame(all_data1)
F_cat=.2
S_cat=.5
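    # FSN classification (Fast / Slow / Non-moving): items are sorted by their
    # share of average stay, and the cumulative stay share decides the class
    # using the F_cat and S_cat cut-offs above.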
tot_stay=all_data1['Average Stay'].sum()
tot_consupt=all_data1['Average Consumption'].sum()
all_data1['perc_stay']=all_data1['Average Stay']/tot_stay
all_data1['perc_cons']=all_data1['Average Consumption']/tot_consupt
all_data1.sort_values(by=['perc_stay'], inplace=True, ascending=True)
sort_data1=all_data1.reset_index()
sort_data1['cum_stay']=np.nan
sort_data1['cum_cons']=np.nan
sort_data1['Class']=''
    for i in range(len(sort_data1)):
        if(i==0):
            sort_data1.at[i, 'cum_stay'] = sort_data1['perc_stay'][i]
            sort_data1.at[i, 'cum_cons'] = sort_data1['perc_cons'][i]
            sort_data1.at[i, 'Class'] = 'F'
        else:
            sort_data1.at[i, 'cum_stay'] = sort_data1['perc_stay'][i]+sort_data1['cum_stay'][i-1]
            sort_data1.at[i, 'cum_cons'] = sort_data1['perc_cons'][i]+sort_data1['cum_cons'][i-1]
            if(sort_data1['cum_stay'][i]<=F_cat) :
                sort_data1.at[i, 'Class'] = 'F'
            elif(sort_data1['cum_stay'][i]<=S_cat):
                sort_data1.at[i, 'Class'] = 'S'
            else:
                sort_data1.at[i, 'Class'] = 'N'
x71=sort_data1[['cum_stay']]
x11=x71*100
x31=np.round(x11)
x21=np.array([])
x51 = np.append(x21,x31)
y71= sort_data1[['cum_cons']]
y11=y71*100
y31=np.round(y11)
y21=np.array([])
y51 = np.append(y21,y31)
###############% of Total cost//
a1= sort_data1[(sort_data1['Class']=='F')][['perc_stay']]
j1=a1.sum()
k1=j1*100
pd.DataFrame(k1)
kf1=k1[0]
b1= sort_data1[(sort_data1['Class']=='S')][['perc_stay']]
n1=b1.sum()
m1=n1*100
pd.DataFrame(m1)
mf1=m1[0]
c1= sort_data1[(sort_data1['Class']=='N')][['perc_stay']]
o1=c1.sum()
p1=o1*100
pd.DataFrame(p1)
pf1=p1[0]
tes1=k1,m1,p1
t21 = np.array([])
te21 = np.append(t21,tes1)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
f1= sort_data1[(sort_data1['Class']=='F')][['Product number']]
v1=f1.sum()
pd.DataFrame(v1)
vif1=v1[0]
f11= sort_data1[(sort_data1['Class']=='S')][['Product number']]
u1=f11.sum()
pd.DataFrame(u1)
uif1=u1[0]
f21= sort_data1[(sort_data1['Class']=='N')][['Product number']]
vf1=f21.sum()
pd.DataFrame(vf1)
kif1=vf1[0]
#################% of Total units // Annual Usage
t1= sort_data1[(sort_data1['Class']=='F')][['perc_cons']]
i1=t1.sum()
p11=i1*100
pd.DataFrame(p11)
nf1=p11[0]
l1= sort_data1[(sort_data1['Class']=='S')][['perc_cons']]
t1=l1.sum()
q11=t1*100
pd.DataFrame(q11)
qf1=q11[0]
u1= sort_data1[(sort_data1['Class']=='N')][['perc_cons']]
w1=u1.sum()
s11=w1*100
pd.DataFrame(s11)
sf1=s11[0]
test1=p11,q11,s11
tt21 = np.array([])
tte21 = np.append(tt21,test1)
#############values//Cost*Annual Usage
sort_data1['Value'] = sort_data1['Average Stay'] * sort_data1['Average Consumption']
fz1= sort_data1[(sort_data1['Class']=='F')][['Value']]
vz1=fz1.sum()
pd.DataFrame(vz1)
vzz1=vz1[0]
fz11= sort_data1[(sort_data1['Class']=='S')][['Value']]
uz1=fz11.sum()
pd.DataFrame(uz1)
uzf1=uz1[0]
fz21= sort_data1[(sort_data1['Class']=='N')][['Value']]
vzf1=fz21.sum()
pd.DataFrame(vzf1)
kzf1=vzf1[0]
h1=[{'Scenario':'F','Values':vzz1,'product number':vif1,'perc_cons':nf1,'perc_stay ':kf1},
{'Scenario':'S','Values':uzf1,'product number':uif1,'perc_cons':qf1,'perc_stay ':mf1},
{'Scenario':'N','Values':kzf1,'product number':kif1,'perc_cons':sf1,'perc_stay ':pf1}]
df1 = pd.DataFrame(h1)
lo1=sort_data1[['Product Description','Product number','perc_stay','perc_cons','Class']]
##############VVVVVVVVVEEEEEEEEEEEEDDDDDDDDD#########
##############VVVVVVVVVEEEEEEEEEEEEDDDDDDDDD#########
cur1 = conn.cursor()
cur1.execute("SELECT * FROM `ved`")
all_data2 = cur1.fetchall()
all_data2 = pd.DataFrame(all_data2)
all_data2['values']=all_data2['Class'] + all_data2["Criticality"]
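    # VED analysis: each item's ABC class is combined with its criticality
    # (V = vital, E = essential, D = desirable); the highest row index of each
    # combination (AV, AE, ..., CD) is extracted and passed to the template.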
AV= all_data2[(all_data2['values']=='AV')]
AV=AV.index.max()
AE= all_data2[(all_data2['values']=='AE')]
AE= AE.index.max()
AE=np.nan_to_num(AE)
AD= all_data2[(all_data2['values']=='AD')]
AD=AD.index.max()
AD=np.nan_to_num(AD)
BV=all_data2[(all_data2['values']=='BV')]
BV=BV.index.max()
BE=all_data2[(all_data2['values']=='BE')]
BE=BE.index.max()
BD=all_data2[(all_data2['values']=='BD')]
BD=BD.index.max()
BD=np.nan_to_num(BD)
CV=all_data2[(all_data2['values']=='CV')]
CV=CV.index.max()
CV=np.nan_to_num(CV)
CE=all_data2[(all_data2['values']=='CE')]
CE=CE.index.max()
CD=all_data2[(all_data2['values']=='CD')]
CD=CD.index.max()
###############################################
xx71=all_data2[['cum_cperc']]
xx71=xx71.astype(float)
xx11=xx71*100
xx31=xx11.round()
xx21=np.array([])
xx51 = np.append(xx21,xx31)
yy71= all_data2[['cum_uperc']]
yy71=yy71.astype(float)
yy11=yy71*100
yy31=yy11.round(0)
yy21=np.array([])
yy51 = np.append(yy21,yy31)
###############% of Total cost//
aa= all_data2[(all_data2['Criticality']=='V')][['perc_cost']]
jj=aa.sum()
kk=jj*100
#k=pd.DataFrame(k)
kkf=kk[0]
bb= all_data2[(all_data2['Criticality']=='E')][['perc_cost']]
nn=bb.sum()
mm=nn*100
# m=pd.DataFrame(m)
mmf=mm[0]
cc= all_data2[(all_data2['Criticality']=='D')][['perc_cost']]
oo=cc.sum()
pp=oo*100
# p=pd.DataFrame(p)
ppf=pp[0]
ttes=[kk,mm,pp]
ttes=pd.concat(ttes)
th2 = np.array([])
the2 = np.append(th2,ttes)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
ff= all_data2[(all_data2['Criticality']=='V')][['Product number']]
vv=ff.sum()
pd.DataFrame(vv)
vvif=vv[0]
ff1= all_data2[(all_data2['Criticality']=='E')][['Product number']]
uu=ff1.sum()
pd.DataFrame(uu)
uuif=uu[0]
ff2= all_data2[(all_data2['Criticality']=='D')][['Product number']]
vvf=ff2.sum()
pd.DataFrame(vvf)
kkif=vvf[0]
#################% of Total units // Annual Usage
tt= all_data2[(all_data2['Criticality']=='V')][['perc_usage']]
ii=tt.sum()
pp1=ii*100
pd.DataFrame(pp1)
nnf=pp1[0]
ll= all_data2[(all_data2['Criticality']=='E')][['perc_usage']]
tq=ll.sum()
qq1=tq*100
pd.DataFrame(qq1)
qqf=qq1[0]
uw= all_data2[(all_data2['Criticality']=='D')][['perc_usage']]
wu=uw.sum()
sc1=wu*100
pd.DataFrame(sc1)
ssf=sc1[0]
testt=[pp1,qq1,sc1]
testt=pd.concat(testt)
ttt2 = np.array([])
ttte2 = np.append(ttt2,testt)
#############values//Cost*Annual Usage
all_data2['Value'] = all_data2['Cost'] * all_data2['Annual Usage']
fzz= all_data2[(all_data2['Criticality']=='V')][['Value']]
vzz=fzz.sum()
pd.DataFrame(vzz)
vzzz=vzz[0]
fzz1= all_data2[(all_data2['Criticality']=='E')][['Value']]
uzz=fzz1.sum()
pd.DataFrame(uzz)
uzzf=uzz[0]
fzz2= all_data2[(all_data2['Criticality']=='D')][['Value']]
vzzf=fzz2.sum()
pd.DataFrame(vzzf)
kzzf=vzzf[0]
hh=[{'Scenario':'V','Values':vzzz,'product number':vvif,'perc_usage':nnf,'perc_cost ':kkf},
{'Scenario':'E','Values':uzzf,'product number':uuif,'perc_usage':qqf,'perc_cost ':mmf},
{'Scenario':'D','Values':kzzf,'product number':kkif,'perc_usage':ssf,'perc_cost ':ppf}]
dff = pd.DataFrame(hh)
return render_template('inventoryclassification.html',
x=y5,y=x5,
barcost=te2 ,barusage=tte21,
s=df.to_html(index=False),
sam=lo.to_html(index=False),
tale=lolz.to_html(index=False),
x1=x51,y1=y51,
bar1=te21 ,bar2=tte2,
s1=df1.to_html(index=False),
sam1=lo1.to_html(index=False),
xx1=AV,xx2=AE,xx3=AD,
yy1=BV,yy2=BE,yy3=BD,
zz1=CV,zz2=CE,zz3=CD,
bb1=the2 ,bb2=ttte2,
zone1=yy51,zone2=xx51,
sammy=dff.to_html(index=False))
@app.route('/vendormanagement')
def vendormanagement():
return render_template('vendormanagement.html')
@app.route('/vendormanagementimport',methods=['POST','GET'])
def vendormanagementimport():
global vendordata
global vendordataview
db = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
vendordata = pd.read_sql("SELECT * from vendor_management", con=db)
db.close()
vendordata['POdate']=pd.to_datetime(vendordata['POdate'])
vendordata['POdate_year']=vendordata['POdate'].dt.year
vendordataview=vendordata.head(50)
return render_template('vendormanagementview.html',vendordataview=vendordataview.to_html(index=False))
@app.route('/vendormanagementview',methods=['POST','GET'])
def vendormanagementview():
return render_template('vendormanagementview.html',vendordataview=vendordataview.to_html(index=False))
@app.route('/vndrmngmnt1',methods=['POST','GET'])
def vndrmngmnt1():
VENDORID=sorted(vendordata['Vendorid'].unique())
if request.method=='POST':
vendorin=request.form['name1']
def Vendor(VendorId):
datasetcomb34=vendordata[['Vendorid','Vendor_name','Vendor_address','Vendormin_order']][vendordata['Vendorid']== VendorId]
return datasetcomb34.iloc[0,:]
snglvw=Vendor(vendorin)
singleview=pd.DataFrame(snglvw).T
return render_template('vendormanagement1.html',say=1,vendorin=vendorin,VENDORID=VENDORID,singleview=singleview.to_html(index=False))
return render_template('vendormanagement1.html',VENDORID=VENDORID)
@app.route('/vndrmngmnt2',methods=['POST','GET'])
def vndrmngmnt2():
pouyear=sorted(vendordata['POdate_year'].unique())
if request.method == 'POST':
SelectedYear = int(request.form['name1'])
SelectedTop = int(request.form['name2'])
def top10vendorspend(year,top_value):
x=[]
y=[]
gg1=vendordata[(vendordata['POdate_year']==year)].groupby(['POdate_year','Vendorid'])['PO_Value'].sum()
x=gg1.nlargest(top_value).index.get_level_values(1)
y=gg1.nlargest(top_value).values
df=pd.DataFrame({'VendorID':x,'Total':y})
return df
vndrvspnd=top10vendorspend(SelectedYear,SelectedTop)
def top10vendoravgspend(top):
gg3=vendordata.groupby(['POdate_year','Vendorid'])['PO_Value'].mean()
xxx=gg3.nlargest(top).index.get_level_values(1)
yyy=round(gg3.nlargest(top),2).values
df=pd.DataFrame({'VendorID':xxx,'Mean':yyy})
return df
vndrvavgspnd=top10vendoravgspend(SelectedTop)
return render_template('vendormanagement2.html',say=1,SelectedYear=SelectedYear,pouyear=pouyear,vndrval=vndrvspnd.values,vndrvavg=vndrvavgspnd.values)
return render_template('vendormanagement2.html',pouyear=pouyear)
@app.route('/vndrmngmnt3',methods=['POST','GET'])
def vndrmngmnt3():
pouyear=sorted(vendordata['POdate_year'].unique())
if request.method == 'POST':
SelectedYear = int(request.form['name1'])
SelectedTop = int(request.form['name2'])
def top10POvendorvalue(year,top_value):
x=[]
y=[]
gg1=vendordata[(vendordata['POdate_year']==year)].groupby(['POdate_year','Vendorid'])['Inventoryreplenished'].sum()
x=gg1.nlargest(top_value).index.get_level_values(1)
y=gg1.nlargest(top_value).values
df=pd.DataFrame({'VendorId':x,'Total':y})
return df
vndrval=top10POvendorvalue(SelectedYear,SelectedTop)
def top10POvendoravg(top):
gg3=vendordata.groupby(['POdate_year','Vendorid'])['Inventoryreplenished'].mean()
xxx=gg3.nlargest(top).index.get_level_values(1)
yyy=round(gg3.nlargest(top),2).values
df=pd.DataFrame({'VendorID':xxx,'Mean':yyy})
return df
vndrvavg=top10POvendoravg(SelectedTop)
return render_template('vendormanagement3.html',say=1,SelectedYear=SelectedYear,pouyear=pouyear,vndrval=vndrval.values,vndrvavg=vndrvavg.values)
return render_template('vendormanagement3.html',pouyear=pouyear)
@app.route('/vndrmngmnt4',methods=['POST','GET'])
def vndrmngmnt4():
pouyear=sorted(vendordata['POdate_year'].unique())
if request.method == 'POST':
SelectedYear = int(request.form['name1'])
SelectedTop = int(request.form['name2'])
def top10vendorPOcnt(year,top):
x=[]
y=[]
gg1=vendordata[(vendordata['POdate_year']==year)].groupby(['POdate_year','Vendorid'])['POdate_year'].count()
x=gg1.nlargest(top).index.get_level_values(1)
y=gg1.nlargest(top).values
df=pd.DataFrame({'MatID':x,'Total_count':y})
return df
vndrvavgpoacnt=top10vendorPOcnt(SelectedYear,SelectedTop)
def top10vendorPOavg(top):
g=vendordata.groupby('Vendorid')['POdate_year'].size()
xx=g.nlargest(top).index.get_level_values(0)
yy=g.nlargest(top).values
dfexp7=pd.DataFrame({'VendorID':xx,'Average_count':yy})
return dfexp7
vndrvavgpoavg=top10vendorPOavg(SelectedTop)
return render_template('vendormanagement4.html',say=1,SelectedYear=SelectedYear,pouyear=pouyear,vndrval=vndrvavgpoacnt.values,vndrvavg=vndrvavgpoavg.values)
return render_template('vendormanagement4.html',pouyear=pouyear)
@app.route('/vendorperformanceanalysis')
def vendorperformanceanalysis():
return render_template('vendorperformanceanalysis.html',say=0)
@app.route('/vendorperformanceanalysisdata',methods=['POST','GET'])
def vendorperformanceanalysisdata():
if request.method=='POST':
global wdata
global wtdata
file1 = request.files['file1'].read()
file2 = request.files['file2'].read()
if len(file1)==0 or len(file2)==0:
return render_template('vendorperformanceanalysis.html',say=0,warning='Data Invalid')
data1=pd.read_csv(io.StringIO(file1.decode('utf-8')))
wdata=pd.DataFrame(data1)
data2=pd.read_csv(io.StringIO(file2.decode('utf-8')))
wtdata=pd.DataFrame(data2)
return render_template('vendorperformanceanalysis.html',say=1,data1=data1.to_html(index=False),data2=data2.to_html(index=False))
@app.route('/vendorperformanceanalys',methods=['POST','GET'])
def vendorperformanceanalys():
wt=[]
for ds in wtdata['Weight']:
wt.append(round((float(ds)),2))
treatment=[]
for ds in wtdata['Positive Attribute']:
if ds=='Yes':
treatment.append('+')
else:
treatment.append('-')
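    # Weighted-scoring vendor ranking: attributes flagged as negative are
    # normalised as min/x (lower is better), positive ones as x/max, each column
    # is multiplied by its weight, and vendors are ranked by the weighted sum.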
def normalize(df,alpha,treatment):
y=df.iloc[:,1:len(list(df))]
for i, j in zip(list(y),treatment):
if j== '-':
y[i]=y[i].min()/y[i]
elif j== '+':
y[i]=y[i]/y[i].max()
        for i, t in zip(list(y),alpha):
y[i]=y[i]*t
df['Score'] = y.sum(axis=1)
df=df.sort_values('Score', ascending=False)
df['Rank']=df['Score'].rank(ascending=False)
df['Rank']=df['Rank'].astype(int)
return df[['Rank','Vendor']]
dff=normalize(wdata,wt,treatment)
return render_template('vendorperformanceanalysisview.html',say=1,data=dff.to_html(index=False))
@app.route('/purchaseorderallocation')
def purchaseorderallocation():
return render_template('purchaseorderallocation.html')
@app.route('/purchaseorderallocationimport',methods=['POST','GET'])
def purchaseorderallocationimport():
global ddemand1
global dsupply1
global maxy1
global miny1
global Vcost1
global Vrisk1
db = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
ddemand1 = pd.read_sql("SELECT * from opt_demand", con=db)
dsupply1 = pd.read_sql("SELECT * from opt_supply", con=db)
maxy1 = pd.read_sql("SELECT * from opt_maxcapacity", con=db)
miny1 = pd.read_sql("SELECT * from opt_mincapacity", con=db)
Vcost1 = pd.read_sql("SELECT * from opt_vcost", con=db)
Vrisk1 = pd.read_sql("SELECT * from opt_vrisk", con=db)
db.close()
return render_template('purchaseorderallocationimport.html',ddemand=ddemand1.to_html(index=False),dsupply=dsupply1.to_html(index=False),
maxy=maxy1.to_html(index=False),miny=miny1.to_html(index=False),Vcost=Vcost1.to_html(index=False),Vrisk=Vrisk1.to_html(index=False))
@app.route('/purchaseorderallocationanalyse',methods=['POST','GET'])
def purchaseorderallocationanalyse():
ddemand=ddemand1.set_index("Product")
dsupply=dsupply1.set_index("Vendor")
maxy=maxy1.set_index("Vendors\Product List")
miny=miny1.set_index("Vendors\Product List")
Vcost =Vcost1.set_index("Vendors\Product List")
Vrisk = Vrisk1.set_index("Vendors\Product List")
demand=dict(zip(list(ddemand.index),ddemand.iloc[:,0].values))
supply=dict(zip(list(dsupply.index),dsupply.iloc[:,0].values))
max1=maxy.to_dict()
min1=miny.to_dict()
Vendors=list(dsupply.index)
Products=list(ddemand.index)
VcostNorm = Vcost.copy()
VriskNorm = Vrisk.copy()
if request.method=='POST':
CostWeight=float(request.form['CostWeight'])
RiskWeight=float(request.form['RiskWeight'])
Total=[]
for i in list(list(VcostNorm)):
Tot = VcostNorm[i].sum()
Total.append(Tot)
for i, j in zip(list(VcostNorm),Total):
VcostNorm[i]=VcostNorm[i]/j
Total=[]
for i in list(list(VriskNorm)):
Tot = VriskNorm[i].sum()
Total.append(Tot)
for i, j in zip(list(VriskNorm),Total):
VriskNorm[i]=VriskNorm[i]/j
risk=VriskNorm.to_dict()
cost=VcostNorm.to_dict()
Total_cost=defaultdict(dict)
Total_Risk=defaultdict(dict)
Total_Cost=pd.DataFrame(CostWeight*pd.DataFrame(cost))
Total_Risk=pd.DataFrame(RiskWeight*pd.DataFrame(risk))
Decision_var=(Total_Cost+Total_Risk).to_dict()
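        # Linear program: minimise the weighted sum of normalised cost and risk
        # over all product/vendor routes, subject to meeting each product's
        # demand exactly, staying within each vendor's supply, and keeping every
        # allocation between the vendor's min and max capacity for that product.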
prob = pulp.LpProblem("Optimization", pulp.LpMinimize)
routes = [(w,b) for w in Products for b in Vendors]
x = LpVariable.dicts("route", (Products, Vendors), cat = 'LpInteger')
prob += lpSum([x[w][b] * Decision_var[w][b] for (w,b) in routes]),"Objective function"
for w in Products:
prob += lpSum([x[w][b] for b in Vendors]) == demand[w]
for b in Vendors:
prob += lpSum([x[w][b] for w in Products]) <= supply[b]
for w in Products:
for b in Vendors:
prob += x[w][b] <= max1[w][b]
for w in Products:
for b in Vendors:
prob += x[w][b] >= min1[w][b]
prob.writeLP("SO.lp")
prob.solve()
opt_status=pulp.LpStatus[prob.status]
if opt_status=='Optimal':
#print (pulp.value(prob.objective))
re=[]
res=[]
ress=[]
i=0
for variable in prob.variables():
re.append(variable.varValue)
res.append(variable.varValue)
i=i+1
if (i==len(Total_Cost)):
i=0
ress.append(re)
re=[]
Optimal_quantity1=pd.DataFrame(ress,columns=Vendors,index=Products).astype(int)
opq13=[]
for column in Optimal_quantity1.columns:
opq11=[]
opq12=[]
opq11.append(column)
for val in Optimal_quantity1[column]:
opq12.append(val)
opq11.append(opq12)
opq13.append(opq11)
Optimal_quantity2=Optimal_quantity1.T
opq23=[]
for column in Optimal_quantity2.columns:
opq21=[]
opq22=[]
opq21.append(column)
for val in Optimal_quantity2[column]:
opq22.append(val)
opq21.append(opq22)
opq23.append(opq21)
VCran=[]
for column in Vcost.columns:
for val in Vcost[column].values:
VCran.append(val)
VRran=[]
for column in Vrisk.columns:
for val in Vrisk[column].values:
VRran.append(val)
Costproduct=[i*j for (i,j) in zip(res,VCran)]
sumCostproduct=sum(Costproduct)
Riskproduct=[i*j for (i,j) in zip(res,VRran)]
optrisk=sum(Riskproduct)/sum(res)
return render_template('purchaseorderallocationoutput.html',username=username,say=1,optrisk=optrisk,sumCostproduct=sumCostproduct,Optimal_quantity1=opq13,
Optimal_quantity2=opq23,grpi1=Optimal_quantity1.index,grpi2=Optimal_quantity2.index,warning2="The obtained solution was "+opt_status)
return render_template('purchaseorderallocationoutput.html',warning1="The obtained solution was "+opt_status)
return render_template('purchaseorderallocationoutput.html')
@app.route('/purchaseordermanagement')
def purchaseordermanagement():
return render_template('purchaseordermanagement.html')
@app.route('/poimport',methods=['POST','GET'])
def poimport():
global podata
global podatahead
db = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
podata = pd.read_sql("SELECT * from po_management", con=db)
db.close()
podata['POdate']=pd.to_datetime(podata['POdate'])
podata['PO_year']=podata['POdate'].dt.year
podata['Orderreceiveddate']=pd.to_datetime(podata['Orderreceiveddate'])
podata['Orderreceivedyear']=podata['Orderreceiveddate'].dt.year
podatahead=podata.head(50)
return render_template('purchaseordermanagementview.html',podatahead=podatahead.to_html(index=False))
@app.route('/purchaseordermanagementview')
def purchaseordermanagementview():
return render_template('purchaseordermanagementview.html',podatahead=podatahead.to_html(index=False))
@app.route('/pomtype1',methods=['POST','GET'])
def pomtype1():
PONO=sorted(podata['POno'].unique())
if request.method=='POST':
SelectedPOno=int(request.form['name1'])
def POSingle(POno):
podat=podata[['POno','POammendmentdate','POdate','POverificationdate','PO_Value']][podata['POno']== POno]
return podat.iloc[0,:]
snglvw=POSingle(SelectedPOno)
svpodata=pd.DataFrame(snglvw).T
return render_template('purchaseordermanagement1.html',say=1,sayy=1,PONO=PONO,svpodata=svpodata.to_html(index=False),SelectedPOno=SelectedPOno)
return render_template('purchaseordermanagement1.html',say=1,PONO=PONO)
@app.route('/pomtype2',methods=['POST','GET'])
def pomtype2():
uyear=sorted(podata['PO_year'].unique())
if request.method=='POST':
SelectedYear=int(request.form['name1'])
podata.loc[(podata.PO_Value >= 0) & (podata.PO_Value < 10000), 'PO_Group'] = '0-10K'
podata.loc[(podata.PO_Value >= 10000) & (podata.PO_Value < 50000), 'PO_Group'] = '10K-50K'
podata.loc[(podata.PO_Value >= 50000) & (podata.PO_Value < 100000), 'PO_Group'] = '50K-100K'
podata.loc[(podata.PO_Value >= 100000) & (podata.PO_Value < 500000), 'PO_Group'] = '100K-500K'
podata.loc[(podata.PO_Value >= 500000) & (podata.PO_Value < 1000000), 'PO_Group'] = '500K-1M'
podata.loc[podata.PO_Value >= 1000000, 'PO_Group'] = '>1M'
podata.loc[podata.PO_Group == '0-10K', 'PO_GroupNo'] = 1
podata.loc[podata.PO_Group == '10K-50K', 'PO_GroupNo'] = 2
podata.loc[podata.PO_Group == '50K-100K', 'PO_GroupNo'] = 3
podata.loc[podata.PO_Group == '100K-500K', 'PO_GroupNo'] = 4
podata.loc[podata.PO_Group == '500K-1M', 'PO_GroupNo'] = 5
podata.loc[podata.PO_Group == '>1M', 'PO_GroupNo'] = 6
def top10POyrcount(year):
x=[]
y=[]
gg1=podata[(podata['PO_year']==year)].groupby(['PO_year','PO_GroupNo','PO_Group'])['PO_year'].size()
x=gg1.index.get_level_values(2)
z=gg1.index.get_level_values(1)
y=gg1.values
df=pd.DataFrame({'z':z, 'PO Value':x,'Total Count':y})
df=df.sort_values('z')
df=df.drop('z',axis=1)
return df
df=top10POyrcount(SelectedYear)
return render_template('purchaseordermanagement2.html',say=1,sayy=1,uyear=uyear,data=df.values,SelectedYear=SelectedYear)
return render_template('purchaseordermanagement2.html',say=1,uyear=uyear)
@app.route('/pomtype3',methods=['POST','GET'])
def pomtype3():
uyear=sorted(podata['PO_year'].unique())
if request.method=='POST':
SelectedYear=int(request.form['name1'])
podata.loc[(podata.Inventoryreplenished >= 0) & (podata.Inventoryreplenished < 100), 'Inventory_Group'] = '0-100'
podata.loc[(podata.Inventoryreplenished >= 100) & (podata.Inventoryreplenished < 200), 'Inventory_Group'] = '100-200'
podata.loc[(podata.Inventoryreplenished >= 200) & (podata.Inventoryreplenished < 300), 'Inventory_Group'] = '200-300'
podata.loc[(podata.Inventoryreplenished >= 300) & (podata.Inventoryreplenished < 400), 'Inventory_Group'] = '300-400'
podata.loc[(podata.Inventoryreplenished >= 400) & (podata.Inventoryreplenished < 500), 'Inventory_Group'] = '400-500'
podata.loc[podata.Inventoryreplenished >= 500,'Inventory_Group'] = '>500'
def top10poinvyrcount(year):
x=[]
y=[]
gg1=podata[(podata['PO_year']==year)].groupby(['PO_year','Inventory_Group'])['Inventory_Group'].size()
x=gg1.index.get_level_values(1)
y=gg1.values
df=pd.DataFrame({'Inventory Value':x,'Total Count':y})
df=df.sort_values('Inventory Value')
return df
df=top10poinvyrcount(SelectedYear)
return render_template('purchaseordermanagement3.html',say=1,sayy=1,uyear=uyear,data=df.values,SelectedYear=SelectedYear)
return render_template('purchaseordermanagement3.html',say=1,uyear=uyear)
@app.route('/pomtype5',methods=['POST','GET'])
def pomtype5():
uyear=sorted(podata['PO_year'].unique())
if request.method=='POST':
SelectedYear=int(request.form['name1'])
podata['date_diff']=podata['Orderreceiveddate']-podata['POdate']
podata.loc[(podata.date_diff >= '15 days') & (podata.date_diff < '18 days'), 'date_diff_Group'] = '15-18'
podata.loc[(podata.date_diff >= '18 days') & (podata.date_diff < '21 days'), 'date_diff_Group'] = '18-20'
podata.loc[(podata.date_diff >= '21 days') & (podata.date_diff <= '23 days'), 'date_diff_Group'] = '20-23'
def topleadyear(year):
x=[]
y=[]
gg1=podata[(podata['PO_year']==year)].groupby(['PO_year','date_diff_Group'])['date_diff_Group'].size()
x=gg1.index.get_level_values(1)
y=gg1.values
df=pd.DataFrame({'Lead_Time':x,'Total Count':y})
return df
df=topleadyear(SelectedYear)
return render_template('purchaseordermanagement5.html',say=1,sayy=1,uyear=uyear,data=df.values,SelectedYear=SelectedYear)
return render_template('purchaseordermanagement5.html',say=1,uyear=uyear)
@app.route('/pomtype4',methods=['POST','GET'])
def pomtype4():
pocdata=podata.groupby('PO_year')['PO_year'].size()
year=pocdata.index.get_level_values(0)
count=pocdata.values.astype(int)
df=pd.DataFrame({'Year':year,'PO_Count':count})
return render_template('purchaseordermanagement4.html',data=df.values)
#Aggregate Planning
@app.route("/aggregate",methods = ['GET','POST'])
def aggregate():
if request.method== 'POST':
from_date=request.form['from']
to_date=request.form['to']
factory=request.form['typedf']
connection = pymysql.connect(host='localhost',
user='user',
password='',
db='test',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
x=connection.cursor()
x.execute("select * from `agggendata`")
connection.commit()
data=pd.DataFrame(x.fetchall())
fromdifftodata= data[(data['Month'] > from_date) & (data['Month'] < to_date )]
datas=fromdifftodata[fromdifftodata['Factory']==factory]
global forecastedplaniingdata
forecastedplaniingdata=pd.concat([datas['Month'],datas['Demand_Forecast']],axis=1)
dataforecast=pd.concat([datas['Month'],datas['Factory'],datas['Demand_Forecast']],axis=1)
return render_template('aggregatedataview.html',datafile=dataforecast.to_html(index=False),graphdata=datas.values)
return render_template('aggregate.html')
@app.route('/optimize',methods=["GET","POST"])
def optimize():
if request.method=="POST":
formDate_val=request.form['formDate']
ToDate_val=request.form['ToDate']
InitialWorkforce_val =request.form['InitialWorkforce']
InitialInventory_val=request.form['InitialInventory']
InitialStockouts_val=request.form['InitialStockouts']
LaborHours_val=request.form['LaborHours']
MaterialCost_val=request.form['MaterialCost']
InventoryHoldingCost_val=request.form['InventoryHoldingCost']
MarginalCostStockOut_val=request.form['MarginalCostStockOut']
HTCost_val=request.form['HTCost']
LayoffCost_val=request.form['LayoffCost']
RegularTimeCost_val=request.form['RegularTimeCost']
OverTimeCost_val=request.form['OverTimeCost']
CostSubcontracting_val=request.form['CostSubcontracting']
# =============================================================================
# #Wr = workforce size for Month t, t = 1, ... , 6
# #Rt = number of employees hired at the beginning of Month t, t = 1, ... , 6
# #Lr =number of employees laid off at the beginning of Month t, t = 1, ... , 6
# #Pt = number of units produced in Month t, t = 1, ... , 6
# #It = inventory at the end of Month t, t = 1, ... , 6
# #St = number of units stocked out/backlogged at the end of Month t, t = 1, ... , 6
# #Ct = number of units subcontracted for Month t, t = 1, ... , 6
# #Ot =number of overtime hours worked in Month t, t = 1, ... , 6
# =============================================================================
# Assign spreadsheet filename to `file`
forcast = forecastedplaniingdata[(forecastedplaniingdata['Month']>formDate_val) & (forecastedplaniingdata['Month']<ToDate_val )]
datas=pd.concat([forcast['Month'],forcast['Demand_Forecast']],axis=1).reset_index(drop=True)
dat=datas['Month'].astype(str)
dta=pd.concat([dat,datas['Demand_Forecast']],axis=1)
# Print the sheet names
Dem_forecast=dta
period = []
for x in range(len(dta)):
period.append(x)
Ini_Workforce=int(InitialWorkforce_val)
Ini_Inventory=int(InitialInventory_val)
Ini_Stock_Out=int(InitialStockouts_val)
#Regular-time labor cost
# RC=Parameters['Cost'][5]
RC=int(RegularTimeCost_val)
#Overtime labor cost
OC=int(OverTimeCost_val)
#Cost of hiring and layoffs
# HR=Parameters['Cost'][3]
HR=int(HTCost_val)
# LC=Parameters['Cost'][4]
LC=int(LayoffCost_val)
#Cost of holding inventory
# HC=Parameters['Cost'][1]
HC=int(InventoryHoldingCost_val)
#Cost of stocking out
# SC=Parameters['Cost'][2]
SC=int(MarginalCostStockOut_val)
#Cost of subcontracting
# SCC=Parameters['Cost'][7]
SCC=int(CostSubcontracting_val)
#Material cost
# MC=Parameters['Cost'][0]
MC=int(MaterialCost_val)
#Production Rate
kk=int(LaborHours_val)
PR=(1/kk)
        # Create the 'model' variable to contain the aggregate planning problem data
model = LpProblem("Min Cost Aggregate Planning problem",LpMinimize)
Workforce= pulp.LpVariable.dict("Workforce",(time for time in period),lowBound=0,cat='Integer')
Hired = pulp.LpVariable.dict("Hired",(time for time in period),lowBound=0,cat='Integer')
Laid_off = pulp.LpVariable.dict("Laid_off",(time for time in period),lowBound=0,cat='Integer')
Production = pulp.LpVariable.dict("Production",(time for time in period),lowBound=0,cat='Integer')
Inventory = pulp.LpVariable.dict("Inventory",(time for time in period),lowBound=0,cat='Integer')
Stock_Out = pulp.LpVariable.dict("Stock_Out",(time for time in period),lowBound=0,cat='Integer')
Subcontract = pulp.LpVariable.dict("Subcontract",(time for time in period),lowBound=0,cat='Integer')
Overtime_Hrs = pulp.LpVariable.dict("Overtime_Hrs",(time for time in period),lowBound=0,cat='Integer')
model += pulp.lpSum(
[RC * Workforce[time] for time in period]
+ [HR * Hired[time] for time in period]
+ [LC * Laid_off[time] for time in period]
+ [MC * Production[time] for time in period]
+ [HC * Inventory[time] for time in period]
+ [SC * Stock_Out[time] for time in period]
+ [SCC * Subcontract[time] for time in period]
+ [OC * Overtime_Hrs[time] for time in period]
)
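        # Constraints: workforce balance (previous workforce + hired - laid off),
        # inventory balance (previous inventory + production + subcontracting -
        # demand - backlog carry-over), production tied to the 40-hour regular
        # workweek per worker with an overtime term scaled by PR, and overtime
        # capped at 10 hours per worker per period.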
for time in period:
if(time==0):
model += pulp.lpSum(Workforce[time]-Ini_Workforce-Hired[time]+Laid_off[time])==0
model += pulp.lpSum(Ini_Inventory+Production[time]+Subcontract[time]\
-Dem_forecast['Demand_Forecast'][time]-Ini_Stock_Out-Inventory[time]+Stock_Out[time])==0
else:
model += pulp.lpSum(Workforce[time]-Workforce[time-1]-Hired[time]+Laid_off[time])==0
model += pulp.lpSum(Inventory[time-1]+Production[time]+Subcontract[time]\
-Dem_forecast['Demand_Forecast'][time]-Stock_Out[time-1]-Inventory[time]+Stock_Out[time])==0
model += pulp.lpSum(Production[time]-40*Workforce[time]+(Overtime_Hrs[time]*PR))<=0
model += pulp.lpSum(Overtime_Hrs[time]-10*Workforce[time])<=0
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
#Storing Name and Values
Name=[]
values=[]
for v in model.variables():
Name.append(v.name)
values.append(v.varValue)
#counting no of hired
count=0
for k in range(0,len(Name)):
val=Name[k]
if val[0:5]=='Hired':
count=count+1
Name_df=pd.DataFrame(Name)
valdf=pd.DataFrame(values)
Namearray=pd.DataFrame(Name_df.values.reshape(count, int(len(Name)/count), order='F'))
Valuesarray=pd.DataFrame(valdf.values.reshape(count, int(len(Name)/count), order='F'))
kk=pd.DataFrame(Namearray.iloc[0])
kk.columns=['val']
Namesofcol = kk['val'].map(lambda x: x.lstrip('+-').rstrip('_0'))
Valuesarray.columns = [Namesofcol]
opt = pd.DataFrame(Valuesarray)
datasor = pd.concat([opt['Inventory'],opt['Stock_Out'],opt['Subcontract'],opt['Production'],opt['Hired'],opt['Laid_off'],opt['Workforce'],opt['Overtime_Hrs']],axis=1)
dd = pd.DataFrame(Dem_forecast)
        dfss = pd.concat([dd,datasor],axis=1)
#!/usr/bin/python
print('process_financials_q - initiating.')
import os
import pandas as pd
cwd = os.getcwd()
input_folder = "0_input"
temp_folder = "temp"
financials_temp = "financials_q"
from pathlib import Path
paths = Path(os.path.join(cwd,input_folder,temp_folder,financials_temp)).glob('**/*.csv')
financials_table = []
for path in paths:
path_in_str = str(path)
try:
fundamentals_parse = pd.read_csv(path,low_memory=False)
if not fundamentals_parse.empty:
financials_table.append(fundamentals_parse)
print(path_in_str)
else:
pass
except:
pass
# export
financials_table = pd.concat(financials_table)
import os
import zipfile
import requests
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm import tqdm
def download_file(url, filename):
r = requests.get(url, stream=True)
total_size = int(r.headers.get("content-length", 0))
block_size = 1024 # 1 Kibibyte
t = tqdm(total=total_size, unit="iB", unit_scale=True)
with open(filename, "wb") as f:
for data in r.iter_content(block_size):
t.update(len(data))
f.write(data)
t.close()
def load_VSN_data():
data_dir = Path("./data") / "vehicle_sensor"
if not data_dir.exists():
data_dir.mkdir(parents=True)
subdirs = [f for f in data_dir.iterdir() if f.is_file()]
if not subdirs:
url = "http://www.ecs.umass.edu/~mduarte/images/event.zip"
zip_file = data_dir / "original_data.zip"
download_file(url, zip_file)
with zipfile.ZipFile(zip_file, "r") as zip_ref:
zip_ref.extractall(data_dir)
data_dir = data_dir / "events" / "runs"
x = []
y = []
task_index = []
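    # Walk the extracted run directories: the two "feat" csv files found in each
    # run (first 50 columns of each) are concatenated into 100 features per row,
    # the run id becomes the task index, and the label is 1 for AAV runs, else 0.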
for root, dir, file_names in os.walk(data_dir):
if "acoustic" not in root and "seismic" not in root:
x_tmp = []
for file_name in file_names:
if "feat" in file_name:
dt_tmp = pd.read_csv(
os.path.join(root, file_name),
sep=" ",
skipinitialspace=True,
header=None,
).values[:, :50]
x_tmp.append(dt_tmp)
if len(x_tmp) == 2:
x_tmp = np.concatenate(x_tmp, axis=1)
x.append(x_tmp)
task_index.append(
int(os.path.basename(root)[1:]) * np.ones(x_tmp.shape[0])
)
y.append(
int("aav" in os.path.basename(os.path.dirname(root)))
* np.ones(x_tmp.shape[0])
)
x = np.concatenate(x)
y = np.concatenate(y)
task_index = np.concatenate(task_index)
argsort = np.argsort(task_index)
x = x[argsort]
y = y[argsort]
task_index = task_index[argsort]
split_index = np.where(np.roll(task_index, 1) != task_index)[0][1:]
x = np.split(x, split_index)
y = np.split(y, split_index)
df = pd.DataFrame()
feature_cols = []
for i, (p_x, p_y) in enumerate(zip(x, y)):
p_df = pd.DataFrame(p_x)
feature_cols = p_df.columns.astype(str)
p_df["vehicle"] = str(i)
p_df["y"] = p_y
        df = pd.concat([df, p_df], axis=0)
from c0101_retrieve_ref import retrieve_ref
from c0102_timestamp import timestamp_source
from c0103_trim_record_to_max import trim_record_to_max
from c0104_plot_timestamp import plot_timestamp
from c0105_find_records import find_records
from c0106_record_to_summary import record_to_summary
from c0107_decide_inclusion import decide_inclusion
from c0108_save_meta import save_meta
from c0109_retrieve_meta import retrieve_meta
from c0110_find_temp_end import find_temp_end
from c0111_retrieve_analyzed import retrieve_analyzed
from c0112_plot_truncate import plot_truncate
from c0113_plot_acc import plot_acc
from c0202_machineLearningBasic import machineLearningBasic
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statistics
def statisticSegments():
"""
Calculate and save statistics from each record
"""
print("begin statistical calculation")
study_list = retrieve_ref('study_list')
sensor_list = retrieve_ref('sensor_list')
segment_list = retrieve_ref('segment_list')
analysis_type = 'truncate'
for study in study_list:
df_meta = retrieve_meta(study)
source_path = list(df_meta['source_path'])
dfStatistics = pd.DataFrame()
        statistics_types = ['mean', 'median', 'pVariance', 'stdev', 'quan']
quan_types = [10, 20, 30, 40, 50, 60, 70, 80, 90]
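        # Build one placeholder column per sensor / segment / statistic
        # combination (quantile statistics get one column per quantile level);
        # the columns are initialised to None for every record.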
for record in source_path:
dfStatistics['source_path'] = source_path
for sensor in sensor_list:
for segment in segment_list:
for statis in statistics_types:
colName = str(sensor + '_' + segment + '_' + statis )
if statis == 'quan':
for quanNum in quan_types:
colName = str(sensor + '_' + segment + '_' + statis + '_' + str(quanNum) )
dfStatistics[colName] = [None] * len(source_path)
analyzed_path = os.path.join(study, 'analyzed')
if not os.path.isdir(analyzed_path): os.mkdir(analyzed_path)
analyzed_path = os.path.join(study, 'analyzed', 'statistics')
if not os.path.isdir(analyzed_path): os.mkdir(analyzed_path)
analyzed_file = os.path.join(analyzed_path, 'statisticsSegments.csv')
print('analyzed_file = ' + str(analyzed_file))
dfStatistics.to_csv(analyzed_file)
# retrieve statistics file
        df = pd.read_csv(analyzed_file)
import os, sys
import argparse
import pandas as pd
import numpy as np
from models import AVAILABLE_MODELS
from server import cpapi
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from tqdm import tqdm
def get_data(repo_url, repo_data_directory, data_format='tgz', dataset_id=None):
clone_dir = '/tmp/emission_data'
print(f'Cloning repo {repo_url} to directory {clone_dir}...')
os.system(f'git clone {repo_url} {clone_dir}/')
if (data_format=='tgz'):
print(f'Unzipping with tar...')
os.system(f'for i in {clone_dir}/{repo_data_directory}/*.tgz; do tar -zxvf "$i" -C {clone_dir}/ ;done')
elif (data_format=='csv'):
os.system(f'for i in {clone_dir}/{repo_data_directory}/*.csv; do cp "$i" {clone_dir}/ ;done')
else:
raise ValueError('Source data format not recognized. Only tgz and csv supported.')
# Remove known garbage file in textile source data v. 1.0.0
garbage_file = f'{clone_dir}/._textile-v1.0.0-5.csv'
if (os.path.isfile(garbage_file)):
print(f'Removing garbage file {garbage_file}')
os.system(f'rm {garbage_file}')
content = sorted(filter(lambda x: x.endswith('.csv'), os.listdir(clone_dir)))
return pd.concat((pd.read_csv(f'{clone_dir}/{f}') for f in tqdm(content, desc="Reading csv")))
def get_data_from_dir(local_data_dir=None, dataset_id=None):
print(f'Using source data from local dir {local_data_dir}')
content = sorted(filter(lambda x: x.endswith('.csv'), os.listdir(local_data_dir)))
return pd.concat((pd.read_csv(f'{local_data_dir}/{f}') for f in tqdm(content, desc="Reading csv")))
def prepare_data(local_data=False, local_data_dir=None, repo_url=None, repo_data_directory=None, data_format='tgz', dataset_id=None, random_state=42):
X = None
print("Loading csv files, this may take a while...")
if (local_data):
X = get_data_from_dir(local_data_dir,dataset_id)
else:
X = get_data(repo_url, repo_data_directory, data_format, dataset_id)
X = X[~X['co2_total'].isna()]
y = X['co2_total'].copy()
X = X.drop('co2_total', axis=1)
print('Split to training and testing data')
return train_test_split(X, y, test_size=0.2, random_state=random_state)
def do_train(model_name, base_dir=None, local_data=False, local_data_dir=None,
repo_url='https://github.com/Compensate-Operations/emission-sample-data.git',
repo_data_directory='datasets/textile-v1.0.0',
data_format='tgz',
dataset_id=None,
random_state=42,
save_test=False):
if (base_dir == None):
base_dir = os.environ.get('MODEL_DIR', './')
X_train, X_test, y_train, y_test = prepare_data(local_data, local_data_dir, repo_url, repo_data_directory, data_format, dataset_id, random_state)
print('Data preparation complete. Starting training of model.')
model = AVAILABLE_MODELS[model_name]()
model.train(X_train, X_test, y_train, y_test, base_dir)
def do_eval(model_name, local_data=False, local_data_dir=None,
repo_url='https://github.com/Compensate-Operations/emission-sample-data.git',
repo_data_directory='datasets/textile-v1.0.0',
data_format='tgz',
dataset_id=None,
random_state=42,
save_test=False):
X_train, X_test, y_train, y_test = prepare_data(local_data, local_data_dir, repo_url, repo_data_directory, data_format, dataset_id, random_state)
preds = {}
print('Data preparation complete. Starting training and evaluation of model.')
model = AVAILABLE_MODELS[model_name]()
model.train(X_train, X_test, y_train, y_test, base_dir)
r2_score, rmse_score, y_pred = model.eval(X_test, y_test)
preds[model_name]=y_pred
if save_test:
        pd.concat([X_test.reset_index(drop=True), y_test.reset_index(drop=True), pd.DataFrame(preds)], axis=1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import argparse
import time
import sys
kinetics_filename=sys.argv[1]
time_lag_file=sys.argv[2]
output_filename=sys.argv[3]
final_output_filename=sys.argv[4]
# print(final_output_filename)
df=pd.read_csv(kinetics_filename)
#dropping null values
df=df.dropna()
df['co_cited_year']=df['co_cited_year'].astype(int)
df['frequency']=df['frequency'].astype(int)
y_df=pd.read_csv(time_lag_file)
y_df=y_df[['cited_1','cited_2','cited_1_year','cited_2_year','first_co_cited_year']]
# print(df.head())
zero_df=pd.merge(df,y_df,on=['cited_1','cited_2'],how='inner')
#Faulty co-cited data should be eliminated here
zero_df=zero_df[zero_df['co_cited_year']>=zero_df['first_co_cited_year']]
zero_df['pfcy']=zero_df[['cited_1_year','cited_2_year']].max(axis=1)
zero_df=zero_df.drop(columns=['cited_1_year','cited_2_year'])
print('printing zero df')
print(zero_df)
#First write code to generate 0 rows of data
#Get all data where pfcy
#temp_df=zero_df.groupby(by=['cited_1','cited_2'],as_index=False)['first_co_cited_year','pfcy'].min()
temp_df=zero_df.groupby(by=['cited_1','cited_2'],as_index=False)['first_co_cited_year','co_cited_year','pfcy'].min()
# print(temp_df.head())
#print(temp_df[(temp_df['cited_1']==14949207) & (temp_df['cited_2']==17184389)])
#temp_df=temp_df[temp_df['pfcy'] < temp_df['first_co_cited_year']]
temp_df=temp_df[temp_df['pfcy'] < temp_df['co_cited_year']]
print('debug prints')
print(temp_df)
#temp_df.columns=['cited_1','cited_2','first_co_cited_year','pfcy']
print('debug prints')
print(temp_df)
temp_df['diff']=temp_df['co_cited_year']-temp_df['pfcy']
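# Expand each (cited_1, cited_2) pair into one row per missing year between the
# later publication year (pfcy) and the first co-citation year: repeat each row
# 'diff' times, use the within-pair rank to offset the year, and set frequency 0.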
temp_df['cited_1']=temp_df['cited_1'].astype(int)
temp_df=temp_df.loc[temp_df.index.repeat(temp_df['diff'])]
temp_df['rank']=temp_df.groupby(by=['cited_1','cited_2'])['pfcy'].rank(method='first')
temp_df['rank']=temp_df['rank'].astype(int)
temp_df['rank']=temp_df['rank']-1
temp_df['co_cited_year']=temp_df['pfcy']+temp_df['rank']
temp_df['frequency']=0
temp_df=temp_df[['cited_1','cited_2','co_cited_year','frequency','first_co_cited_year']]
print('printing temp df')
print(temp_df)
# print(df[(df['cited_1']==4532) & (df['cited_2']==10882054)][['cited_1','cited_2','co_cited_year','frequency','first_co_cited_year']])
# tt=tt[['cited_1','cited_2','first_co_cited_year','','']]
#Merge df with y_df so that it gets first_co_cited_year column
print('length of original df',len(df))
df=pd.merge(df,y_df[['cited_1','cited_2','first_co_cited_year']],on=['cited_1','cited_2'],how='inner')
print(df.head())
print('length of original df',len(df))
#Faulty co-cited data should be eliminated here as well
print('lenght before eliminating wrong data',len(df))
df=df[df['co_cited_year']>=df['first_co_cited_year']]
print('lenght after eliminating wrong data',len(df))
print('Total data points',len(df))
final_df=pd.concat([df,temp_df]).sort_values(by=['cited_1','cited_2','co_cited_year'])
print('Total data points',len(final_df))
# print(final_df[(final_df['cited_1']==4532) & (final_df['cited_2']==10882054)])
final_df=final_df.copy()
final_df.reset_index(inplace=True,drop=True)
# print(final_df[(final_df['cited_1']==4532) & (final_df['cited_2']==10882054)])
final_df['cited_1']=final_df['cited_1'].astype(int)
final_df['co_cited_year']=final_df['co_cited_year'].astype(int)
#Now generating peak_frequency,first_peak_year,min_frequency
temp_df=final_df.groupby(by=['cited_1','cited_2'],as_index=False)['frequency'].max()
print(temp_df.head())
temp_df=pd.merge(final_df,temp_df,on=['cited_1','cited_2','frequency'],how='inner')
temp_df=temp_df.groupby(by=['cited_1','cited_2'],as_index=False)['frequency','co_cited_year'].min()
print(temp_df.head())
temp_df.columns=['cited_1','cited_2','peak_frequency','first_peak_year']
print(temp_df.head())
final_df=pd.merge(final_df,temp_df,on=['cited_1','cited_2'],how='inner')
print('Size',len(final_df))
# final_df=pd.merge(final_df,temp_df,on=['cited_1','cited_2'],how='inner')
# print('Size',len(final_df))
final_df=final_df[final_df['co_cited_year'] <= final_df['first_peak_year']]
# expected size for bin 221381
print('Size after filtering from peak year',len(final_df))
final_df['co_cited_year_ranks']=final_df.groupby(by=['cited_1','cited_2'])['co_cited_year'].rank(method='first')
temp_df=final_df[final_df['co_cited_year_ranks']==1][['cited_1','cited_2','frequency']]
temp_df.columns=['cited_1','cited_2','min_frequency']
temp_df=temp_df.drop_duplicates()
final_df=final_df.drop(columns=['co_cited_year_ranks'])
print('Adding min frequency')
final_df=pd.merge(final_df,temp_df,on=['cited_1','cited_2'],how='inner')
import numpy as np
import pandas as pd
#importing datetime libarary
import datetime
from datetime import date, timedelta
#
from collections import defaultdict
df = pd.read_excel('googling_filtered.xlsx')
df['Date'] = [datetime.datetime.strptime(x ,'%b %d, %Y') for x in df['Date']]
df['Date'] = [x.to_pydatetime().date() for x in df['Date']]
#searching datas within 30 days
end_date = date.today()
start_date = end_date - datetime.timedelta(days=30)
def risk_cal(df = df, startdate = start_date, enddate = end_date):
# Search the data recorded within 30days in a speicific country
mask = (df['Date'] >= startdate) & (df['Date'] <= enddate)
df = df[mask]
#make an empty dictionary
factor_dict = defaultdict(list)
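    # Each reported date contributes (number of reports that day) * 1/(days since
    # the report + 1), so recent, frequent reports dominate; the risk score is
    # 100 times the sum of these factors over the search window.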
for time in df['Date'].unique():
frequency_factor = len(df[df['Date'] == time])
time_factor = enddate - time
time_factor = time_factor / timedelta (days=1)
time_factor = 1 / ( time_factor + 1 )
factor_dict['Date'].append(time)
factor_dict['factor'].append(frequency_factor * time_factor)
factor_frame = pd.DataFrame(factor_dict)
if factor_frame.empty:
return None
risk = factor_frame['factor'].sum() * 100
return risk
Risk_Score = defaultdict(list)
while start_date <= end_date:
dates = start_date #.strftime("%Y-%m-%d")
print(dates , risk_cal(df = df, startdate = start_date, enddate = end_date))
Risk_Score['Date'].append(dates)
Risk_Score['Risk_Score'].append(risk_cal(df = df, enddate = dates))
start_date += datetime.timedelta(days=1)
Risk_Score = pd.DataFrame(Risk_Score)
import gzip
import pickle
from os import path
from time import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from openTSNE import nearest_neighbors
from openTSNE import utils
with utils.Timer("Loading data...", verbose=True):
with gzip.open(path.join("data", "macosko_2015.pkl.gz"), "rb") as f:
data = pickle.load(f)
x = data["pca_50"]
y, cluster_ids = data["CellType1"], data["CellType2"]
results = []
n_reps = 5
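# Benchmark exact Ball Tree vs approximate Annoy k-nearest-neighbour index
# construction (k=15, Euclidean metric) on random subsamples of increasing size,
# with 1 and 4 worker threads, repeating each configuration n_reps times.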
for sample_size in range(1000, 8_001, 1000):
print("Sample size:", sample_size)
indices = np.random.choice(range(x.shape[0]), size=sample_size)
sample = x[indices]
for i in range(n_reps):
start = time()
nn = nearest_neighbors.BallTree(metric="euclidean", n_jobs=1)
nn.build(sample, k=15)
results.append(("Ball Tree (1 core)", sample_size, time() - start))
for i in range(n_reps):
start = time()
nn = nearest_neighbors.Annoy(metric="euclidean", n_jobs=1)
nn.build(sample, k=15)
results.append(("Annoy (1 core)", sample_size, time() - start))
for i in range(n_reps):
start = time()
nn = nearest_neighbors.BallTree(metric="euclidean", n_jobs=4)
nn.build(sample, k=15)
results.append(("Ball Tree (4 cores)", sample_size, time() - start))
for i in range(n_reps):
start = time()
nn = nearest_neighbors.Annoy(metric="euclidean", n_jobs=4)
nn.build(sample, k=15)
results.append(("Annoy (4 cores)", sample_size, time() - start))
df = pd.DataFrame(results, columns=["method", "size", "time"])
#!/usr/bin/env python
# coding: utf-8
# In[47]:
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras import optimizers
from keras.layers import Dense
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from pandas import ExcelFile
from pandas import ExcelWriter
from scipy import ndimage
from scipy.stats import randint as sp_randint
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn import datasets
from sklearn import metrics
from sklearn import pipeline
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
from tensorflow.python.framework import ops
import keras
import matplotlib.pyplot as plt
import numpy as np
import openpyxl
import pandas as pd
import scipy
import tensorflow as tf
import xlsxwriter
import numpy as np
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
import tensorflow as tf
from keras.initializers import glorot_uniform
import scipy.misc
from matplotlib.pyplot import imshow
import keras.backend as K
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
import tensorflow as tf
import keras
from keras import backend as K
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
import pandas as pd
import numpy as np
import os
from matplotlib import pyplot as plt
import keras
from sklearn.utils import shuffle
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D, Input, GlobalMaxPooling2D
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
from sklearn.utils import shuffle
from multiprocessing import freeze_support
from sklearn import preprocessing
from rdkit import Chem
from mordred import Calculator, descriptors
from padelpy import from_smiles
from padelpy import padeldescriptor
# In[2]:
from padelpy import from_smiles
# In[48]:
train_data = pd.read_csv('train.csv')
# In[49]:
test_data = pd.read_csv('test.csv')
# In[50]:
X_train_smiles=np.array(train_data ['smiles'])
# In[51]:
X_test_smiles=np.array(test_data ['smiles'])
# In[52]:
print(X_train_smiles.shape)
print(X_test_smiles.shape)
# In[53]:
print(X_train_smiles.shape[0])
# In[54]:
Y_train=np.array(train_data ['ACTIVITY'])
# In[55]:
Y_test=np.array(test_data ['ACTIVITY'])
# In[56]:
#full_data = pd.read_csv('data.csv')
# In[57]:
charset = set("".join(list(train_data.smiles))+"!E")
char_to_int = dict((c,i) for i,c in enumerate(charset))
int_to_char = dict((i,c) for i,c in enumerate(charset))
embed = max([len(smile) for smile in train_data.smiles]) + 5
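# NOTE: "!" marks the start of a sequence and "E" pads the end, so embed
# (longest SMILES + 5) leaves room for the start token plus end-padding.
# Iterating over a Python set is not guaranteed to be reproducible across runs;
# sorting the charset (e.g. sorted(charset)) would make char_to_int stable.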
print (str(charset))
print(len(charset), embed)
# In[58]:
char_to_int
# In[59]:
def vectorize(smiles):
one_hot = np.zeros((smiles.shape[0], embed , len(charset)),dtype=np.int8)
for i,smile in enumerate(smiles):
#encode the startchar
one_hot[i,0,char_to_int["!"]] = 1
#encode the rest of the chars
for j,c in enumerate(smile):
one_hot[i,j+1,char_to_int[c]] = 1
#Encode endchar
one_hot[i,len(smile)+1:,char_to_int["E"]] = 1
#Return two, one for input and the other for output
return one_hot[:,0:-1,:], one_hot[:,1:,:]
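# Sanity-check sketch (illustrative only; the SMILES string below is hypothetical):
# _x_in, _x_out = vectorize(np.array(["CCO"]))
# assert _x_in.shape == (1, embed - 1, len(charset))   # "!"-prefixed input one-hot
# assert _x_out.shape == (1, embed - 1, len(charset))  # same sequence shifted left by one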
# In[60]:
X_train, _ = vectorize(X_train_smiles)
X_test, _ = vectorize(X_test_smiles)
# In[61]:
X_train[8].shape
# In[62]:
vocab_size=len(charset)
# In[ ]:
# In[ ]:
# In[63]:
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
# In[64]:
print(np.shape(np.argmax(X_train, axis=2)))
# In[65]:
print(np.shape(X_train))
# In[66]:
print(np.shape(X_test))
# In[67]:
dataX_train=np.argmax(X_train, axis=2)
dataX_test=np.argmax(X_test, axis=2)
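# np.argmax along the one-hot axis recovers integer token indices, giving
# (n_samples, embed - 1) matrices that feed the Embedding layer further below.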
dataY_train=Y_train
dataY_test=Y_test
# In[68]:
print('dataX_test Shape: '+str(np.shape(dataX_test)))
print('dataY_test Shape: '+str(np.shape(dataY_test)))
# In[69]:
print('dataX_train Shape: '+str(np.shape(dataX_train)))
print('dataY_train Shape: '+str(np.shape(dataY_train)))
# In[70]:
data_y_train = (np.array(dataY_train, dtype=np.float32)).reshape(dataY_train.shape[0],1)
# In[71]:
print('data_y_train Shape: '+str(np.shape(data_y_train)))
# In[72]:
data_y_test = (np.array(dataY_test, dtype=np.float32)).reshape(dataY_test.shape[0],1)
# In[73]:
print('data_y_test Shape: '+str(np.shape(data_y_test)))
# In[74]:
data_x_test=dataX_test
# In[75]:
data_x_train=dataX_train
# In[76]:
print(np.shape(data_x_train))
# In[77]:
Max_len=data_x_train.shape[1]
# In[78]:
X = tf.placeholder(tf.float32, [None, Max_len])
Y = tf.placeholder(tf.float32, [None, 1])  # float32 to match the Keras prediction head; a float64 label placeholder makes the MSE op fail on dtype mismatch
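# NOTE (assumption): the graph-mode code below relies on the TF 1.x API.
# On TensorFlow 2.x the same entry points live under tf.compat.v1, roughly:
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()
# so that tf.placeholder / tf.Session / tf.train.Saver remain available.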
# In[79]:
py_x =keras.layers.Embedding(1025, 400, input_length=Max_len)(X)
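# NOTE: input_dim=1025 is just a generous upper bound on the token indices;
# the computed vocab_size (len(charset)) would be the tighter choice here.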
# In[80]:
py_x=keras.layers.Conv1D(192,10,activation='relu')(py_x)
py_x=keras.layers.BatchNormalization()(py_x)
py_x=keras.layers.Conv1D(192,5,activation='relu')(py_x)
py_x=keras.layers.Conv1D(192,3,activation='relu')(py_x)
py_x=keras.layers.Flatten()(py_x)
# In[81]:
py_x1_keras = keras.layers.Dense(100, activation='relu')(py_x)
py_x1_keras = keras.layers.Dropout(0.7)(py_x1_keras)
# In[82]:
py_x1 = keras.layers.Dense(1, activation='linear')(py_x1_keras)
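# The single linear output unit makes this a regression head: the model is fit
# with MSE below and evaluated via RMSE / R^2 / MAE on the ACTIVITY values.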
# In[83]:
cost1 = tf.losses.mean_squared_error(labels=Y, predictions=py_x1)
# In[84]:
train_op1 = tf.train.AdamOptimizer(learning_rate = 5e-6).minimize(cost1)
# In[85]:
prediction_error1 = tf.sqrt(cost1)
# In[86]:
import tensorflow as tf
# In[ ]:
# In[87]:
batch_size = 32
# In[88]:
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
SAVER_DIR = "model_ld50"
os.makedirs(SAVER_DIR, exist_ok=True)  # tf.train.Saver.save() raises if the checkpoint directory does not exist
saver = tf.train.Saver()
ckpt_path = os.path.join(SAVER_DIR, "model_ld50")
ckpt = tf.train.get_checkpoint_state(SAVER_DIR)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
    best_rmse = np.inf  # any finite test RMSE improves on this, so the first epoch always checkpoints
best_idx = 0
LD50_R2_train = []
#LD50_R2_valid = []
LD50_R2_test = []
LD50_RMSE_train = []
#LD50_RMSE_valid = []
LD50_RMSE_test = []
LD50_MAE_train = []
#LD50_MAE_valid = []
LD50_MAE_test = []
steps=[]
for i in range(5000):
steps.append(i)
training_batch = zip(range(0, len(data_x_train), batch_size),
range(batch_size, len(data_x_train)+1, batch_size))
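        # NOTE: this zip-based batching silently drops the final partial batch
        # whenever len(data_x_train) is not a multiple of batch_size.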
#for start, end in tqdm.tqdm(training_batch):
for start, end in training_batch:
sess.run(train_op1, feed_dict={X: data_x_train[start:end], Y: data_y_train[start:end]})
merr_train_1 = sess.run(prediction_error1, feed_dict={X: data_x_train, Y: data_y_train})
print('Epoch Number: '+str(i))
print('RMSE_Train: '+str(merr_train_1))
LD50_RMSE_train.append(merr_train_1)
train_preds1 = sess.run(py_x1, feed_dict={X: data_x_train})
train_r1 = r2_score(data_y_train, train_preds1)
train_mae = mean_absolute_error(data_y_train, train_preds1)
print('R^2_Train: '+str(train_r1))
LD50_R2_train.append(train_r1)
print('MAE_Train: '+str(train_mae))
LD50_MAE_train.append(train_mae)
print(" ")
merr_test_1 = sess.run(prediction_error1, feed_dict={X: data_x_test, Y: data_y_test})
print('Epoch Number: '+str(i))
print('RMSE_test: '+str(merr_test_1))
LD50_RMSE_test.append(merr_test_1)
test_preds1 = sess.run(py_x1, feed_dict={X: data_x_test})
test_r1 = r2_score(data_y_test, test_preds1)
test_mae = mean_absolute_error(data_y_test, test_preds1)
print('R^2_test: '+str(test_r1))
LD50_R2_test.append(test_r1)
print('MAE_test: '+str(test_mae))
LD50_MAE_test.append(test_mae)
print(" ")
if best_rmse > merr_test_1:
best_idx = i
best_rmse = merr_test_1
save_path = saver.save(sess, ckpt_path)
print('model saved!')
print("###########################################################################")
# In[89]:
####################################################################
#=========================== test part ============================#
####################################################################
saver = tf.train.Saver()
ckpt_path = os.path.join(SAVER_DIR, "model_ld50")  # keep the prefix consistent with the one used when saving
ckpt = tf.train.get_checkpoint_state(SAVER_DIR)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, ckpt.model_checkpoint_path)
print("model loaded successfully!")
test_rmse = sess.run(prediction_error1, feed_dict={X: data_x_test, Y: data_y_test})
print('RMSE of the test after loading the best model: '+str(test_rmse))
test_preds = sess.run(py_x1, feed_dict={X: data_x_test})
    # use the restored model's predictions (test_preds), not the leftover
    # test_preds1 from the training loop above
    test_r = r2_score(data_y_test, test_preds)
    test_mae = mean_absolute_error(data_y_test, test_preds)
print('R^2_test after loading the best model: '+str(test_r))
print('MAE_test after loading the best model: '+str(test_mae))
print(test_preds)
test_preds = pd.DataFrame(test_preds)
print(test_preds)
test_preds.columns = ['C1DS_out']
writer = pd.ExcelWriter('test_preds_C1DS.xlsx', engine='xlsxwriter')
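# Sketch (assumption): the notebook appears cut off here; presumably the predictions
# are written to the workbook along these lines before closing the writer:
# test_preds.to_excel(writer, sheet_name='C1DS', index=False)
# writer.save()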
import os, sys
import numpy as np
import pandas as pd
import subprocess
import glob
import json
import csv
import pickle
from Bio.Seq import Seq
from itertools import product
#--------------------------------------------------------------------------------------------
def codon_translationrates_indprofiles(data_scikit, data_mm, codon_seq):
"""
extracts distributions of codon translation rates from scikit ribo data
scikit_data: dictionary of scikit_data per gene with profiles of each experiment
codon_sequence: dictionary of mRNA sequence in codons
"""
trim = 20 # omit first and last 20 codons
codon_duplets = [(x+y) for x in codons_nonstop for y in codons_nonstop]
ALL_TR = []
ALL2_TR = []
TR_df = pd.DataFrame(columns=codons_nonstop)
TR2_df = pd.DataFrame(columns=codon_duplets)
TR2_raw_df = pd.DataFrame(columns=['codonpair', 'class', 'RD'])
codon_counts = np.zeros(( 20, len(codons_nonstop) ))
codon_duplet_counts = np.zeros(( 20, len(codon_duplets) ))
list_orfs = list( data_scikit.keys() )
counter_cp = 0
coverage = np.zeros(( 20 ))
for experiment in range( 20 ):
codon_TR = [[] for i in range( len(codons_nonstop) )]
codon2_TR = [[] for i in range( len(codon_duplets) )]
coverage = []
print("Analyzing experiment", experiment)
for ix, orf in enumerate(list_orfs):
current_data = data_scikit[orf]
current_mm = data_mm[orf]
current_seq = np.array( codon_seq[orf] )
print( np.sum( current_data[:,current_mm], 1)/np.shape(current_data[:,current_mm])[1] )
#print( np.mean(current_data[current_mm]) )
if current_data.shape[1] == len(current_mm):
for pos in range(trim, len(current_seq) - (trim+2) ):
if current_mm[pos]:
current_codon = current_seq[pos]
current_codon_idx = codons_nonstop.index(current_codon)
current_TR = current_data[experiment,pos]
codon_TR[current_codon_idx].append(current_TR)
codon_counts[experiment, current_codon_idx] += 1
if current_mm[pos] and current_mm[pos+1]:
current_codon_pair = current_seq[pos] + current_seq[pos+1]
current_codon_pair_idx = codon_duplets.index(current_codon_pair)
current_pair_TR = (float(current_data[experiment, pos]) + float(current_data[experiment, pos+1]) )/2.
codon2_TR[current_codon_pair_idx].append(current_pair_TR)
codon_duplet_counts[experiment, current_codon_pair_idx] += 1
if current_codon_pair in good_inhibitory:
TR2_raw_df.loc[len(TR2_raw_df)] = (current_codon_pair, 'ind', current_pair_TR)
else:
counter_cp += 1
if counter_cp % 10 == 0: # thin out 1 in 10 to reduce file size -> thin out more for faster run time!
TR2_raw_df.loc[len(TR2_raw_df)] = ('non', 'ind', current_pair_TR)
TR_mean = [ np.around( np.mean(np.array(codon_TR[x])), 5) for x in range( len(codons_nonstop) ) ]
TR_median = [ np.around( np.median(np.array(codon_TR[x])), 5) for x in range( len(codons_nonstop) ) ]
TR_df.loc[experiment] = TR_mean
TR2_mean = [ np.around( np.mean(np.array(codon2_TR[x])), 3) for x in range( len(codon_duplets) ) ]
TR2_df.loc[experiment] = TR2_mean
TR_df.to_csv("../data/figures/figure2/codon_rates.txt", header=True, index=False, sep='\t')
TR2_df.to_csv("../data/figures/figure2/codon_duplets_rates.txt", header=True, index=False, sep='\t')
TR2_raw_df.to_csv("../data/figures/figure2/codonpair_rates_raw10_ind.txt", header=True, index=False, sep='\t')
np.savetxt("../data/figures/figure2/codon_counts.txt", codon_counts, fmt='%i')
np.savetxt("../data/figures/figure2/codon_duplet_counts.txt", codon_duplet_counts, fmt='%i')
return TR_df
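# Usage sketch (assumption: shapes are illustrative only):
#   data_scikit = {orf: np.ndarray of shape (20, n_codons)}            # per-experiment ribosome densities
#   data_mm     = {orf: np.ndarray of shape (n_codons,), dtype=bool}   # positions to keep
#   codon_seq   = {orf: list of codon strings}
#   TR_df = codon_translationrates_indprofiles(data_scikit, data_mm, codon_seq)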
#--------------------------------------------------------------------------------------------
def codon_translationrates_consensus(data_consensus, data_mean, data_mm, codon_seq):
"""
extracts distributions of codon translation rates from scikit ribo data
scikit_data: dictionary of scikit_data per gene with profiles of each experiment
codon_sequence: dictionary of mRNA sequence in codons
"""
trim = 20 # omit first and last 20 codons
list_orfs = list( data_consensus.keys() )
codon_TR = [[] for i in range( len(codons_nonstop) )]
codon_TR_naive = [[] for i in range( len(codons_nonstop) )]
codon_duplets = [(x+y) for x in codons_nonstop for y in codons_nonstop]
codon2_TR = [[] for i in range( len(codon_duplets) )]
codon2_TR_naive = [[] for i in range( len(codon_duplets) )]
    TR2_cons_df = pd.DataFrame(columns=['codonpair', 'class', 'RD'])
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
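    # 0b00101101 -> bits 0, 2, 3 and 5 are set, so those rows stay valid and rows 1, 4 become null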
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
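        # masked-out entries were overwritten with the sentinel `na`, so the host-side
        # reference matches the matrix produced after fillna(na) below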
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# numerical columns are upcasted to float in cudf.DataFrame.to_pandas()
# casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
    else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
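# Round-tripping through Arrow should preserve or drop the index consistently
# with pandas, depending on preserve_index.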
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string can differ between runs, which sometimes makes
    # enc_with_name_arr and enc_arr come out identical, and there is no
    # reliable way to pin the hash of a string. Using an integer name gives a
    # constant value back from hash().
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
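# Checks that boolean-mask filtering shares a single index buffer across all
# columns and leaves GPU memory usage unchanged.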
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
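# A boolean DataFrame mask with a different shape should align on labels as in
# pandas; unmatched positions become null (compared here via fillna).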
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
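# digitize should match np.digitize for sorted, unique bins, whether the bins
# are passed as a host array or as a cudf.Series.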
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
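# head()/tail() should be equivalent to the corresponding positional slices
# for both DataFrame and Series, including negative and zero counts.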
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
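# set_index is exercised with single labels, lists of labels, external
# Index/Series/MultiIndex objects, and mixes of both, combined with the
# drop/append/inplace keywords.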
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
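# The reindex tests below cover positional labels, the labels=/index=/columns=
# keywords, and both integer and string axis specifications.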
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only added ignore_index to sort_index in 1.0, so it is emulated
    # below via reset_index
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
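# select_dtypes should accept strings, numpy dtypes, and include/exclude
# lists, and raise the same errors as pandas for invalid or conflicting
# arguments.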
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
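# shift() and diff() results are normalized (fillna / astype) before the
# comparison so integer promotion in pandas does not cause dtype mismatches.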
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
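# round() accepts a scalar, a Series, or a dict of per-column decimals; the
# second pass re-runs the check with injected NaNs to confirm the null mask is
# preserved.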
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
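# all()/any() are compared against pandas for numeric, boolean and
# null-containing inputs; object-dtype columns and unsupported keyword
# combinations are expected to raise NotImplementedError.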
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
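# The isin tests below compare cudf against pandas for numeric, datetime,
# string and categorical Series, for Index/MultiIndex, and for DataFrame
# inputs, including values lists that contain nulls.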
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
if str(e) == "Lengths must match.":
pytest.xfail(
not PANDAS_GE_110,
"https://github.com/pandas-dev/pandas/issues/34256",
)
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
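    # The assertions below mirror pandas' constructor protocol: _constructor
    # returns the object's own class, _constructor_sliced maps DataFrame ->
    # Series, _constructor_expanddim maps Series -> DataFrame, and the two
    # inverse accessors checked last are expected to raise NotImplementedError.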
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
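    # Worked example for the expectations above: row 3 mixes 13 and 2234, so
    # mean = 1123.5 and the population variance (ddof=0) is
    # ((13 - 1123.5)**2 + (2234 - 1123.5)**2) / 2 = 1110.5**2 = 1233210.25,
    # giving std = 1110.5; rows containing a null stay null with skipna=False.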
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
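        # (a RangeIndex is stored lazily as start/stop/step rather than as a
        # materialized column, so it reports no per-row memory)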
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
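    # A dictionary-encoded categorical stores the unique categories once plus
    # one integer code per row, so deep memory usage is the sum of those two
    # buffers.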
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
    expect = rows * 16  # source columns: 2 index columns x 8-byte values
    expect += rows * 16  # codes: 2 code columns x 8 bytes per row
    expect += 3 * 8  # level 0: 3 unique int64 values
    expect += 3 * 8  # level 1: 3 unique float64 values
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas fills unmatched positions with NaN and typecasts to float64 when
    # there are missing values on alignment, so both sides are cast to
    # float64 for the equality comparison.
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
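    # s2 holds the same values as s1 under a permuted index; isclose aligns
    # on index labels before comparing, so every position is True here, while
    # the later cases show that a label missing from one side compares False.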
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()],
[
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
],
[
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
],
[pd.DataFrame([]), pd.DataFrame([], index=[100])],
[
pd.DataFrame([]),
pd.DataFrame([], index=[100]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
        pd.DataFrame({"l": [10]}),
####################################################################
# EXPLORATORY DATA ANALYSIS & DATA CLEANING of Hepatitis Data
# Author: <NAME> (<EMAIL>)
# Date: 20th April, 2021
####################################################################
#######################
## Import Libraries ##
#######################
# The iconic trio
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams, font_manager
# Set plots style
plt.style.use('fivethirtyeight')
# Import seaborn for style and beauty
import seaborn as sns
# Set context
sns.set_context('paper')
# custom
from custom import helper
# Ignore warnings
import warnings
warnings.filterwarnings('ignore')
##############################
## Load Dataset ##
##############################
raw_data = pd.read_csv("data/hepatitis.data", header=None)  # the raw file has no header row
print('Data Loaded Successfully!')
#############################################################
## Name the Column Heads of the Dataset Appropriately ##
#############################################################
column_heads = [
"Class",
"AGE",
"SEX",
"STEROID",
"ANTIVIRALS",
"FATIGUE",
"MALAISE",
"ANOREXIA",
"LIVER BIG",
"LIVER FIRM",
"SPLEEN PALPABLE",
"SPIDERS",
"ASCITES",
"VARICES",
"BILIRUBIN",
"ALK PHOSPHATE",
"SGOT",
"ALBUMIN",
"PROTIME",
"HISTOLOGY"
]
print(f'Total number of columns: {len(column_heads)}')
## Assign column head names to the dataset
raw_data.columns = column_heads
## Convert column head names to snakecase ##
raw_data.columns = raw_data.columns.str.lower().str.replace(' ', '_')
## Create a Copy of the Dataset
df = raw_data.copy()
## Create Folders to keep figures and tables
helper.create_folder('./csv_tables/')
helper.create_folder('./figures/')
####################################
## Treat Missing Values ##
####################################
### Missing Attribute Values: (indicated by "`?`")
# Replace `?` with `NaNs`
df.replace('?', np.nan, inplace=True)
# Get missing values
missing_values = helper.missing_data(df)
missing_values.to_csv("csv_tables/missing_values.csv", index=True)
print('Missing Values Info Saved Successfully!')
### Check Total Number of Missing Values
total_number_of_missing_values = missing_values.loc['Total', :].sum()
print(f'Total number of missing values: {total_number_of_missing_values}')
### Get Column Heads with Missing Values
columns_with_missing_values = list(missing_values.columns[missing_values.loc['Total', :] > 0])
### Get the Median Value of Columns with Missing Values
median_values = df[columns_with_missing_values].median()
### Replace Missing Values with Median Values
df.fillna(value=median_values, inplace=True)
print('Missing Values Treated!')
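# Illustrative effect of the two steps above (values are hypothetical): a column such as
# 'protime' containing ['80', '?', '61'] first becomes ['80', NaN, '61'] after the replace
# step, and the NaN is then filled with that column's median.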
###############################################
## Get Column Names and their Data Types ##
###############################################
dataset_columns = pd.DataFrame({'column_names':list(df.columns)})
data_types = []
for column in df.columns:
dtype = str(df[column].dtypes)
data_types.append(dtype)
dataset_columns['data_type'] = data_types
dataset_columns.to_csv("csv_tables/column_heads_of_dataset.csv", index=True)
###############################################################
## Treat Datatypes of the Column Heads ##
###############################################################
### Convert Columns with Integer Values to the `int` type
# Get all Columns of type `object`
object_columns_to_convert_to_ints = df.columns[df.dtypes == 'object']
# Columns to Omit
columns_to_omit = ['bilirubin', 'albumin']
#### Drop Columns to omit from the list
object_columns_to_convert_to_ints = object_columns_to_convert_to_ints.drop(columns_to_omit)
#### Convert Columns with `Integer` Values to the `int` type
df[object_columns_to_convert_to_ints] = df[object_columns_to_convert_to_ints].astype(int)
#### Convert Columns with `Float` Values to the `float` type
object_columns_to_convert_to_floats = ['bilirubin', 'albumin']
df[object_columns_to_convert_to_floats] = df[object_columns_to_convert_to_floats].astype(float)
###############################################
## Check Duplicated Values ##
###############################################
print(f'The number of data observations is: {len(df)}')
total_number_of_duplicated_values = df.duplicated().sum()
print(f'Total number of duplicated values: {total_number_of_duplicated_values}')
##########################################################
## Create Another copy of the Dataset ##
##########################################################
treated_df = df.copy()
#######################################################
## Transform Categorical Columns ##
#######################################################
# Convert the "class" column head to object type
treated_df['class'].replace(
{
1: 'Die',
2: 'Live',
}, inplace=True
)
# Convert the "sex" column head to object type
treated_df['sex'].replace(
{
1: 'Male',
2: 'Female',
}, inplace=True
)
# Columns with binary ("yes" and "no") values
yes_no_columns = [
'steroid',
'antivirals',
'fatigue',
'malaise',
'anorexia',
'liver_big',
'liver_firm',
'spleen_palpable',
'spiders',
'ascites',
'varices',
'histology',
]
# Convert binary column heads to object type
for column in yes_no_columns:
treated_df[column].replace(
{
1: 'No',
2: 'Yes',
}, inplace=True
)
###################################################################
## Get Statistical Summary of Full Dataset ##
###################################################################
data_statistical_summary = df.describe(include='all')
data_statistical_summary.to_csv("csv_tables/data_statistical_summary.csv", index=True)
####################################################################
## Statistical Summary of Categorical Features ##
####################################################################
statistical_summary_of_categorical_columns = treated_df.describe(include=[object])
statistical_summary_of_categorical_columns.to_csv("csv_tables/statistical_summary_of_categorical_columns.csv", index=True)
##################################################################
## Statistical Summary of Numerical Features ##
##################################################################
statistical_summary_of_numerical_columns = treated_df.describe(include=[np.number])
statistical_summary_of_numerical_columns.to_csv("csv_tables/statistical_summary_of_numerical_columns.csv", index=True)
####################################################################
## Summary of Individual Categorical Columns ##
####################################################################
categorical_columns = treated_df.select_dtypes(include='object').columns.values.tolist()
print('Saving column(s) summary')
for column in categorical_columns:
summary_df = treated_df[column].value_counts().reset_index()
summary_df.columns = [column, 'frequency']
percentage = (treated_df[column].value_counts() / treated_df[column].count() * 100).values.tolist()
summary_df['percentage'] = percentage
total_df = pd.DataFrame(summary_df.sum(axis=0).to_dict(), index=[0])
total_df.loc[0, column] = 'Total'
final_summary_df = pd.concat([summary_df, total_df], axis=0, ignore_index=True)
final_summary_df.to_csv(f"csv_tables/summary_table_of_{column}.csv", index=False)
print('*' * 10)
###################################################
## Statistical Summary Per Gender ##
###################################################
statistical_summary_per_gender = treated_df.groupby('sex').describe(include='all')
statistical_summary_per_gender = statistical_summary_per_gender.T
statistical_summary_per_gender.to_csv("csv_tables/statistical_summary_per_gender.csv", index=True)
#################################################################################
## Statistical Summary of Numerical Features per Gender ##
#################################################################################
statistical_summary_of_numerical_columns_per_gender = treated_df.groupby('sex').describe(include=[np.number])
statistical_summary_of_numerical_columns_per_gender = statistical_summary_of_numerical_columns_per_gender.T
statistical_summary_of_numerical_columns_per_gender.to_csv("csv_tables/statistical_summary_of_numerical_columns_per_gender.csv", index=True)
###################################################################################
## Statistical Summary of Categorical Features per Gender ##
###################################################################################
statistical_summary_of_categorical_columns_per_gender = treated_df.groupby('sex').describe(include=[object])
statistical_summary_of_categorical_columns_per_gender = statistical_summary_of_categorical_columns_per_gender.T
statistical_summary_of_categorical_columns_per_gender.to_csv("csv_tables/statistical_summary_of_categorical_columns_per_gender.csv", index=True)
print('All Statistical Summary Info has been Saved Successfully!')
#################################################################################################
## Replace `1s` and `2s` in the Categorical Columns with `0s` and `1s` ##
#################################################################################################
cols = [
'class',
'sex',
'steroid',
'antivirals',
'fatigue',
'malaise',
'anorexia',
'liver_big',
'liver_firm',
'spleen_palpable',
'spiders',
'ascites',
'varices',
'histology',
]
for col in cols:
df[col].replace(
{
1:0,
2:1,
}, inplace=True
)
####################################
## Check Outlier Info ##
####################################
columns_to_check_for_outliers = ['age', 'bilirubin', 'alk_phosphate', 'sgot', 'albumin', 'protime']
outliers = helper.outlier_info(df[columns_to_check_for_outliers])
outliers.to_csv("csv_tables/outlier_info.csv", index=True)
### Check Total Number of Outliers
total_number_of_outliers = outliers.loc['Number of Outliers', :].sum()
print(f'Total number of outliers is: {total_number_of_outliers}')
#########################
## Detect Outliers ##
#########################
for i, column in enumerate(df[columns_to_check_for_outliers]):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 8), dpi=300, clear=False)
df[column].hist(bins=10, ax=ax1)
ax = sns.boxplot(x=column, data=df, ax=ax2, color='deepskyblue')
ax = sns.stripplot(x=column, data=df, color="maroon", jitter=0.2, size=4.5)
ax1.set_title('Distribution of ' + column, fontsize=22)
ax2.set_title('Boxplot of ' + column, fontsize=22)
plt.setp(ax1.get_xticklabels(), fontsize=15)
plt.setp(ax1.get_yticklabels(), fontsize=15)
plt.setp(ax2.get_xticklabels(), fontsize=15)
ax2.set_xlabel(ax2.get_xlabel(), fontsize=18)
plt.grid(b=True, axis='both', color='white', linewidth=0.5)
fig.tight_layout()
plt.savefig(f"figures/Outlier{i}.png", dpi=600, transparent=True)
print('Outlier Info Has Been Saved Successfully!')
##################################################
## Correlation of Dataset Features ##
##################################################
# Get correlation between the target variable and the data features
correlation_with_target_variable = df.select_dtypes(np.number).corr()['class'].sort_values(ascending=False)
correlation_with_target_variable = pd.DataFrame(correlation_with_target_variable)
import pandas as pd
weekdays = {0: 'monday', 1: 'tuesday', 2: 'wednesday', 3: 'thursday', 4: 'friday', 5: 'saturday', 6: 'sunday'}
periods = {'days':7, 'hours': 168, 'minutes': 10080}
week_indices = {'days': [f'{weekdays[x]}' for x in range(7)],
'hours': [f'{weekdays[x//24]}-{str(x%24).zfill(2)}:00' for x in range(168)],
'minutes': [f'{weekdays[x//1440]}-{str(x//60).zfill(2)}:{str(x%60).zfill(2)}' for x in range(10080)]}
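# A few sample labels produced by the mappings above (directly derivable from them):
#   week_indices['hours'][0]    -> 'monday-00:00'
#   week_indices['hours'][25]   -> 'tuesday-01:00'
#   week_indices['minutes'][61] -> 'monday-01:01'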
def timeseries_to_week_lists(timeseries: pd.Series, resampled_at: str = 'hours'):
    if not isinstance(timeseries, pd.Series):
        raise ValueError('Only functions on a pandas Series object.')
    if resampled_at not in periods:
        raise ValueError(f"Unsupported resampling period '{resampled_at}'; expected one of {list(periods)}.")
    parsed_periods = periods[resampled_at]
weeks = []
# Auto-add the first-item to our current week...
# so that we avoid any issues with a week starting exactly at sunday-midnight
    current_week = [timeseries.iloc[0]]
# Iterate through the remaining periods and add them into our nested lists as appropriate
    for index_date, signal in timeseries.iloc[1:].items():
if index_date.weekday() == 0 and index_date.hour == 0 and index_date.minute == 0:
# Found the end of one week make sure it was a complete week
while len(current_week) < parsed_periods:
current_week.insert(0, None)
# add that completed list to our master list-of-lists
weeks.append(current_week)
# start the new week off
current_week = [signal]
else:
current_week.append(signal)
# Make sure we capture anything left of the final week...
while len(current_week) < parsed_periods:
current_week.append(None)
# add that final week into the master
weeks.append(current_week)
return weeks
def split_weeks(timeseries: pd.Series, resampled_at: str = 'hours') -> pd.DataFrame:
week_lists = timeseries_to_week_lists(timeseries, resampled_at)
final_index = week_indices[resampled_at]
return pd.DataFrame({f'week_{x}': row_data for x, row_data in enumerate(week_lists)}, index=final_index)
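# Illustrative usage (hypothetical data): with an hourly-resampled series starting on a Monday,
#   hourly = pd.Series(range(400), index=pd.date_range('2021-01-04', periods=400, freq='H'))
#   wide = split_weeks(hourly, resampled_at='hours')
# `wide` then has 168 rows ('monday-00:00' ... 'sunday-23:00') and one 'week_<n>' column per
# (possibly partial) week, padded with None where a week is incomplete.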
def split_overlapping_weeks(timeseries: pd.Series, additional_periods=12, resampled_at: str = 'hours') -> pd.DataFrame:
weeks = timeseries_to_week_lists(timeseries, resampled_at)
final_index = week_indices[resampled_at]
final_index = [str('-') + x for x in final_index[-additional_periods:]] + final_index + \
[str('+') + x for x in final_index[:additional_periods]]
# empty dataframe for the final result
    result_df = pd.DataFrame(index=final_index)
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score, accuracy_score
import joblib  # sklearn.externals.joblib has been removed from scikit-learn; import joblib directly
import os
def log_loss(predictions,actual,eps=1e-15):
'''take an array of prediction probabilities (clipped to avoid undefined values) and measures accuracy while
also factoring for confidence'''
#assert (max(predictions)<=1 and min(predictions)>=0), 'Please make sure to use predict_proba'
p_clipped = np.clip(predictions,eps,1-eps)
loss = -1 * np.mean((actual * np.log(p_clipped)) + ((1-actual) * np.log(1-p_clipped)))
return loss
def sigmoid(array):
sig = 1 / (1 + np.exp(-array))
return sig
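# Quick sanity check of the helpers above (values rounded; doctest-style, not executed):
#   log_loss(np.array([0.9, 0.1]), np.array([1, 0]))  ->  ~0.105  (confident and correct)
#   log_loss(np.array([0.1, 0.9]), np.array([1, 0]))  ->  ~2.303  (confident and wrong)
#   sigmoid(np.array([0.0]))                          ->  array([0.5])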
class BinaryClassifier:
def __init__(self,regularization=None):
'''initializing the object with the option to select regularization
Regularization will be a dict with type (ridge/lasso) and lambda value'''
if regularization is None:
self.penalty_type = None
self.penalty_lambda_ = 0
else:
self.penalty_type = list(regularization.keys())[0]
self.penalty_lambda_ = regularization.get(self.penalty_type)
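    # Example construction (lambda values below are hypothetical): `regularization` is a
    # one-entry dict mapping the penalty type to its strength, e.g.
    #   BinaryClassifier(regularization={'ridge': 0.1})
    #   BinaryClassifier(regularization={'lasso': 0.01})
    #   BinaryClassifier()  # no penalty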
def _gradient_descent(self, X, y, lr=.1, pandas=False, full_history=False, weights=None, early_stopping=True):
        if pandas or (isinstance(X, pd.DataFrame) and isinstance(y, pd.DataFrame)):
            # grab the column labels before converting to plain numpy arrays
            Xnames = X.columns
            ynames = y.columns
            X = X.values
            y = y.values
        else:
            Xnames = [i for i in range(X.shape[1])]
'''learning rate for gradient descent algorithim'''
self.lr = lr
m = len(X)
n_features = X.shape[1]
'''creating the weights, which will typically be all zeros'''
if weights is None:
self.init_weights = np.zeros(n_features)
else:
self.init_weights = weights
        if self.penalty_type == 'lasso':
            reg_loss = (self.penalty_lambda_ / m)
            reg_gradient = (-2 * self.penalty_lambda_ / m)
        elif self.penalty_type == 'ridge':
            reg_loss = (self.penalty_lambda_ / 2)
            reg_gradient = (-2 * self.penalty_lambda_ / m)
        else:
            reg_loss = 0
            reg_gradient = 0
weights_list = []
scores_list = []
weights = self.init_weights
for i in range(5000):
            if self.penalty_type == 'ridge':
                gradient_suffix = reg_gradient * weights
                loss_suffix = np.sum(reg_loss * np.square(weights) / m)
            elif self.penalty_type == 'lasso':
                gradient_suffix = reg_gradient * np.where(weights == 0, 0, np.where(weights > 0, 1, -1))
                loss_suffix = np.sum(reg_loss * np.abs(weights) / m)
else:
gradient_suffix = 0
loss_suffix = 0
lr = self.lr
'''p = prediction probabilities (0 < p < 1)'''
p = sigmoid(np.dot(X, weights))
error = p - y
gradient = (np.dot(X.T,error) * lr) /m
weights = weights - gradient + gradient_suffix
p = sigmoid(np.dot(X, weights))
preds = np.round(p)
loss = log_loss(p, y) + loss_suffix
auc = roc_auc_score(y, p)
acc = accuracy_score(y,preds)
weights_list.append([*weights])
scores_list.append([auc,loss,acc])
'''Early Stopping: if AUC does not change more than 0.01%, then break'''
if early_stopping:
if i >50:
if abs((scores_list[i][-3] - scores_list[i-50][-3]) / scores_list[i][-3]) < 0.0001:
break
        scores_df = pd.DataFrame(scores_list, columns=['auc', 'loss', 'acc'])
import numpy as np
import pandas as pd
import patsy
FILE_PATH_CENSUS80_EXTRACT = "data/QOB.txt"
FILE_PATH_FULL_CENSUS7080 = "data/NEW7080.dta"
def get_df_census80():
cols = [0, 1, 3, 4, 5, 8, 9, 10, 11, 12, 15, 16, 17, 18, 19, 20, 23, 24, 26]
cols_names = [
"AGE",
"AGEQ",
"EDUC",
"ENOCENT",
"ESOCENT",
"LWKLYWGE",
"MARRIED",
"MIDATL",
"MT",
"NEWENG",
"CENSUS",
"STATE",
"QOB",
"RACE",
"SMSA",
"SOATL",
"WNOCENT",
"WSOCENT",
"YOB",
]
df = pd.read_csv(FILE_PATH_CENSUS80_EXTRACT, sep=" ", usecols=cols, names=cols_names)
# correct AGEQ
df.loc[df["CENSUS"] == 80, "AGEQ"] = df["AGEQ"] - 1900
return df
def get_df_census70():
cols = [
"v1",
"v2",
"v4",
"v5",
"v6",
"v9",
"v10",
"v11",
"v12",
"v13",
"v16",
"v17",
"v18",
"v19",
"v20",
"v21",
"v24",
"v25",
"v27",
]
cols_names = [
"AGE",
"AGEQ",
"EDUC",
"ENOCENT",
"ESOCENT",
"LWKLYWGE",
"MARRIED",
"MIDATL",
"MT",
"NEWENG",
"CENSUS",
"STATE",
"QOB",
"RACE",
"SMSA",
"SOATL",
"WNOCENT",
"WSOCENT",
"YOB",
]
df = pd.read_stata(FILE_PATH_FULL_CENSUS7080, columns=cols)
df = df.rename(columns=dict(zip(cols, cols_names)))
return df.loc[df["CENSUS"] == 70]
def get_df_census70_census_80():
cols = [
"v1",
"v2",
"v4",
"v5",
"v6",
"v9",
"v10",
"v11",
"v12",
"v13",
"v16",
"v17",
"v18",
"v19",
"v20",
"v21",
"v24",
"v25",
"v27",
]
cols_names = [
"AGE",
"AGEQ",
"EDUC",
"ENOCENT",
"ESOCENT",
"LWKLYWGE",
"MARRIED",
"MIDATL",
"MT",
"NEWENG",
"CENSUS",
"STATE",
"QOB",
"RACE",
"SMSA",
"SOATL",
"WNOCENT",
"WSOCENT",
"YOB",
]
df = pd.read_stata(FILE_PATH_FULL_CENSUS7080, columns=cols)
df = df.rename(columns=dict(zip(cols, cols_names)))
return df
def prepare_census_data(
df,
const=True,
qob=True,
yob=True,
age=True,
state_of_birth=False,
qob_x_yob=False,
qob_x_state=False,
):
if const:
df = add_constant(df)
if qob or qob_x_yob or qob_x_state:
df = add_quarter_of_birth_dummies(df)
if yob or qob_x_yob:
df = add_year_of_birth_dummies(df)
if age:
df = add_age_squared(df)
if state_of_birth or qob_x_state:
df = add_state_of_birth_dummies(df)
if qob_x_yob:
df = add_qob_yob_interactions(df)
if qob_x_state:
df = add_qob_state_interactions(df, qob_x_state)
return df
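# Illustrative usage (hypothetical pipeline step; all names are helpers defined in this module):
#   df80 = prepare_census_data(get_df_census80(), const=True, qob=True, yob=True, age=True)
#   exog_cols = get_constant_name() + get_quarter_of_birth_dummy_names()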
def add_constant(df):
df["CONST"] = 1
df["CONST"] = df["CONST"].astype(np.uint8)
return df
def get_constant_name():
return ["CONST"]
def add_quarter_of_birth_dummies(df):
return pd.concat((df, pd.get_dummies(df["QOB"], prefix="DUMMY_QOB")), axis=1)
def get_quarter_of_birth_dummy_names(start=1, end=3):
return [f"DUMMY_QOB_{j}" for j in range(start, end + 1)]
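# e.g. get_quarter_of_birth_dummy_names() -> ['DUMMY_QOB_1', 'DUMMY_QOB_2', 'DUMMY_QOB_3']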
def add_year_of_birth_dummies(df):
    return pd.concat((df, pd.get_dummies(df["YOB"] % 10, prefix="DUMMY_YOB")), axis=1)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
scaled = pd.read_csv("scaledParams.txt")
# -*- coding: utf-8 -*-
"""Runs the link prediction analysis to assess new disease-target associations."""
# Part5
from collections import defaultdict
from copy import deepcopy
import itertools as itt
import logging
import multiprocessing as mp
import os
from time import time
from typing import List, Tuple
import pandas as pd
from guiltytargets.constants import gat2vec_config
from guiltytargets.ppi_network_annotation import parse_dge
from guiltytargets_phewas.constants import *
from guiltytargets_phewas.utils import timed_main_run
from guiltytargets_phewas.target_repositioning import generate_heterogeneous_network, predict_links
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('link_prediction2.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
logger.addHandler(fh)
assert os.path.isdir(DATA_BASE_DIR), "Update your data_basedir folder for this environment."
# Paths
snap_path = os.path.join(DATA_BASE_DIR, 'SNAP')
chg_file = os.path.join(snap_path, 'ChG-Miner_miner-chem-gene.tsv.gz')
dch_path = os.path.join(snap_path, 'DCh-Miner_miner-disease-chemical.tsv.gz')
dg_path = os.path.join(snap_path, 'DG-AssocMiner_miner-disease-gene.tsv.gz')
ppi_path = os.path.join(DATA_BASE_DIR, 'STRING', 'string_entrez.edgelist')
targets_file = os.path.join(DATA_BASE_DIR, 'OpenTargets', 'ad', 'ot_symbol.txt')
g2v_path = os.path.join(DATA_BASE_DIR, 'gat2vec_files', 'linkprediction2')
phewas_path = os.path.join(DATA_BASE_DIR, 'phewas_catalog', 'phewas_symbol.txt')
dge_base_path = os.path.join(DATA_BASE_DIR, 'DGE')
def dataset_to_disease_abv(dataset: str) -> str:
return dataset if dataset in NON_AD_DGE_DATASETS else 'ad'
def dge_file(dge_code: str) -> str:
file = 'DifferentialExpression' + ('.csv' if dataset_to_disease_abv(dge_code) == 'ad' else '.tsv')
return os.path.join(dge_base_path, dge_code, file)
disease_identifiers = {
'ad': 'DOID:10652',
'lc': 'DOID:5082',
'ipf': 'DOID:0050156',
'ms': 'DOID:2377',
'aml': 'DOID:9119',
'hc': 'MESH:D006528', # DOID:0070328, DOID:684 or DOID:5005
}
def mp_predict_links(
num_walks: int,
walk_length: int,
dimension: int,
window_size: int
) -> List[Tuple[float, float]]:
    pool = mp.Pool(mp.cpu_count())
    # pool.apply blocks until each call finishes, so the ten runs would execute
    # sequentially; apply_async submits them all and the results are collected afterwards.
    async_results = [
        pool.apply_async(
            predict_links,
            args=(
                g2v_path,
                num_walks,
                walk_length,
                dimension,
                window_size,
            ),
        )
        for _ in range(10)
    ]
    pool.close()
    pool.join()
    return [res.get() for res in async_results]
def extract_results(results_dict, lp_results, dataset, param, evaluation):
for i, (auc, aps) in enumerate(lp_results):
results_dict['tr'].append(i)
results_dict['auc'].append(auc)
results_dict['aps'].append(aps)
results_dict['dge'].append(dataset)
results_dict['eval'].append(evaluation)
results_dict['param'].append(param)
def main():
# natural order: disease <-> target <-> chem
# disease - chem is what is desired
# disease - target is what is desired
# http://www.disgenet.org/static/disgenet_ap1/files/downloads/curated_gene_disease_associations.tsv.gz
results_dict = defaultdict(list)
h_network1 = generate_heterogeneous_network(
ppi_path,
dg_path,
dch_path,
chg_file
)
for use_dge, dataset in itt.product([True], AD_DGE_DATASETS + NON_AD_DGE_DATASETS):
disease_abv = dataset_to_disease_abv(dataset)
do_id = disease_identifiers[disease_abv]
dge_params = dge_params_ad if disease_abv == 'ad' else dge_params_dis
logger.debug(f'Running for disease {disease_abv}, with the dataset {dataset}, using the id {do_id}')
try:
gene_list = parse_dge(
dge_path=dge_file(dataset),
entrez_id_header=dge_params['id'],
log2_fold_change_header=dge_params['l2f'],
adj_p_header=dge_params['adjp'],
entrez_delimiter=split_char,
base_mean_header=dge_params['mean'],
)
h_network = deepcopy(h_network1)
h_network.set_up_network(genes=gene_list)
h_network.write_gat2vec_input_files(
home_dir=g2v_path,
disease_id=do_id,
filter_pleiotropic_targets=True
)
# num_walks = gat2vec_config.num_walks
walk_length = gat2vec_config.walk_length
dimension = gat2vec_config.dimension
window_size = gat2vec_config.window_size
param = 'nw'
for num_walks in [6, 10, 20, 40, 80]:
start = time()
lp_results = mp_predict_links(num_walks, walk_length, dimension, window_size)
extract_results(results_dict, lp_results, dataset, param, num_walks)
logger.info(f'Runtime for num_walks = {num_walks}: {time() - start}s')
# best result from num_walks
num_walks = gat2vec_config.num_walks
param = 'wl'
for walk_length in [20, 40, 80, 120, 160]:
start = time()
lp_results = mp_predict_links(num_walks, walk_length, dimension, window_size)
extract_results(results_dict, lp_results, dataset, param, walk_length)
logger.info(f'Runtime for walk_length = {walk_length}: {time() - start}s')
            # best result from walk_length
walk_length = gat2vec_config.walk_length
param = 'ws'
for window_size in [3, 5, 7, 10, 20, 40]:
start = time()
lp_results = mp_predict_links(num_walks, walk_length, dimension, window_size)
extract_results(results_dict, lp_results, dataset, param, window_size)
logger.info(f'Runtime for window_size = {window_size}: {time() - start}s')
            # best result from window_size
window_size = gat2vec_config.window_size
param = 'd'
for dimension in [32, 64, 128, 256]:
start = time()
lp_results = mp_predict_links(num_walks, walk_length, dimension, window_size)
extract_results(results_dict, lp_results, dataset, param, dimension)
logger.info(f'Runtime for dimension = {dimension}: {time() - start}s')
except ValueError:
logger.error(f'Dataset {dataset} ({do_id}) not found in the graph.')
    results = pd.DataFrame(results_dict)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
            363: pd.Timestamp("2013-04-30 00:00:00"),
from UI_dist.Des_UI import Ui_MainWindow
from UI_dist.customize import Ui_Dialog
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from PySide2.QtCore import QSettings
import pandas as pd
import os, re, math
import webbrowser
from threading import Thread
class MainWindow(Ui_MainWindow, QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
        # the interface class is defined via the imported .ui-generated module
        # initialize the interface
self.setupUi(self)
self.setWindowIcon(QIcon("./icon/work.ico"))
self.app_data = QSettings('config.ini', QSettings.IniFormat)
        self.app_data.setIniCodec('utf-8')  # set the ini file encoding to UTF-8
self.cwd = self.app_data.value("SETUP/PATH")
self.progressBar.setMaximum(100)
self.progressBar.setValue(0)
self.student_table = {}
self.Var_Init()
        self.Button_Init()  # initialize signal connections
        # configure the table widget
        self.tableWidget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)  # stretch columns to fit
        self.tableWidget.horizontalHeader().setSectionResizeMode(0, QHeaderView.Interactive)  # only the first column is manually resizable
        # self.tableWidget.verticalHeader().setVisible(False)  # hide the row header
def Var_Init(self):
self.format_list = ["学号-姓名-文件名称(默认)", "姓名-学号-文件名称", "文件名称-学号-姓名", "文件名称-姓名-学号", "班级-学号-姓名-文件名称",
"自定义(班上有同名者,慎用)"]
dir_set = set() if self.app_data.value("SETUP/DIR_PATH") is None else self.app_data.value("SETUP/DIR_PATH")
# print("初始化set---",dir_set,type(dir_set))
data_path = self.app_data.value("SETUP/DATA_PATH")
number = self.app_data.value("SETUP/FOUR_ID")
FileNames = self.app_data.value("SETUP/CHANGE_FIlENAME")
ClassName = self.app_data.value("SETUP/CLASSNAME")
Char_format = self.app_data.value("SETUP/SIGNAL")
Format_list = [] if self.app_data.value("SETUP/FORMAT_LIST") is None else [self.app_data.value(
"SETUP/FORMAT_LIST")]
if len(Format_list) > len(self.format_list):
self.format_list = Format_list
self.dir_set = dir_set
self.lineEdit_2.setText(data_path)
self.number.setText(number)
self.charsplit.setText(Char_format)
self.ClassName.setText(ClassName)
self.FileNames.setText(FileNames)
self.data_path = self.lineEdit_2.text()
self.comboBox.addItems(self.dir_set)
self.comboBox_2.addItems(self.format_list)
self.btn_delete.setEnabled(False)
self.Download.setEnabled(False)
if len(self.dir_set) != 0:
self.comboBox.addItem(QIcon("./icon/clear.png"), "清除所有历史记录")
def Button_Init(self):
# self.radioButton.toggled.connect(lambda: self.Radio_Download(self.radioButton))
self.Change.clicked.connect(self.Change_Name) # begin change
self.select1.clicked.connect(self.Get_DataFilename)
self.select2.clicked.connect(self.Get_Filename)
self.GetData.clicked.connect(self.Read_Data)
self.Download.clicked.connect(self.Download_Incompete)
self.btn_delete.clicked.connect(self.Delete_options)
self.menu.triggered[QAction].connect(self.Other_menu)
self.menu_2.triggered[QAction].connect(self.About)
self.comboBox.activated[str].connect(self.clear_all)
self.comboBox_2.activated[str].connect(self.Customize)
self.comboBox_2.highlighted[str].connect(self.Set_Delete_btn)
self.Download.setToolTip("导出未交人员名单")
self.btn_delete.setToolTip("删除自定义选项")
def get_name_list(self, path):
data = pd.read_excel(path)
data['学号'] = data['学号'].astype('int64')
name_list = data[['姓名', '学号']]
name_list = dict(name_list.values.tolist())
name_list = dict([str(value), key] for key, value in name_list.items())
return name_list
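    # Illustrative sketch of what get_name_list returns (the file name and roster
    # entries below are hypothetical, not part of this project):
    #   self.get_name_list("roster.xlsx")
    #   -> {"20210001": "张三", "20210002": "李四"}
    # i.e. a dict keyed by the student ID (as a string) mapping to the student name,
    # built from the 学号 / 姓名 columns of the Excel sheet.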
def Get_DataFilename(self):
file_choose, filetype = QFileDialog.getOpenFileName(self, "选择学号姓名文件", self.cwd,
"Excel files (*.csv *.xlsx *.xls)")
self.data_path = file_choose
self.app_data.setValue("SETUP/PATH", file_choose)
self.cwd = file_choose
self.lineEdit_2.setText(file_choose)
def Get_Filename(self):
dir_choose = QFileDialog.getExistingDirectory(self, "选择修改文件夹", self.cwd)
self.app_data.value("SETUP/PATH", dir_choose)
self.comboBox.clear()
self.dir_set.add(dir_choose)
print(type(self.dir_set))
self.comboBox.addItems(self.dir_set)
self.comboBox.setCurrentText(dir_choose)
if len(self.dir_set) != 0:
self.comboBox.addItem(QIcon("./icon/clear.png"), "清除所有历史记录")
# def Radio_Download(self, btn):
# self.Download.setEnabled(btn.isChecked())
def Read_Data(self):
self.tableWidget.clear()
self.label.setText("仅展示前50条数据")
if self.data_path == "":
QMessageBox.critical(self, "错误", "请选择信息文件!", QMessageBox.Yes)
else:
self.student_table = self.get_name_list(self.data_path)
self.tableWidget.setRowCount(50)
self.tableWidget.setHorizontalHeaderLabels(['学号', '姓名'])
self.Download.setEnabled(False)
try:
for row, (id, name) in enumerate(self.student_table.items()):
self.tableWidget.setItem(row, 0, QTableWidgetItem(str(id)))
self.tableWidget.setItem(row, 1, QTableWidgetItem(str(name)))
except:
QMessageBox.information(self, "说明", "仅展示前50条数据")
def Change_Name(self):
# print(self.dir_set)
# print(type(self.dir_set))
# print(set(self.dir_set))
self.app_data.setValue("SETUP/DIR_PATH", self.dir_set)
self.app_data.setValue("SETUP/DATA_PATH", self.data_path)
self.app_data.setValue("SETUP/FOUR_ID", self.number.text())
self.app_data.setValue("SETUP/CHANGE_FIlENAME", self.FileNames.text())
self.app_data.setValue("SETUP/CLASSNAME", self.ClassName.text())
self.app_data.setValue("SETUP/FORMAT_LIST", self.format_list)
self.app_data.setValue("SETUP/SIGNAL", self.charsplit.text())
if self.number.text() == "":
QMessageBox.critical(self, "错误", "请填写学号前4位!")
elif self.student_table == {}:
QMessageBox.critical(self, "错误", "未读取数据!")
elif self.comboBox.currentText() == "":
QMessageBox.critical(self, "错误", "未选择修改文件路径!")
else:
self.to_rename(self.comboBox.currentText(), self.student_table, self.number.text(), self.FileNames.text(),
self.ClassName.text(), self.charsplit.text())
def to_rename(self, work_path, name_list, four_num, File_Name, ClassName, format_signal):
work_list = os.listdir(work_path)
res_error = "此文件学号格式错误或花名册中无此学号(可能此人存在多份文件)!"
self.textBrowser.setText("未出现异常错误....")
offer = []
for item in work_list:
res_name = os.path.splitext(item)
filename = res_name[0]
filetype = res_name[1]
try:
                st_number = re.findall('(' + four_num + r'\d+)', item)[0]
                # print("matched student ID ---", st_number)
right_name = self.setName(format_signal, st_number, name_list, ClassName, File_Name)
offer.append(st_number)
if item != right_name:
os.rename(os.path.join(work_path, item), os.path.join(work_path, right_name + filetype))
except:
print("大大大大")
res_error += "\n" + filename
if res_error != "此文件学号格式错误或花名册中无此学号(可能此人存在多份文件)!":
self.textBrowser.setText(res_error)
if self.radioButton.isChecked():
self.to_check(offer, name_list)
else:
self.progressBar.setValue(100)
# self.textBrowser.setText("未出现异常错误....")
QMessageBox.information(self, "完成", "修改完成,请到文件夹中查看")
def setName(self, format_signal, st_number, name_list, ClassName, File_Name):
right_format = self.comboBox_2.currentText().split("-")
format_dict = dict(zip(['学号', '姓名', '文件名称', '文件名称(默认)', '班级'],
[st_number, name_list[st_number], File_Name, File_Name, ClassName]))
ans = [format_dict[item] for item in right_format]
ans = list(filter(None, ans))
if format_signal == "":
return "-".join(ans)
else:
return format_signal.join(ans)
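    # Illustrative sketch of setName (all values below are hypothetical): with the
    # format combo box set to "学号-姓名-文件名称(默认)" and an empty separator,
    #   self.setName("", "20210001", {"20210001": "张三"}, "", "作业1")
    # would produce "20210001-张三-作业1"; a non-empty format_signal replaces "-" as
    # the separator between the parts.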
def to_check(self, offer_list, name_list):
self.tableWidget.clear()
olist = [int(i) for i in offer_list]
olist.sort()
id_list = [int(i) for i in name_list.keys()]
total = len(self.student_table)
self.no_offer = [i for i in id_list if i not in olist]
result = "有" + str(len(self.no_offer)) + "人未交!\n收齐作业进度为 %%%.2f" % (
(total - len(self.no_offer)) / float(total)) + "\n点击表格右上角的↓按钮可以导出未交名单"
self.tableWidget.setHorizontalHeaderLabels(['学号', '姓名'])
self.tableWidget.clear()
for i, item in enumerate(self.no_offer):
if i == 49:
break
self.tableWidget.setItem(i, 0, QTableWidgetItem(str(item)))
self.tableWidget.setItem(i, 1, QTableWidgetItem(name_list[str(item)]))
self.label.setText("有" + str(len(self.no_offer)) + "人未交!")
self.Download.setEnabled(True)
        pv = 100.0 * (len(name_list) - len(self.no_offer)) / float(len(name_list))  # percentage of submissions collected
        self.progressBar.setValue(math.ceil(pv))
QMessageBox.information(self, "结果", result)
def Download_Incompete(self):
        df = pd.DataFrame(columns=['学号', '姓名'])
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/Importing.ipynb (unless otherwise specified).
__all__ = ['read_file', 'extract_rawfile_unique_values', 'import_spectronaut_data', 'import_maxquant_data',
'convert_ap_mq_mod', 'import_alphapept_data', 'convert_diann_mq_mod', 'import_diann_data',
'convert_fragpipe_mq_mod', 'import_fragpipe_data', 'import_data']
# Cell
import os
import pandas as pd
def read_file(
file: str,
column_names: list
) -> pd.DataFrame:
"""Load a specified columns of the file as a pandas dataframe.
Args:
file (str): The name of a file.
column_names (list): The list of three columns that should be extracted from the file.
Raises:
NotImplementedError: if a specified file has not a .csv, .txt or .tsv extension.
ValueError: if any of the specified columns is not in the file.
Returns:
pd.DataFrame: A pandas dataframe with all the data stored in the specified columns.
"""
file_ext = os.path.splitext(file)[-1]
if file_ext=='.csv':
sep=','
elif file_ext=='.tsv':
sep='\t'
elif file_ext=='.txt':
sep='\t'
else:
raise NotImplementedError("The selected filetype isn't supported. Please specify a file with a .csv, .txt or .tsv extension.")
with open(file) as filelines:
i = 0
pos = 0
for l in filelines:
i += 1
l = l.split(sep)
try:
raw = l.index(column_names[0])
prot = l.index(column_names[1])
seq = l.index(column_names[2])
except:
raise ValueError('The list of specified column names cannot be extracted from the file.')
if i>0:
break
with open(file) as filelines:
raws = []
prots = []
seqs = []
for l in filelines:
l = l.split(sep)
raws.append(l[raw])
prots.append(l[prot])
seqs.append(l[seq])
res = pd.DataFrame({column_names[0]:raws[1:],
column_names[1]:prots[1:],
column_names[2]:seqs[1:]})
return res
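# A minimal usage sketch of read_file (the file name below is hypothetical):
#   df = read_file("spectronaut_report.csv",
#                  ["PEP.AllOccurringProteinAccessions", "EG.ModifiedSequence", "R.FileName"])
# The result is a DataFrame holding exactly those three columns, one row per data line.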
def extract_rawfile_unique_values(
file: str
) -> list:
"""Extract the unique raw file names from "R.FileName" (Spectronaut output), "Raw file" (MaxQuant output),
"shortname" (AlphaPept output) or "Run" (DIA-NN output) column or from the "Spectral Count" column from the
combined_peptide.tsv file without modifications for the FragPipe.
Args:
file (str): The name of a file.
Raises:
ValueError: if a column with the unique raw file names is not in the file.
Returns:
list: A sorted list of unique raw file names from the file.
"""
file_ext = os.path.splitext(file)[-1]
if file_ext == '.csv':
sep = ','
elif file_ext in ['.tsv', '.txt']:
sep = '\t'
with open(file) as filelines:
i = 0
filename_col_index = None
filename_data = []
for l in filelines:
l = l.split(sep)
# just do it for the first line
if i == 0:
for col in ['R.FileName', 'Raw file', 'Run', 'shortname']:
if col in l:
filename_col_index = l.index(col)
break
if not isinstance(filename_col_index, int):
# to check the case with the FragPipe peptide.tsv file when we don't have the info about the experiment name
if ("Assigned Modifications" in "".join(l)) and ("Protein ID" in "".join(l)) and ("Peptide" in "".join(l)):
return []
# to check the case with the FragPipe combined_peptide.tsv file when the experiment name is included in the "Spectral Count" column
elif ("Sequence" in "".join(l)) and ("Assigned Modifications" in "".join(l)) and ("Protein ID" in "".join(l)):
return sorted(list(set([col.replace('_', '').replace(' Spectral Count', '') for col in l if 'Spectral Count' in col])))
else:
raise ValueError('A column with the raw file names is not in the file.')
else:
filename_data.append(l[filename_col_index])
i += 1
unique_filenames = set(filename_data)
sorted_unique_filenames = sorted(list(unique_filenames))
return sorted_unique_filenames
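# A minimal usage sketch of extract_rawfile_unique_values (hypothetical file name):
#   extract_rawfile_unique_values("evidence.txt")
# would return the sorted unique entries of the "Raw file" column for a MaxQuant
# evidence file, or [] for a FragPipe peptide.tsv that carries no experiment names.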
# Cell
import pandas as pd
import re
from typing import Union
def import_spectronaut_data(
file: str,
sample: Union[str, list, None] = None
) -> pd.DataFrame:
"""Import peptide level data from Spectronaut.
Args:
file (str): The name of a file.
sample (Union[str, list, None]): The unique raw file name(s) to filter the original file. Defaults to None. In this case data for all raw files will be extracted.
Returns:
pd.DataFrame: A pandas dataframe containing information about: all_protein_ids (str), modified_sequence (str), naked_sequence (str)
"""
spectronaut_columns = ["PEP.AllOccurringProteinAccessions","EG.ModifiedSequence","R.FileName"]
data = read_file(file, spectronaut_columns)
if sample:
if isinstance(sample, list):
data_sub = data[data["R.FileName"].isin(sample)]
data_sub = data_sub[["PEP.AllOccurringProteinAccessions","EG.ModifiedSequence"]]
elif isinstance(sample, str):
data_sub = data[data["R.FileName"] == sample]
data_sub = data_sub[["PEP.AllOccurringProteinAccessions","EG.ModifiedSequence"]]
else:
data_sub = data[["PEP.AllOccurringProteinAccessions","EG.ModifiedSequence"]]
# get modified sequence
mod_seq = data_sub.apply(lambda row: re.sub('_','',row["EG.ModifiedSequence"]), axis=1)
data_sub = data_sub.assign(modified_sequence=mod_seq.values)
# get naked sequence
nak_seq = data_sub.apply(lambda row: re.sub(r'\[.*?\]','',row["modified_sequence"]), axis=1)
data_sub = data_sub.assign(naked_sequence=nak_seq.values)
data_sub = data_sub.rename(columns={"PEP.AllOccurringProteinAccessions": "all_protein_ids"})
input_data = data_sub[["all_protein_ids","modified_sequence","naked_sequence"]]
input_data = input_data.dropna()
input_data = input_data.drop_duplicates().reset_index(drop=True)
return input_data
# Cell
import pandas as pd
from typing import Union
import re
def import_maxquant_data(
file: str,
sample: Union[str, list, None] = None
) -> pd.DataFrame:
"""Import peptide level data from MaxQuant.
Args:
file (str): The name of a file.
sample (Union[str, list, None]): The unique raw file name(s) to filter the original file. Defaults to None. In this case data for all raw files will be extracted.
Returns:
pd.DataFrame: A pandas dataframe containing information about: all_protein_ids (str), modified_sequence (str), naked_sequence (str)
"""
mq_columns = ["Proteins","Modified sequence","Raw file"]
data = read_file(file, mq_columns)
if sample:
if isinstance(sample, list):
data_sub = data[data["Raw file"].isin(sample)]
data_sub = data_sub[["Proteins","Modified sequence"]]
elif isinstance(sample, str):
data_sub = data[data["Raw file"] == sample]
data_sub = data_sub[["Proteins","Modified sequence"]]
else:
data_sub = data[["Proteins","Modified sequence"]]
# get modified sequence
mod_seq = data_sub.apply(lambda row: re.sub('_','',row["Modified sequence"]), axis=1)
data_sub = data_sub.assign(modified_sequence=mod_seq.values)
# replace outer () with []
mod_seq_replaced = data_sub.apply(lambda row: re.sub(r'\((.*?\(.*?\))\)',r'[\1]',row["modified_sequence"]), axis=1)
data_sub = data_sub.assign(modified_sequence=mod_seq_replaced.values)
# get naked sequence
nak_seq = data_sub.apply(lambda row: re.sub(r'\[.*?\]','',row["modified_sequence"]), axis=1)
data_sub = data_sub.assign(naked_sequence=nak_seq.values)
data_sub = data_sub.rename(columns={"Proteins": "all_protein_ids"})
input_data = data_sub[["all_protein_ids","modified_sequence","naked_sequence"]]
input_data = input_data.dropna() # remove missing values
input_data = input_data.drop_duplicates().reset_index(drop=True)
return input_data
# Cell
import re
def convert_ap_mq_mod(
sequence:str
) -> str:
"""Convert AlphaPept style modifications into MaxQuant style modifications.
Args:
sequence (str): The peptide sequence with modification in an AlphaPept style.
Returns:
str: The peptide sequence with modification in a similar to MaxQuant style.
"""
# TODO: add more AP modifications
modif_convers_dict = {
'ox': '[Oxidation ({})]',
'a': '[Acetyl ({})]',
'am': '[Amidated ({})]',
'deam': '[Deamidation ({})]',
'p': '[Phospho ({})]',
'pg': '[{}->pyro-Glu]',
'c': '[Cys-Cys]'
}
mods = re.findall('[a-z0-9]+', sequence)
if mods:
for mod in mods:
posit = re.search(mod, sequence)
i = posit.start()
if i == 0 and mod == 'a':
add_aa = 'N-term'
elif posit.end() == len(sequence) - 1 and mod == 'am':
add_aa = sequence[posit.end()]
sequence = sequence.replace(mod + add_aa, add_aa + mod, 1)
add_aa = 'C-term'
else:
add_aa = sequence[posit.end()]
sequence = sequence.replace(mod + add_aa, add_aa + mod, 1)
if mod == 'ox':
if add_aa == 'M':
add_aa = 'M'
elif add_aa in 'MP':
add_aa = 'MP'
elif mod == 'deam':
if add_aa in 'NQ':
add_aa = 'NQ'
elif mod == 'p':
if add_aa in 'STY':
add_aa = 'STY'
elif mod == 'pg':
if add_aa == 'E':
add_aa = 'Glu'
elif add_aa == 'Q':
add_aa = 'Gln'
if mod in modif_convers_dict.keys():
sequence = sequence.replace(mod, modif_convers_dict.get(mod).format(add_aa), 1)
return sequence
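# Illustrative conversion (hypothetical AlphaPept-style input, where the lowercase
# modification tag precedes the modified residue):
#   convert_ap_mq_mod("DFoxMK")  ->  "DFM[Oxidation (M)]K"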
# Cell
import pandas as pd
from typing import Union
def import_alphapept_data(
file: str,
sample: Union[str, list, None] = None
) -> pd.DataFrame:
"""Import peptide level data from AlphaPept.
Args:
file (str): The name of a file.
sample (Union[str, list, None]): The unique raw file name(s) to filter the original file. Defaults to None. In this case data for all raw files will be extracted.
Returns:
pd.DataFrame: A pandas dataframe containing information about: all_protein_ids (str), modified_sequence (str), naked_sequence (str)
"""
ap_columns = ["protein_group", "sequence", "shortname"]
data = pd.read_csv(file, usecols=ap_columns)
# TODO: add later the file reading using read_file function. For now it doesn't work for the protein groups that should be split later
if sample:
if isinstance(sample, list):
data_sub = data[data["shortname"].isin(sample)]
data_sub = data_sub[["protein_group", "sequence"]]
elif isinstance(sample, str):
data_sub = data[data["shortname"] == sample]
data_sub = data_sub[["protein_group", "sequence"]]
else:
data_sub = data[["protein_group", "sequence"]]
data_sub = data_sub[~data_sub.sequence.str.contains('_decoy')]
# get modified sequence
modif_seq = data_sub.apply(lambda row: convert_ap_mq_mod(row.sequence), axis=1)
data_sub['modified_sequence'] = modif_seq.values
# get a list of proteins_id
proteins = data_sub.apply(lambda row: ";".join([_.split('|')[1] for _ in row.protein_group.split(',')]), axis=1)
data_sub['all_protein_ids'] = proteins.values
# get naked sequence
nak_seq = data_sub.apply(lambda row: ''.join([_ for _ in row.sequence if _.isupper()]), axis=1)
data_sub['naked_sequence'] = nak_seq.values
input_data = data_sub[["all_protein_ids", "modified_sequence", "naked_sequence"]]
input_data = input_data.dropna() # remove missing values
input_data = input_data.drop_duplicates().reset_index(drop=True)
return input_data
# Cell
import re
def convert_diann_mq_mod(
sequence:str
) -> str:
"""Convert DIA-NN style modifications into MaxQuant style modifications.
Args:
        sequence (str): The peptide sequence with modifications in DIA-NN style.
    Returns:
        str: The peptide sequence with modifications in a MaxQuant-like style.
"""
modif_convers_dict = {
'(UniMod:1)': '[Acetyl ({})]',
'(UniMod:2)': '[Amidated ({})]',
'(UniMod:4)': '[Carbamidomethyl ({})]',
'(UniMod:5)': '[Carbamyl ({})]',
'(UniMod:7)': '[Deamidation ({})]',
'(UniMod:21)': '[Phospho ({})]',
'(UniMod:23)': '[Dehydrated ({})]',
'(UniMod:26)': '[Pyro-carbamidomethyl ({})]',
'(UniMod:27)': '[Glu->pyro-Glu]',
'(UniMod:28)': '[Gln->pyro-Glu]',
'(UniMod:30)': '[Cation:Na ({})]',
'(UniMod:34)': '[Methyl ({})]',
'(UniMod:35)': '[Oxidation ({})]',
'(UniMod:36)': '[Dimethyl ({})]',
'(UniMod:37)': '[Trimethyl ({})]',
'(UniMod:40)': '[Sulfo ({})]',
'(UniMod:55)': '[Cys-Cys]',
'(UniMod:121)': '[GlyGly ({})]',
'(UniMod:254)': '[Delta:H(2)C(2) ({})]',
'(UniMod:312)': '[Cysteinyl]',
'(UniMod:345)': '[Trioxidation ({})]',
'(UniMod:408)': '[Hydroxyproline]',
'(UniMod:425)': '[Dioxidation ({})]',
'(UniMod:526)': '[Dethiomethyl ({})]',
'(UniMod:877)': '[QQTGG ({})]',
}
    mods = re.findall(r'\(UniMod:\d+\)', sequence)
    if mods:
        for mod in mods:
            posit = re.search(r'\(UniMod:\d+\)', sequence)
i = posit.start()
if i == 0:
add_aa = 'N-term'
elif posit.end() == len(sequence):
add_aa = 'C-term'
else:
add_aa = sequence[i-1]
if mod == '(UniMod:7)':
if add_aa in 'NQ':
add_aa = 'NQ'
elif mod == '(UniMod:21)':
if add_aa in 'STY':
add_aa = 'STY'
elif mod == '(UniMod:23)':
if add_aa in 'ST':
add_aa = 'ST'
elif mod == '(UniMod:30)':
if add_aa in 'DE':
add_aa = 'DE'
elif mod == '(UniMod:34)':
if add_aa in 'KR':
add_aa = 'KR'
elif mod == '(UniMod:36)':
if add_aa in 'KR':
add_aa = 'KR'
elif mod == '(UniMod:40)':
if add_aa in 'STY':
add_aa = 'STY'
elif mod == '(UniMod:425)':
if add_aa in 'MW':
add_aa = 'MW'
if mod in modif_convers_dict.keys():
sequence = sequence.replace(mod, modif_convers_dict.get(mod).format(add_aa), 1)
return sequence
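# Illustrative conversion (hypothetical DIA-NN style input):
#   convert_diann_mq_mod("AC(UniMod:4)DEFM(UniMod:35)K")
#   ->  "AC[Carbamidomethyl (C)]DEFM[Oxidation (M)]K"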
# Cell
import pandas as pd
from typing import Union
def import_diann_data(
file: str,
sample: Union[str, list, None] = None
) -> pd.DataFrame:
"""Import peptide level data from DIA-NN.
Args:
file (str): The name of a file.
sample (Union[str, list, None]): The unique raw file name(s) to filter the original file. Defaults to None. In this case data for all raw files will be extracted.
Returns:
pd.DataFrame: A pandas dataframe containing information about: all_protein_ids (str), modified_sequence (str), naked_sequence (str)
"""
diann_columns = ["Protein.Ids", "Modified.Sequence", "Run"]
data = read_file(file, diann_columns)
if sample:
if isinstance(sample, list):
data_sub = data[data["Run"].isin(sample)]
data_sub = data_sub[["Protein.Ids", "Modified.Sequence"]]
elif isinstance(sample, str):
data_sub = data[data["Run"] == sample]
data_sub = data_sub[["Protein.Ids", "Modified.Sequence"]]
else:
data_sub = data[["Protein.Ids", "Modified.Sequence"]]
# get a list of proteins_id
data_sub = data_sub.rename(columns={"Protein.Ids": "all_protein_ids"})
# get modified sequence
modif_seq = data_sub.apply(lambda row: convert_diann_mq_mod(row["Modified.Sequence"]), axis=1)
data_sub['modified_sequence'] = modif_seq.values
# get naked sequence
nak_seq = data_sub.apply(lambda row: re.sub(r'\[.*?\]', '', row["modified_sequence"]), axis=1)
data_sub = data_sub.assign(naked_sequence = nak_seq.values)
input_data = data_sub[["all_protein_ids", "modified_sequence", "naked_sequence"]]
input_data = input_data.dropna() # remove missing values
input_data = input_data.drop_duplicates().reset_index(drop=True)
return input_data
# Cell
import re
def convert_fragpipe_mq_mod(
sequence:str,
assigned_modifications: str
) -> str:
"""Convert FragPipe style modifications into MaxQuant style modifications.
Args:
sequence (str): The peptide sequence with modification.
assigned_modifications (str): The string of assigned modifications separated by comma.
Returns:
        str: The peptide sequence with modifications in a MaxQuant-like style.
"""
modif_convers_dict = {
42.0106: '[Acetyl ({})]',
-0.9840: '[Amidated ({})]',
57.0215: '[Carbamidomethyl ({})]',
43.0058: '[Carbamyl ({})]',
0.9840: '[Deamidation ({})]',
79.9663: '[Phospho ({})]',
-18.0106: ['[Dehydrated ({})]', '[Glu->pyro-Glu]'],
39.9949: '[Pyro-carbamidomethyl ({})]',
-17.0265: '[Gln->pyro-Glu]',
21.9819: '[Cation:Na ({})]',
14.0157: '[Methyl ({})]',
15.9949: '[Oxidation ({})]',
28.0313: '[Dimethyl ({})]',
42.047: '[Trimethyl ({})]',
79.9568: '[Sulfo ({})]',
305.0682: '[Cys-Cys]',
114.0429: '[GlyGly ({})]',
26.0157: '[Delta:H(2)C(2) ({})]',
119.0041: '[Cysteinyl]',
47.9847: '[Trioxidation ({})]',
148.0372: '[Hydroxyproline]',
31.9898: '[Dioxidation ({})]',
-48.0034: '[Dethiomethyl ({})]',
599.2663: '[QQTGG ({})]',
}
if assigned_modifications:
modifs_posit = [''] * (len(sequence) + 1)
for mod in assigned_modifications.split(','):
mod = mod.strip()
data = mod.replace(')', '').replace('"', '').split('(')
mod_pos, mod_mass = data[0], float(data[1])
if mod_pos == 'N-term':
posit = 0
add_aa = 'N-term'
elif mod_pos == 'C-term':
posit = -1
add_aa = 'C-term'
else:
posit = int(mod_pos[:-1])
add_aa = mod_pos[-1]
if mod_mass == 0.9840:
if add_aa in 'NQ':
add_aa = 'NQ'
elif mod_mass == 79.9663:
if add_aa in 'STY':
add_aa = 'STY'
elif mod_mass == 21.9819:
if add_aa in 'DE':
add_aa = 'DE'
elif mod_mass == 14.0157:
if add_aa in 'KR':
add_aa = 'KR'
elif mod_mass == 28.0313:
if add_aa in 'KR':
add_aa = 'KR'
elif mod_mass == 79.9568:
if add_aa in 'STY':
add_aa = 'STY'
elif mod_mass == 31.9898:
if add_aa in 'MW':
add_aa = 'MW'
if mod_mass == -18.0106:
if add_aa == 'E':
modifs_posit[posit] = modif_convers_dict[mod_mass][1].format(add_aa)
else:
if add_aa in 'ST':
add_aa = 'ST'
modifs_posit[posit] = modif_convers_dict[mod_mass][0].format(add_aa)
else:
modifs_posit[posit] = modif_convers_dict[mod_mass].format(add_aa)
modif_sequence = ''.join(["".join(i) for i in zip(' '+ sequence, modifs_posit)]).strip()
return modif_sequence
else:
return sequence
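# Illustrative conversion (hypothetical FragPipe-style input, with the assigned
# modification given as "position+residue(mass)"):
#   convert_fragpipe_mq_mod("PEPTMIDE", "5M(15.9949)")
#   ->  "PEPTM[Oxidation (M)]IDE"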
# Cell
import pandas as pd
from typing import Union
def import_fragpipe_data(
file: str,
sample: Union[str, list, None] = None
) -> pd.DataFrame:
"""Import peptide level data from FragPipe/MSFragger.
Args:
file (str): The name of a file.
sample (Union[str, list, None]): The unique raw file name(s) to filter the original file. Defaults to None. In this case data for all raw files will be extracted.
Returns:
pd.DataFrame: A pandas dataframe containing information about: all_protein_ids (str), modified_sequence (str), naked_sequence (str)
"""
file_ext = os.path.splitext(file)[-1]
if file_ext=='.csv':
sep=','
elif file_ext=='.tsv':
sep='\t'
elif file_ext=='.txt':
sep='\t'
if sample:
if isinstance(sample, list):
column_names = [each + ' Spectral Count' for each in sample]
combined_fragpipe_columns = ["Sequence", "Protein ID"] + column_names
data = pd.read_csv(file, sep=sep, low_memory=False, usecols=combined_fragpipe_columns)
selected_indices = []
for column_name in column_names:
selected_indices.extend(data[data[column_name] > 0].index.tolist())
data_sub = data.iloc[list(set(selected_indices))]
data_sub = data_sub[["Sequence", "Protein ID"]]
elif isinstance(sample, str):
column_name = sample + ' Spectral Count'
combined_fragpipe_columns = ["Sequence", "Protein ID", column_name]
data = pd.read_csv(file, sep=sep, low_memory=False, usecols=combined_fragpipe_columns)
selected_indices = data[data[column_name] > 0].index.tolist()
data_sub = data.iloc[selected_indices]
data_sub = data_sub[["Sequence", "Protein ID"]]
# rename columns into all_proteins_id and naked sequence
data_sub = data_sub.rename(columns={"Protein ID": "all_protein_ids", "Sequence": "naked_sequence"})
data_sub['modified_sequence'] = data_sub.naked_sequence
else:
try:
combined_fragpipe_columns = ["Sequence", "Protein ID"]
            data_sub = pd.read_csv(file, sep=sep, low_memory=False, usecols=combined_fragpipe_columns)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
from __future__ import division  # must come before any other import in the module
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xgboost
import math
from scipy.stats import pearsonr
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation, tree, linear_model
from sklearn.model_selection import train_test_split
from sklearn.metrics import explained_variance_score
# # 1. Exploratory Data Analysis
# In[ ]:
# Read the data into a data frame
data = pd.read_csv('../input/kc_house_data.csv')
# In[ ]:
# Check the number of data points in the data set
print(len(data))
# Check the number of features in the data set
print(len(data.columns))
# Check the data types
print(data.dtypes.unique())
# - Since there are Python objects in the data set, we may have some categorical features. Let's check them.
# In[ ]:
data.select_dtypes(include=['O']).columns.tolist()
# - We only have the date column which is a timestamp that we will ignore.
# In[ ]:
# Check any number of columns with NaN
print(data.isnull().any().sum(), ' / ', len(data.columns))
# Check any number of data points with NaN
print(data.isnull().any(axis=1).sum(), ' / ', len(data))
# - The data set is pretty much structured and doesn't have any NaN values. So we can jump into finding correlations between the features and the target variable
# # 2. Correlations between features and target
# In[ ]:
features = data.iloc[:,3:].columns.tolist()
target = data.iloc[:,2].name
# In[ ]:
correlations = {}
for f in features:
data_temp = data[[f,target]]
x1 = data_temp[f].values
x2 = data_temp[target].values
key = f + ' vs ' + target
correlations[key] = pearsonr(x1,x2)[0]
# In[ ]:
data_correlations = pd.DataFrame(correlations, index=['Value'])
import numpy as np
import pandas as pd
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
b = pd.DataFrame(a)
# -*- coding: utf-8 -*-
"""House Prices Isabel.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1crlL-Zf_EXl_hSIAIwKw17wb81Bvnqvg
"""
import pandas as pd
import numpy as np
from sklearn import neighbors, tree
from sklearn.linear_model import LinearRegression
from sklearn import datasets, linear_model, svm
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
from sklearn.gaussian_process import GaussianProcessRegressor
from scipy import stats
from scipy.stats import norm, skew
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style('darkgrid')
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
#Save the 'Id' column
train_ID = train['Id']
test_ID = test['Id']
#Now drop the 'Id' colum since it's unnecessary for the prediction process.
train.drop("Id", axis = 1, inplace = True)
test.drop("Id", axis = 1, inplace = True)
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
fig, ax = plt.subplots()
ax.scatter(train['GrLivArea'], train['SalePrice'])
sns.distplot(train['SalePrice'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train['SalePrice'])
#We use the numpy fuction log1p which applies log(1+x) to all elements of the column
train["SalePrice"] = np.log1p(train["SalePrice"])
#Check the new distribution
sns.distplot(train['SalePrice'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train['SalePrice'])
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print("all_data size is : {}".format(all_data.shape))
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
#missing_data.head(20)
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
#Group by neighborhood and fill in missing value by the median LotFrontage of all the neighborhood
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median()))
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0)
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
all_data = all_data.drop(['Utilities'], axis=1)
all_data["Functional"] = all_data["Functional"].fillna("Typ")
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])
all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None")
#Check remaining missing values if any
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head()
#MSSubClass=The building class
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)
#Changing OverallCond into a categorical variable
all_data['OverallCond'] = all_data['OverallCond'].astype(str)
#Year and month sold are transformed into categorical features.
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str)
from sklearn.preprocessing import LabelEncoder
cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
'YrSold', 'MoSold')
# process columns, apply LabelEncoder to categorical features
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(all_data[c].values))
all_data[c] = lbl.transform(list(all_data[c].values))
# shape
print('Shape all_data: {}'.format(all_data.shape))
# Adding total sqfootage feature
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_feats})
#skewness.head(10)
skewness = skewness[abs(skewness) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
#all_data[feat] += 1
all_data[feat] = boxcox1p(all_data[feat], lam)
#all_data[skewed_features] = np.log1p(all_data[skewed_features])
all_data = pd.get_dummies(all_data)
print(all_data.shape)
train = all_data[:ntrain]
test = all_data[ntrain:]
"""Dropping Columns"""
to_drop = ['Street','Alley', 'BsmtFinSF2']
train = train.drop(to_drop, 1)
test = test.drop(to_drop, 1)
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb
#Validation function
n_folds = 5
def rmsle_cv(model):
kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(train.values)
rmse= np.sqrt(-cross_val_score(model, train.values, y_train, scoring="neg_mean_squared_error", cv = kf))
return(rmse)
lasso = make_pipeline(RobustScaler(), Lasso(alpha =0.0005, random_state=1))
score = rmsle_cv(lasso)
print("\nLasso score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
GBoost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
max_depth=4, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10,
loss='huber', random_state =5)
ENet = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))
KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin):
def __init__(self, models):
self.models = models
# we define clones of the original models to fit the data in
def fit(self, X, y):
self.models_ = [clone(x) for x in self.models]
# Train cloned base models
for model in self.models_:
model.fit(X, y)
return self
#Now we do the predictions for cloned models and average them
def predict(self, X):
predictions = np.column_stack([
model.predict(X) for model in self.models_
])
return np.mean(predictions, axis=1)
class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin):
def __init__(self, base_models, meta_model, n_folds=5):
self.base_models = base_models
self.meta_model = meta_model
self.n_folds = n_folds
# We again fit the data on clones of the original models
def fit(self, X, y):
self.base_models_ = [list() for x in self.base_models]
self.meta_model_ = clone(self.meta_model)
kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)
# Train cloned base models then create out-of-fold predictions
# that are needed to train the cloned meta-model
out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))
for i, model in enumerate(self.base_models):
for train_index, holdout_index in kfold.split(X, y):
instance = clone(model)
self.base_models_[i].append(instance)
instance.fit(X[train_index], y[train_index])
y_pred = instance.predict(X[holdout_index])
out_of_fold_predictions[holdout_index, i] = y_pred
# Now train the cloned meta-model using the out-of-fold predictions as new feature
self.meta_model_.fit(out_of_fold_predictions, y)
return self
#Do the predictions of all base models on the test data and use the averaged predictions as
#meta-features for the final prediction which is done by the meta-model
def predict(self, X):
meta_features = np.column_stack([
np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)
for base_models in self.base_models_ ])
return self.meta_model_.predict(meta_features)
def rmsle(y, y_pred):
return np.sqrt(mean_squared_error(y, y_pred))
averaged_models = AveragingModels(models = (ENet, GBoost, KRR, lasso))
stacked_averaged_models = StackingAveragedModels(base_models = (ENet, GBoost, KRR),
meta_model = lasso)
stacked_averaged_models.fit(train.values, y_train)
stacked_train_pred = stacked_averaged_models.predict(train.values)
stacked_pred = np.expm1(stacked_averaged_models.predict(test.values))
#ensemble = stacked_pred*0.70 + xgb_pred*0.15 + lgb_pred*0.15
ensemble = stacked_pred
sub = pd.DataFrame()
"""
Functions to import and process raw availability data from Suncor exports
- Not imported by GUI app
"""
from datetime import datetime as dt
from datetime import timedelta as delta
from pathlib import Path
import pandas as pd
import pypika as pk
from smseventlog import config as cf
from smseventlog import functions as f
from smseventlog import getlog
from smseventlog.database import db
from smseventlog.utils.exchange import combine_email_data
log = getlog(__name__)
def import_single(p):
    df = pd.read_csv(p, header=2)
from nvblox.experiments.timing import get_timings_as_dataframe
import os
import argparse
import glob
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
# Get the timings for a single run
def get_total_times(filepath: str) -> pd.Series:
timings = get_timings_as_dataframe(filepath)
return timings["total_time"]
def get_platform_timings(timings_dir: str, platform_name: str = None) -> pd.DataFrame:
results_files = glob.glob(timings_dir + "/timings_*.txt")
results_files.sort()
df = pd.DataFrame()
for f in results_files:
# Extract datasize
data_size = int(re.search('timings_(.+?).txt', f).group(1))
# Total times -> timg/byte
timings = get_total_times(f)
timings /= data_size
# Add to timings for each datasize
this_df = timings.to_frame(name=str(data_size) + " bytes")
if df.empty:
df = this_df
else:
            df = pd.merge(df, this_df, left_index=True, right_index=True)
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from datetime import datetime
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
    agg = concat(cols, axis=1)
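# Intended usage sketch for series_to_supervised (hypothetical data): framing a
# 2-variable series with one lag as input and the current step as output, i.e.
# columns var1(t-1), var2(t-1), var1(t), var2(t).
#   values = [[x, x * 10] for x in range(5)]
#   framed = series_to_supervised(values, n_in=1, n_out=1)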
# LIBRARIES
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import scikitplot as skplt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from scipy import stats
from sklearn import metrics
from sklearn.preprocessing import scale
# machine learning
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# READ FILES
data_train = pd.read_csv('C:/Users/agus_/Downloads/train.csv')
data_test = pd.read_csv('C:/Users/agus_/Downloads/test.csv')
# Overview of the full dataset
print(data_train.info())
print("-"*40)
print(data_test.info())
print("-"*67)
print(data_train.describe())
print("\n")
# Original features of the dataset
print(data_train.columns.values)
print("-"*35)
print(data_test.columns.values)
print("\n")
# DATA ANALYSIS STAGES - FEATURE ENGINEERING
# We will analyze the features we consider necessary to include in our model. To do so, we will follow
# a series of steps and then decide which features are relevant and which are not.
# 1) Feature correlation
# In this stage we analyze the features we believe are correlated with Survived. We only do this for
# features that have no missing values. Those with a high correlation will be included in the model.
print(data_train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False))
print("\n")
grid = sns.factorplot(x="Pclass", y="Survived", data=data_train, kind="bar", size=6 , palette="muted")
grid.despine(left=True)
grid = grid.set_ylabels("survival probability")
plt.show()
print(data_train[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False))
print("\n")
grid = sns.factorplot(x="Sex", y="Survived", data=data_train,kind="bar", size=6 , palette="muted")
grid.despine(left=True)
grid = grid.set_ylabels("survival probability")
plt.show()
print(data_train[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False))
print("\n")
grid = sns.factorplot(x="SibSp", y="Survived", data=data_train, kind="bar", size=6 , palette="muted")
grid.despine(left=True)
grid = grid.set_ylabels("survival probability")
plt.show()
print(data_train[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False))
print("\n")
grid = sns.factorplot(x="Parch", y="Survived", data=data_train, kind="bar", size=6 , palette="muted")
grid.despine(left=True)
grid = grid.set_ylabels("survival probability")
plt.show()
print(data_train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False))
print("\n")
grid = sns.factorplot(x="Embarked", y="Survived", data=data_train, size=6, kind="bar", palette="muted")
grid.despine(left=True)
grid = grid.set_ylabels("survival probability")
plt.show()
# sns.set(style="darkgrid")
grid = sns.FacetGrid(data_train, col='Survived')
grid = grid.map(sns.distplot, 'Age', hist=True, hist_kws=dict(edgecolor="w"), color='blue')
plt.show()
# 2) Feature correction
# In this stage we drop the features considered completely irrelevant to the model.
# How do we spot them? Simple: we look for features that are independent and provide no information
# about whether a person survived or not. In this case they are PassengerId, Ticket and Cabin.
data_train = data_train.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
data_test = data_test.drop(['Ticket', 'Cabin'], axis=1)
print(data_train.columns.values)
print(data_train.shape)
print("\n")
print(data_test.columns.values)
print(data_test.shape)
print("\n")
# 3) Feature creation
# In this stage we analyze features that on their own make the model more complex, but that, when grouped
# into a new feature, simplify the model and make it easier to understand.
# We will evaluate whether it is worth creating a new feature from the existing ones.
dataset = [data_train, data_test]
for data in dataset:
data['Title'] = data.Name.str.extract('([A-Za-z]+)\.', expand=False)
for data in dataset:
data['Title'] = data['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Other')
data['Title'] = data['Title'].replace('Mlle', 'Miss')
data['Title'] = data['Title'].replace('Ms', 'Miss')
data['Title'] = data['Title'].replace('Mme', 'Mrs')
print(data_train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean().sort_values(by='Survived', ascending=False))
print("\n")
grid = sns.factorplot(x="Title", y="Survived", data=data_train, kind="bar")
grid = grid.set_xticklabels(["Master","Miss", "Mrs","Mr","Rare"])
grid = grid.set_ylabels("survival probability")
plt.show()
transformacion_de_titulos = {"Master": 1, "Miss": 2, "Mrs": 3, "Mr": 4, "Other": 5}
for data in dataset:
data['Title'] = data['Title'].map(transformacion_de_titulos)
    data['Title'] = data['Title'].fillna(value=0)  # fillna() replaces every NaN value with 0
print()
data_train = data_train.drop(['Name'], axis=1)
data_test = data_test.drop(['Name'], axis=1)
dataset = [data_train, data_test]
# Sex dummies
data_train = pd.get_dummies(data=data_train, columns=['Sex'])
data_train = data_train.drop(['Sex_male'], axis=1)
data_test = pd.get_dummies(data=data_test, columns=['Sex'])
data_test = data_test.drop(['Sex_male'], axis=1)
dataset = [data_train, data_test]
print(data_train.columns.values)
print(data_train.head())
print(data_test.columns.values)
print(data_test.head())
print("\n")
# print(data_train.info())
print("-"*60)
# Filling in Age
sumaEdadMaster = 0.0
sumaEdadMr = 0.0
sumaEdadMiss = 0.0
sumaEdadMrs = 0.0
sumaEdadOther = 0.0
master = 0
miss = 0
mrs = 0
mr = 0
other = 0
for row in data_train.itertuples(index=True):
if getattr(row, 'Title') == 1 and pd.isna(getattr(row, 'Age')) == False:
sumaEdadMaster = sumaEdadMaster + getattr(row, 'Age')
master += 1
if getattr(row, 'Title') == 2 and pd.isna(getattr(row, 'Age')) == False:
sumaEdadMiss = sumaEdadMiss + getattr(row, 'Age')
miss += 1
if getattr(row, 'Title') == 3 and pd.isna(getattr(row, 'Age')) == False:
sumaEdadMrs = sumaEdadMrs + getattr(row, 'Age')
mrs += 1
if getattr(row, 'Title') == 4 and pd.isna(getattr(row, 'Age')) == False:
sumaEdadMr = sumaEdadMr + getattr(row, 'Age')
mr += 1
if getattr(row, 'Title') == 5 and pd.isna(getattr(row, 'Age')) == False:
sumaEdadOther = sumaEdadOther + getattr(row, 'Age')
other += 1
# print(getattr(row, 'Title'), getattr(row, 'Age'))
for row in data_test.itertuples(index=True):
if getattr(row, 'Title') == 1 and pd.isna(getattr(row, 'Age')) == False:
sumaEdadMaster = sumaEdadMaster + getattr(row, 'Age')
master += 1
if getattr(row, 'Title') == 2 and pd.isna(getattr(row, 'Age')) == False:
sumaEdadMiss = sumaEdadMiss + getattr(row, 'Age')
miss += 1
if getattr(row, 'Title') == 3 and pd.isna(getattr(row, 'Age')) == False:
sumaEdadMrs = sumaEdadMrs + getattr(row, 'Age')
mrs += 1
if getattr(row, 'Title') == 4 and pd.isna(getattr(row, 'Age')) == False:
sumaEdadMr = sumaEdadMr + getattr(row, 'Age')
mr += 1
if getattr(row, 'Title') == 5 and pd.isna(getattr(row, 'Age')) == False:
sumaEdadOther = sumaEdadOther + getattr(row, 'Age')
other += 1
# print(row[['Title', 'Age']])
print("SUMA:", sumaEdadMaster, "CANT:", master)
media_master = sumaEdadMaster/master
print("MEDIA Master:", media_master)
print("SUMA:", sumaEdadMiss, "CANT:", miss)
media_miss = sumaEdadMiss/miss
print("MEDIA Miss:", media_miss)
print("SUMA", sumaEdadMrs, "CANT:", mrs)
media_mrs = sumaEdadMrs/mrs
print("MEDIA Mrs:", media_mrs)
print("SUMA:", sumaEdadMr, "CANT:", mr)
media_mr = sumaEdadMr/mr
print("MEDIA Mr:", media_mr)
print("SUMA:", sumaEdadOther, "CANT:", other)
media_other = sumaEdadOther/other
print("MEDIA Other:", media_other)
print("TOTAL:", master+miss+mrs+mr+other)
print("\n")
print(data_train.info())
print("\n")
for row in data_train.itertuples(index=True):
index, Survived, Pclass, Age, SibSp, Parch, Fare, Embarked, Title, Sex_female = row
if getattr(row, 'Title') == 1 and pd.isna(getattr(row, 'Age')) == True:
data_train.at[index ,'Age'] = media_master
if getattr(row, 'Title') == 2 and pd.isna(getattr(row, 'Age')) == True:
data_train.at[index, 'Age'] = media_miss
if getattr(row, 'Title') == 3 and pd.isna(getattr(row, 'Age')) == True:
data_train.at[index, 'Age'] = media_mrs
if getattr(row, 'Title') == 4 and pd.isna(getattr(row, 'Age')) == True:
data_train.at[index, 'Age'] = media_mr
if getattr(row, 'Title') == 5 and pd.isna(getattr(row, 'Age')) == True:
data_train.at[index, 'Age'] = media_other
# print(getattr(row, 'Title'), getattr(row, 'Age'))
# Convert all values of the Age feature to integers (float to int).
data_train['Age'] = data_train['Age'].astype(np.int64)
for row in data_test.itertuples(index=True):
index, PassengerId, Pclass, Age, SibSp, Parch, Fare, Embarked, Title, Sex_female = row
if getattr(row, 'Title') == 1 and pd.isna(getattr(row, 'Age')) == True:
data_test.at[index, 'Age'] = media_master
if getattr(row, 'Title') == 2 and pd.isna(getattr(row, 'Age')) == True:
data_test.at[index, 'Age'] = media_miss
if getattr(row, 'Title') == 3 and pd.isna(getattr(row, 'Age')) == True:
data_test.at[index, 'Age'] = media_mrs
if getattr(row, 'Title') == 4 and pd.isna(getattr(row, 'Age')) == True:
data_test.at[index, 'Age'] = media_mr
if getattr(row, 'Title') == 5 and pd.isna(getattr(row, 'Age')) == True:
data_test.at[index, 'Age'] = media_other
# Convert all values of the Age feature to integers (float to int).
data_test['Age'] = data_test['Age'].astype(np.int64)
print(data_train.info())
print(data_train.head())
print("\n")
print(data_test.info())
print(data_test.head())
print("\n")
dataset = [data_train, data_test]
print(data_train.shape)
print(data_test.shape)
print("\n")
data_train['AgeRange'] = pd.cut(data_train['Age'], 8)
import blpapi
import datetime
import pandas as pd
import numpy as np
def check_date_time(value):
if not isinstance(value, datetime.datetime):
raise ValueError('The dates have to be datetime objects')
return None
def check_overrides(value):
if value != None:
if type(value) != dict:
raise ValueError('The overrides has to be a dictionary')
return None
def check_other_param(value):
if value != None:
if type(value) != dict:
raise ValueError('The other_param argument has to be a dictionary')
return None
class BLP():
def __init__(self):
self.boo_getIntradayBar = False
self.boo_getIntradayTick = False
self.boo_getRefData = False
self.boo_getHistoData = False
self.dictData = {}
self.list_df_buffer = [] # Used to store the temporary dataframes
self.BAR_DATA = blpapi.Name("barData")
self.BAR_TICK_DATA = blpapi.Name("barTickData")
self.CATEGORY = blpapi.Name("category")
self.CLOSE = blpapi.Name("close")
self.FIELD_DATA = blpapi.Name("fieldData")
self.FIELD_ID = blpapi.Name("fieldId")
self.HIGH = blpapi.Name("high")
self.LOW = blpapi.Name("low")
self.MESSAGE = blpapi.Name("message")
self.NUM_EVENTS = blpapi.Name("numEvents")
self.OPEN = blpapi.Name("open")
self.RESPONSE_ERROR = blpapi.Name("responseError")
self.SECURITY_DATA = blpapi.Name("securityData")
self.SECURITY = blpapi.Name("security")
self.SESSION_TERMINATED = blpapi.Name("SessionTerminated")
self.TIME = blpapi.Name("time")
self.VALUE = blpapi.Name("value")
self.VOLUME = blpapi.Name("volume")
self.TICK_DATA = blpapi.Name("tickData")
self.TICK_SIZE = blpapi.Name("size")
self.TYPE = blpapi.Name("type")
# Create a Session
self.session = blpapi.Session()
# Start a Session
if not self.session.start():
print("Failed to start session.")
return None
def printErrorInfo(self, leadingStr, errorInfo):
print ("%s%s (%s)" % (leadingStr, errorInfo.getElementAsString(self.CATEGORY),
errorInfo.getElementAsString(self.MESSAGE)))
return None
def check_service(self, service):
# Open service to get historical data from
if not (self.session.openService(service)):
print("Failed to open {}".format(service))
return None
def set_other_param(self, other_param, request):
if other_param != None:
for k, v in other_param.items():
request.set(k, v)
return request
def set_overrides(self, overrides, request):
if overrides != None:
req_overrides = request.getElement("overrides")
list_overrides = []
for fieldId, value in overrides.items():
list_overrides.append(req_overrides.appendElement())
list_overrides[-1].setElement("fieldId", fieldId)
list_overrides[-1].setElement("value", value)
return request
def eventLoop(self, session):
done = False
while not done:
event = session.nextEvent(20)
if event.eventType() == blpapi.Event.PARTIAL_RESPONSE:
self.processResponseEvent(event)
elif event.eventType() == blpapi.Event.RESPONSE:
self.processResponseEvent(event)
done = True
else:
for msg in event:
if event.eventType() == blpapi.Event.SESSION_STATUS:
if msg.messageType() == self.SESSION_TERMINATED:
done = True
return None
def processResponseEvent(self, event):
for msg in event:
if msg.hasElement(self.RESPONSE_ERROR):
self.printErrorInfo("REQUEST FAILED: ", msg.getElement(self.RESPONSE_ERROR))
continue
if self.boo_getIntradayBar:
self.process_msg_intradaybar(msg)
elif self.boo_getIntradayTick:
self.process_msg_intradaytick(msg)
elif self.boo_getRefData:
self.process_msg_refdata(msg)
elif self.boo_getHistoData:
self.process_msg_histodata(msg)
return None
def get_intradaybar(self, security, event, start_date, end_date, barInterval, other_param):
self.boo_getIntradayBar = True
try:
self.check_service("//blp/refdata")
refDataService = self.session.getService("//blp/refdata")
request = refDataService.createRequest("IntradayBarRequest")
# Only one security/eventType per request
request.set("security", security)
request.set("eventType", event)
request.set("interval", barInterval)
# All times are in GMT
request.set("startDateTime", start_date)
request.set("endDateTime", end_date)
# Append other parameters if there are
request = self.set_other_param(other_param, request)
self.session.sendRequest(request)
self.eventLoop(self.session) # Wait for events from session
finally:
# Stop the session
self.session.stop()
df_buffer = pd.DataFrame.from_dict(self.dictData,
orient='index',
columns=['open', 'high', 'low', 'close', 'volume', 'numEvents', 'value'])
df_buffer['ticker'] = security
df_buffer = df_buffer.reset_index(level=0).rename(columns={'index': 'time'}).set_index(['time', 'ticker'])
return df_buffer.fillna(value=np.nan)
def process_msg_intradaybar(self, msg):
data = msg.getElement(self.BAR_DATA).getElement(self.BAR_TICK_DATA)
for bar in data.values():
time = bar.getElementAsDatetime(self.TIME)
open = bar.getElementAsFloat(self.OPEN)
high = bar.getElementAsFloat(self.HIGH)
low = bar.getElementAsFloat(self.LOW)
close = bar.getElementAsFloat(self.CLOSE)
numEvents = bar.getElementAsInteger(self.NUM_EVENTS)
volume = bar.getElementAsInteger(self.VOLUME)
value = bar.getElementAsInteger(self.VALUE)
self.dictData[time] = [open, high, low, close, volume, numEvents, value] # Increment rows in a dictionary
return None
def get_refdata(self, security, fields, overrides, other_param):
self.boo_getRefData = True
self.fields = fields
try:
self.check_service("//blp/refdata")
refDataService = self.session.getService("//blp/refdata")
request = refDataService.createRequest("ReferenceDataRequest")
# Append securities to request
for ticker in security:
request.append("securities", ticker)
# Append fields to request
for field in fields:
request.append("fields", field)
# Append other parameters if there are
request = self.set_other_param(other_param, request)
# Add overrides if there are
request = self.set_overrides(overrides, request)
self.session.sendRequest(request)
self.eventLoop(self.session) # Wait for events from session.
finally:
self.session.stop()
df_buffer = pd.DataFrame.from_dict(self.dictData, orient='index', columns=fields).fillna(value=np.nan)
return df_buffer
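    # Illustrative usage sketch (the ticker and field below are just examples of
    # Bloomberg-style identifiers; a running Bloomberg terminal session is required):
    #   blp = BLP()
    #   df = blp.get_refdata(security=["IBM US Equity"], fields=["PX_LAST"],
    #                        overrides=None, other_param=None)
    # The result is a DataFrame indexed by ticker with one column per requested field.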
def process_msg_refdata(self, msg):
data = msg.getElement(self.SECURITY_DATA)
for securityData in data.values():
field_data = securityData.getElement(self.FIELD_DATA) # Element that contains all the fields
security_ticker = securityData.getElementAsString(self.SECURITY) # Get Ticker
self.dictData[security_ticker] = [] # Create list of fields
for my_field in self.fields:
if field_data.hasElement(my_field): # Check if the field exists for this particular ticker
self.dictData[security_ticker].append(field_data.getElement(my_field).getValue())
else:
self.dictData[security_ticker].append(None)
return None
def get_histodata(self,security, fields, start_date, end_date, overrides, other_param):
self.boo_getHistoData = True
self.fields = fields
try:
self.check_service("//blp/refdata")
# Obtain previously opened service
refDataService = self.session.getService("//blp/refdata")
# Create and fill the request for the historical data
request = refDataService.createRequest("HistoricalDataRequest")
# Append securities to request
for ticker in security:
request.getElement("securities").appendValue(ticker)
# Append fields to request
for field in fields:
request.getElement("fields").appendValue(field)
request.set("startDate", start_date.strftime('%Y%m%d'))
request.set("endDate", end_date.strftime('%Y%m%d'))
# Append other parameters to the request, if any
request = self.set_other_param(other_param, request)
# Add overrides, if any
request = self.set_overrides(overrides, request)
self.session.sendRequest(request) # Send the request
self.eventLoop(self.session) # Wait for events from session.
finally:
# Stop the session
self.session.stop()
# Returns a pandas dataframe with a Multi-index (date/ticker)
df_buffer = | pd.concat(self.list_df_buffer) | pandas.concat |
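# --- Illustrative sketch (not part of the original wrapper) ---
# The historical-data request above presumably collects one DataFrame per
# ticker in self.list_df_buffer and stitches them together with pd.concat.
# The helper below shows that stitching step in isolation; the tickers and
# prices are made up for illustration only.
import numpy as np
import pandas as pd

def combine_ticker_frames(frames):
    """Concatenate per-ticker frames that already carry a (date, ticker) MultiIndex."""
    return pd.concat(frames).sort_index()

if __name__ == "__main__":
    frames = []
    for ticker in ["AAA US Equity", "BBB US Equity"]:
        idx = pd.MultiIndex.from_product(
            [pd.date_range("2021-01-01", periods=3, freq="D"), [ticker]],
            names=["date", "ticker"],
        )
        frames.append(pd.DataFrame({"PX_LAST": np.random.rand(3)}, index=idx))
    print(combine_ticker_frames(frames))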
from random import randint
from typing import Optional, List, Dict, Union, Tuple
import numpy as np
import pandas as pd
import scipy.stats
from matplotlib import pyplot as plt
from .common.helpers import Frame, Rebalance, Float, Date
from .common.make_asset_list import ListMaker
from .common.validators import validate_real
from .settings import _MONTHS_PER_YEAR
class Portfolio(ListMaker):
"""
Implementation of investment portfolio.
An investment portfolio is a type of financial asset.
Arguments are similar to AssetList (with weights added), but the behavior is different.
Works with monthly end-of-day historical rate of return data.
Rebalancing is the action of bringing a portfolio that has deviated away
from the original target asset allocation back into line. After rebalancing the portfolio assets
have the weights set with Portfolio(weights=[...]).
Different rebalancing periods are allowed for the portfolio: 'month' (default), 'year' or 'none'.
Parameters
----------
rebalancing_period : {"month", "year", "none"}, default "month"
Portfolio rebalancing periods. 'none' is for not rebalanced portfolio.
# TODO: Finish description.
"""
def __init__(
self,
assets: Optional[List[str]] = None,
*,
first_date: Optional[str] = None,
last_date: Optional[str] = None,
ccy: str = "USD",
inflation: bool = True,
weights: Optional[List[float]] = None,
rebalancing_period: str = "month",
symbol: str = None,
):
super().__init__(
assets,
first_date=first_date,
last_date=last_date,
ccy=ccy,
inflation=inflation,
)
self._weights = None
self.weights = weights
self.assets_weights = dict(zip(self.symbols, self.weights))
self._rebalancing_period = None
self.rebalancing_period = rebalancing_period
self._symbol = symbol or f'portfolio_{randint(1000, 9999)}.PF'
def __repr__(self):
dic = {
"symbol": self.symbol,
"assets": self.symbols,
"weights": self.weights,
"rebalancing_period": self.rebalancing_period,
"currency": self.currency,
"inflation": self.inflation if hasattr(self, "inflation") else "None",
"first_date": self.first_date.strftime("%Y-%m"),
"last_date": self.last_date.strftime("%Y-%m"),
"period_length": self._pl_txt,
}
return repr(pd.Series(dic))
def _add_inflation(self):
if hasattr(self, "inflation"):
return pd.concat(
[self.ror, self.inflation_ts], axis=1, join="inner", copy=False
)
else:
return self.ror
@property
def weights(self) -> Union[list, tuple]:
"""
Get or set assets weights in portfolio.
If not defined equal weights are used for each asset.
Weights must be a list (or tuple) of float values.
Returns
-------
Values for the weights of assets in portfolio.
Examples
--------
>>> x = ok.Portfolio(['SPY.US', 'BND.US'])
>>> x.weights
[0.5, 0.5]
"""
return self._weights
@weights.setter
def weights(self, weights: Optional[List[float]]):
if weights is None:
# Equally weighted portfolio
n = len(self.symbols) # number of assets
weights = list(np.repeat(1 / n, n))
else:
[validate_real("weight", weight) for weight in weights]
Frame.weights_sum_is_one(weights)
if len(weights) != len(self.symbols):
raise ValueError(
f"Number of tickers ({len(self.symbols)}) should be equal "
f"to the weights number ({len(weights)})"
)
self._weights = weights
@property
def weights_ts(self) -> pd.DataFrame:
"""
Calculate assets weights time series.
Returns
-------
DataFrame
Weights of assets time series.
Examples
--------
>>> pf = ok.Portfolio(['SPY.US', 'AGG.US'], weights=[0.5, 0.5], rebalancing_period='none')
>>> pf.weights
[0.5, 0.5]
>>> pf.weights_ts
SPY.US AGG.US
Date
2003-10 0.515361 0.484639
2003-11 0.517245 0.482755
2003-12 0.527056 0.472944
... ...
2021-02 0.731292 0.268708
2021-03 0.742147 0.257853
2021-04 0.750528 0.249472
[211 rows x 2 columns]
"""
if self.rebalancing_period != 'month':
return Rebalance.assets_weights_ts(ror=self.assets_ror, period=self.rebalancing_period, weights=self.weights)
values = np.tile(self.weights, (self.ror.shape[0], 1))
return pd.DataFrame(values, index=self.ror.index, columns=self.symbols)
@property
def rebalancing_period(self) -> str:
"""
Return rebalancing period of the portfolio.
Rebalancing is the process by which an investor restores their portfolio to its target allocation
by selling and buying assets. After rebalancing all the assets have original weights.
Rebalancing period (rebalancing frequency) is predetermined time intervals when
the investor rebalances the portfolio.
Returns
-------
str
Portfolio rebalancing period.
"""
return self._rebalancing_period
@rebalancing_period.setter
def rebalancing_period(self, rebalancing_period: str):
if rebalancing_period in {'none', 'month', 'year'}:
self._rebalancing_period = rebalancing_period
else:
raise ValueError('rebalancing_period must be "year", "month" or "none"')
@property
def symbol(self) -> str:
"""
Return a text symbol of portfolio.
Symbols are similar to tickers but have a namespace information:
* SPY.US is a symbol
* SPY is a ticker
Portfolios have '.PF' as a namespace.
Returns
-------
str
Text symbol of the portfolio.
"""
return self._symbol
@symbol.setter
def symbol(self, text_symbol: str):
if isinstance(text_symbol, str) and '.' in text_symbol:
if " " in text_symbol:
raise ValueError('portfolio text symbol should not have whitespace characters.')
namespace = text_symbol.split(".", 1)[-1]
if namespace == 'PF':
self._symbol = text_symbol
else:
raise ValueError('portfolio symbol must end with ".PF"')
else:
raise ValueError('portfolio symbol must be a string ending with ".PF" namespace.')
@property
def name(self) -> str:
"""
Return text name of portfolio.
For portfolio name is equal to symbol.
Returns
-------
str
Text name of the portfolio.
"""
return self.symbol
@property
def ror(self) -> pd.Series:
"""
Calculate rate of return time series for portfolio.
Returns
-------
Series
Rate of return time series for portfolio.
"""
if self.rebalancing_period == 'month':
s = Frame.get_portfolio_return_ts(self.weights, self.assets_ror)
else:
s = Rebalance.return_ts(
self.weights, self.assets_ror, period=self.rebalancing_period
)
return s.rename(self.symbol)
@property
def wealth_index(self) -> pd.DataFrame:
"""
Calculate wealth index time series for the portfolio and accumulated inflation.
Wealth index (Cumulative Wealth Index) is a time series that presents the value of portfolio over
historical time period. Accumulated inflation time series is added if `inflation=True` in the Portfolio.
The wealth index is obtained from the accumulated return multiplied by the initial investment.
That is: 1000 * (Acc_Return + 1)
Initial investments are taken as 1000 units of the Portfolio base currency.
Returns
-------
Time series of wealth index values for portfolio and accumulated inflation.
Examples
--------
>>> x = ok.Portfolio(['SPY.US', 'BND.US'])
>>> x.wealth_index
portfolio USD.INFL
2007-05 1000.000000 1000.000000
2007-06 1004.034950 1008.011590
2007-07 992.940364 1007.709187
2007-08 1006.642941 1005.895310
... ...
2020-12 2561.882476 1260.242835
2021-01 2537.800781 1265.661880
2021-02 2553.408256 1272.623020
2021-03 2595.156481 1281.658643
[167 rows x 2 columns]
"""
df = self._add_inflation()
df = Frame.get_wealth_indexes(df)
df = self._make_df_if_series(df)
return df
def _make_df_if_series(self, ts):
if isinstance(ts, pd.Series): # should always return a DataFrame
ts = ts.to_frame()
ts.rename({1: self.symbol}, axis="columns", inplace=True)
return ts
@property
def wealth_index_with_assets(self) -> pd.DataFrame:
"""
Calculate wealth index time series for the portfolio, all assets and accumulated inflation.
Wealth index (Cumulative Wealth Index) is a time series that presents the value of portfolio over
historical time period. Accumulated inflation time series is added if `inflation=True` in the Portfolio.
The wealth index is obtained from the accumulated return multiplied by the initial investment.
That is: 1000 * (Acc_Return + 1)
Initial investments are taken as 1000 units of the Portfolio base currency.
Returns
-------
DataFrame
Time series of wealth index values for portfolio, each asset and accumulated inflation.
Examples
--------
>>> pf = ok.Portfolio(['VOO.US', 'GLD.US'], weights=[0.8, 0.2])
>>> pf.wealth_index_with_assets
portfolio VOO.US GLD.US USD.INFL
2010-10 1000.000000 1000.000000 1000.000000 1000.000000
2010-11 1041.065584 1036.658420 1058.676480 1001.600480
2010-12 1103.779375 1108.395183 1084.508186 1003.303201
2011-01 1109.298272 1133.001556 1015.316564 1008.119056
... ... ... ...
2020-12 3381.729677 4043.276231 1394.513920 1192.576493
2021-01 3332.356424 4002.034813 1349.610572 1197.704572
2021-02 3364.480340 4112.891178 1265.124950 1204.291947
2021-03 3480.083884 4301.261594 1250.702526 1212.842420
"""
if hasattr(self, "inflation"):
df = pd.concat(
[self.ror, self.assets_ror, self.inflation_ts],
axis=1,
join="inner",
copy="false",
)
else:
df = pd.concat(
[self.ror, self.assets_ror], axis=1, join="inner", copy=False
)
return Frame.get_wealth_indexes(df)
@property
def mean_return_monthly(self) -> float:
"""
Calculate monthly mean return (arithmetic mean) for the portfolio rate of return time series.
Mean return calculated for the full history period.
Returns
-------
Float
Mean return value.
Examples
--------
>>> pf = ok.Portfolio(['ISF.LSE', 'XGLE.LSE'], weights=[0.6, 0.4], ccy='GBP')
>>> pf.mean_return_monthly
0.0001803312727272665
"""
return Frame.get_portfolio_mean_return(self.weights, self.assets_ror)
@property
def mean_return_annual(self) -> float:
"""
Calculate annualized mean return (arithmetic mean) for the portfolio rate of return time series.
Mean return calculated for the full history period.
Returns
-------
Float
Mean return value.
Examples
--------
>>> pf = ok.Portfolio(['XCS6.XETR', 'PHAU.LSE'], weights=[0.85, 0.15], ccy='USD')
>>> pf.names
{'XCS6.XETR': 'Xtrackers MSCI China UCITS ETF 1C', 'PHAU.LSE': 'WisdomTree Physical Gold'}
>>> pf.mean_return_annual
0.09005826844072184
"""
return Float.annualize_return(self.mean_return_monthly)
@property
def annual_return_ts(self) -> pd.Series:
"""
Calculate annual rate of return time series for portfolio.
Rate of return is calculated for each calendar year.
Returns
-------
DataFrame
Calendar annual rate of return time series.
Examples
--------
>>> pf = ok.Portfolio(['VOO.US', 'AGG.US'], weights=[0.4, 0.6])
>>> pf.annual_return_ts
Date
2010 0.034299
2011 0.056599
2012 0.086613
2013 0.107111
2014 0.090420
2015 0.010381
2016 0.063620
2017 0.105450
2018 -0.013262
2019 0.174182
2020 0.124668
2021 0.030430
Freq: A-DEC, Name: portfolio_5364.PF, dtype: float64
"""
return Frame.get_annual_return_ts_from_monthly(self.ror)
def get_cagr(self, period: Optional[int] = None, real: bool = False) -> pd.Series:
"""
Calculate portfolio Compound Annual Growth Rate (CAGR) for a given trailing period.
Compound annual growth rate (CAGR) is the rate of return that would be required for an investment to grow from
its initial to its final value, assuming all incomes were reinvested.
Inflation adjusted annualized returns (real CAGR) are shown with `real=True` option.
Annual inflation value is calculated for the same period if inflation=True in the AssetList.
Parameters
----------
period: int, optional
CAGR trailing period in years. None for the full time CAGR.
real: bool, default False
CAGR is adjusted for inflation (real CAGR) if True.
Portfolio should be initialized with inflation=True for real CAGR.
Returns
-------
Series
Portfolio CAGR value and annualized inflation (optional).
Notes
-----
CAGR is not defined for periods less than 1 year (NaN values are returned).
Examples
--------
>>> pf = ok.Portfolio(['XCS6.XETR', 'PHAU.LSE'], weights=[0.85, 0.15], ccy='USD')
>>> pf.names
{'XCS6.XETR': 'Xtrackers MSCI China UCITS ETF 1C', 'PHAU.LSE': 'WisdomTree Physical Gold'}
To get inflation adjusted return (real annualized return) add `real=True` option:
>>> pf.get_cagr(period=5, real=True)
portfolio_5625.PF 0.121265
dtype: float64
"""
ts = self._add_inflation()
df = self._make_df_if_series(ts)
dt0 = self.last_date
if period is None:
dt = self.first_date
else:
self._validate_period(period)
dt = Date.subtract_years(dt0, period)
cagr = Frame.get_cagr(df[dt:])
if real:
if not hasattr(self, "inflation"):
raise ValueError(
"Real CAGR is not defined. Set inflation=True in Portfolio to calculate it."
)
mean_inflation = Frame.get_cagr(self.inflation_ts[dt:])
cagr = (1. + cagr) / (1. + mean_inflation) - 1.
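# Worked example with illustrative numbers: a 10% nominal CAGR with 3% inflation
# gives a real CAGR of 1.10 / 1.03 - 1 ≈ 0.068 (about 6.8%).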
cagr.drop(self.inflation, inplace=True)
return cagr
def get_rolling_cagr(self, window: int = 12, real: bool = False) -> pd.DataFrame:
"""
Calculate rolling CAGR (Compound Annual Growth Rate) for the portfolio.
Parameters
----------
window : int, default 12
Size of the moving window in months. Window size should be at least 12 months for CAGR.
real: bool, default False
CAGR is adjusted for inflation (real CAGR) if True.
Portfolio should be initialized with inflation=True for real CAGR.
Returns
-------
DataFrame
Time series of rolling CAGR and mean inflation (optionally).
Notes
-----
CAGR is not defined for periods less than 1 year (NaN values are returned).
Examples
--------
Get inflation adjusted rolling CAGR (real annualized return) with a 5-year window:
>>> x = ok.Portfolio(['DXET.XETR', 'DBXN.XETR'], ccy='EUR', inflation=True)
>>> x.get_rolling_cagr(window=5*12, real=True)
portfolio_7645.PF
2013-09 0.029914
2013-10 0.052435
2013-11 0.055651
2013-12 0.045180
2014-01 0.063153
...
2021-01 0.032734
2021-02 0.037779
2021-03 0.043811
2021-04 0.043729
2021-05 0.042704
"""
df = self._add_inflation()
if real:
df = self._make_real_return_time_series(df)
return Frame.get_rolling_fn(df, window=window, fn=Frame.get_cagr)
def get_cumulative_return(self, period: Union[str, int, None] = None, real: bool = False) -> pd.Series:
"""
Calculate cumulative return over a given trailing period for the portfolio.
The cumulative return is the total change in the portfolio price during the investment period.
Inflation adjusted cumulative returns (real cumulative returns) are shown with `real=True` option.
Annual inflation data is calculated for the same period if `inflation=True` in the AssetList.
Parameters
----------
period: str, int or None, default None
Trailing period in years.
None - full time cumulative return.
'YTD' - (Year To Date) period of time beginning the first day of the calendar year up to the last month.
real: bool, default False
Cumulative return is adjusted for inflation (real cumulative return) if True.
Portfolio should be initialized with `inflation=True` for real cumulative return.
Returns
-------
Series
Cumulative rate of return values for portfolio and cumulative inflation (if inflation=True in Portfolio).
Examples
--------
>>> pf = ok.Portfolio(['BTC-USD.CC', 'LTC-USD.CC'], weights=[.8, .2], last_date='2021-03')
>>> pf.get_cumulative_return(period=2)
portfolio_6232.PF 9.920432
USD.INFL 0.042121
dtype: float64
To get inflation adjusted return (real annualized return) add `real=True` option:
>>> pf.get_cumulative_return(period=2, real=True)
portfolio_6232.PF 9.39381
dtype: float64
"""
ts = self._add_inflation()
df = self._make_df_if_series(ts)
dt0 = self.last_date
if period is None:
dt = self.first_date
elif str(period).lower() == "ytd":
year = dt0.year
dt = str(year)
else:
self._validate_period(period)
dt = Date.subtract_years(dt0, period)
cr = Frame.get_cumulative_return(df[dt:])
if real:
if not hasattr(self, "inflation"):
raise ValueError(
"Real cumulative return is not defined (no inflation information is available)."
"Set inflation=True in Portfolio to calculate it."
)
cumulative_inflation = Frame.get_cumulative_return(self.inflation_ts[dt:])
cr = (1. + cr) / (1. + cumulative_inflation) - 1.
cr.drop(self.inflation, inplace=True)
return cr
def get_rolling_cumulative_return(self, window: int = 12, real: bool = False) -> pd.DataFrame:
"""
Calculate rolling cumulative return.
The cumulative return is the total change in the portfolio price.
Parameters
----------
window : int, default 12
Size of the moving window in months.
real: bool, default False
Cumulative return is adjusted for inflation (real cumulative return) if True.
Portfolio should be initialized with `inflation=True` for real cumulative return.
Returns
-------
DataFrame
Time series of rolling cumulative return and inflation (optional).
"""
ts = self._add_inflation()
if real:
ts = self._make_real_return_time_series(ts)
df = self._make_df_if_series(ts)
return Frame.get_rolling_fn(
df,
window=window,
fn=Frame.get_cumulative_return,
window_below_year=True,
)
@property
def assets_close_monthly(self) -> pd.DataFrame:
"""
Show assets monthly close time series adjusted to the base currency.
Returns
-------
DataFrame
Assets monthly close time series adjusted to the base currency.
"""
assets_close_monthly = pd.DataFrame(dtype=float)
for i, x in enumerate(self.asset_obj_dict.values()):
if i == 0: # required to use pd.concat below (df should not be empty).
assets_close_monthly = x.close_monthly if x.currency == self.currency else self._adjust_price_to_currency_monthly(x.close_monthly, x.currency)
assets_close_monthly.rename(x.symbol, inplace=True)
else:
new = x.close_monthly if x.currency == self.currency else self._adjust_price_to_currency_monthly(x.close_monthly, x.currency)
new.rename(x.symbol, inplace=True)
assets_close_monthly = pd.concat([assets_close_monthly, new], axis=1, join="inner", copy=False)
if isinstance(assets_close_monthly, pd.Series):
assets_close_monthly = assets_close_monthly.to_frame()
assets_close_monthly = assets_close_monthly[self.first_date: self.last_date]
return assets_close_monthly
@property
def close_monthly(self) -> pd.Series:
"""
Portfolio size monthly time series.
Portfolio size is shown in base currency units. It is similar to the close value of an asset.
Initial portfolio value is equal to 1000 units of base currency.
Returns
-------
pd.Series
Monthly portfolio size time series.
"""
return self.wealth_index.iloc[:, 0]
@property
def number_of_securities(self) -> pd.DataFrame:
"""
Calculate the number of securities monthly time series for the portfolio assets.
Number of securities is changing over time as the dividends are reinvested.
Portfolio rebalancing also affects the number of securities.
Initial number of securities depends on the portfolio size in base currency (1000 units).
Returns
-------
DataFrame
Number of securities monthly time series for the portfolio assets.
"""
return self.weights_ts.mul(self.wealth_index.iloc[:, 0], axis=0).div(self.assets_close_monthly, axis=0)
@property
def dividends(self) -> pd.Series:
"""
Calculate portfolio dividends monthly time series.
Portfolio dividends are obtained by summing asset dividends adjusted to the base currency.
Dividends size depends on the portfolio value and number of securities.
Returns
-------
Series
Portfolio dividends monthly time series.
"""
s = (self._get_assets_dividends() * self.number_of_securities).sum(axis=1)
s.rename(self.symbol, inplace=True)
return s
@property
def dividend_yield(self) -> pd.Series:
"""
Calculate last twelve months (LTM) dividend yield time series for the portfolio. Time series has monthly values.
Portfolio dividend yield is a weighted sum of the assets dividend yields (adjusted to
the portfolio base currency).
For an asset, the LTM dividend yield is the sum of the trailing twelve months of common dividends per share divided by
the current price per share.
Returns
-------
Series
Portfolio LTM dividend yield monthly time series.
Examples
--------
>>> pf = ok.Portfolio(['T.US', 'XOM.US'], weights=[0.8, 0.2], first_date='2010-01', last_date='2021-01', ccy='USD')
>>> pf.dividend_yield
2010-01 0.013249
2010-02 0.014835
2010-03 0.014257
...
2020-11 0.076132
2020-12 0.074743
2021-01 0.073643
Freq: M, Name: portfolio_8836.PF, Length: 133, dtype: float64
"""
df = self.assets_dividend_yield @ self.weights_ts.T
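# The matrix product gives, for every pair of dates (i, j), the asset yields at date i
# weighted by the portfolio weights at date j; np.diag below keeps only the
# matching-date entries.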
div_yield_series = pd.Series(np.diag(df), index=df.index)
div_yield_series.rename(self.symbol, inplace=True)
return div_yield_series
@property
def real_mean_return(self) -> float:
"""
Calculate annualized real mean return (arithmetic mean) for the rate of return time series.
Real rate of return is adjusted for inflation. Real return is defined if
there is an `inflation=True` option in Portfolio.
Returns
-------
float
Annualized value of the mean for the real rate of return time series.
Examples
--------
>>> pf = ok.Portfolio(['MSFT.US', 'AAPL.US'])
>>> pf.real_mean_return
0.3088967455111862
"""
if not hasattr(self, "inflation"):
raise ValueError(
"Real Return is not defined. Set inflation=True to calculate."
)
infl_mean = Float.annualize_return(self.inflation_ts.mean())
ror_mean = Float.annualize_return(self.ror.mean())
return (1.0 + ror_mean) / (1.0 + infl_mean) - 1.0
@property
def risk_monthly(self) -> float:
"""
Calculate monthly risk (standard deviation of return) for Portfolio.
Monthly risk of portfolio is a standard deviation of the rate of return time series.
Standard deviation (sigma σ) is normalized by N-1.
Returns
-------
float
Standard deviation value of the monthly return time series.
See Also
--------
risk_annual : Calculate annualized risks.
semideviation_monthly : Calculate semideviation monthly values.
semideviation_annual : Calculate semideviation annualized values.
get_var_historic : Calculate historic Value at Risk (VaR).
get_cvar_historic : Calculate historic Conditional Value at Risk (CVaR).
drawdowns : Calculate drawdowns.
Examples
--------
>>> pf = ok.Portfolio(['MSFT.US', 'AAPL.US'])
>>> pf.risk_monthly
0.09415483565833212
"""
return self.ror.std()
@property
def risk_annual(self) -> float:
"""
Calculate annualized risk (return standard deviation) for portfolio.
Returns
-------
float
Annualized standard deviation value of the monthly return time series.
Examples
--------
>>> pf = ok.Portfolio(['MSFT.US', 'AAPL.US'])
>>> pf.risk_annual
0.4374591902169046
"""
return Float.annualize_risk(self.risk_monthly, self.mean_return_monthly)
@property
def semideviation_monthly(self) -> float:
"""
Calculate semi-deviation monthly value for portfolio rate of return time series.
Semi-deviation (Downside risk) is the risk of the return being below the expected return.
Returns
-------
float
Semi-deviation monthly value for portfolio rate of return time series.
Examples
--------
>>> pf = ok.Portfolio(['MSFT.US', 'AAPL.US'])
>>> pf.semideviation_monthly
0.05601433676604449
"""
return Frame.get_semideviation(self.ror)
@property
def semideviation_annual(self) -> float:
"""
Return semideviation annualized value for portfolio rate of return time series.
Semi-deviation (Downside risk) is the risk of the return being below the expected return.
Returns
-------
float
Annualized semi-deviation monthly value for portfolio rate of return time series.
Examples
--------
>>> pf = ok.Portfolio(['MSFT.US', 'AAPL.US'])
>>> pf.semideviation_annual
0.1940393544621248
"""
return Frame.get_semideviation(self.ror) * 12 ** 0.5
def get_var_historic(self, time_frame: int = 12, level=1) -> float:
"""
Calculate historic Value at Risk (VaR) for the portfolio.
The VaR calculates the potential loss of an investment with a given time frame and confidence level.
Loss is a positive number (expressed in cumulative return).
If VaR is negative there are expected gains at this confidence level.
Parameters
----------
time_frame : int, default 12
Time frame for VAR. Default is 12 months.
level : int, default 1
Confidence level in percents. Default value is 1%.
Returns
-------
Float
Historic Value at Risk (VaR) value for the portfolio.
Examples
--------
>>> x = ok.Portfolio(['SP500TR.INDX', 'SP500BDT.INDX'], last_date='2021-01')
>>> x.get_var_historic(time_frame=12, level=1)
0.24030006476701732
"""
# remove inflation column from rolling return
df = self.get_rolling_cumulative_return(window=time_frame).loc[:, [self.symbol]]
return Frame.get_var_historic(df, level).iloc[0]
def get_cvar_historic(self, time_frame: int = 12, level=1) -> float:
"""
Calculate historic Conditional Value at Risk (CVAR, expected shortfall) for the portfolio.
CVaR is the average loss over a specified time period of unlikely scenarios beyond the confidence level.
Loss is a positive number (expressed in cumulative return).
If CVaR is negative there are expected gains at this confidence level.
Parameters
----------
time_frame : int, default 12 (12 months)
level : int, default 1 (1% quantile)
Returns
-------
Float
Historic Conditional Value at Risk (CVAR, expected shortfall) value for the portfolio.
Examples
--------
>>> x = ok.Portfolio(['USDEUR.FX', 'BTC-USD.CC'], last_date='2021-01')
>>> x.get_cvar_historic(time_frame=2, level=1)
0.3566909250442616
"""
# remove inflation column from rolling return
df = self.get_rolling_cumulative_return(window=time_frame).loc[:, [self.symbol]]
return Frame.get_cvar_historic(df, level).iloc[0]
@property
def drawdowns(self) -> pd.Series:
"""
Calculate drawdowns time series for the portfolio.
The drawdown is the percent decline from a previous peak in wealth index.
Returns
-------
Series
Drawdowns time series for the portfolio
"""
return Frame.get_drawdowns(self.ror)
@property
def recovery_period(self) -> int:
"""
Calculate the longest recovery period for the portfolio assets value.
The recovery period (drawdown duration) is the number of months to reach the value of the last maximum.
Returns
-------
Integer
Max recovery period for the portfolio assets value in months.
Notes
-----
If the last maximum value is not recovered, NaN is returned.
The largest recovery period does not necessarily correspond to the max drawdown.
Examples
--------
>>> pf = ok.Portfolio(['SPY.US', 'AGG.US'], weights=[0.5, 0.5])
>>> pf.recovery_period
35
See Also
--------
drawdowns : Calculate drawdowns time series.
"""
if hasattr(self, "inflation"):
w_index = self.wealth_index.drop(columns=[self.inflation])
else:
w_index = self.wealth_index
if isinstance(w_index, pd.DataFrame):
# time series should be a Series to use groupby
w_index = w_index.squeeze()
cummax = w_index.cummax()
s = cummax.pct_change()[1:]
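# A zero change in the running maximum marks a month with no new peak,
# i.e. the wealth index has not yet exceeded its previous maximum (still under water).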
s1 = s.where(s == 0).notnull().astype(int)
s1_1 = s.where(s == 0).isnull().astype(int).cumsum()
s2 = s1.groupby(s1_1).cumsum()
# The max recovery period must not end at the last date of the series (that would mean the drawdown is not yet recovered)
max_period = s2.max() if s2.idxmax().to_timestamp() != self.last_date else np.NAN
return max_period
def describe(self, years: Tuple[int] = (1, 5, 10)) -> pd.DataFrame:
"""
Generate descriptive statistics for the portfolio.
Statistics includes:
- YTD (Year To date) compound return
- CAGR for a given list of periods
- LTM Dividend yield - last twelve months dividend yield
Risk metrics (full available period):
- risk (standard deviation)
- CVAR
- max drawdowns (and dates)
Parameters
----------
years : tuple of (int,), default (1, 5, 10)
List of periods for CAGR.
Returns
-------
DataFrame
Table of descriptive statistics for the portfolio.
See Also
--------
get_cumulative_return : Calculate cumulative return.
get_cagr : Calculate assets Compound Annual Growth Rate (CAGR).
dividend_yield : Calculate dividend yield (LTM).
risk_annual : Return annualized risks (standard deviation).
get_cvar_historic : Calculate historic Conditional Value at Risk (CVAR, expected shortfall).
drawdowns : Calculate drawdowns.
"""
description = pd.DataFrame()
dt0 = self.last_date
df = self._add_inflation()
# YTD return
ytd_return = self.get_cumulative_return(period="YTD")
row = ytd_return.to_dict()
row.update(period="YTD", property="compound return")
description = description.append(row, ignore_index=True)
# CAGR for a list of periods
if self.pl.years >= 1:
for i in years:
dt = Date.subtract_years(dt0, i)
if dt >= self.first_date:
row = self.get_cagr(period=i).to_dict()
else:
row = (
{x: None for x in df.columns}
if hasattr(self, "inflation")
else {self.symbol: None}
)
row.update(period=f"{i} years", property="CAGR")
description = description.append(row, ignore_index=True)
# CAGR for full period
row = self.get_cagr(period=None).to_dict()
row.update(period=self._pl_txt, property="CAGR",)
description = description.append(row, ignore_index=True)
# Dividend Yield
value = self.dividend_yield.iloc[-1]
row = {self.symbol: value}
row.update(period="LTM", property=f"Dividend yield",)
description = description.append(row, ignore_index=True)
# risk (standard deviation)
row = {self.symbol: self.risk_annual}
row.update(
period=self._pl_txt, property="Risk"
)
description = description.append(row, ignore_index=True)
# CVAR
if self.pl.years >= 1:
row = {self.symbol: self.get_cvar_historic()}
row.update(
period=self._pl_txt,
property="CVAR",
)
description = description.append(row, ignore_index=True)
# max drawdowns
row = {self.symbol: self.drawdowns.min()}
row.update(
period=self._pl_txt,
property="Max drawdown",
)
description = description.append(row, ignore_index=True)
# max drawdowns dates
row = {self.symbol: self.drawdowns.idxmin()}
row.update(
period=self._pl_txt,
property="Max drawdown date",
)
description = description.append(row, ignore_index=True)
if hasattr(self, "inflation"):
description.rename(columns={self.inflation: "inflation"}, inplace=True)
description = Frame.change_columns_order(
description, ["property", "period", self.symbol]
)
return description
@property
def table(self) -> pd.DataFrame:
"""
Return security name - ticker - weight table.
Returns
-------
DataFrame
Security name - ticker - weight table.
Examples
--------
>>> pf = ok.Portfolio(['MSFT.US', 'AAPL.US'])
>>> pf.table
asset name ticker weights
0 Microsoft Corporation MSFT.US 0.5
1 Apple Inc AAPL.US 0.5
"""
x = pd.DataFrame(
data={
"asset name": list(self.names.values()),
"ticker": list(self.names.keys()),
}
)
x["weights"] = self.weights
return x
# Forecasting
def _test_forecast_period(self, years):
max_period_years = round(self.period_length / 2)
if max_period_years < 1:
raise ValueError(
f"Time series does not have enough history to forecast. "
f"Period length is {self.period_length:.2f} years. At least 2 years are required."
)
if not isinstance(years, int) or years == 0:
raise ValueError("years must be an integer number (not equal to zero).")
if years > max_period_years:
raise ValueError(
f"Forecast period {years} years is not credible. "
f"It should not exceed 1/2 of portfolio history period length {self.period_length / 2} years"
)
def percentile_inverse(
self,
distr: str = "norm",
years: int = 1,
score: float = 0,
n: Optional[int] = None,
) -> float:
"""
Compute the percentile rank of a score (CAGR value) in a given time frame.
If percentile_inverse of, for example, 0% (CAGR value) is equal to 8% for a 1-year time frame,
it means that 8% of the CAGR values in the distribution are negative over 1-year periods. In other words,
the probability of getting a negative result after 1 year of investment is 8%.
Args:
distr: norm, lognorm, hist - distribution type (normal or lognormal) or hist for CAGR array from history
years: period length when CAGR is calculated
score: score that is compared to the elements in CAGR array.
n: number of random time series (for 'norm' or 'lognorm' only)
Returns:
Percentile-position of score (0-100) relative to distr.
"""
if distr == "hist":
cagr_distr = self.get_rolling_cagr(years)
elif distr in ["norm", "lognorm"]:
if not n:
n = 1000
cagr_distr = self._get_monte_carlo_cagr_distribution(
distr=distr, years=years, n=n
)
else:
raise ValueError('distr should be one of "norm", "lognorm", "hist".')
return scipy.stats.percentileofscore(cagr_distr, score, kind="rank")
def percentile_from_history(
self, years: int, percentiles: List[int] = [10, 50, 90]
) -> pd.DataFrame:
"""
Calculate given percentiles for portfolio CAGR (annualized rolling returns) distribution from the historical data.
Each percentile is calculated for a period range from 1 year to 'years'.
years - max window size for rolling CAGR (limited to half of the history period length).
percentiles - list of percentiles to be calculated
"""
self._test_forecast_period(years)
period_range = range(1, years + 1)
returns_dict = {}
for percentile in percentiles:
percentile_returns_list = [
self.get_rolling_cagr(years * 12).loc[:, self.symbol].quantile(percentile / 100)
for years in period_range
]
returns_dict.update({percentile: percentile_returns_list})
df = pd.DataFrame(returns_dict, index=list(period_range))
df.index.rename("years", inplace=True)
return df
def forecast_wealth_history(
self, years: int = 1, percentiles: List[int] = [10, 50, 90]
) -> pd.DataFrame:
"""
Compute accumulated wealth for each CAGR derived by 'percentile_from_history' method.
CAGRs are taken from the historical data.
Initial portfolio wealth is adjusted to the last known historical value (from wealth_index). It is useful
for a chart with historical wealth index and forecasted values.
Args:
years:
percentiles:
Returns:
Dataframe of percentiles for period range from 1 to 'years'
"""
first_value = self.wealth_index[self.symbol].values[-1]
percentile_returns = self.percentile_from_history(
years=years, percentiles=percentiles
)
return first_value * (percentile_returns + 1.0).pow(
percentile_returns.index.values, axis=0
)
def _forecast_preparation(self, years: int):
self._test_forecast_period(years)
period_months = years * _MONTHS_PER_YEAR
# make periods index where the shape is max_period
start_period = self.last_date.to_period("M")
end_period = self.last_date.to_period("M") + period_months - 1
ts_index = pd.period_range(start_period, end_period, freq="M")
return period_months, ts_index
def forecast_monte_carlo_returns(
self, distr: str = "norm", years: int = 1, n: int = 100
) -> pd.DataFrame:
"""
Generates N random monthly returns time series with normal or lognormal distributions.
Forecast period should not exceed 1/2 of portfolio history period length.
"""
period_months, ts_index = self._forecast_preparation(years)
# random returns
if distr == "norm":
random_returns = np.random.normal(
self.mean_return_monthly, self.risk_monthly, (period_months, n)
)
elif distr == "lognorm":
std, loc, scale = scipy.stats.lognorm.fit(self.ror)
random_returns = scipy.stats.lognorm(std, loc=loc, scale=scale).rvs(
size=[period_months, n]
)
else:
raise ValueError('distr should be "norm" (default) or "lognorm".')
return | pd.DataFrame(data=random_returns, index=ts_index) | pandas.DataFrame |
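# --- Illustrative standalone sketch (not part of the original library) ---
# Mirrors the Monte Carlo step above: draw n random monthly return paths from a
# normal distribution and wrap them in a DataFrame indexed by a monthly
# PeriodIndex. The mean/risk figures below are made-up illustration values.
import numpy as np
import pandas as pd

def simulate_normal_returns(mean_monthly, risk_monthly, months, n_paths, start="2022-01"):
    ts_index = pd.period_range(start, periods=months, freq="M")
    random_returns = np.random.normal(mean_monthly, risk_monthly, (months, n_paths))
    return pd.DataFrame(data=random_returns, index=ts_index)

if __name__ == "__main__":
    print(simulate_normal_returns(0.007, 0.04, months=12, n_paths=3).head())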
import time
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from performance_anomaly_detection.training import utils
def prepare_data_for_training_last_n_fold(X_train, X_test, y_train, y_test, original_y, original_y_test,
exog_train=None, exog_test=None):
if exog_train is not None and exog_test is not None:
return (X_train, X_test), (y_train, y_test), (exog_train, exog_test), (original_y, original_y_test)
return (X_train, X_test), (y_train, y_test), (original_y, original_y_test)
def train_scikit(data,
columns,
model,
scalers,
test_steps=1,
save_results=False,
results_output="out/",
label_data=None):
start = time.time()
global_scores, test_scores, train_rsq, test_rsq, predictions, real_ys = [], [], [], [], [], []
scaler = None
for ((X_train, X_test), (y_train, y_test), (original_y, original_y_test)), col, i in zip(data, columns,
range(0, len(columns))):
if scalers is not None:
scaler = scalers[i]
print("Training for ", col)
step = int(len(X_test) / test_steps)
y_hat_test = []
for s in range(test_steps):
X_train_s = get_next_step_data(X_train, s, step)
y_train_s = get_next_step_data(y_train, s, step)
X_test_s = X_test[s * step: (s + 1) * step] if s < (test_steps - 1) else X_test[s * step:]
model.fit(X_train_s, y_train_s)
end = time.time()
print("Execution time ", end - start)
if s == 0:
y_hat_train = model.predict(X_train_s)
test_predictions = model.predict(X_test_s)
y_hat_test.append(np.array(test_predictions).reshape(len(test_predictions), 1))
y_hat_test = np.concatenate(y_hat_test)
y_hat = np.concatenate((np.array(y_hat_train).reshape(len(y_hat_train), 1), y_hat_test))
if scaler is not None:
y_hat = scaler.inverse_transform(y_hat)
y_hat_test = scaler.inverse_transform(y_hat_test)
global_scores.append(mean_squared_error(original_y, y_hat))
test_scores.append(mean_squared_error(original_y_test, y_hat_test))
train_rsq.append(r2_score(original_y, y_hat))
test_rsq.append(r2_score(original_y_test, y_hat_test))
if save_results:
predictions.append(pd.Series(y_hat.ravel(), name=col))
real_ys.append(pd.Series(original_y, name=col))
#utils.plot_results(y=original_y, y_hat=y_hat)
rmse_scores = utils.calculate_rmse(global_scores, test_scores)
r2_scores = utils.calculate_r2(train_rsq, test_rsq)
print(rmse_scores)
print(r2_scores)
if save_results:
utils.save_results(predictions=predictions, real_ys=real_ys, results_output=results_output,
label_data=label_data)
return rmse_scores, r2_scores
def train_nn(data,
columns,
callbacks,
dev_size,
scalers,
optimizer,
batch_size,
model,
epochs=5,
test_steps=1,
verbose=False,
loss="mean_squared_error",
save_results=False,
results_output="out/",
use_exog=True,
label_data=None):
start = time.time()
scaler = None
global_scores, test_scores, train_rsq, test_rsq, predictions, real_ys = [], [], [], [], [], []
model.compile(loss=loss, optimizer=optimizer)
for values_to_unpack, col, i in zip(data, columns, range(0, len(columns))):
if scalers is not None:
scaler = scalers[i]
if use_exog:
(X_train, X_test), (y_train, y_test), (exog_train, exog_test), (
original_y, original_y_test) = values_to_unpack
else:
(X_train, X_test), (y_train, y_test), (original_y, original_y_test) = values_to_unpack
print("Training for ", col)
step = int(len(X_test) / test_steps)
y_hat_test = []
for s in range(test_steps):
X_train_s = get_next_step_data(X_train, s, step)
y_train_s = get_next_step_data(y_train, s, step)
X_test_s = X_test[s * step: (s + 1) * step] if s < (test_steps - 1) else X_test[s * step:]
if use_exog:
exog_train_s = get_next_step_data(exog_train, s, step)
exog_test_s = exog_test[s * step: (s + 1) * step] if s < (test_steps - 1) else exog_test[s * step:]
train_inputs = [X_train_s, exog_train_s] if use_exog else [X_train_s]
test_inputs = [X_test_s, exog_test_s] if use_exog else [X_test_s]
history = model.fit(train_inputs, y_train_s, validation_split=dev_size, epochs=epochs,
batch_size=batch_size, verbose=verbose, callbacks=callbacks)
end = time.time()
print("Execution time ", end - start)
if s == 0: y_hat_train = model.predict(train_inputs)
test_predictions = model.predict(test_inputs)
y_hat_test.append(np.array(test_predictions).reshape(len(test_predictions), 1))
y_hat_test = np.concatenate(y_hat_test)
y_hat = np.concatenate((np.array(y_hat_train).reshape(len(y_hat_train), 1), y_hat_test), axis=0)
if scaler is not None: y_hat = scaler.inverse_transform(y_hat)
if scaler is not None: y_hat_test = scaler.inverse_transform(y_hat_test)
global_scores.append(mean_squared_error(original_y, y_hat))
test_scores.append(mean_squared_error(original_y_test, y_hat_test))
train_rsq.append(r2_score(original_y, y_hat))
test_rsq.append(r2_score(original_y_test, y_hat_test))
if save_results:
predictions.append(pd.Series(y_hat.ravel(), name=col))
real_ys.append( | pd.Series(original_y, name=col) | pandas.Series |
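# --- Illustrative sketch (not part of the original module) ---
# Both training loops above inverse-transform the stacked predictions with a
# per-column scaler before computing MSE/R^2. A minimal round-trip of that
# pattern with scikit-learn looks like this (the MinMaxScaler choice is an
# assumption; the original scalers are created elsewhere).
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score

y = np.array([10.0, 12.0, 11.5, 13.0]).reshape(-1, 1)
scaler = MinMaxScaler().fit(y)
y_hat_scaled = scaler.transform(y) + 0.05          # stand-in for model output
y_hat = scaler.inverse_transform(y_hat_scaled)     # back to original units
print(mean_squared_error(y, y_hat), r2_score(y, y_hat))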
# coding: utf-8
# # Digit Recognition
#
# ## 1. Introduction
#
# In this analysis, the handwritten digits are identified using support vector machines and radial basis functions.
#
# ### 1.1 Libraries
#
# The essential libraries used here are numpy, matplotlib, and scikit-learn. For convenience, pandas and IPython.display are used for displaying tables, and tqdm is used for progress bars.
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from itertools import product
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score, cross_val_predict, ShuffleSplit, KFold
from tqdm import tqdm
from IPython.display import display, Math, Latex, HTML
get_ipython().magic('matplotlib inline')
np.set_printoptions(precision=4,threshold=200)
tqdm_bar_fmt='{percentage:3.0f}%|{bar}|'
# ### 1.2 Dataset
#
# The US Postal Service Zip Code dataset is used, which contains handwritten digits zero to nine. The data has been preprocessed, whereby features of intensity and symmetry are extracted.
# In[2]:
def download_data():
train_url = "http://www.amlbook.com/data/zip/features.train"
test_url = "http://www.amlbook.com/data/zip/features.test"
column_names = ['digit','intensity','symmetry']
train = pd.read_table(train_url,names=column_names,header=None,delim_whitespace=True)
test = pd.read_table(test_url,names=column_names,header=None,delim_whitespace=True)
train.digit = train.digit.astype(int)
test.digit = test.digit.astype(int)
return train,test
def process_data(train,test):
X_train = train.iloc[:,1:].values
y_train = train.iloc[:,0].values
X_test = test.iloc[:,1:].values
y_test = test.iloc[:,0].values
return X_train,y_train,X_test,y_test
# In[3]:
train,test = download_data()
X_train,y_train,X_test,y_test = process_data(train,test)
# ## 2. Support Vector Machines for Digit Recognition
# ### 2.1 Polynomial Kernels
#
# We wish to implement the following polynomial kernel for our support vector machine:
#
# $$K\left(\mathbf{x_n,x_m}\right) = \left(1+\mathbf{x_n^Tx_m}\right)^Q$$
#
# This is implemented in scikit-learn in the subroutine [sklearn.svm.SVC](http://scikit-learn.org/stable/modules/svm.html), where the kernel function takes the form:
#
# $$\left(\gamma \langle x,x' \rangle + r\right)^d$$
#
# where $d$ is specified by the keyword `degree`, and $r$ by `coef0`.
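# A quick sanity check (added here for illustration; it is not part of the original notebook): with `gamma=1` and `coef0=1`, scikit-learn's polynomial kernel reduces to $\left(1+\mathbf{x_n^Tx_m}\right)^Q$, matching the kernel defined above.
# In[ ]:
import numpy as np
from sklearn.metrics.pairwise import polynomial_kernel
xa = np.array([[0.3, -0.2]])
xb = np.array([[0.1, 0.4]])
Q_check = 2
manual = (1.0 + xa @ xb.T) ** Q_check
sk_val = polynomial_kernel(xa, xb, degree=Q_check, gamma=1.0, coef0=1.0)
print(np.allclose(manual, sk_val))  # expected: True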
# ### 2.1.1 One vs Rest Classification
# In the following subroutine, the data is split into "one-vs-rest", where $y=1$ corresponds to a match to the digit, and $y=0$ corresponds to all the other digits. The training step is implemented in the call to `clf.fit()`.
# In[4]:
def get_misclassification_ovr(X_train,y_train,X_test,y_test,digit,
Q=2,r=1.0,C=0.01,kernel='poly',verbose=False):
clf = SVC(C=C, kernel=kernel, degree=Q, coef0 = r, gamma = 1.0,
decision_function_shape='ovr', verbose=False)
y_in = (y_train==digit).astype(int)
y_out = (y_test==digit).astype(int)
model = clf.fit(X_train,y_in) # print(model)
E_in = np.mean(y_in != clf.predict(X_train))
E_out = np.mean(y_out != clf.predict(X_test))
n_support_vectors = len(clf.support_vectors_)
if verbose is True:
print()
print("Q = {}, C = {}: Support vectors: {}".format(Q, C, n_support_vectors))
print("{} vs all: E_in = {}".format(digit,E_in))
print("{} vs all: E_out = {}".format(digit,E_out))
return E_in,E_out,n_support_vectors
# The following code trains on the data for the cases: 0 vs all, 1 vs all, ..., 9 vs all. For each of the digits, 0 to 9, the errors $E_{in}, E_{out}$ and the number of support vectors are recorded and stored in a pandas dataframe.
# In[5]:
results = pd.DataFrame()
i=0
for digit in tqdm(range(10),bar_format=tqdm_bar_fmt):
ei, eo, n = get_misclassification_ovr(X_train,y_train,X_test,y_test,digit)
df = pd.DataFrame({'digit': digit, 'E_in': ei, 'E_out': eo, 'n': n}, index=[i])
results = results.append(df)
i += 1
# In[6]:
display(HTML(results[['digit','E_in','E_out','n']].iloc[::2].to_html(index=False)))
# In[7]:
display(HTML(results[['digit','E_in','E_out','n']].iloc[1::2].to_html(index=False)))
# In[8]:
from tabulate import tabulate
print(tabulate(results, headers='keys', tablefmt='simple'))
# ### 2.1.2 One vs One Classification
#
# One vs one classification makes better use of the data, but is more computationally expensive. The following subroutine splits the data so that $y=1$ corresponds to the first digit and $y=0$ to the second digit. The rows of data corresponding to all other digits are removed.
# In[9]:
def get_misclassification_ovo(X_train,y_train,X_test,y_test,digit1,digit2,
Q=2,r=1.0,C=0.01,kernel='poly'):
clf = SVC(C=C, kernel=kernel, degree=Q, coef0 = r, gamma = 1.0,
decision_function_shape='ovo', verbose=False)
select_in = np.logical_or(y_train==digit1,y_train==digit2)
y_in = (y_train[select_in]==digit1).astype(int)
X_in = X_train[select_in]
select_out = np.logical_or(y_test==digit1,y_test==digit2)
y_out = (y_test[select_out]==digit1).astype(int)
X_out = X_test[select_out]
model = clf.fit(X_in,y_in)
E_in = np.mean(y_in != clf.predict(X_in))
E_out = np.mean(y_out != clf.predict(X_out))
n_support_vectors = len(clf.support_vectors_)
return E_in,E_out,n_support_vectors
# In the following code, a 1-vs-5 classifier is tested for $Q=2,5$ and $C=0.0001,0.001,0.01,1$.
# In[10]:
C_arr = [0.0001, 0.001, 0.01, 1]
Q_arr = [2, 5]
CQ_arr = list(product(C_arr,Q_arr))
results = pd.DataFrame()
i=0
for C, Q in tqdm(CQ_arr,bar_format=tqdm_bar_fmt):
ei, eo, n = get_misclassification_ovo(X_train,y_train,X_test,y_test,
digit1=1,digit2=5,Q=Q,r=1.0,C=C)
df = | pd.DataFrame({'C': C, 'Q': Q, 'E_in': ei, 'E_out': eo, 'n': n}, index=[i]) | pandas.DataFrame |
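# The imports at the top include `cross_val_score` and `KFold`; a natural next step (added here for illustration, not part of the original notebook) is to estimate the cross-validation error for each $C$. Shown on small synthetic 2-feature data so the cell runs quickly.
# In[ ]:
import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score, KFold
rng = np.random.RandomState(0)
X_demo = rng.randn(200, 2)
y_demo = (X_demo[:, 0] + X_demo[:, 1] > 0).astype(int)
for C in [0.01, 0.1, 1]:
    clf = SVC(C=C, kernel='poly', degree=2, coef0=1.0, gamma=1.0)
    scores = cross_val_score(clf, X_demo, y_demo,
                             cv=KFold(n_splits=10, shuffle=True, random_state=1))
    print(C, 1 - scores.mean())  # estimated cross-validation error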
# coding: utf-8
import pymysql
import numpy as np
import pandas as pd
import csv
import xgboost as xgb
from numpy import loadtxt
from xgboost import XGBClassifier
from xgboost import plot_importance
from xgboost import plot_tree
# Other python files that are needed
import feature
###################### DB connect
db = pymysql.connect(host="", port=3306, user="", passwd="",db="")
### train_set - 뼈대
def make_train_set():
SQL = "SELECT order_id, user_id, order_dow, order_hour_of_day FROM orders"
orders_df = pd.read_sql(SQL, db)
SQL = "SELECT order_id FROM order_products__train"
train_df = pd.read_sql(SQL, db)
print("make train set - basic start")
# ------------------ find the user matching each train order_id, then check the products that user bought recently
# remove duplicate order_id values >> looks like counting, but it just removes duplicates
train_df = train_df.groupby("order_id").aggregate("count").reset_index()
# find the user_id matching each order_id and merge
train_df = pd.merge(train_df, orders_df, how="inner", on="order_id")
# merge with prior
# list of products matching each user and order_id
train_df = pd.merge(train_df, feature.latest_order(), how="inner", on="user_id")
# take only product_id, aisle_id (sub-category) and department_id (top-level category) from the products table and merge
# products_df = pd.read_csv( "products.csv", usecols=["product_id", "aisle_id", "department_id"])
SQL = "SELECT product_id, aisle_id, department_id FROM products"
products_df = pd.read_sql(SQL, db)
train_df = pd.merge(train_df, products_df, how="inner", on="product_id")
del products_df, orders_df, SQL
print("make train set - basic finish")
return train_df
'''
Attach the newly created features here.
Many features were built, but only the one with the highest accuracy is used.
'''
def train_result():
train_x = make_train_set()
train_x = pd.merge(train_x, feature.order_ratio_bychance(), how="left", on=["user_id", "product_id"])
return train_x
### train answer : train_y
def make_answer(train_x):
SQL = "SELECT order_id, user_id FROM orders"
orders_df = pd.read_sql(SQL, db)
SQL = "SELECT order_id, product_id, reordered FROM order_products__train"
train_df = pd.read_sql(SQL, db)
print ("train_y start")
answer = pd.merge(train_df, orders_df, how="inner", on="order_id")
del orders_df, train_df
# drop order_id
answer = answer[["user_id", "product_id", "reordered"]]
# merge train with the other information >>>> should train_result() be passed in as the train_x parameter?
train_df = pd.merge(train_x, answer, how="left", on=["user_id", "product_id"])
del answer
# replace NaN reordered values with 0
train_df["reordered"].fillna(0, inplace=True)
train_y = train_df.reordered.values
print("train_y finish")
return train_y
### TEST BASIC - test skeleton
def make_test_set():
SQL = "SELECT order_id FROM submission"
test_df = | pd.read_sql(SQL, db) | pandas.read_sql |
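# --- Illustrative sketch (not part of the original script) ---
# The pipeline above repeatedly pairs pd.read_sql with pd.merge. The same
# pattern is shown here against an in-memory SQLite database as a stand-in for
# the MySQL server; the table contents are made up.
import sqlite3
import pandas as pd

con = sqlite3.connect(":memory:")
pd.DataFrame({"order_id": [1, 2], "user_id": [10, 20]}).to_sql("orders", con, index=False)
pd.DataFrame({"order_id": [1, 1, 2], "product_id": [5, 7, 5]}).to_sql(
    "order_products__train", con, index=False)
orders = pd.read_sql("SELECT order_id, user_id FROM orders", con)
lines = pd.read_sql("SELECT order_id, product_id FROM order_products__train", con)
print(pd.merge(lines, orders, how="inner", on="order_id"))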
#!/usr/bin/env python
import numpy as np
import os
import pandas as pd
import sys
from glob import glob
from natsort import natsorted
from scipy.signal import savgol_filter
from scipy.special import gamma
def tripower_volatility(x):
"""
Realized tripower volatility (e.g. Barndorff-Nielsen, Shephard, and Winkel (2006))
"""
x = pd.Series(x)
xi = 0.5 * (gamma(5 / 6) / gamma(1 / 2)) ** -3
z = (x.abs() ** (2 / 3) * x.shift(1).abs() ** (2 / 3) * x.shift(-1).abs() ** (2 / 3)).bfill().ffill()
return xi * z.sum()
def shortest_half(x):
"""
Shortest-half scale estimator (Rousseeuw and Leroy, 1998)
"""
xs = np.sort(x)
l = x.size
h = int(np.floor(l / 2) + 1)
if l % 2 == 0:
sh = 0.7413 * np.min(xs[h - 1:] - xs[:h - 1])
else:
sh = 0.7413 * np.min(xs[h - 1:] - xs[:h])
return sh
def time_to_ssm(x):
"""
Transforms a datetime index into the numerical date (YYYYMMDD) and the seconds since midnight.
"""
x = pd.DataFrame(x)
date = x.index.map(lambda d: d.year * 10000 + d.month * 100 + d.day).values
ssm = x.index.map(lambda t: t.hour * 3600 + t.minute * 60 + t.second + t.microsecond / 1e6).values
x.insert(0, "Date", date)
x.insert(1, "SSM", ssm)
x.reset_index(drop=True)
return x
def resample_prices(intensity, data, n_trades):
T = intensity.size # Trading seconds per day
eps = 0.000001 if T == 86400 else 0 # Ensure that the days do not overlap
if intensity.isnull().any():
intensity.interpolate(method="pchip", inplace=True, limit_direction="both", limit=T)
intensity[intensity < 0] = 0 # interpolated values could be negative
Q = intensity.cumsum() / intensity.sum() * T
Q_inv = pd.Series(np.concatenate((np.array([0]),
np.interp(np.arange(1, T), xp=Q.values, fp=Q.index),
np.array([T-eps]))), index=range(T+1))
idx = data.index[0] + pd.to_timedelta(Q_inv, unit="s")
reindexed_data = data.reindex(idx, method="ffill")
resampled_data = reindexed_data.iloc[np.linspace(0, reindexed_data.size - 1, num=n_trades).round().astype(int)]
return Q, resampled_data
def process_data(asset, avg_dur, path):
# region Set some variables
T = 86400 if asset in ["EURGBP", "EURUSD"] else 23400
n_trades = int(np.ceil(T / avg_dur)) + 1
file_list = natsorted(glob(path + asset + "/" + "h5" + "/**"))
dt = pd.to_datetime([os.path.basename(f).replace(".h5", "") for f in file_list])
total_trades_per_second = pd.Series(0, index=range(1, T + 1))
# Empty pd.Series for the resampled prices
cts = pd.Series()
tts = | pd.Series() | pandas.Series |
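# --- Illustrative demo (not part of the original module; it relies on the
# estimators defined above in this file) ---
# Quick check of tripower_volatility and shortest_half on synthetic Gaussian
# one-minute returns.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    r = rng.normal(0.0, 0.01, size=390)  # roughly one trading day of 1-minute returns
    print("tripower volatility:", tripower_volatility(r))
    print("shortest-half scale:", shortest_half(r))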
# -*- coding: utf-8 -*-
"""
This module is EXPERIMENTAL, which means that tests are missing.
The reason is that the coastdat2 dataset is deprecated and will be replaced by
the OpenFred dataset from Helmholtz-Zentrum Geesthacht. It should work though.
This module is designed for the use with the coastdat2 weather data set
of the Helmholtz-Zentrum Geesthacht.
A description of the coastdat2 data set can be found here:
https://www.earth-syst-sci-data.net/6/147/2014/
SPDX-FileCopyrightText: 2016-2019 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__copyright__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
# Python libraries
import os
import datetime
import logging
from collections import namedtuple
import calendar
# External libraries
import requests
import pandas as pd
import pvlib
from shapely.geometry import Point
from windpowerlib.wind_turbine import WindTurbine
# Internal modules
from reegis import tools
from reegis import feedin
from reegis import config as cfg
from reegis import powerplants as powerplants
from reegis import geometries
from reegis import bmwi
def download_coastdat_data(filename=None, year=None, url=None,
test_only=False, overwrite=True):
"""
Download coastdat data set from internet source.
Parameters
----------
filename : str
Full path with the filename, where the downloaded file will be stored.
year : int or None
Year of the weather data set. If a url is passed this value will be
ignored because it is used to create the default url.
url : str or None
Your own url can be used if the default url does not work and you have found an
alternative valid url.
test_only : bool
If True the the url is tested but the file will not be downloaded
(default: False).
overwrite : bool
If True the file will be downloaded even if it already exist.
(default: True)
Returns
-------
str or None : If the url is valid the filename is returned otherwise None.
Examples
--------
>>> download_coastdat_data(year=2014, test_only=True)
'coastDat2_de_2014.h5'
>>> print(download_coastdat_data(url='https://osf.io/url', test_only=True))
None
>>> download_coastdat_data(filename='w14.hd5', year=2014) # doctest: +SKIP
"""
if url is None:
url_ids = cfg.get_dict("coastdat_url_id")
url_id = url_ids.get(str(year), None)
if url_id is not None:
url = cfg.get("coastdat", "basic_url").format(url_id=url_id)
if url is not None and not test_only:
response = requests.get(url, stream=True)
if response.status_code == 200:
msg = "Downloading the coastdat2 file of {0} from {1} ..."
logging.info(msg.format(year, url))
if filename is None:
headers = response.headers["Content-Disposition"]
filename = (
headers.split("; ")[1].split("=")[1].replace('"', "")
)
tools.download_file(filename, url, overwrite=overwrite)
return filename
else:
raise ValueError("URL not valid: {0}".format(url))
elif url is not None and test_only:
response = requests.get(url, stream=True)
if response.status_code == 200:
headers = response.headers["Content-Disposition"]
filename = headers.split("; ")[1].split("=")[1].replace('"', "")
else:
filename = None
return filename
else:
raise ValueError("No URL found for {0}".format(year))
def fetch_id_by_coordinates(latitude, longitude):
"""
Get nearest weather data set to a given location.
Parameters
----------
latitude : float
longitude : float
Returns
-------
int : coastdat id
Examples
--------
>>> fetch_id_by_coordinates(53.655119, 11.181475)
1132101
"""
coastdat_polygons = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
location = Point(longitude, latitude)
cid = coastdat_polygons[coastdat_polygons.contains(location)].index
if len(cid) == 0:
msg = "No id found for latitude {0} and longitude {1}."
logging.warning(msg.format(latitude, longitude))
return None
elif len(cid) == 1:
return cid[0]
def fetch_data_coordinates_by_id(coastdat_id):
"""
Returns the coordinates of the weather data set.
Parameters
----------
coastdat_id : int or str
ID of the coastdat weather data set
Returns
-------
namedtuple : Fields are latitude and longitude
Examples
--------
>>> location=fetch_data_coordinates_by_id(1132101)
>>> round(location.latitude, 3)
53.692
>>> round(location.longitude, 3)
11.351
"""
coord = namedtuple("weather_location", "latitude, longitude")
coastdat_polygons = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
c = coastdat_polygons.loc[int(coastdat_id)].geometry.centroid
return coord(latitude=c.y, longitude=c.x)
def fetch_coastdat_weather(year, coastdat_id):
"""
Fetch one coastdat weather data set.
Parameters
----------
year : int
Year of the weather data set
coastdat_id : numeric
ID of the coastdat data set.
Returns
-------
pd.DataFrame : Weather data set.
Examples
--------
>>> coastdat_id=fetch_id_by_coordinates(53.655119, 11.181475)
>>> fetch_coastdat_weather(2014, coastdat_id)['v_wind'].mean().round(2)
4.39
"""
weather_file_name = os.path.join(
cfg.get("paths", "coastdat"),
cfg.get("coastdat", "file_pattern").format(year=year),
)
if not os.path.isfile(weather_file_name):
download_coastdat_data(filename=weather_file_name, year=year)
key = "/A{0}".format(int(coastdat_id))
return pd.DataFrame(pd.read_hdf(weather_file_name, key))
def adapt_coastdat_weather_to_pvlib(weather, loc):
"""
Adapt the coastdat weather data sets to the needs of the pvlib.
Parameters
----------
weather : pandas.DataFrame
Coastdat2 weather data set.
loc : pvlib.location.Location
The coordinates of the weather data point.
Returns
-------
pandas.DataFrame : Adapted weather data set.
Examples
--------
>>> cd_id=1132101
>>> cd_weather=fetch_coastdat_weather(2014, cd_id)
>>> c=fetch_data_coordinates_by_id(cd_id)
>>> location=pvlib.location.Location(**getattr(c, '_asdict')())
>>> pv_weather=adapt_coastdat_weather_to_pvlib(cd_weather, location)
>>> 'ghi' in cd_weather.columns
False
>>> 'ghi' in pv_weather.columns
True
"""
w = pd.DataFrame(weather.copy())
w["temp_air"] = w.temp_air - 273.15
w["ghi"] = w.dirhi + w.dhi
clearskydni = loc.get_clearsky(w.index).dni
w["dni"] = pvlib.irradiance.dni(
w["ghi"],
w["dhi"],
pvlib.solarposition.get_solarposition(
w.index, loc.latitude, loc.longitude
).zenith,
clearsky_dni=clearskydni,
)
return w
def adapt_coastdat_weather_to_windpowerlib(weather, data_height):
"""
Adapt the coastdat weather data sets to the needs of the windpowerlib.
Parameters
----------
weather : pandas.DataFrame
Coastdat2 weather data set.
data_height : dict
The data height for each weather data column.
Returns
-------
pandas.DataFrame : Adapted weather data set.
Examples
--------
>>> cd_id=1132101
>>> cd_weather=fetch_coastdat_weather(2014, cd_id)
>>> data_height=cfg.get_dict('coastdat_data_height')
>>> wind_weather=adapt_coastdat_weather_to_windpowerlib(
... cd_weather, data_height)
>>> cd_weather.columns.nlevels
1
>>> wind_weather.columns.nlevels
2
"""
weather = pd.DataFrame(weather.copy())
cols = {
"v_wind": "wind_speed",
"z0": "roughness_length",
"temp_air": "temperature",
}
weather.rename(columns=cols, inplace=True)
dh = [(key, data_height[key]) for key in weather.columns]
weather.columns = pd.MultiIndex.from_tuples(dh)
return weather
def normalised_feedin_for_each_data_set(
year, wind=True, solar=True, overwrite=False
):
"""
Loop over all weather data sets (regions) and calculate a normalised time
series for each data set with the given parameters of the power plants.
This file could be more elegant and shorter but it will be rewritten soon
with the new feedinlib features.
Parameters
----------
year : int
The year of the weather data set to use.
wind : boolean
Set to True if you want to create wind feed-in time series.
solar : boolean
Set to True if you want to create solar feed-in time series.
overwrite : boolean
Set to True to recalculate and overwrite existing feed-in series files.
Returns
-------
"""
# Get coordinates of the coastdat data points.
data_points = pd.read_csv(
os.path.join(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_centroid"),
),
index_col="gid",
)
pv_sets = None
wind_sets = None
# Open coastdat-weather data hdf5 file for the given year or try to
# download it if the file is not found.
weather_file_name = os.path.join(
cfg.get("paths", "coastdat"),
cfg.get("coastdat", "file_pattern").format(year=year),
)
if not os.path.isfile(weather_file_name):
download_coastdat_data(year=year, filename=weather_file_name)
weather = pd.HDFStore(weather_file_name, mode="r")
# Fetch coastdat data heights from ini file.
data_height = cfg.get_dict("coastdat_data_height")
# Create basic file and path pattern for the resulting files
coastdat_path = os.path.join(cfg.get("paths_pattern", "coastdat"))
feedin_file = os.path.join(
coastdat_path, cfg.get("feedin", "file_pattern")
)
# Fetch coastdat region-keys from weather file.
key_file_path = coastdat_path.format(year="", type="")[:-2]
key_file = os.path.join(key_file_path, "coastdat_keys.csv")
if not os.path.isfile(key_file):
coastdat_keys = weather.keys()
if not os.path.isdir(key_file_path):
os.makedirs(key_file_path)
pd.Series(coastdat_keys).to_csv(key_file)
else:
coastdat_keys = pd.read_csv(
key_file, index_col=[0], squeeze=True, header=None
)
txt_create = "Creating normalised {0} feedin time series for {1}."
hdf = {"wind": {}, "solar": {}}
if solar:
logging.info(txt_create.format("solar", year))
# Add directory if not present
os.makedirs(
coastdat_path.format(year=year, type="solar"), exist_ok=True
)
# Create the pv-sets defined in the solar.ini
pv_sets = feedin.create_pvlib_sets()
# Open a file for each main set (subsets are stored in columns)
for pv_key, pv_set in pv_sets.items():
filename = feedin_file.format(
type="solar", year=year, set_name=pv_key
)
if not os.path.isfile(filename) or overwrite:
hdf["solar"][pv_key] = pd.HDFStore(filename, mode="w")
if wind:
logging.info(txt_create.format("wind", year))
# Add directory if not present
os.makedirs(
coastdat_path.format(year=year, type="wind"), exist_ok=True
)
# Create the pv-sets defined in the wind.ini
wind_sets = feedin.create_windpowerlib_sets()
# Open a file for each main set (subsets are stored in columns)
for wind_key, wind_set in wind_sets.items():
for subset_key, subset in wind_set.items():
wind_sets[wind_key][subset_key] = WindTurbine(**subset)
filename = feedin_file.format(
type="wind", year=year, set_name=wind_key
)
if not os.path.isfile(filename) or overwrite:
hdf["wind"][wind_key] = pd.HDFStore(filename, mode="w")
# Define basic variables for time logging
remain = len(coastdat_keys)
done = 0
start = datetime.datetime.now()
# Loop over all regions
for coastdat_key in coastdat_keys:
# Get weather data set for one location
local_weather = weather[coastdat_key]
# Adapt the coastdat weather format to the needs of pvlib.
# The expression "len(list(hdf['solar'].keys()))" returns the number
# of open hdf5 files. If no file is open, there is nothing to do.
if solar and len(list(hdf["solar"].keys())) > 0:
# Get coordinates for the weather location
local_point = data_points.loc[int(coastdat_key[2:])]
# Create a pvlib Location object
location = pvlib.location.Location(
latitude=local_point["lat"], longitude=local_point["lon"]
)
# Adapt weather data to the needs of the pvlib
local_weather_pv = adapt_coastdat_weather_to_pvlib(
local_weather, location
)
# Create one DataFrame for each pv-set and store into the file
for pv_key, pv_set in pv_sets.items():
if pv_key in hdf["solar"]:
hdf["solar"][pv_key][coastdat_key] = feedin.feedin_pv_sets(
local_weather_pv, location, pv_set
)
# Create one DataFrame for each wind-set and store into the file
if wind and len(list(hdf["wind"].keys())) > 0:
local_weather_wind = adapt_coastdat_weather_to_windpowerlib(
local_weather, data_height
)
for wind_key, wind_set in wind_sets.items():
if wind_key in hdf["wind"]:
hdf["wind"][wind_key][
coastdat_key
] = feedin.feedin_wind_sets(local_weather_wind, wind_set)
# Start- time logging *******
remain -= 1
done += 1
if divmod(remain, 10)[1] == 0:
elapsed_time = (datetime.datetime.now() - start).seconds
remain_time = elapsed_time / done * remain
end_time = datetime.datetime.now() + datetime.timedelta(
seconds=remain_time
)
msg = "Actual time: {:%H:%M}, estimated end time: {:%H:%M}, "
msg += "done: {0}, remain: {1}".format(done, remain)
logging.info(msg.format(datetime.datetime.now(), end_time))
# End - time logging ********
for k1 in hdf.keys():
for k2 in hdf[k1].keys():
hdf[k1][k2].close()
weather.close()
logging.info(
"All feedin time series for {0} are stored in {1}".format(
year, coastdat_path.format(year=year, type="")
)
)
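# Illustrative driver (an assumption, not part of the original API): create the
# normalised wind and solar feed-in series for several years in a row. The
# coastdat weather files and the config entries used above must be available.
def _example_create_feedin(years=(2013, 2014), overwrite=False):
    for year in years:
        normalised_feedin_for_each_data_set(
            year, wind=True, solar=True, overwrite=overwrite
        )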
def store_average_weather(
data_type,
weather_path=None,
years=None,
keys=None,
out_file_pattern="average_data_{data_type}.csv",
):
"""
Get average wind speed over all years for each weather region. This can be
used to select the appropriate wind turbine for each region
(strong/low wind turbines).
Parameters
----------
data_type : str
The data_type of the coastdat weather data: 'dhi', 'dirhi', 'pressure',
'temp_air', 'v_wind', 'z0'.
keys : list or None
List of coastdat keys. If None all available keys will be used.
years : list or None
List of one or more years to calculate the average data from. You
have to make sure that the weather data files for the given years
exist in the weather path.
weather_path : str
Path to folder that contains all needed files. If None the default
path defined in the config file will be used.
out_file_pattern : str or None
Name of the results file with a placeholder for the data type (e.g.
``average_data_{data_type}.csv``). If None no file will be written.
Examples
--------
>>> store_average_weather('temp_air', years=[2014, 2013]) # doctest: +SKIP
>>> v=store_average_weather('v_wind', years=[2014],
... out_file_pattern=None, keys=[1132101])
>>> float(v.loc[1132101].round(2))
4.39
"""
logging.info("Calculating the average {0}...".format(data_type))
weather_pattern = cfg.get("coastdat", "file_pattern")
if weather_path is None:
weather_path = cfg.get("paths", "coastdat")
# Finding existing weather files.
data_files = os.listdir(weather_path)
# Possible time range for coastdat data set (reegis: 1998-2014).
check = True
if years is None:
years = range(1948, 2017)
check = False
used_years = []
for year in years:
if weather_pattern.format(year=year) in data_files:
used_years.append(year)
elif check is True:
msg = "File not found".format(weather_pattern.format(year=year))
raise FileNotFoundError(msg)
# Loading coastdat-grid as shapely geometries.
coastdat_polygons = pd.DataFrame(
geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
)
coastdat_polygons.drop("geometry", axis=1, inplace=True)
# Opening all weather files
weather = dict()
# open hdf files
for year in used_years:
weather[year] = pd.HDFStore(
os.path.join(weather_path, weather_pattern.format(year=year)),
mode="r",
)
if keys is None:
keys = coastdat_polygons.index
n = len(list(keys))
logging.info("Remaining: {0}".format(n))
for key in keys:
data_type_avg = pd.Series()
n -= 1
if n % 100 == 0:
logging.info("Remaining: {0}".format(n))
hdf_id = "/A{0}".format(key)
for year in used_years:
ws = weather[year][hdf_id][data_type]
data_type_avg = data_type_avg.append(ws, verify_integrity=True)
# calculate the average wind speed for one grid item
coastdat_polygons.loc[
key, "{0}_avg".format(data_type)
] = data_type_avg.mean()
# Close hdf files
for year in used_years:
weather[year].close()
if keys is not None:
coastdat_polygons.dropna(inplace=True)
# write results to csv file
if out_file_pattern is not None:
filename = out_file_pattern.format(data_type=data_type)
fn = os.path.join(weather_path, filename)
logging.info("Average temperature saved to {0}".format(fn))
coastdat_polygons.to_csv(fn)
return coastdat_polygons
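# Illustrative driver (an assumption, not part of the original API): store the
# long-term averages of a few coastdat variables at once; the yearly HDF5
# weather files for the given years have to exist in the coastdat path.
def _example_store_averages(years=(2013, 2014)):
    for data_type in ("v_wind", "temp_air"):
        store_average_weather(data_type, years=list(years))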
def spatial_average_weather(
year, geo, parameter, name, outpath=None, outfile=None
):
"""
Calculate the mean value of a parameter over all data sets within each
region for one year.
Parameters
----------
year : int
Select the year you want to calculate the average temperature for.
geo : geometries.Geometry object
Polygons to calculate the average parameter for.
outpath : str
Place to store the outputfile.
outfile : str
Set your own name for the outputfile.
parameter : str
Name of the item (temperature, wind speed, ...) of the weather data set.
name : str
Name of the regions table to be used as a column name.
Returns
-------
str : Full file name of the created file.
Example
-------
>>> germany_geo=geometries.load(
... cfg.get('paths', 'geometry'),
... cfg.get('geometry', 'germany_polygon'))
>>> fn=spatial_average_weather(2012, germany_geo, 'temp_air', 'deTemp',
... outpath=os.path.expanduser('~')
... )# doctest: +SKIP
>>> temp=pd.read_csv(fn, index_col=[0], parse_dates=True, squeeze=True
... )# doctest: +SKIP
>>> round(temp.mean() - 273.15, 2)# doctest: +SKIP
8.28
>>> os.remove(fn)# doctest: +SKIP
"""
logging.info(
"Getting average {0} for {1} in {2} from coastdat2.".format(
parameter, name, year
)
)
name = name.replace(" ", "_")
# Create a Geometry object for the coastdat centroids.
coastdat_geo = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
coastdat_geo["geometry"] = coastdat_geo.centroid
# Join the tables to create a list of coastdat id's for each region.
coastdat_geo = geometries.spatial_join_with_buffer(
coastdat_geo, geo, name=name, limit=0
)
# Fix regions with no matches (no matches if a region is too small).
fix = {}
for reg in set(geo.index) - set(coastdat_geo[name].unique()):
reg_point = geo.representative_point().loc[reg]
coastdat_poly = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
fix[reg] = coastdat_poly.loc[
coastdat_poly.intersects(reg_point)
].index[0]
# Open the weather file
weather_file = os.path.join(
cfg.get("paths", "coastdat"),
cfg.get("coastdat", "file_pattern").format(year=year),
)
if not os.path.isfile(weather_file):
download_coastdat_data(year=year, filename=weather_file)
weather = pd.HDFStore(weather_file, mode="r")
# Calculate the average temperature for each region with more than one id.
avg_value = | pd.DataFrame() | pandas.DataFrame |
from pathlib import Path
import pandas as pd
NUM_TEST = 40
TEST_ID = [x for x in range(1, NUM_TEST + 1)]
DATA_DIR = 'data'
ANALYZE_DIR = 'analyze'
STATISTICS_CSV = Path(ANALYZE_DIR).joinpath('statistics.csv')
def analyze():
# read data
with open('title.txt', 'r', encoding='utf-8') as f:
plan_name = f.read()
data = | pd.DataFrame() | pandas.DataFrame |
import csv
import re
import string
import math
import warnings
import pandas as pd
import numpy as np
import ipywidgets as wg
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as mtick
from itertools import product
from scipy.optimize import curve_fit
from IPython.display import display
from platemapping import plate_map as pm
# define custom errors
class DataError(Exception):
pass
class PlateSizeError(Exception):
pass
class DataTypeError(Exception):
pass
# define well plate dimensions
plate_dim = {96:(8, 12), 384:(16, 24)}
# define header names for platemapping module
pm.header_names = {'Well ID': {'dtype':str, 'long':True, 'short_row': False, 'short_col':False},
'Type': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Contents': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Protein Name': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Protein Concentration': {'dtype':float, 'long':True, 'short_row': True, 'short_col':True},
'Tracer Name': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Tracer Concentration': {'dtype':float, 'long':True, 'short_row': True, 'short_col':True},
'Competitor Name': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Competitor Concentration': {'dtype':float, 'long':True, 'short_row': True, 'short_col':True},
'Concentration Units':{'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
}
class FA:
"""Class used for the analysis of fluorescence anisotropy data.
:param data_dict: A dictionary containing data frames with pre-processed data and metadata
:type data_dict: dict
:param g_factor: A value of g-factor
:type g_factor: float
:param plate_map: A data frame with platemap containing information about each well
:type plate_map: pandas df"""
def __init__(self, data_dict, g_factor, plate_map):
self.data_dict = data_dict
self.g_factor = g_factor
self.plate_map = plate_map
# create list of all p and s data frames to run some stats
frames = []
for repeat in self.data_dict.values():
metadata, data = repeat.values()
p_channel, s_channel = data.values()
frames.append(p_channel)
frames.append(s_channel)
new = pd.concat(frames, axis=1) # join all p and s data frames into one df
nan = new.size - new.describe().loc['count'].sum() # find sum of 'nan' cells
# create a data frame to store the final fitting parameters
col_names = ['rmin', 'rmin error', 'rmax', 'rmax error', 'lambda', 'Kd', 'Kd error']
p_names = self.plate_map['Protein Name'].dropna().unique() # get list of all protein names
t_names = self.plate_map['Tracer Name'].dropna().unique() # get list of all tracer names
c_names = self.plate_map['Competitor Name'].dropna().unique() # get list of all competitor names
if len(c_names) == 0: # if there are no competitors, replace nan with a string
c_names = ['-']
c_names_print = 'None'
else:
c_names_print = c_names
final_fit = pd.DataFrame(index=pd.MultiIndex.from_product([p_names, t_names, c_names]), columns=col_names)
final_fit["lambda"] = 1 # set the default lambda value as 1
self.final_fit = final_fit
print("Data was uploaded!\n")
print(f"Number of repeats: {len(self.data_dict)} \nValue of g-factor: {self.g_factor} \nOverall number of empty cells is {int(nan)} in {len(frames)} data frames.\nProteins: {p_names}\nTracers: {t_names}\nCompetitors: {c_names_print}\n")
@classmethod
def read_in_envision(cls, data_csv, platemap_csv, data_type='plate', size=384):
"""Reads in the raw data from csv file along with a platemap and constructs the FA class boject.
:param data_csv: File path of the raw data file in .csv format.
:type data_csv: str
:param platemap_csv: File path of the platemap file in .csv format.
:type platemap_csv: str
:param data_type: Format in which the raw data was exported (plate or list), defaults to plate.
:type data_type: str
:param size: Size of the well plate (384 or 96), defaults to 384.
:type size: int
:return: A dictionary containing data frames with pre-processed data, g-factor and data frame containing platemap.
:rtype: dict, float, pandas df """
# ensure the plate size is either 384 or 96
if size not in plate_dim:
raise PlateSizeError('Invalid size of the well plate, should be 384 or 96.')
# try to read in data in plate format
if data_type == 'plate':
try:
data_dict, g_factor = FA._read_in_plate(data_csv, size) # get data dictionary and g factor
plate_map_df = pm.plate_map(platemap_csv, size) # get platemap using the platemapping module
return cls(data_dict, g_factor, plate_map_df)
except (UnboundLocalError, IndexError, ValueError):
raise DataError(f"Error occured during data read in. Check your file contains data in the 'plate' format and plate size is {size}.")
# try to read in data in list format
if data_type == 'list':
try:
data_dict, g_factor = FA._read_in_list(data_csv, size) # get data dictionary and g factor
plate_map_df = pm.plate_map(platemap_csv, size) # get platemap using the platemapping module
return cls(data_dict, g_factor, plate_map_df)
except (UnboundLocalError, IndexError):
raise DataError("Error occured during data read in. Check your file contains data in the 'list' format.")
else:
raise DataTypeError(f"'{data_type}' is not one of the two valid data types: plate or list.")
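    # Illustrative usage of the constructor above (the file names are
    # placeholders, not files shipped with this module):
    #   fa = FA.read_in_envision(data_csv='raw_data.csv',
    #                            platemap_csv='platemap.csv',
    #                            data_type='plate', size=384)
    #   fa.background_correct()
    #   fa.calc_r_i(correct=True, plot_i=False)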
def _read_in_plate(csv_file, size):
"""Reads the raw data file and finds the information needed to extract data. Passes those parameters to pre_process_plate function and executes it.
Returns a tuple of two elements: dictionary of data frames and g-factor.
:param csv_file: File path of the raw data file in .csv format
:type csv_file: str
:param size: Size of the well plate (384 or 96)
:type size: int
:return: A tuple of dictionary of data frames and the g-factor
:rtype: pandas df, float """
with open(csv_file) as file:
all_data_lines = list(csv.reader(file, delimiter=',')) # read the csv file and cast it into a list containing all lines
blank_indexes = list(index for index, item in enumerate(all_data_lines) if item == []) # list containing indices of all blank rows
if blank_indexes == []: # case for the raw data file having commas instead of blank spaces
blank_indexes = list(index for index, item in enumerate(all_data_lines) if set(item) == {''}) # treats a line filled only with commas (empty strings) as blank
blanks = np.array(blank_indexes) # convert the list of blank indices to a numpy array
read_in_info = [] # list to store the tuples with parameters needed for pandas to read in the csv file
for index, item in enumerate(all_data_lines): # iterate over list with all lines in the csv file
if item != [] and re.findall(r"Plate information", item[0]) == ['Plate information'] and re.search(r'Results for', all_data_lines[index + 9][0]) == None and re.findall(r"Formula", all_data_lines[index+1][10]) != ['Formula']:
skiprows = index + 9 # Set the skiprows parameter for raw data table
skiprows_meta = index + 1 # Set the skiprows parameter for metadata table
end_of_data = blanks[blanks > skiprows].min() # Calculate the end of data table by finding the smallest blank index after the beginning of data table
read_in_info.append((skiprows, end_of_data - skiprows + 1, skiprows_meta)) # add the skiprows, calculated number of data lines and skiprows for metadata parameters to the list as a tuple
data_format = 'plate1'
if item != [] and re.findall(r"Plate information", item[0]) == ['Plate information'] and re.search(r'Results for', all_data_lines[index + 9][0]) != None:
skiprows = index + 10 # Set the skiprows parameter for raw data table
skiprows_meta = index + 1 # Set the skiprows parameter for metadata table
end_of_data = blanks[blanks > skiprows].min() # Calculate the end of data table by finding the smallest blank index after the beginning of data table
read_in_info.append((skiprows, end_of_data - skiprows - 1, skiprows_meta)) # add the skiprows, calculated number of data lines and skiprows for metadata parameters to the list as a tuple
data_format = 'plate2'
if item != [] and len(item) > 1 and re.fullmatch(r"G-factor", item[0]):
g_factor = float(item[4])
return FA._pre_process_plate(csv_file, read_in_info, data_format, size), g_factor
def _pre_process_plate(csv_file, read_in_info, data_format, size):
"""Extracts the data and metadata from the csv file, processes it and returns a nested dictionary containing data and metadata for each repeat and channel.
:param csv_file: File path of the raw data file in .csv format
:type csv_file: str
:param read_in_info: Tuples with read in parameters for each channel.
:type read_in_info: list
:param data_format: Plate type (plate1 or plate2)
:type data_format: str
:param size: Size of the well plate (384 or 96)
:type size: int
:return: A dictionary containing data and metadata
:rtype: dict """
data_frames = {} # dictionary to store data frames
counter = 1 # counter incremented by 0.5 to enable alternating labelling of data frames as 'p' or 's'
row_letters = list(string.ascii_uppercase)[0: plate_dim[size][0]] # list of letters for well IDs
col_numbers = list(np.arange(1, plate_dim[size][1] + 1).astype(str)) # list of numbers for well IDs
well_ids = ['%s%s' % (item[0], item[1]) for item in product(row_letters, col_numbers)] # list of well IDs for the pre-processed data frames
for index, item in enumerate(read_in_info): # iterate over all tuples in the list, each tuple contains skiprows, nrows and skiprows_meta for one channel
if data_format == 'plate1': # raw data table does not have row and column names so 'names' parameter passed to omit the last column
raw_data = pd.read_csv(csv_file, sep=',', names=col_numbers, index_col=False, engine='python', skiprows=item[0], nrows=item[1], encoding='utf-8')
if data_format == 'plate2': # raw data table has row and column names, so index_col=0 to set the first column as row labels
raw_data = pd.read_csv(csv_file, sep=',', index_col=0, engine='python', skiprows=item[0], nrows=item[1], encoding='utf-8')
if len(raw_data.columns) in [13, 25]:
raw_data.drop(raw_data.columns[-1], axis=1, inplace=True) # delete the last column because it is empty
# generate df for metadata (number of rows is always 1) and convert measurement time into datetime object
metadata = pd.read_csv(csv_file, sep=',', engine='python', skiprows=item[2], nrows=1, encoding='utf-8').astype({'Measurement date': 'datetime64[ns]'})
# convert and reshape data frame into 1D array
data_as_array = np.reshape(raw_data.to_numpy(), (int(size), 1))
if counter % 1 == 0:
new_data = pd.DataFrame(data=data_as_array, index=well_ids, columns=['p']) # generate new 384 (or 96) by 1 data frame with p channel data
data_frames[f'repeat_{int(counter)}'] = {'metadata':metadata, 'data': {'p': new_data, 's':''}} # add p channel data and metadata dfs to dictionary
if counter % 1 != 0:
new_data = pd.DataFrame(data=data_as_array, index=well_ids, columns=['s']) # generate new 384 (or 96) by 1 data frame with s channel data
data_frames[f'repeat_{int(counter-0.5)}']['data']['s'] = new_data # add s channel data to dictionary
counter = counter + 0.5
return data_frames
def _read_in_list(csv_file, size):
"""Reads the raw data file and extracts the data and metadata. Passes the raw data to pre_process_list function and executes it.
Returns a tuple of two elements: dictionary of data frames and g-factor.
:param csv_file: File path of the raw data file in .csv format
:type csv_file: str
:param size: Size of the well plate (384 or 96)
:type size: int
:return: A tuple of dictionary of data frames and the g-factor
:rtype: tuple """
with open(csv_file) as file:
all_data_lines = list(csv.reader(file, delimiter=',')) # read the csv file and cast it into a list containing all lines
blank_indexes = list(index for index, item in enumerate(all_data_lines) if item == []) # list containing indexes of all blank rows
if blank_indexes == []: # case for the raw data file having commas instead of blank spaces
blank_indexes = list(index for index, item in enumerate(all_data_lines) if set(item) == {''}) # treats a line filled only with commas (empty strings) as blank
blanks = np.array(blank_indexes) # convert the list of blank indexes to a numpy array
# iterate over all lines to find beginning of the data table ('skiprows') and determine the format of data (list A, B, or C)
for index, item in enumerate(all_data_lines):
if item != [] and len(item) == 1 and re.findall(r"Plate information", item[0]) == ["Plate information"]:
skiprows_meta = index + 1
end_of_metadata = blanks[blanks > skiprows_meta].min() # find the end of metadata by finding the smallest blank index after the beginning of metadata
if item != [] and len(item) >= 2 and re.findall(r"PlateNumber", item[0]) == ['PlateNumber'] and re.findall(r"PlateRepeat", item[1]) == ['PlateRepeat']: # find line number with the beggining of the data
skiprows = index - 1
data_format = 'listA'
end_of_data = blanks[blanks > skiprows].min()
if item != [] and len(item) >= 2 and re.findall(r"Plate", item[0]) == ['Plate'] and re.findall(r"Barcode", item[1]) == ['Barcode']: # find line number with the beggining of the data
skiprows = index
data_format = 'listB'
end_of_data = blanks[blanks > skiprows].min()
if item != [] and len(item) >= 2 and re.findall(r"Plate", item[0]) == ['Plate'] and re.findall(r"Well", item[1]) == ['Well']:
skiprows = index
data_format = 'listC'
end_of_data = blanks[blanks > skiprows].min()
if item != [] and re.fullmatch(r"G-factor", item[0]): # find the g factor
g_factor = float(item[4])
nrows = end_of_data - skiprows - 1 # calculate the length of data table
nrows_meta = end_of_metadata - skiprows_meta - 1 # calculate the length of metadata table (number of rows depends on the number of repeats)
raw_data = pd.read_csv(csv_file, sep=',', engine='python', skiprows=skiprows, nrows=nrows, encoding='utf-8')
raw_metadata = pd.read_csv(csv_file, sep=',', engine='python', skiprows=skiprows_meta, nrows=nrows_meta, encoding='utf-8')
return FA._pre_process_list(raw_data, raw_metadata, data_format, size), g_factor
def _pre_process_list(raw_data, raw_metadata, data_format, size):
"""Extracts the data and metadata for each channel and repeat from the raw data and raw metadata
and returns a nested dictionary containing data and metadata for each repeat and channel.
:param raw_data: Data frame containing raw data
:type raw_data: pandas data frame
:param raw_metadata: Data frame containing raw metadata
:type raw_metadata: pandas data frame
:param data_format: Type of list (listA, listB, or listC)
:type data_format: str
:param size: Size of the well plate (384 or 96)
:type size: int
:return: A dictionary containing data and metadata
:rtype: dict"""
# remove the '0' from middle position of well numbers (A01 -> A1), done by reassigning the 'Well' column to a Series containing modified well numbers
raw_data['Well'] = raw_data['Well'].apply(lambda x: x[0] + x[2] if x[1] == '0' else x)
data_frames = {} # dictionary to store data frames
repeats = list(raw_metadata['Repeat'].to_numpy()) # generate a list with repeats based on the metadata table, e.g. for 3 repeats -> [1,2,3]
row_letters = list(string.ascii_uppercase)[0: plate_dim[size][0]] # list of letters for well IDs
col_numbers = list(np.arange(1, plate_dim[size][1] + 1).astype(str)) # list of numbers for well IDs
well_ids = ['%s%s' % (item[0], item[1]) for item in product(row_letters, col_numbers)] # list of well IDs for the pre-processed data frames
for index, repeat in enumerate(repeats): # iterate over the number of repeats
if data_format == 'listA':
groupped_data = raw_data.groupby(raw_data.PlateRepeat).get_group(repeat) # group and extract the data by the plate repeat column, i.e. in each iteration get data only for the current repeat
p_groupped = groupped_data.iloc[::3, :] # extract data only for the p channel, i.e. each third row starting from the first row
s_groupped = groupped_data.iloc[1::3, :] # extract data only for the s channel, i.e. each third row starting from the second row
p_raw_data = p_groupped[['Well', 'Signal']] # extract only the two relevant columns
s_raw_data = s_groupped[['Well', 'Signal']] # for each channel
if data_format in ['listB', 'listC']:
# the column naming is different for the first repeat ('Signal'), then it's 'Signal.1', 'Signal.2', etc.
if repeat == 1:
p_raw_data = raw_data[['Well', 'Signal']]
s_raw_data = raw_data[['Well', f'Signal.{repeat}']]
else:
p_raw_data = raw_data[['Well', f'Signal.{repeat + index - 1}']] # the column containing data to be extracted is calculated in each iteration
s_raw_data = raw_data[['Well', f'Signal.{repeat + index}']]
# create an empty df with no columns and indexes matching the plate size
indexes = pd.DataFrame(well_ids, columns=['Wells'])
empty_frame = indexes.set_index('Wells')
p_raw_data.set_index('Well', inplace=True) # set the row indexes as the well numbers
p_raw_data.set_axis(['p'], axis=1, inplace=True) # rename the 'Signal' column to 'p'
p_data = empty_frame.join(p_raw_data) # join the raw data df to an empty frame based on the indexes, assigns 'NaN' to indexes not present in the raw data table
s_raw_data.set_index('Well', inplace=True)
s_raw_data.set_axis(['s'], axis=1, inplace=True)
s_data = empty_frame.join(s_raw_data)
metadata = raw_metadata.iloc[[repeat-1]].astype({'Measurement date': 'datetime64[ns]'}) # extract the row with metadata relevant for each repeat and convert date and time into a datetime object
data_frames[f'repeat_{repeat}'] = {'metadata': metadata, 'data': {'p': p_data, 's': s_data}} # add data frames to the dictionary
return data_frames
def visualise(self, labelby='Type', colorby='Type', title="", cmap='rainbow', blank_yellow=True, scale='lin', dpi=250, export=False):
"""Returns a visual representation of the plate map.
The label and colour for each well can be customised to be a platemap variable, for example 'Type', 'Protein Name', 'Protein Concentration', etc.
It can also be the p or s channel value, calculated anisotropy or intensity, however in such cases the 'colorby' or 'labelby'
parameters must be passed as tuple of two strings specifying the repeat number and variable to display, for example ('repeat_2', 'p_corrected').
:param labelby: Variable to display on the wells, for example 'Type', 'Protein Name', ('repeat_1', 's_corrected'), defaults to 'Type'.
:type labelby: str or tuple of str
:param colorby: Variable to color code by, for example 'Type', 'Contents', 'Protein Concentration', ('repeat_2', 'p'), for non-categorical data the well colour represents the magnitude of the number, defaults to 'Type'.
:type colorby: str or tuple of str
:param title: Sets the title of the figure, defaults to None.
:type title: str
:param cmap: Sets the colormap for the color-coding, defaults to 'rainbow'.
:type cmap: str
:param blank_yellow: Sets the colour-coding of blank wells as yellow, defaults to True.
:type blank_yellow: bool
:param scale: Determines whether data for colour-coding of non-categorical data (e.g. 'p_channel', 'r_corrected') is scaled linearly ('lin') or logarithmically ('log', works only if data does not contain values less than or equal to 0), defaults to 'lin'.
:type scale: str
:param dpi: Resolution of the exported figure in points per inches, defaults to 250.
:type dpi: int
:param export: If True, save the figure as .png file, defaults to False.
:type export: bool
:return: Visual representation of the plate map.
:rtype: figure
"""
plate_map = self.plate_map # default platemap
size = plate_map.shape[0]
str_format, str_len = None, None # default string format and length (used for categorical types, e.g. 'Type', 'Protein Name', etc.)
noncat_vars = ['p','s','p_corrected','s_corrected','r_raw','r_corrected','i_raw','i_corrected','i_percent'] # list of non-categorical data
scinot_vars = noncat_vars[:-1] + ['Protein Concentration', 'Tracer Concentration', 'Competitor Concentration'] # types that may have to be formatted in scinot (all non-categorical types except for i_percent)
if type(labelby) == tuple: # option for labelling by a variable from a specified repeat
plate_map = self.plate_map.join(self.data_dict[labelby[0]]['data'][labelby[1]]) # data frame containing variable from specified repeat is added to the platemap
labelby = labelby[1] # reassign labelby as the variable name
if labelby == 'i_percent':
str_format = 'percent' # display the values to 1 decimal place
str_len = 3 # determine the length of string to avoid issues with incorrect font scaling
if type(colorby) == tuple: # option for colouring by a variable from a specified repeat
plate_map = self.plate_map.join(self.data_dict[colorby[0]]['data'][colorby[1]]) # data frame containing variable from specified repeat is added to the platemap
colorby = colorby[1] # reassign colorby as the variable name
if labelby in scinot_vars: # check if the data needs to be displayed in scientific notation
if sum((plate_map[labelby] > 1000) | (plate_map[labelby] < 0)) > 0: # format in sci notation if the number is greater than 1000 or less than 0
str_format = 'scinot'
str_len = 8 # determine the length of string to avoid issues with incorrect font scaling
if colorby in noncat_vars:
categorical = False # colours for colour-coding are generated based on normalised data from colorby column
else:
categorical = True # colours for colour-coding are generated based on an array of uniformly spaced numbers representing each category
return pm.visualise(plate_map, title, size, export, cmap, colorby, labelby, dpi, str_format=str_format, str_len=str_len, blank_yellow=blank_yellow, scale=scale, categorical=categorical)
def invalidate(self, valid=False, **kwargs):
"""Invalidates wells, entire columns and/or rows. Any of the following keyword arguments, or their combination,
can be passed: wells, rows, columns. For example, to invalidate well A1, rows C and D and columns 7 and 8 execute
the following: invalidate(wells='A1', rows=['C','D'], columns=[7,8]).
To validate previously invalidated wells, rows and/or columns, pass the additional 'valid' argument as True.
:param valid: Sets the stipulated well, row or column invalid ('False') or valid ('True'), defaults to False.
:type valid: bool
:param wells: Wells to be invalidated passed as a string or list of strings.
:type wells: str or list of str
:param rows: Rows to be invalidated passed as a string or list of strings.
:type rows: str or list of str
:param columns: Columns to be invalidated passed as an integer or list of integers.
:type columns: int or list of int
"""
# execute the corresponding invalidate functon from the platemapping package
if 'wells' in kwargs:
pm.invalidate_wells(platemap=self.plate_map, wells=kwargs['wells'], valid=valid)
if 'rows' in kwargs:
rows = tuple(kwargs['rows']) # convert the rows to tuple because invalidate_rows cannot take in a list
pm.invalidate_rows(platemap=self.plate_map, rows=rows, valid=valid)
if 'columns' in kwargs:
pm.invalidate_cols(platemap=self.plate_map, cols=kwargs['columns'], valid=valid)
if len(kwargs) == 0: # return error if neither of the keyword arguments is passed
raise TypeError('No arguments were passed. Specify the wells, rows and/or columns to be invalidated!')
def background_correct(self):
"""Calculates background corrected values for p and s channel in all repeats.
The background correction is done by subtracting the mean value of blank p (or s) channel intensity for a given
protein, tracer or competitor concentration from each non-blank value of the p (or s) channel intensity for that concentration.
"""
for key, value in self.data_dict.items():
metadata, data = value.values()
# calculate p and s corrected data frame using _background_correct func and add it to data dictionary
self.data_dict[key]['data']['p_corrected'] = FA._background_correct(data['p'], self.plate_map)
self.data_dict[key]['data']['s_corrected'] = FA._background_correct(data['s'], self.plate_map)
print('Background correction was successfully performed!')
def _background_correct(data, platemap):
"""Calculates background corrected p or s channel values for protein/titration or competition experiment.
:param data: Data frame with raw p or s channel values
:type data: pandas df
:param platemap: Data frame with platemap
:type platemap: pandas df
:return: Data frame with background corrected values
:rtype: pandas df
"""
df = platemap.join(data) # join p or s channel data to platemap
df[df.columns[-1]] = df[df.columns[-1]][df['Valid'] == True] # replace 'p' or 's' values with NaN if the well is invalidated
col_name = df.columns[-1] + '_corrected'
no_index = df.reset_index() # move the 'well id' index to df column
columns = ['Type','Protein Name','Protein Concentration','Tracer Name','Tracer Concentration','Competitor Name','Competitor Concentration']
# create a multindex df to which blank df will be joined
mindex = pd.MultiIndex.from_frame(no_index[columns]) # create multiindex
reindexed = no_index.set_index(mindex).drop(columns, axis=1) # add multiindex to df and drop the columns from which multiindex was created
mean = no_index.groupby(columns, dropna=False).mean().drop('Valid', axis=1).drop('empty', axis=0) # calculate mean for each group of three wells and remove 'Valid' column
mean.rename(columns={mean.columns[-1]: 'Mean'}, inplace=True) # rename the last column to 'Mean' to avoid errors during joining
blank = mean.xs('blank', level=0, drop_level=True) # take a group with only blank wells
reset_idx = blank.reset_index() # move multiindex to df
nans = [col for col in reset_idx.columns if reset_idx[col].dropna().empty] # list of all columns containing only 'nan' values
d = reset_idx.drop(labels=nans, axis=1) # delete all columns containing only 'nan' values
blank2 = d.set_index(pd.MultiIndex.from_frame(d.loc[:,d.columns[:-1]])).drop(d.columns[:-1], axis=1) # multi index to the remaining columns
joined = reindexed.join(blank2, on=list(blank2.index.names)) # join the blank mean data on the indexes only from blank df
joined[col_name] = joined[joined.columns[-2]] - joined[joined.columns[-1]] # calculate background corrected values
jindexed = joined.set_index('index', append=True).reset_index(level=[0,1,2,3,4,5,6]).rename_axis(None) # set index to 'well id' and move multiindex to df columns
return jindexed[[col_name]] # extract and return df with corrected values
def calc_r_i(self, correct=True, plot_i=True, thr=80):
"""Calculates anisotropy and fluorescence intensity for each well in all repeats using the raw and background corrected p and s channel data.
The fluorescence intensity (I) and anisotropy (r) are calculated using the following formulas: I = s + (2*g*p) for intensity and
r = (s - (g*p)) / I for anisotropy. Results are stored in the following data frames: i_raw and r_raw (calculated using the uncorrected
p and s channel values) and i_corrected and r_corrected (calculated using the background corrected p and s channel values).
The function also calculates the percentage intensity of the non-blank wells compared to the blank-corrected wells using the formula:
(raw intensity - corrected intensity) / raw intensity * 100%. If 'plot_i=True', the graph of percentage intensity against the
well ids for all repeats is displayed along with a summary of wells above the threshold (defaults to 80%).
:param correct: Calculate the anisotropy and intensity using the background corrected values of p and s channel data, defaults to True.
:type correct: bool
:param plot_i: Display plots of the percentage intensity against well ids for all repeats, defaults to True.
:type plot_i: bool
:param thr: Percentage intensity above which the wells are included in the summary if plot_i=True, defaults to 80.
:type thr: int
"""
FA.th = thr # assign the threshold value to the class variable so that it can be accessed by functions that are not class methods
for key, value in self.data_dict.items(): # iterate over all repeats
metadata, data = value.values()
# calculate raw intensity and anisotropy using _calc_r_i function and add them to data dictionary
i, r = FA._calc_r_i(data['p'], data['s'], self.g_factor, 'raw')
self.data_dict[key]['data']['i_raw'] = i
self.data_dict[key]['data']['r_raw'] = r
if correct: # calculate intensity and anisotropy using background corrected values of p and s
if 'p_corrected' not in data or 's_corrected' not in data: # check if background subtraction has been performed
raise AttributeError('The corrected anisotropy and intensity can only be calculated after background correction of the raw p and s channel data.')
i_c, r_c = FA._calc_r_i(data['p_corrected'], data['s_corrected'], self.g_factor, 'corrected')
self.data_dict[key]['data']['i_corrected'] = i_c
self.data_dict[key]['data']['r_corrected'] = r_c
# calculate intensity percentage data and add it to data dict
self.data_dict[key]['data']['i_percent'] = FA._calc_i_percent(i, i_c, self.plate_map)
if plot_i: # plot the percentage intensity against the well ids for all repeats
FA._plot_i_percent(self.data_dict, self.plate_map)
else:
print('The fluorescence intensity and anisotropy were successfully calculated!\n')
def _calc_r_i(p, s, g, col_suffix):
"""Calculates either anisotropy or intensity and labels the resulting dfs according to the col_suffix parameter
:param p: Data frame with p channel data (can be either raw or background corrected)
:type p: pandas df
:param s: Data frame with s channel data (can be either raw or background corrected)
:type s: pandas df
:param g: G-factor
:type g: float
:param col_suffix: Suffix to add to column name of the resulting intensity or anisotropy data frame, e.g. 'raw', 'corrected'
:type col_suffix: str
:return: Two data frames with calculated anisotropy and intensity values
:rtype: tuple of pandas df"""
p_rn = p.rename(columns={p.columns[0]: s.columns[0]}) # rename the col name in p data frame so that both p and s dfs have the same col names to enable calculation on dfs
i = s + (2 * g * p_rn) # calculate intensity
r = (s - (g * p_rn)) / i # and anisotropy
i_rn = i.rename(columns={i.columns[0]: 'i_' + col_suffix}) # rename the col name using the column suffix argument
r_rn = r.rename(columns={r.columns[0]: 'r_' + col_suffix})
return i_rn, r_rn
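    # Worked example of the formulas in _calc_r_i (numbers are made up): with
    # g = 1.0, p = 100 and s = 300 the total intensity is I = s + 2*g*p = 500
    # and the anisotropy is r = (s - g*p) / I = 200 / 500 = 0.4.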
def _calc_i_percent(ir, ic, platemap):
"""Calculates the percentage intensity of blank wells compared to non-blank wells.
:param ir: Data frame with corrected intensity
:type ir: pandas df
:param ic: Data frame with raw intensity
:type ic: pandas df
:param platemap: Platemap
:type platemap: pandas df
:return: Data frame with percentage intensity data
:rtype: pandas df"""
ir_rn = ir.rename(columns={ir.columns[0]:ic.columns[0]}) # rename the col name in raw intensity df so that it's the same as in corrected intensity df
percent = (ir_rn - ic) / ir_rn * 100
percent.rename(columns={'i_corrected': 'i_percent'}, inplace=True)
return percent
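    # Worked example of the percentage intensity (numbers are made up): with a
    # raw intensity of 500 and a background-corrected intensity of 400, the
    # background contributes (500 - 400) / 500 * 100 = 20 % of the raw signal.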
def _plot_i_percent(data_d, platemap):
"""Plots the percentage intensity data against the well ids with a horizontal threshold bar and prints a summary of wells above the
threshold for all non-blank and non-empty cells in all repeats. A single figure with multiple subplots for each repeat is created.
:param data_d: Data dictionary
:type data_d: dict
:param platemap: Platemap needed to subset only the non-blank and non-empty cells
:type platemap: pandas df"""
summary = '' # empty string to which lists of wells to be printed are appended after checking data from each repeat
fig = plt.figure(figsize=(8*int((len(data_d) + 2 - abs(len(data_d) - 2))/2), 4*int( math.ceil((len(data_d))/2)) ), tight_layout=True) # plot a figure with variable size depending on the number subplots (i.e. repeats)
for key, value in data_d.items(): # iterate over all repeats
metadata, data = value.values()
df = platemap.join(data['i_percent'])
df_per = df[(df['Type'] != 'blank') & (df['Type'] != 'empty')] # subset only the non-blank and non-empty cells
plt.subplot(int( math.ceil((len(data_d))/2) ), int( (len(data_d) + 2 - abs(len(data_d) - 2))/2 ), int(key[-1]))
plt.bar(df_per.index, df_per['i_percent']) # plot a bar plot with intensity percentage data
plt.axhline(FA.th, color='red') # plot horizontal line representing the threshold on the bar plot
ax = plt.gca() # get the axis object
ax.set_ylabel('')
ax.set_xlabel('wells')
ax.set_title(f'Repeat {key[-1]}')
ax.yaxis.set_major_formatter(mtick.PercentFormatter()) # set formatting of the y axis as percentage
xlabels = [i if len(i) == 2 and i[1] == '1' else '' for i in list(df_per.index)] # create a list of xticks and xticklabels consisting only of the first well from each row
ax.set_xticks(xlabels)
ax.set_xticklabels(xlabels)
wells = list(df_per[df_per['i_percent'] > FA.th].index) # get a list of well ids above the threshold for this repeat
if wells != []: # append wells above the threshold and the respective repeat number to the string with appropriate formatting
summary = summary + f'\tRepeat {key[-1]}: {str(wells)}\n'
plt.show() # ensure the figure is displayed before printing the summary message
if summary != '': # display the summary of wells above the threshold
print(f'In the following wells the percentage intensity value was above the {FA.th}% threshold:')
print(summary)
else:
print(f'None of the wells has the percentage intensity value above the {FA.th}% threshold.')
def plot_i_percent(self):
"""Disply the graph of percentage intesity of the non blank wells as comapred to the blank corrected wells against well ids for all repeats."""
return FA._plot_i_percent(self.data_dict, self.plate_map)
def calc_mean_r_i(self):
"""Calculates the mean anisotropy and intensity over the number of replicates for each specific protein, tracer
or competitor concentration along with standard deviation and standard error.
This data is required for fitting a logistic curve to anisotropy and intensity plots.
"""
for key, value in self.data_dict.items():
metadata, data = value.values()
# create dictionaries 'r_mean' and 'i_mean' containing mean anisotropy and intensity data frames for each protein-tracer-competitor
data['r_mean'] = FA._calc_mean_r_i(data['r_corrected'], self.plate_map)
data['i_mean'] = FA._calc_mean_r_i(data['i_corrected'], self.plate_map)
# create data frame for storing the fitting params and set lambda value to 1
cols = ['rmin', 'rmin error', 'rmax', 'rmax error', 'r_EC50', 'r_EC50 error', 'r_hill', 'r_hill error', 'Ifree',
'Ifree error', 'Ibound', 'Ibound error', 'I_EC50', 'I_EC50 error', 'I_hill', 'I_hill error', 'lambda']
data['fit_params'] = | pd.DataFrame(index=self.final_fit.index, columns=cols) | pandas.DataFrame |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# You can run this test by first running `nPython.exe` (with mono or otherwise):
# $ ./nPython.exe ReportChartTests.py
import numpy as np
import pandas as pd
from datetime import datetime
from ReportCharts import ReportCharts
charts = ReportCharts()
## Test GetReturnsPerTrade
backtest = list(np.random.normal(0, 1, 1000))
live = list(np.random.normal(0.5, 1, 400))
result = charts.GetReturnsPerTrade([], [])
result = charts.GetReturnsPerTrade(backtest, [])
result = charts.GetReturnsPerTrade(backtest, live)
## Test GetCumulativeReturnsPlot
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01T00:00:00', periods=365)]
strategy = np.linspace(1, 25, 365)
benchmark = np.linspace(2, 26, 365)
backtest = [time, strategy, time, benchmark]
time = [ | pd.Timestamp(x) | pandas.Timestamp |
import unittest
import backtest_pkg as bt
import pandas as pd
import numpy as np
from math import sqrt, log
from pandas.testing import assert_frame_equal
def cal_std(data):
if len(data)<=1:
return np.nan
data_mean = sum(data)/len(data)
data_var = sum((i-data_mean)**2 for i in data)/(len(data)-1)
return sqrt(data_var)
def cal_mean(data):
return sum(data)/len(data)
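# Quick sanity check of the helpers above (values chosen by hand): for
# data = [1, 2, 3, 4, 5] the mean is 3, the sample variance is
# (4 + 1 + 0 + 1 + 4) / 4 = 2.5 and therefore cal_std(data) == sqrt(2.5).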
class TestMarketSingleAsset(unittest.TestCase):
def setUp(self):
def construct_market(data):
ticker = ['Test Ticker']
index = pd.date_range('2020-01-01', periods=len(data), freq='D')
data_dict = dict(
adj_close_price = pd.DataFrame(data, index=index, columns=ticker),
open_price = pd.DataFrame(data, index=index, columns=ticker),
high_price = pd.DataFrame([i*1.1 for i in data], index=index, columns=ticker),
low_price = pd.DataFrame([i*0.9 for i in data], index=index, columns=ticker),
close_price = pd.DataFrame(data, index=index, columns=ticker),
)
return bt.market(**data_dict)
data_trend = [1, 2, 3, 4, 5]
self.index = pd.date_range('2020-01-01', periods=len(data_trend), freq='D')
self.ticker = ['Test Ticker']
self.market = construct_market(data_trend)
self.market_down = construct_market(data_trend[::-1])
data_sin = [3, 5, 3, 1, 3]
data_convex = [3, 2, 1, 2, 3]
data_concave = [1, 2, 3, 2, 1]
self.market_sin = construct_market(data_sin)
self.market_convex = construct_market(data_convex)
self.market_concave = construct_market(data_concave)
# Daily return: np.log([np.nan, 2/1, 3/2, 4/3, 5/4])
def test_market_daily_ret(self):
expect = pd.DataFrame(log(5/4), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.daily_ret(), expect)
def test_market_daily_ret_given_date(self):
date_str = '2020-01-03'
date = pd.to_datetime(date_str)
expect = pd.DataFrame(log(3/2), index=[date], columns=self.ticker)
assert_frame_equal(self.market.daily_ret(date=date), expect)
assert_frame_equal(self.market.daily_ret(date=date_str), expect)
def test_market_daily_ret_given_lag(self):
lag = 1
expect = pd.DataFrame(log(4/3), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.daily_ret(lag=lag), expect)
def test_market_daily_ret_given_date_lag(self):
date = pd.to_datetime('2020-01-03')
lag = 1
expect = pd.DataFrame(log(2/1), index=[date], columns=self.ticker)
assert_frame_equal(self.market.daily_ret(date=date, lag=lag), expect)
def test_market_daily_ret_out_range_date(self):
late_date = pd.to_datetime('2020-01-20')
early_date = pd.to_datetime('2019-01-01')
with self.assertRaises(AssertionError):
self.market.daily_ret(date=early_date)
with self.assertRaises(AssertionError):
self.market.daily_ret(date=late_date)
def test_market_daily_ret_large_lag(self):
lag = 100
expect = pd.DataFrame(np.nan, index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.daily_ret(lag=lag), expect)
def test_market_daily_ret_negative_lag(self):
lag = -1
with self.assertRaises(AssertionError):
self.market.daily_ret(lag=lag)
def test_market_total_ret(self):
expect = pd.DataFrame(log(5), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.total_ret(), expect)
def test_market_total_ret_given_date(self):
date_str = '2020-01-03'
date = pd.to_datetime(date_str)
expect = pd.DataFrame(log(3), index=[date], columns=self.ticker)
assert_frame_equal(self.market.total_ret(date=date), expect)
assert_frame_equal(self.market.total_ret(date=date_str), expect)
def test_market_total_ret_given_period(self):
expect = pd.DataFrame(log(5/3), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.total_ret(period=2), expect)
def test_market_total_ret_given_date_period(self):
date_str = '2020-01-04'
date = pd.to_datetime(date_str)
expect = pd.DataFrame(log(4/2), index=[date], columns=self.ticker)
assert_frame_equal(self.market.total_ret(date = date, period=2), expect)
def test_market_total_ret_out_range_date(self):
late_date = pd.to_datetime('2020-01-20')
early_date = pd.to_datetime('2019-01-01')
with self.assertRaises(AssertionError):
self.market.total_ret(date=early_date)
with self.assertRaises(AssertionError):
self.market.total_ret(date=late_date)
def test_market_total_ret_large_period(self):
with self.assertRaises(AssertionError):
self.market.total_ret(period=100)
def test_market_total_ret_negative_period(self):
with self.assertRaises(AssertionError):
self.market.total_ret(period=0)
with self.assertRaises(AssertionError):
self.market.total_ret(period=-1)
def test_market_vol(self):
data = [log(i) for i in [2/1, 3/2, 4/3, 5/4]]
expect = pd.DataFrame(cal_std(data), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.volatility(), expect)
def test_market_vol_given_date(self):
date_str = '2020-01-03'
date = pd.to_datetime(date_str)
data = [log(i) for i in [2/1, 3/2]]
expect = pd.DataFrame(cal_std(data), index=[date], columns=self.ticker)
assert_frame_equal(self.market.volatility(date=date), expect)
assert_frame_equal(self.market.volatility(date=date_str), expect)
def test_market_vol_given_period(self):
data = [log(i) for i in [4/3, 5/4]]
expect = pd.DataFrame(cal_std(data), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.volatility(period=2), expect)
def test_market_vol_given_date_period(self):
date_str = '2020-01-04'
date = pd.to_datetime(date_str)
data = [log(i) for i in [3/2, 4/3]]
expect = pd.DataFrame(cal_std(data), index=[date], columns=self.ticker)
assert_frame_equal(self.market.volatility(date=date, period=2), expect)
def test_market_vol_period_1(self):
expect = pd.DataFrame(np.nan, index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.volatility(period=1), expect)
def test_market_vol_out_range_period(self):
with self.assertRaises(AssertionError):
self.market.volatility(period=10)
def test_market_bollinger(self):
data_std = cal_std(list(range(1, 6)))
expect = pd.DataFrame((5-3)/data_std, index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.bollinger(), expect)
def test_market_bollinger_given_date(self):
date_str = '2020-01-03'
date = | pd.to_datetime(date_str) | pandas.to_datetime |
from numpy import dtype
def estado_civil_dummy():
dic_estado={"Separado(a) o divorciado(a)":0,
"Soltero(a)":0,"Casado":1,"En unión libre":1,
"Viudo(a)":0,1.0:1,2.0:1,3.0:0,4.0:0,5.0:0}
return dic_estado
def dic_etnia():
import numpy as np
dic_etnia={"Mestizo":1,'Ninguno de los anteriores':0,"Blanco":1,"Indígena":0,"Negro, mulato (afro descendiente)":1,
"Palenquero":1,np.NaN:0,1.0:1,2.0:1,3.0:1,4.0:1,5.0:1,6.0:1,7.0:1,8.0:0}
return dic_etnia
def cols_names():
names_cols={"actividad_ppal":"employment","sexo":"sex","edad":"age","estado_civil":"couple",
"hijos":"sons","etnia":"ethnicity","Discapacidad":"Disability","educ_años":"educ_years",
"embarazo_hoy":"w_pregnant","lee_escribe":"read_write","estudia":"student",
"n_internet":"internet","Urbano":"Urban"}
return names_cols
def creador_id(data):
try:
data.insert(0,"id",data["DIRECTORIO"]+data["SECUENCIA_P"]+data["ORDEN"]+data["HOGAR"])
data.insert(1,"id_hogar",data["DIRECTORIO"]+data["SECUENCIA_P"])
except:
data.insert(0,"id_hogar",data["DIRECTORIO"]+data["SECUENCIA_P"])
def dic_dtypes():
dtype={"DIRECTORIO":"str",
"SECUENCIA_P":"str",
"ORDEN":"str",
"HOGAR":"str"}
return dtype
def variables_modelo():
variables=["id","id_hogar","ocupado","desocupado","P6020","P6040","ESC","P6080","P6070","P6170","P4030S1A1","P5210S16","P5210S3","P6081","P6083","DPTO_x"]
return variables
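# Illustrative sketch (an assumption, not part of the original helpers): how the
# dtype dictionary and the id builder are combined when loading one of the raw
# survey csv files. The month folder name is a placeholder.
def example_load_with_ids(mes="Enero"):
    import pandas as pd
    data = pd.read_csv(f"sets_model/{mes}/Acaracteristicas.csv", sep=";", dtype=dic_dtypes())
    creador_id(data)  # adds 'id' and 'id_hogar' columns in place
    return data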
def procces_data_month(mes,variables):
import pandas as pd
dtype=dic_dtypes()
Ac=pd.read_csv(f"sets_model/{mes}/Acaracteristicas.csv",sep=";",dtype=dtype)
Ao=pd.read_csv(f"sets_model/{mes}/Aocupados.csv",sep=";",dtype=dtype)
Ad= | pd.read_csv(f"sets_model/{mes}/Adesocupados.csv",sep=";",dtype=dtype) | pandas.read_csv |
import natsort
import numpy as np
import pandas as pd
import plotly.io as pio
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
import re
import traceback
from io import BytesIO
from sklearn.decomposition import PCA
from sklearn.metrics import pairwise as pw
import json
import statistics
import matplotlib.pyplot as plt
import matplotlib_venn as venn
from matplotlib_venn import venn2, venn3, venn3_circles
from PIL import Image
from upsetplot import from_memberships
from upsetplot import plot as upplot
import pkg_resources
def natsort_index_keys(x):
order = natsort.natsorted(np.unique(x.values))
return pd.Index([order.index(el) for el in x], name=x.name)
def natsort_list_keys(x):
order = natsort.natsorted(np.unique(x))
return [order.index(el) for el in x]
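# Quick illustration of the natural-sort helpers above (labels are made up):
# fraction names such as "03K" and "24K" should be ordered numerically rather
# than lexically, so natsort_list_keys(["24K", "03K", "80K"]) returns [1, 0, 2].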
class SpatialDataSet:
regex = {
"imported_columns": "^[Rr]atio H/L (?!normalized|type|is.*|variability|count)[^ ]+|^Ratio H/L variability.... .+|^Ratio H/L count .+|id$|[Mm][Ss].*[cC]ount.+$|[Ll][Ff][Qq].*|.*[nN]ames.*|.*[Pp][rR]otein.[Ii][Dd]s.*|[Pp]otential.[cC]ontaminant|[Oo]nly.[iI]dentified.[bB]y.[sS]ite|[Rr]everse|[Ss]core|[Qq]-[Vv]alue|R.Condition|PG.Genes|PG.ProteinGroups|PG.Cscore|PG.Qvalue|PG.RunEvidenceCount|PG.Quantity|^Proteins$|^Sequence$"
}
acquisition_set_dict = {
"LFQ6 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"LFQ6 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"SILAC - MQ" : [ "[Rr]atio.[Hh]/[Ll](?!.[Vv]aria|.[Cc]ount)","[Rr]atio.[Hh]/[Ll].[Vv]ariability.\[%\]", "[Rr]atio.[Hh]/[Ll].[cC]ount"],
"Custom": ["(?!Protein IDs|Gene names)"]
}
Spectronaut_columnRenaming = {
"R.Condition": "Map", "PG.Genes" : "Gene names", "PG.Qvalue": "Q-value", "PG.Cscore":"C-Score",
"PG.ProteinGroups" : "Protein IDs", "PG.RunEvidenceCount" : "MS/MS count", "PG.Quantity" : "LFQ intensity"
}
css_color = ["#b2df8a", "#6a3d9a", "#e31a1c", "#b15928", "#fdbf6f", "#ff7f00", "#cab2d6", "#fb9a99", "#1f78b4", "#ffff99", "#a6cee3",
"#33a02c", "blue", "orange", "goldenrod", "lightcoral", "magenta", "brown", "lightpink", "red", "turquoise",
"khaki", "darkgoldenrod","darkturquoise", "darkviolet", "greenyellow", "darksalmon", "hotpink", "indianred", "indigo","darkolivegreen",
"coral", "aqua", "beige", "bisque", "black", "blanchedalmond", "blueviolet", "burlywood", "cadetblue", "yellowgreen", "chartreuse",
"chocolate", "cornflowerblue", "cornsilk", "darkblue", "darkcyan", "darkgray", "darkgrey", "darkgreen", "darkkhaki", "darkmagenta",
"darkorange", "darkorchid", "darkred", "darkseagreen", "darkslateblue", "snow", "springgreen", "darkslategrey", "mediumpurple", "oldlace",
"olive", "lightseagreen", "deeppink", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"lightgoldenrodyellow", "lightgray", "lightgrey", "lightgreen", "lightsalmon", "lightskyblue", "lightslategray", "lightslategrey",
"lightsteelblue", "lightyellow", "lime", "limegreen", "linen", "maroon", "mediumaquamarine", "mediumblue", "mediumseagreen",
"mediumslateblue", "mediumspringgreen", "mediumturquoise", "mediumvioletred", "midnightblue", "mintcream", "mistyrose", "moccasin",
"olivedrab", "orangered", "orchid", "palegoldenrod", "palegreen", "paleturquoise", "palevioletred", "papayawhip", "peachpuff", "peru",
"pink", "plum", "powderblue", "rosybrown", "royalblue", "saddlebrown", "salmon", "sandybrown", "seagreen", "seashell", "sienna", "silver",
"skyblue", "slateblue", "steelblue", "teal", "thistle", "tomato", "violet", "wheat", "white", "whitesmoke", "slategray", "slategrey",
"aquamarine", "azure","crimson", "cyan", "darkslategray", "grey","mediumorchid","navajowhite", "navy"]
analysed_datasets_dict = {}
df_organellarMarkerSet = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/organellemarkers/{}.csv'.format("Homo sapiens - Uniprot")),
usecols=lambda x: bool(re.match("Gene name|Compartment", x)))
df_organellarMarkerSet = df_organellarMarkerSet.rename(columns={"Gene name":"Gene names"})
df_organellarMarkerSet = df_organellarMarkerSet.astype({"Gene names": "str"})
def __init__(self, filename, expname, acquisition, comment, name_pattern="e.g.:.* (?P<cond>.*)_(?P<rep>.*)_(?P<frac>.*)", reannotate_genes=False, **kwargs):
self.filename = filename
self.expname = expname
self.acquisition = acquisition
self.name_pattern = name_pattern
self.comment = comment
self.imported_columns = self.regex["imported_columns"]
self.fractions, self.map_names = [], []
self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame()
if acquisition == "SILAC - MQ":
if "RatioHLcount" not in kwargs.keys():
self.RatioHLcount = 2
else:
self.RatioHLcount = kwargs["RatioHLcount"]
del kwargs["RatioHLcount"]
if "RatioVariability" not in kwargs.keys():
self.RatioVariability = 30
else:
self.RatioVariability = kwargs["RatioVariability"]
del kwargs["RatioVariability"]
elif acquisition == "Custom":
self.custom_columns = kwargs["custom_columns"]
self.custom_normalized = kwargs["custom_normalized"]
self.imported_columns = "^"+"$|^".join(["$|^".join(el) if type(el) == list else el for el in self.custom_columns.values() if el not in [[], None, ""]])+"$"
#elif acquisition == "LFQ5 - MQ" or acquisition == "LFQ6 - MQ" or acquisition == "LFQ6 - Spectronaut" or acquisition == "LFQ5 - Spectronaut":
else:
if "summed_MSMS_counts" not in kwargs.keys():
self.summed_MSMS_counts = 2
else:
self.summed_MSMS_counts = kwargs["summed_MSMS_counts"]
del kwargs["summed_MSMS_counts"]
if "consecutiveLFQi" not in kwargs.keys():
self.consecutiveLFQi = 4
else:
self.consecutiveLFQi = kwargs["consecutiveLFQi"]
del kwargs["consecutiveLFQi"]
#self.markerset_or_cluster = False if "markerset_or_cluster" not in kwargs.keys() else kwargs["markerset_or_cluster"]
if "organism" not in kwargs.keys():
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format("Homo sapiens - Uniprot")))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
else:
assert kwargs["organism"]+".csv" in pkg_resources.resource_listdir(__name__, "annotations/complexes")
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(kwargs["organism"])))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
self.organism = kwargs["organism"]
del kwargs["organism"]
self.analysed_datasets_dict = {}
self.analysis_summary_dict = {}
def data_reading(self, filename=None, content=None):
"""
Data import. Can read df_original from a file or buffer.
df_original contains all information of the raw file; a tab- or comma-separated file is imported.
Args:
self:
filename: string
imported_columns : regular expression; columns that match it will be imported
filename: default None, to use the class attribute. Otherwise overwrites the class attribute upon success.
content: default None, to use the filename. Any valid input to pd.read_csv can be provided, e.g. a StringIO buffer.
Returns:
self.df_original: raw, unprocessed dataframe, single level column index
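Example (illustrative sketch; the file name and name_pattern below are placeholders):
sds = SpatialDataSet("proteinGroups.txt", "Experiment1", "LFQ6 - MQ", comment="", name_pattern=".* (?P<rep>.*)_(?P<frac>.*)")
df_raw = sds.data_reading()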
"""
# use instance attribute if no filename is provided
if filename is None:
filename = self.filename
# if no buffer is provided for the content read straight from the file
if content is None:
content = filename
if filename.endswith("xls") or filename.endswith("txt"):
self.df_original = pd.read_csv(content, sep="\t", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
else: #assuming csv file
self.df_original = pd.read_csv(content, sep=",", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
assert self.df_original.shape[0]>10 and self.df_original.shape[1]>5
self.filename = filename
return self.df_original
def processingdf(self, name_pattern=None, summed_MSMS_counts=None, consecutiveLFQi=None, RatioHLcount=None, RatioVariability=None, custom_columns=None, custom_normalized=None):
"""
Analysis of the SILAC/LFQ-MQ/LFQ-Spectronaut data will be performed. The dataframe will be filtered, normalized, and converted into a dataframe
characterized by a flat column index. These tasks are performed by the following functions:
indexingdf(df_original, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
spectronaut_LFQ_indexingdf(df_original, Spectronaut_columnRenaming, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
stringency_silac(df_index)
normalization_01_silac(df_stringency_mapfracstacked):
logarithmization_silac(df_stringency_mapfracstacked):
stringency_lfq(df_index):
normalization_01_lfq(df_stringency_mapfracstacked):
logarithmization_lfq(df_stringency_mapfracstacked):
Args:
self.acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
additional arguments can be used to override the value set by the class init function
Returns:
self:
map_names: list of Map names
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
df_log_stacked: df; log transformed data
analysis_summary_dict["0/1 normalized data - mean"] : 0/1 normalized data across all maps by calculating the mean
["changes in shape after filtering"]
["Unique Proteins"] : unique proteins, derived from the first entry of Protein IDs, seperated by a ";"
["Analysis parameters"] : {"acquisition" : ...,
"filename" : ...,
#SILAC#
"Ratio H/L count 1 (>=X)" : ...,
"Ratio H/L count 2 (>=Y, var<Z)" : ...,
"Ratio variability (<Z, count>=Y)" : ...
#LFQ#
"consecutive data points" : ...,
"summed MS/MS counts" : ...
}
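Example (illustrative; assumes data_reading() has already been called):
self.processingdf()
df_01 = self.df_01_stacked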
"""
if name_pattern is None:
name_pattern = self.name_pattern
if self.acquisition == "SILAC - MQ":
if RatioHLcount is None:
RatioHLcount = self.RatioHLcount
if RatioVariability is None:
RatioVariability = self.RatioVariability
elif self.acquisition == "Custom":
if custom_columns is None:
custom_columns = self.custom_columns
if custom_normalized is None:
custom_normalized = self.custom_normalized
else:
if summed_MSMS_counts is None:
summed_MSMS_counts = self.summed_MSMS_counts
if consecutiveLFQi is None:
consecutiveLFQi = self.consecutiveLFQi
shape_dict = {}
def indexingdf():
"""
For data output from MaxQuant, all columns - except for "MS/MS count" and "LFQ intensity" (LFQ) | "Ratio H/L count", "Ratio H/L variability [%]"
(SILAC) - will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count", "LFQ intensity"| "Ratio H/L count", "Ratio H/L
variability [%]"), "Fraction" (= defined via "name_pattern") and "Map" (= defined via "name_pattern") as level names, allowing the stacking and
unstacking of the dataframe. The dataframe will be filtered by removing matches to the reverse database, matches only identified by site, and
potential contaminants.
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
acquisition_set_dict: dictionary, all columns will be set as index, except for those that are listed in acquisition_set_dict
acquisition: string, one of "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_original
shape_dict["Shape after categorical filtering"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_original.rename({"Proteins": "Protein IDs"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# a multiindex will be generated by extracting the information about the Map, Fraction and Type from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
[[re.findall(s, col)[0] for s in self.acquisition_set_dict[self.acquisition] if re.match(s,col)][0]
for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(axis=1, inplace=True)
shape_dict["Original size"] = df_original.shape
try:
df_index = df_original.xs(
np.nan, 0, "Reverse")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Potential contaminant")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Only identified by site")
except:
pass
df_index.replace(0, np.nan, inplace=True)
shape_dict["Shape after categorical filtering"] = df_index.shape
df_index.rename(columns={"MS/MS Count":"MS/MS count"}, inplace=True)
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
# Cyt should only be removed if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - MQ":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def custom_indexing_and_normalization():
df_original = self.df_original.copy()
df_original.rename({custom_columns["ids"]: "Protein IDs", custom_columns["genes"]: "Gene names"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# a multiindex will be generated by extracting the information about the Map, Fraction and Type from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
["normalized profile" for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(axis=1, inplace=True)
shape_dict["Original size"] = df_original.shape
# for custom upload assume full normalization for now. this should be extended to valid value filtering and 0-1 normalization later
df_index = df_original.copy()
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def spectronaut_LFQ_indexingdf():
"""
For data generated with the Spectronaut software, columns will be renamed so that they fit the scheme of the MaxQuant output data. Subsequently, all
columns - except for "MS/MS count" and "LFQ intensity" - will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count" and
"LFQ intensity"), "Fraction" and "Map" (= defined via "name_pattern"; both based on the column name R.Condition - equivalent to the column name "Map"
in df_renamed["Map"]) as level labels.
!!!
!!!It is very important to define R.Fraction, R.condition already during the setup of Spectronaut!!!
!!!
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
Spectronaut_columnRenaming
acquisition_set_dict: dictionary, all columns will be set as index, except for those that are listed in acquisition_set_dict
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_renamed = df_original.rename(columns=self.Spectronaut_columnRenaming)
df_renamed["Fraction"] = [re.match(self.name_pattern, i).group("frac") for i in df_renamed["Map"]]
df_renamed["Map"] = [re.match(self.name_pattern, i).group("rep") for i in df_renamed["Map"]] if not "<cond>" in self.name_pattern else ["_".join(
re.match(self.name_pattern, i).group("cond", "rep")) for i in df_renamed["Map"]]
df_index = df_renamed.set_index([col for col in df_renamed.columns if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]])==False])
df_index.columns.names = ["Set"]
# In case fractionated data was used this needs to be caught and aggregated
try:
df_index = df_index.unstack(["Map", "Fraction"])
except ValueError:
df_index = df_index.groupby(by=df_index.index.names).agg(np.nansum, axis=0)
df_index = df_index.unstack(["Map", "Fraction"])
df_index.replace(0, np.nan, inplace=True)
shape_dict["Original size"]=df_index.shape
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
#Cyt is removed only if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - Spectronaut":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def stringency_silac(df_index):
"""
The multiindex dataframe is subjected to stringency filtering. Only proteins with complete profiles are considered (a set of e.g. 5 SILAC ratios
in case you have 5 fractions; any protein with missing values is rejected). Proteins are retained with 3 or more quantifications in each
subfraction (=count). Furthermore, proteins with only 2 quantification events in one or more subfractions are retained if their ratio variability for
ratios obtained with 2 quantification events is below 30% (=var). SILAC ratios are linearly normalized by division by the fraction median.
Subsequently, normalization to SILAC loading is performed. Data is annotated based on the specified marker set, e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
RatioHLcount: int, 2
RatioVariability: int, 30
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
shape_dict["Shape after Ratio H/L count (>=3)/var (count>=2, var<30) filtering"] of df_countvarfiltered_stacked
shape_dict["Shape after filtering for complete profiles"] of df_stringency_mapfracstacked
"""
# Fraction and Map will be stacked
df_stack = df_index.stack(["Fraction", "Map"])
# filtering for sufficient number of quantifications (count in "Ratio H/L count"), taking variability (var in "Ratio H/L variability [%]") into account
# zip: allows direct comparison of count and var
# only if the filtering parameters are fulfilled will the data be kept in df_countvarfiltered_stacked
#default setting: RatioHLcount = 2 ; RatioVariability = 30
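# i.e. a ratio is kept if it is backed by more than RatioHLcount quantifications, or by exactly RatioHLcount quantifications with a variability below RatioVariability %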
df_countvarfiltered_stacked = df_stack.loc[[count>RatioHLcount or (count==RatioHLcount and var<RatioVariability)
for var, count in zip(df_stack["Ratio H/L variability [%]"], df_stack["Ratio H/L count"])]]
shape_dict["Shape after Ratio H/L count (>=3)/var (count==2, var<30) filtering"] = df_countvarfiltered_stacked.unstack(["Fraction", "Map"]).shape
# "Ratio H/L":normalization to SILAC loading, each individual experiment (FractionXMap) will be divided by its median
# np.median([...]): only entries, that are not NANs are considered
df_normsilac_stacked = df_countvarfiltered_stacked["Ratio H/L"]\
.unstack(["Fraction", "Map"])\
.apply(lambda x: x/np.nanmedian(x), axis=0)\
.stack(["Map", "Fraction"])
df_stringency_mapfracstacked = df_countvarfiltered_stacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_normsilac_stacked, columns=["Ratio H/L"]))
# dataframe is grouped (Map, id), that allows the filtering for complete profiles
df_stringency_mapfracstacked = df_stringency_mapfracstacked.groupby(["Map", "id"]).filter(lambda x: len(x)>=len(self.fractions))
shape_dict["Shape after filtering for complete profiles"]=df_stringency_mapfracstacked.unstack(["Fraction", "Map"]).shape
# Ratio H/L is converted into Ratio L/H
df_stringency_mapfracstacked["Ratio H/L"] = df_stringency_mapfracstacked["Ratio H/L"].transform(lambda x: 1/x)
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c not in ["Ratio H/L count","Ratio H/L variability [%]","Ratio H/L"]], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
def normalization_01_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
data_completeness: series, for each individual map, as well as combined maps: 1 - (percentage of NANs)
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "Ratio H/L" is 0-1 normalized and renamed to "normalized
profile"; the columns "Ratio H/L count", "Ratio H/L variability [%]", and "normalized profile" stored as single level indices;
plotting is possible now
self:
analysis_summary_dict["Data/Profile Completeness"] : df, with information about Data/Profile Completeness
column: "Experiment", "Map", "Data completeness", "Profile completeness"
no row index
"""
df_01norm_unstacked = df_stringency_mapfracstacked["Ratio H/L"].unstack("Fraction")
# 0:1 normalization of Ratio L/H
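# worked example: a profile of [2, 1, 1] across three fractions becomes [0.5, 0.25, 0.25], since each value is divided by the row sum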
df_01norm_unstacked = df_01norm_unstacked.div(df_01norm_unstacked.sum(axis=1), axis=0)
df_01_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(pd.DataFrame
(df_01norm_unstacked.stack("Fraction"),columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "normalized profile"
df_01_stacked.columns = [col if col!="Ratio H/L" else "normalized profile" for col in df_01_stacked.columns]
return df_01_stacked
def logarithmization_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked; the columns "Ratio H/L count", "Ratio H/L variability [%]",
and "Ratio H/L" stored as single level indices
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "Ratio H/L"
data; the columns "Ratio H/L count", "Ratio H/L variability [%]" and "log profile" are stored as single level indices;
PCA is possible now
"""
# logarithmizing, basis of 2
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["Ratio H/L"].transform(np.log2)
df_log_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_lognorm_ratio_stacked, columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "log profile"
df_log_stacked.columns = [col if col !="Ratio H/L" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def stringency_lfq(df_index):
"""
The multiindex dataframe is subjected to stringency filtering. Only proteins that were identified with
at least [4] consecutive data points regarding the "LFQ intensity", and if summed MS/MS counts >= n(fractions)*[2]
(LFQ5: min 10 and LFQ6: min 12, respectively; coverage filtering) were included.
Data is annotated based on specified marker set e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
summed_MSMS_counts: int, 2
consecutiveLFQi: int, 4
Returns:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
shape_dict["Shape after MS/MS value filtering"] of df_mscount_mapstacked
shape_dict["Shape after consecutive value filtering"] of df_stringency_mapfracstacked
"""
df_index = df_index.stack("Map")
# sorting the level 0, in order to have LFQ intensity - MS/MS count instead of continuous alternation
df_index.sort_index(axis=1, level=0, inplace=True)
# "MS/MS count"-column: take the sum over the fractions; if the sum is larger than n[fraction]*2, it will be stored in the new dataframe
minms = (len(self.fractions) * self.summed_MSMS_counts)
if minms > 0:
df_mscount_mapstacked = df_index.loc[df_index[("MS/MS count")].apply(np.sum, axis=1) >= minms]
shape_dict["Shape after MS/MS value filtering"]=df_mscount_mapstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_mscount_mapstacked.copy()
else:
df_stringency_mapfracstacked = df_index.copy()
# a series (not a dataframe) is generated; if there are at least e.g. 4 consecutive non-NaN values, the data will be retained
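# the consecutive-value check below applies a rolling sum to the boolean finite-mask: the sum equals consecutiveLFQi only if that many consecutive fractions are non-NaN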
df_stringency_mapfracstacked.sort_index(level="Fraction", axis=1, key=natsort_index_keys, inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.loc[
df_stringency_mapfracstacked[("LFQ intensity")]\
.apply(lambda x: np.isfinite(x), axis=0)\
.apply(lambda x: sum(x) >= self.consecutiveLFQi and any(x.rolling(window=self.consecutiveLFQi).sum() >= self.consecutiveLFQi), axis=1)]
shape_dict["Shape after consecutive value filtering"]=df_stringency_mapfracstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_stringency_mapfracstacked.copy().stack("Fraction")
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c!="MS/MS count" and c!="LFQ intensity"], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan : "undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
def normalization_01_lfq(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked, "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to
"normalized profile"; the columns "normalized profile" and "MS/MS count" are stored as single level indices; plotting is possible now
"""
df_01norm_mapstacked = df_stringency_mapfracstacked["LFQ intensity"].unstack("Fraction")
# 0:1 normalization of the LFQ intensity profiles
df_01norm_unstacked = df_01norm_mapstacked.div(df_01norm_mapstacked.sum(axis=1), axis=0)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_01_stacked = df_rest.join(pd.DataFrame(df_01norm_unstacked.stack(
"Fraction"),columns=["LFQ intensity"]))
# rename columns: "LFQ intensity" into "normalized profile"
df_01_stacked.columns = [col if col!="LFQ intensity" else "normalized profile" for col in
df_01_stacked.columns]
#imputation
df_01_stacked = df_01_stacked.unstack("Fraction").replace(np.NaN, 0).stack("Fraction")
df_01_stacked = df_01_stacked.sort_index()
return df_01_stacked
def logarithmization_lfq(df_stringency_mapfracstacked):
"""The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized
"LFQ intensity"; the columns "log profile" and "MS/MS count" are stored as single level indices; PCA is possible now
"""
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["LFQ intensity"].transform(np.log2)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_log_stacked = df_rest.join(pd.DataFrame(df_lognorm_ratio_stacked, columns=["LFQ intensity"]))
# "LFQ intensity" will be renamed to "log profile"
df_log_stacked.columns = [col if col!="LFQ intensity" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def split_ids_uniprot(el):
"""
This finds the primary canonical protein ID in the protein group. If no canonical ID is present it selects the first isoform ID.
"""
p1 = el.split(";")[0]
if "-" not in p1:
return p1
else:
p = p1.split("-")[0]
if p in el.split(";"):
return p
else:
return p1
if self.acquisition == "SILAC - MQ":
# Index data
df_index = indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
# Run stringency filtering and normalization
df_stringency_mapfracstacked = stringency_silac(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_01_stacked = normalization_01_silac(df_stringency_mapfracstacked)
self.df_log_stacked = logarithmization_silac(df_stringency_mapfracstacked)
# format and reduce 0-1 normalized data for comparison with other experiments
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop(["Ratio H/L count", "Ratio H/L variability [%]"], inplace=True, axis=1)
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
# populate analysis summary dictionary with (meta)data
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"Ratio H/L count" : self.RatioHLcount,
"Ratio variability" : self.RatioVariability,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
# TODO this line needs to be removed.
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
elif self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ" or self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
#if not summed_MS_counts:
# summed_MS_counts = self.summed_MS_counts
#if not consecutiveLFQi:
# consecutiveLFQi = self.consecutiveLFQi
if self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ":
df_index = indexingdf()
elif self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
df_index = spectronaut_LFQ_indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_stringency_mapfracstacked = stringency_lfq(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_log_stacked = logarithmization_lfq(df_stringency_mapfracstacked)
self.df_01_stacked = normalization_01_lfq(df_stringency_mapfracstacked)
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"consecutive data points" : self.consecutiveLFQi,
"summed MS/MS counts" : self.summed_MSMS_counts,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
#return self.df_01_stacked
elif self.acquisition == "Custom":
df_index = custom_indexing_and_normalization()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_01_stacked = df_index.stack(["Map", "Fraction"])
df_01_stacked = df_01_stacked.reset_index().merge(self.df_organellarMarkerSet, how="left", on="Gene names")
df_01_stacked.set_index([c for c in df_01_stacked.columns if c not in ["normalized profile"]], inplace=True)
df_01_stacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
self.df_01_stacked = df_01_stacked
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
else:
return "I do not know this"
def plot_log_data(self):
"""
Args:
self.df_log_stacked
Returns:
log_histogram: Histogram of log transformed data
"""
log_histogram = px.histogram(self.df_log_stacked.reset_index().sort_values(["Map", "Fraction"], key=natsort_list_keys),
x="log profile",
facet_col="Fraction",
facet_row="Map",
template="simple_white",
labels={"log profile": "log tranformed data ({})".format("LFQ intenisty" if self.acquisition != "SILAC - MQ" else "Ratio H/L")}
)
log_histogram.for_each_xaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.for_each_yaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.add_annotation(x=0.5, y=0, yshift=-50, xref="paper",showarrow=False, yref="paper",
text="log2(LFQ intensity)")
log_histogram.add_annotation(x=0, y=0.5, textangle=270, xref="paper",showarrow=False, yref="paper", xshift=-50,
text="count")
log_histogram.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
return log_histogram
def quantity_profiles_proteinGroups(self):
"""
Number of profiles, protein groups per experiment, and the data completeness of profiles (total quantity, intersection) are calculated.
Args:
self:
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
Returns:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "number of protein groups", "number of profiles", "data completeness of profiles"; containing the following information:
npg_t: protein groups per experiment total quantity
npgf_t = groups with valid profiles per experiment total quantity
npr_t: profiles with any valid values
nprf_t = total number of valid profiles
npg_i: protein groups per experiment intersection
npgf_i = groups with valid profiles per experiment intersection
npr_i: profiles with any valid values in the intersection
nprf_i = total number of valid profiles in the intersection
npr_t_dc: profiles, % values != nan
nprf_t_dc = profiles, total, filtered, % values != nan
npr_i_dc: profiles, intersection, % values != nan
nprf_i_dc = profiles, intersection, filtered, % values != nan
df_npg | df_npgf: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f = protein groups, per fraction
or npgf_f = protein groups, filtered, per fraction
df_npg_dc | df_npgf_dc: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f_dc = protein groups, per fraction, % values != nan
or npgf_f_dc = protein groups, filtered, per fraction, % values != nan
"""
if self.acquisition == "SILAC - MQ":
df_index = self.df_index["Ratio H/L"]
df_01_stacked = self.df_01_stacked["normalized profile"]
elif self.acquisition.startswith("LFQ"):
df_index = self.df_index["LFQ intensity"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
elif self.acquisition == "Custom":
df_index = self.df_index["normalized profile"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
#unfiltered
npg_t = df_index.shape[0]
df_index_MapStacked = df_index.stack("Map")
npr_t = df_index_MapStacked.shape[0]/len(self.map_names)
npr_t_dc = 1-df_index_MapStacked.isna().sum().sum()/np.prod(df_index_MapStacked.shape)
#filtered
npgf_t = df_01_stacked.unstack(["Map", "Fraction"]).shape[0]
df_01_MapStacked = df_01_stacked.unstack("Fraction")
nprf_t = df_01_MapStacked.shape[0]/len(self.map_names)
nprf_t_dc = 1-df_01_MapStacked.isna().sum().sum()/np.prod(df_01_MapStacked.shape)
#unfiltered intersection
try:
df_index_intersection = df_index_MapStacked.groupby(level="Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_index_intersection = df_index_MapStacked.groupby(level="Protein IDs").filter(lambda x : len(x)==len(self.map_names))
npr_i = df_index_intersection.shape[0]/len(self.map_names)
npr_i_dc = 1-df_index_intersection.isna().sum().sum()/np.prod(df_index_intersection.shape)
npg_i = df_index_intersection.unstack("Map").shape[0]
#filtered intersection
try:
df_01_intersection = df_01_MapStacked.groupby(level = "Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_01_intersection = df_01_MapStacked.groupby(level = "Protein IDs").filter(lambda x : len(x)==len(self.map_names))
nprf_i = df_01_intersection.shape[0]/len(self.map_names)
nprf_i_dc = 1-df_01_intersection.isna().sum().sum()/np.prod(df_01_intersection.shape)
npgf_i = df_01_intersection.unstack("Map").shape[0]
# summarize in dataframe and save to attribute
df_quantity_pr_pg = pd.DataFrame(
{
"filtering": pd.Series(["before filtering", "before filtering", "after filtering", "after filtering"], dtype=np.dtype("O")),
"type": pd.Series(["total", "intersection", "total", "intersection"], dtype=np.dtype("O")),
"number of protein groups": pd.Series([npg_t, npg_i, npgf_t, npgf_i], dtype=np.dtype("float")),
"number of profiles": pd.Series([npr_t, npr_i, nprf_t, nprf_i], dtype=np.dtype("float")),
"data completeness of profiles": pd.Series([npr_t_dc, npr_i_dc, nprf_t_dc, nprf_i_dc], dtype=np.dtype("float"))})
self.df_quantity_pr_pg = df_quantity_pr_pg.reset_index()
self.analysis_summary_dict["quantity: profiles/protein groups"] = self.df_quantity_pr_pg.to_json()
#additional depth assessment per fraction
dict_npgf = {}
dict_npg = {}
list_npg_dc = []
list_npgf_dc = []
for df_intersection in [df_index_intersection, df_01_intersection]:
for fraction in self.fractions:
df_intersection_frac = df_intersection[fraction]
npgF_f_dc = 1-df_intersection_frac.isna().sum()/len(df_intersection_frac)
npgF_f = df_intersection_frac.unstack("Map").isnull().sum(axis=1).value_counts()
if fraction not in dict_npg.keys():
dict_npg[fraction] = npgF_f
list_npg_dc.append(npgF_f_dc)
else:
dict_npgf[fraction] = npgF_f
list_npgf_dc.append(npgF_f_dc)
df_npg = pd.DataFrame(dict_npg)
df_npg.index.name = "Protein Groups present in:"
df_npg.rename_axis("Fraction", axis=1, inplace=True)
df_npg = df_npg.stack("Fraction").reset_index()
df_npg = df_npg.rename({0: "Protein Groups"}, axis=1)
df_npg.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
df_npgf = pd.DataFrame(dict_npgf)
df_npgf.index.name = "Protein Groups present in:"
df_npgf.rename_axis("Fraction", axis=1, inplace=True)
df_npgf = df_npgf.stack("Fraction").reset_index()
df_npgf = df_npgf.rename({0: "Protein Groups"}, axis=1)
df_npgf.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
max_df_npg = df_npg["Protein Groups present in:"].max()
min_df_npg = df_npg["Protein Groups present in:"].min()
rename_numOFnans = {}
for x, y in zip(range(max_df_npg,min_df_npg-1, -1), range(max_df_npg+1)):
if y == 1:
rename_numOFnans[x] = "{} Map".format(y)
elif y == 0:
rename_numOFnans[x] = "PG not identified".format(y)
else:
rename_numOFnans[x] = "{} Maps".format(y)
for keys in rename_numOFnans.keys():
df_npg.loc[df_npg["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
df_npgf.loc[df_npgf["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
# summarize in dataframe and save to attributes
self.df_npg_dc = pd.DataFrame(
{
"Fraction" : pd.Series(self.fractions),
"Data completeness before filtering": pd.Series(list_npg_dc),
"Data completeness after filtering": pd.Series(list_npgf_dc),
})
self.df_npg = df_npg
self.df_npgf = df_npgf
def plot_quantity_profiles_proteinGroups(self):
"""
Args:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "number of protein groups", "number of profiles", "data completeness of profiles"; further information: see above
Returns:
"""
df_quantity_pr_pg = self.df_quantity_pr_pg
layout = go.Layout(barmode="overlay",
xaxis_tickangle=90,
autosize=False,
width=300,
height=500,
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
#title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
mirror=True),
template="simple_white")
fig_npg = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npg.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of protein groups"],
name=t))
fig_npg.update_layout(layout, title="Number of Protein Groups", yaxis=go.layout.YAxis(title="Protein Groups"))
fig_npr = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npr.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of profiles"],
name=t))
fig_npr.update_layout(layout, title="Number of Profiles")
df_quantity_pr_pg = df_quantity_pr_pg.sort_values("filtering")
fig_npr_dc = go.Figure()
for t in df_quantity_pr_pg["filtering"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["filtering"] == t]
fig_npr_dc.add_trace(go.Bar(
x=plot_df["type"],
y=plot_df["data completeness of profiles"],
name=t))
fig_npr_dc.update_layout(layout, title="Coverage", yaxis=go.layout.YAxis(title="Data completeness"))
#fig_npr_dc.update_xaxes(tickangle=30)
fig_npg_F = px.bar(self.df_npg,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - before filtering",
width=500)
fig_npgf_F = px.bar(self.df_npgf,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - after filtering",
width=500)
fig_npg_F_dc = go.Figure()
for data_type in ["Data completeness after filtering", "Data completeness before filtering"]:
fig_npg_F_dc.add_trace(go.Bar(
x=self.df_npg_dc["Fraction"],
y=self.df_npg_dc[data_type],
name=data_type))
fig_npg_F_dc.update_layout(layout, barmode="overlay", title="Data completeness per fraction", yaxis=go.layout.YAxis(title=""), height=450, width=600)
return fig_npg, fig_npr, fig_npr_dc, fig_npg_F, fig_npgf_F, fig_npg_F_dc
def perform_pca(self):
"""
PCA will be performed, using logarithmized data.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], "V-type proton ATPase" : [...], ...})
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "LFQ intensity"
and "Ratio H/L", respectively; additionally the columns "MS/MS count" and "Ratio H/L count|Ratio H/L variability [%]" are stored
as single level indices
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to "normalized
profile"; the columns "normalized profile"" and "MS/MS count" are stored as single level indices; plotting is possible now
Returns:
self:
df_pca: df, PCA was performed, while keeping the information of the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Map" "Compartment"
df_pca_combined: df, PCA was performed across the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Compartment"
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are consistent
throughout all maps / coverage filtering.
"""
markerproteins = self.markerproteins
if self.acquisition == "SILAC - MQ":
df_01orlog_fracunstacked = self.df_log_stacked["log profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_log_stacked["log profile"].unstack(["Fraction", "Map"]).dropna()
elif self.acquisition.startswith("LFQ") or self.acquisition == "Custom":
df_01orlog_fracunstacked = self.df_01_stacked["normalized profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_01_stacked["normalized profile"].unstack(["Fraction", "Map"]).dropna()
pca = PCA(n_components=3)
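# project each complete fraction profile (0-1 normalized or log transformed, see above) onto the first three principal components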
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca = pd.DataFrame(pca.fit_transform(df_01orlog_fracunstacked))
df_pca.columns = ["PC1", "PC2", "PC3"]
df_pca.index = df_01orlog_fracunstacked.index
self.df_pca = df_pca.sort_index(level=["Gene names", "Compartment"])
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca_combined = pd.DataFrame(pca.fit_transform(df_01orlog_MapFracUnstacked))
df_pca_combined.columns = ["PC1", "PC2", "PC3"]
df_pca_combined.index = df_01orlog_MapFracUnstacked.index
self.df_pca_combined = df_pca_combined.sort_index(level=["Gene names", "Compartment"])
map_names = self.map_names
df_pca_all_marker_cluster_maps = pd.DataFrame()
df_pca_filtered = df_pca.unstack("Map").dropna()
for clusters in markerproteins:
for marker in markerproteins[clusters]:
try:
plot_try_pca = df_pca_filtered.xs(marker, level="Gene names", drop_level=False)
except KeyError:
continue
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.append(
plot_try_pca)
if len(df_pca_all_marker_cluster_maps) == 0:
df_pca_all_marker_cluster_maps = df_pca_filtered.stack("Map")
else:
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.stack("Map")
self.df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.sort_index(level=["Gene names", "Compartment"])
def plot_global_pca(self, map_of_interest="Map1", cluster_of_interest="Proteasome", x_PCA="PC1", y_PCA="PC3", collapse_maps=False):
""""
PCA plot will be generated
Args:
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3",
index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Map", "Compartment",
Returns:
pca_figure: global PCA plot
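Example (illustrative; map and cluster names are placeholders and must exist in self.map_names / self.markerproteins):
fig = self.plot_global_pca(map_of_interest="Map1", cluster_of_interest="Proteasome", x_PCA="PC1", y_PCA="PC3")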
"""
if collapse_maps == False:
df_global_pca = self.df_pca.unstack("Map").swaplevel(0,1, axis=1)[map_of_interest].reset_index()
else:
df_global_pca = self.df_pca_combined.reset_index()
for i in self.markerproteins[cluster_of_interest]:
df_global_pca.loc[df_global_pca["Gene names"] == i, "Compartment"] = "Selection"
compartments = self.df_organellarMarkerSet["Compartment"].unique()
compartment_color = dict(zip(compartments, self.css_color))
compartment_color["Selection"] = "black"
compartment_color["undefined"] = "lightgrey"
fig_global_pca = px.scatter(data_frame=df_global_pca,
x=x_PCA,
y=y_PCA,
color="Compartment",
color_discrete_map=compartment_color,
title= "Protein subcellular localization by PCA for {}".format(map_of_interest)
if collapse_maps == False else "Protein subcellular localization by PCA of combined maps",
hover_data=["Protein IDs", "Gene names", "Compartment"],
template="simple_white",
opacity=0.9
)
return fig_global_pca
def plot_cluster_pca(self, cluster_of_interest="Proteasome"):
"""
PCA plot will be generated
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are
consistent throughout all maps / coverage filtering.
Returns:
pca_figure: PCA plot, for one protein cluster all maps are plotted
"""
df_pca_all_marker_cluster_maps = self.df_pca_all_marker_cluster_maps
map_names = self.map_names
markerproteins = self.markerproteins
try:
for maps in map_names:
df_setofproteins_PCA = pd.DataFrame()
for marker in markerproteins[cluster_of_interest]:
try:
plot_try_pca = df_pca_all_marker_cluster_maps.xs((marker, maps), level=["Gene names", "Map"],
drop_level=False)
except KeyError:
continue
df_setofproteins_PCA = df_setofproteins_PCA.append(plot_try_pca)
df_setofproteins_PCA.reset_index(inplace=True)
if maps == map_names[0]:
pca_figure = go.Figure(
data=[go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
)])
else:
pca_figure.add_trace(go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
))
pca_figure.update_layout(autosize=False, width=500, height=500,
title="PCA plot for <br>the protein cluster: {}".format(cluster_of_interest),
template="simple_white")
return pca_figure
except:
return "This protein cluster was not quantified"
def calc_biological_precision(self):
"""
This function calculates the biological precision of all quantified protein clusters. It provides access to the data slice for all marker proteins, the distance profiles and the aggregated distances. It repeatedly applies the methods get_marker_proteins_unfiltered and calc_cluster_distances.
TODO: integrate optional arguments for calc_cluster_distances: complex_profile, distance_measure.
TODO: replace compatibility attributes with function return values and adjust attribute usage in downstream plotting functions.
Args:
self attributes:
markerproteins: dict, contains marker protein assignments
df_01_stacked: df, contains 0-1 normalized data, required for execution of get_marker_proteins_unfiltered
Returns:
df_alldistances_individual_mapfracunstacked: df, distance profiles, fully unstacked
df_alldistances_aggregated_mapunstacked: df, profile distances (manhattan distance by default), fully unstacked
df_allclusters_01_unfiltered_mapfracunstacked: df, collected marker protein data
self attributes:
df_distance_noindex: compatibility version of df_alldistances_aggregated_mapunstacked
df_allclusters_01_unfiltered_mapfracunstacked
df_allclusters_clusterdist_fracunstacked_unfiltered: compatibility version of df_allclusters_01_unfiltered_mapfracunstacked (only used by quantification_overview)
df_allclusters_clusterdist_fracunstacked: compatibility version of df_alldistances_individual_mapfracunstacked
genenames_sortedout_list = list of gene names with incomplete coverage
analysis_summary_dict entries:
"Manhattan distances" = df_distance_noindex
"Distances to the median profile": df_allclusters_clusterdist_fracunstacked, sorted and melted
"""
df_alldistances_individual_mapfracunstacked = pd.DataFrame()
df_alldistances_aggregated_mapunstacked = pd.DataFrame()
df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame()
for cluster in self.markerproteins.keys():
# collect data irrespective of coverage
df_cluster_unfiltered = self.get_marker_proteins_unfiltered(cluster)
df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked.append(df_cluster_unfiltered)
# filter for coverage and calculate distances
df_cluster = df_cluster_unfiltered.dropna()
if len(df_cluster) == 0:
continue
df_distances_aggregated, df_distances_individual = self.calc_cluster_distances(df_cluster)
df_alldistances_individual_mapfracunstacked = df_alldistances_individual_mapfracunstacked.append(df_distances_individual)
df_alldistances_aggregated_mapunstacked = df_alldistances_aggregated_mapunstacked.append(df_distances_aggregated)
if len(df_alldistances_individual_mapfracunstacked) == 0:
self.df_distance_noindex = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
self.df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
self.df_allclusters_clusterdist_fracunstacked_unfiltered = pd.DataFrame(columns = ["Fraction"])
self.df_allclusters_clusterdist_fracunstacked = pd.DataFrame(columns = ["Fraction"])
self.genenames_sortedout_list = "No clusters found"
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
else:
df_alldistances_aggregated_mapunstacked.columns.name = "Map"
## Get compatibility with plotting functions, by mimicking assignment of old functions:
# old output of distance_calculation
self.df_distance_noindex = df_alldistances_aggregated_mapunstacked.stack("Map").reset_index().rename({0: "distance"}, axis=1)
self.analysis_summary_dict["Manhattan distances"] = self.df_distance_noindex.to_json()
# old output of multiple_iterations
# self.df_allclusters_clusterdist_fracunstacked_unfiltered --> this won't exist anymore, replaced by:
self.df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked
# kept for testing of quantification table:
self.df_allclusters_clusterdist_fracunstacked_unfiltered = df_allclusters_01_unfiltered_mapfracunstacked.stack("Map")
# same as before, but now already abs
self.df_allclusters_clusterdist_fracunstacked = df_alldistances_individual_mapfracunstacked.stack("Map")
df_dist_to_median = self.df_allclusters_clusterdist_fracunstacked.stack("Fraction")
df_dist_to_median.name = "distance"
df_dist_to_median = df_dist_to_median.reindex(index=natsort.natsorted(df_dist_to_median.index))
self.analysis_summary_dict["Distances to the median profile"] = df_dist_to_median.reset_index().to_json()
self.genenames_sortedout_list = [el for el in df_allclusters_01_unfiltered_mapfracunstacked.index.get_level_values("Gene names")
if el not in df_alldistances_individual_mapfracunstacked.index.get_level_values("Gene names")]
return df_alldistances_individual_mapfracunstacked, df_alldistances_aggregated_mapunstacked, df_allclusters_01_unfiltered_mapfracunstacked
def get_marker_proteins_unfiltered(self, cluster):
"""
This function retrieves the 0-1 normalized data for any given protein cluster, unfiltered for coverage.
Args:
cluster: str, cluster name, should be one of self.markerproteins.keys()
self attributes:
df_01_stacked: df, contains the fully stacked 0-1 normalized data
markerproteins: dict, contains marker protein assignments
Returns:
df_cluster_unfiltered: df, unfiltered data for the selected cluster, maps and fractions are unstacked.
self attributes:
None
"""
df_in = self.df_01_stacked["normalized profile"].unstack("Fraction")
markers = self.markerproteins[cluster]
# retrieve marker proteins
df_cluster_unfiltered = pd.DataFrame()
for marker in markers:
try:
df_p = df_in.xs(marker, level="Gene names", axis=0, drop_level=False)
except:
continue
df_cluster_unfiltered = df_cluster_unfiltered.append(df_p)
if len(df_cluster_unfiltered) == 0:
return df_cluster_unfiltered
# Unstack maps and add Cluster to index
df_cluster_unfiltered = df_cluster_unfiltered.unstack("Map")
df_cluster_unfiltered.set_index(pd.Index(np.repeat(cluster, len(df_cluster_unfiltered)), name="Cluster"), append=True, inplace=True)
return df_cluster_unfiltered
def calc_cluster_distances(self, df_cluster, complex_profile=np.median, distance_measure="manhattan"):
"""
Calculates the absolute differences in each fraction and the profile distances relative to the center of a cluster.
Per default this is the manhattan distance to the median profile.
Args:
df_cluster: df, 0-1 normalized profiles of cluster members, should already be filtered for full coverage and be in full wide format.
complex_profile: fun, function provided to apply for calculating the reference profile, default: np.median.
distance_measure: str, selected distance measure to calculate. Currently only 'manhattan' is supported, everything else raises a ValueError.
self attributes:
None
Returns:
df_distances_aggregated: df, proteins x maps, if stacked distance column is currently named 0 but contains manhattan distances.
df_distances_individual: df, same shape as df_cluster, but now with absolute differences to the reference.
        self attributes:
None
"""
df_distances_aggregated = pd.DataFrame()
ref_profile = pd.DataFrame(df_cluster.apply(complex_profile, axis=0, result_type="expand")).T
df_distances_individual = df_cluster.apply(lambda x: np.abs(x-ref_profile.iloc[0,:]), axis=1)
# loop over maps
maps = set(df_cluster.columns.get_level_values("Map"))
for m in maps:
if distance_measure == "manhattan":
d_m = pw.manhattan_distances(df_cluster.xs(m, level="Map", axis=1), ref_profile.xs(m, level="Map", axis=1))
else:
raise ValueError(distance_measure)
d_m = pd.DataFrame(d_m, columns=[m], index=df_cluster.index)
df_distances_aggregated = pd.concat([df_distances_aggregated, d_m], axis=1)
df_distances_aggregated.columns.set_names(names="Map", inplace=True)
return df_distances_aggregated, df_distances_individual
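    # Minimal sketch of the distance logic on toy values (assumes sklearn.metrics.pairwise is available as `pw`,
    # as used above; the numbers are made up):
    #   import numpy as np, pandas as pd
    #   from sklearn.metrics import pairwise as pw
    #   profiles = pd.DataFrame([[0.2, 0.5, 0.3], [0.1, 0.6, 0.3]])   # 2 proteins x 3 fractions
    #   ref = profiles.apply(np.median, axis=0).to_frame().T          # median reference profile
    #   pw.manhattan_distances(profiles, ref)                         # per-protein sum of absolute differences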
def profiles_plot(self, map_of_interest="Map1", cluster_of_interest="Proteasome"):
"""
The function allows the plotting of filtered and normalized spatial proteomic data using plotly.express.
The median profile is also calculated based on the overlapping proteins. Profiles of proteins that are not quantified in all maps are dashed.
Args:
map_of_interest: str, must be in self.map_names
cluster_of_interest: str, must be in self.markerproteins.keys()
        self attributes:
df_allclusters_01_unfiltered_mapfracunstacked: df, contains 0-1 normalized profiles for all markerproteins detected in any map
Returns:
abundance_profiles_and_median_figure: plotly line plot, displaying the relative abundance profiles.
"""
try:
df_setofproteins = self.df_allclusters_01_unfiltered_mapfracunstacked.xs(cluster_of_interest, level="Cluster", axis=0)
df_setofproteins_median = df_setofproteins.dropna().xs(map_of_interest, level="Map", axis=1).median(axis=0)
# fractions get sorted
df_setofproteins = df_setofproteins.xs(map_of_interest, level="Map", axis=1).stack("Fraction")
df_setofproteins = df_setofproteins.reindex(index=natsort.natsorted(df_setofproteins.index))
df_setofproteins.name = "normalized profile"
            # make it available for plotting
df_setofproteins = df_setofproteins.reset_index()
abundance_profiles_figure = px.line(df_setofproteins,
x="Fraction",
y="normalized profile",
color="Gene names",
line_group="Sequence" if "Sequence" in df_setofproteins.columns else "Gene names",
template="simple_white",
title="Relative abundance profile for {} of <br>the protein cluster: {}".format(map_of_interest, cluster_of_interest)
)
df_setofproteins_median.name = "normalized profile"
#fractions get sorted
df_setofproteins_median = df_setofproteins_median.reindex(index=natsort.natsorted(df_setofproteins_median.index))
# make it available for plotting
df_setofproteins_median = df_setofproteins_median.reset_index()
df_setofproteins_median.insert(0, "Gene names", np.repeat("Median profile", len(df_setofproteins_median)))
abundance_profiles_and_median_figure = abundance_profiles_figure.add_scatter(x=df_setofproteins_median["Fraction"],
y=df_setofproteins_median["normalized profile"],
name="Median profile"
)
# dash lines for proteins that have insufficient coverage across maps
abundance_profiles_and_median_figure.for_each_trace(lambda x: x.update(line={"dash":"dash"}),
selector=lambda x: x.name in self.genenames_sortedout_list)
return abundance_profiles_and_median_figure
except:
return "This protein cluster was not quantified"
def quantification_overview(self, cluster_of_interest="Proteasome"):
"""
Args:
self.df_allclusters_clusterdist_fracunstacked_unfiltered
columns: 01K, 03K, 06K, 12K, 24K, 80K
index: Gene names, Protein IDs, C-Score, Q-value, Map, Compartment, Cluster
Returns:
df
"""
df_quantification_overview = self.df_allclusters_clusterdist_fracunstacked_unfiltered.xs(cluster_of_interest, level="Cluster", axis=0)\
[self.fractions[0]].unstack("Map")
if "Sequence" in df_quantification_overview.index.names:
df_quantification_overview = df_quantification_overview.droplevel([i for i in df_quantification_overview.index.names if not i in ["Sequence","Gene names"]])
else:
df_quantification_overview = df_quantification_overview.droplevel([i for i in df_quantification_overview.index.names if not i=="Gene names"])
df_quantification_overview = df_quantification_overview.notnull().replace({True: "x", False: "-"})
return df_quantification_overview
def distance_boxplot(self, cluster_of_interest="Proteasome"):
"""
        A box plot for one selected cluster, across all maps, is generated, displaying the distribution of e.g. the
        Manhattan distance.
Args:
self:
df_distance_noindex: stored as attribute (self.df_distance_noindex),index is reset.
It contains the column name "distance", in which the e.g. Manhattan distances for each individual protein
of the specified clusters (see self.markerproteins) are stored
map_names: individual map names are stored as an index
Returns:
distance_boxplot_figure: boxplot. Along the x-axis the maps, along the y-axis the distances are shown
"""
map_names = self.map_names
df_distance_noindex = self.df_distance_noindex
# "Gene names", "Map", "Cluster" and transferred into the index
df_distance_map_cluster_gene_in_index = df_distance_noindex.set_index(["Gene names", "Map", "Cluster"])
if "Sequence" in df_distance_map_cluster_gene_in_index.columns:
df_distance_map_cluster_gene_in_index.set_index("Sequence", append=True, inplace=True)
df_cluster_xmaps_distance_with_index = pd.DataFrame()
try:
# for each individual map and a defined cluster data will be extracted from the dataframe
# "df_distance_map_cluster_gene_in_index" and appended to the new dataframe df_cluster_xmaps_distance_with_index
for maps in map_names:
plot_try = df_distance_map_cluster_gene_in_index.xs((cluster_of_interest, maps),
level=["Cluster", "Map"], drop_level=False)
df_cluster_xmaps_distance_with_index = df_cluster_xmaps_distance_with_index.append(plot_try)
df_cluster_xmaps_distance_with_index["Combined Maps"] = "Combined Maps"
#number of proteins within one cluster
self.proteins_quantified_across_all_maps = df_cluster_xmaps_distance_with_index.unstack("Map").shape[0]
# index will be reset, required by px.box
df_cluster_xmaps_distance = df_cluster_xmaps_distance_with_index.reset_index()
distance_boxplot_figure = go.Figure()
distance_boxplot_figure.add_trace(go.Box(
x=df_cluster_xmaps_distance["Map"],
y=df_cluster_xmaps_distance["distance"],
boxpoints="all",
whiskerwidth=0.2,
marker_size=2,
hovertext=df_cluster_xmaps_distance["Gene names"]
))
distance_boxplot_figure.add_trace(go.Box(
x=df_cluster_xmaps_distance["Combined Maps"],
y=df_cluster_xmaps_distance["distance"],
boxpoints="all",
whiskerwidth=0.2,
marker_size=2,
hovertext=df_cluster_xmaps_distance["Gene names"]
))
distance_boxplot_figure.update_layout(
title="Manhattan distance distribution for <br>the protein cluster: {}".format(cluster_of_interest),
autosize=False,
showlegend=False,
width=500,
height=500,
# black box around the graph
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="distance",
mirror=True),
template="simple_white"
)
return distance_boxplot_figure
except:
self.cache_cluster_quantified = False
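    # Usage sketch (hypothetical instance `ds`; distances must have been calculated beforehand):
    #   fig = ds.distance_boxplot(cluster_of_interest="Proteasome")
    #   fig.show()   # one box per map plus a "Combined Maps" box; on failure the method only sets
    #                # cache_cluster_quantified = False and returns None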
def distance_to_median_boxplot(self, cluster_of_interest="Proteasome"):
"""
A box plot for 1 desired cluster, across all maps and fractions is generated displaying the
distribution of the distance to the median. For each fraction, one box plot will be displayed.
Args:
self:
                df_allclusters_clusterdist_fracunstacked, dataframe with single-level columns, stored as attribute
                (self.df_allclusters_clusterdist_fracunstacked), in which "Fraction" is unstacked. It contains only the
                normalized data of individual protein clusters, subtracted by the median of the respective protein cluster
                for each fraction.
map_names: individual map names are stored as an index
Returns:
            distance_to_median_boxplot_figure: box plot. Along the x-axis the maps are shown, along the y-axis
            the distances are plotted
"""
df_boxplot_manymaps = pd.DataFrame()
try:
# for each individual map and a defined cluster data will be extracted from the dataframe
# "df_allclusters_clusterdist_fracunstacked" and appended to the new dataframe df_boxplot_manymaps
for maps in self.map_names:
plot_try = self.df_allclusters_clusterdist_fracunstacked.xs((cluster_of_interest, maps), level=["Cluster", "Map"], drop_level=False)
df_boxplot_manymaps = df_boxplot_manymaps.append(plot_try)
self.df_boxplot_manymaps = df_boxplot_manymaps
# index will be reset, required by px.violin
df_boxplot_manymaps = abs(df_boxplot_manymaps.stack("Fraction"))
df_boxplot_manymaps.name = "distance"
df_boxplot_manymaps = df_boxplot_manymaps.reindex(index=natsort.natsorted(df_boxplot_manymaps.index))
df_boxplot_manymaps = df_boxplot_manymaps.reset_index()
# box plot will be generated, every fraction will be displayed in a single plot
distance_to_median_boxplot_figure = px.box(df_boxplot_manymaps,
x="Map",
y="distance",
facet_col="Fraction",
facet_col_wrap=2,
boxmode="overlay", height=900, width=700, points="all",
hover_name="Gene names",
template="simple_white",
title="Distribution of the distance to the median for <br>the protein cluster: {}".format(cluster_of_interest))
return distance_to_median_boxplot_figure
except:
return "This protein cluster was not quantified"
def dynamic_range(self):
"""
        Dynamic range of each individual protein cluster (of the median profile) across all maps is calculated.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_01_stacked: "MAP" and "Fraction" are stacked; the data in the column "normalized profile" is used for plotting. Additionally the columns
"MS/MS count" and "Ratio H/L count | Ratio H/L variability [%] | Ratio H/L" are found in LFQ and SILAC data respectively
        Returns:
            None. The dynamic range table (no index, columns: "Max", "Min", "Dynamic Range", "Cluster") is stored as
            JSON in self.analysis_summary_dict["Dynamic Range"] and visualized by plot_dynamic_range.
"""
df_setofproteins_allMaps = pd.DataFrame()
df_dynamicRange = pd.DataFrame()
df_01_stacked = self.df_01_stacked
for clusters in self.markerproteins:
try:
df_setofproteins_allMaps = pd.DataFrame()
for marker in self.markerproteins[clusters]:
try:
df_marker_allMaps = df_01_stacked.xs(marker, level="Gene names", drop_level=False)
except KeyError:
continue
df_setofproteins_allMaps = df_setofproteins_allMaps.append(df_marker_allMaps)
df_setofproteins_allMaps_median = df_setofproteins_allMaps["normalized profile"].unstack("Fraction").median()
df_dynamicRange = df_dynamicRange.append(pd.DataFrame(np.array([[max(df_setofproteins_allMaps_median),
min(df_setofproteins_allMaps_median),
max(df_setofproteins_allMaps_median)-min(df_setofproteins_allMaps_median),
clusters]]),
columns=["Max", "Min", "Dynamic Range", "Cluster"]),
ignore_index=True)
except:
continue
self.analysis_summary_dict["Dynamic Range"] = df_dynamicRange.to_json()
def plot_dynamic_range(self):
"""
        Dynamic range of each individual protein cluster (of the median profile) across all maps is displayed.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_01_stacked: "MAP" and "Fraction" are stacked; the data in the column "normalized profile" is used for plotting. Additionally the columns
"MS/MS count" and "Ratio H/L count | Ratio H/L variability [%] | Ratio H/L" are found in LFQ and SILAC data respectively
Returns:
fig_dynamicRange: Bar plot, displaying the dynamic range for each protein cluster
self.df_dynamicRange: df, no index, columns: "Max", "Min", "Dynamic Range", "Cluster"
"""
fig_dynamicRange = px.bar(pd.read_json(self.analysis_summary_dict["Dynamic Range"]),
x="Cluster",
y="Dynamic Range",
base="Min",
template="simple_white",
width=1000,
height=500).update_xaxes(categoryorder="total ascending")
return fig_dynamicRange
def results_overview_table(self):
"""
        A dataframe will be created that provides information about "range", "median" and "standardeviation",
        given as the column names, based on the data given in df_distance_noindex
Args:
self:
df_distance_noindex: stored as attribute (self.df_distance_noindex),index is reset. It contains the column name "distance",
in which the e.g. Manhattan distances for each individual protein of the specified clusters (see self.markerproteins)
are stored
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
"""
df_distance_noindex = self.df_distance_noindex
df_distance_map_cluster_gene_in_index = df_distance_noindex.set_index(["Gene names", "Map", "Cluster"])
map_names = self.map_names
df_overview = pd.DataFrame()
for clusters in self.markerproteins:
#if a certain cluster is not available in the dataset at all
try:
for maps in map_names:
df_dist_map_cluster = df_distance_map_cluster_gene_in_index.xs((clusters, maps), level=["Cluster", "Map"], drop_level=False)
statistic_table = {"range": (df_dist_map_cluster["distance"].max(axis=0)) - (df_dist_map_cluster["distance"].min(axis=0)),
"median": df_dist_map_cluster["distance"].median(axis=0),
"standardeviation": df_dist_map_cluster["distance"].std(axis=0),
"Cluster": clusters,
"Map": maps
}
statistic_series = pd.Series(data=statistic_table)
df_statistic_table_individual_cluster = pd.DataFrame(statistic_series).T
df_overview = df_overview.append(df_statistic_table_individual_cluster)
df_dist_cluster = df_distance_map_cluster_gene_in_index.xs(clusters, level="Cluster")
statistic_table_combined = {
"range": (df_dist_cluster["distance"].max(axis=0)) - (df_dist_cluster["distance"].min(axis=0)),
"median": df_dist_cluster["distance"].median(axis=0),
"standardeviation": df_dist_cluster["distance"].std(axis=0),
"Cluster": clusters,
"Map": "combined maps"
}
statistic_series_combined = pd.Series(data=statistic_table_combined)
df_statistic_table_individual_cluster = pd.DataFrame(statistic_series_combined).T
df_overview = df_overview.append(df_statistic_table_individual_cluster)
except:
continue
try:
df_overview.set_index(["Cluster", "Map"], inplace=True)
df_overview.sort_index(axis=0, level=0, inplace=True)
except:
df_overview = pd.DataFrame()
self.analysis_summary_dict["Overview table"] = df_overview.reset_index().to_json()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
#self.analysis_summary_dict.clear()
return df_overview
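    # Usage sketch (hypothetical instance `ds`; cluster and map names are illustrative):
    #   df_overview = ds.results_overview_table()
    #   df_overview.loc[("Proteasome", "Map1")]   # range / median / standardeviation for one cluster and map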
def reframe_df_01ORlog_for_Perseus(self, df_01ORlog):
""""
To be available for Perseus df_01_stacked needs to be reframed.
Args:
df_01ORlog:
df_distance_noindex: stored as attribute (self.df_distance_noindex),index is reset.
It contains the column name "distance", in which the e.g. Manhattan distances for each individual protein
of the specified clusters (see self.markerproteins) are stored
map_names: individual map names are stored as an index
Returns:
df_01ORlog_svm:
LFQ:
columns: "MS/MS count_Map1_01K", "normalized profile_Map1_01K"
index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Compartment"
SILAC:
columns: e.g. "Ratio H/L count_MAP2_80K", "Ratio H/L variability [%]_MAP1_03K", "normalized profile_MAP5_03K"
index: "Q-value", "Score", "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "id", "Compartment"
"""
df_01ORlog_svm = df_01ORlog.copy()
#df_01_filtered_combined = df_01_filtered_combined.stack(["Experiment", "Map"]).swaplevel(0,1, axis=0).dropna(axis=1)
index_ExpMap = df_01ORlog_svm.index.get_level_values("Map")+"_"+df_01ORlog_svm.index.get_level_values("Fraction")
index_ExpMap.name = "Map_Frac"
df_01ORlog_svm.set_index(index_ExpMap, append=True, inplace=True)
df_01ORlog_svm.index = df_01ORlog_svm.index.droplevel(["Map", "Fraction"])
df_01ORlog_svm = df_01ORlog_svm.unstack("Map_Frac")
#df_01ORlog_svm = df_01ORlog_svm.dropna(axis=0, subset=df_01ORlog_svm.loc[[], ["normalized profile"]].columns)
df_01ORlog_svm.columns = ["_".join(col) for col in df_01ORlog_svm.columns.values]
df_01ORlog_svm.rename(index={"undefined" : np.nan}, level="Compartment", inplace=True)
return df_01ORlog_svm
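    # Sketch of the reshaping (hypothetical instance `ds`): the stacked "Map" and "Fraction" levels are fused into
    # "Map_Frac" and unstacked, so a ("normalized profile", Map1, 01K) entry becomes the flat column
    # "normalized profile_Map1_01K":
    #   df_perseus = ds.reframe_df_01ORlog_for_Perseus(ds.df_01_stacked)
    #   [c for c in df_perseus.columns if c.startswith("normalized profile_")][:3]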
class SpatialDataSetComparison:
analysed_datasets_dict = SpatialDataSet.analysed_datasets_dict
css_color = SpatialDataSet.css_color
cache_stored_SVM = True
def __init__(self, ref_exp="Exp2", **kwargs): #clusters_for_ranking=["Proteasome", "Lysosome"]
#self.clusters_for_ranking = clusters_for_ranking
self.ref_exp = ref_exp
self.json_dict = {}
#self.fractions, self.map_names = [], [] #self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame()
#collapse_maps,collapse_cluster, cluster_of_interest_comparison, multi_choice, multi_choice_venn, x_PCA_comp, y_PCA_comp
#if "organism" not in kwargs.keys():
# self.markerproteins = self.markerproteins_set["Human - Swissprot"]
#else:
# assert kwargs["organism"] in self.markerproteins_set.keys()
# self.markerproteins = self.markerproteins_set[kwargs["organism"]]
# del kwargs["organism"]
#self.unique_proteins_total = unique_proteins_total
self.exp_names, self.exp_map_names = [], []
self.df_01_filtered_combined, self.df_distance_comp = pd.DataFrame(), pd.DataFrame()
self.df_quantity_pr_pg_combined, self.df_dynamicRange_combined = pd.DataFrame(), pd.DataFrame()
def read_jsonFile(self): #, content=None
"""
        Read-out of the JSON file and the currently analysed dataset, stored in "analysed_datasets_dict". It will create df_distances_combined
        ("Gene names", "Cluster" are stacked; "Map" and experiment names, which are not stored in an additional level name, are unstacked).
        The layout will be adjusted for distance plotting.
Args:
self.json_dict: contains the dictionary stored in AnalysedDatasets.json
{"Experiment name" : {
"changes in shape after filtering" : {
##SILAC##
"Original size" : tuple,
"Shape after categorical filtering" : tuple,
"Shape after Ratio H/L count (>= 3)/var (count>=2, var<30) filtering" : tuple,
"Shape after filtering for complete profiles" : tuple,
##LFQ/spectronaut##
"Original size" : tuple,
"Shape after MS/MS value filtering" : tuple,
"Shape after consecutive value filtering" : tuple,
},
"quantity: profiles/protein groups" : df - number of protein groups | number of profiles | data completeness of profiles
"Unique Proteins": list,
"Analysis parameters" : {
"acquisition" : str,
"filename" : str,
##SILAC##
"Ratio H/L count 1 (>= X)" : int,
"Ratio H/L count 2 (>=Y, var<Z)" : int,
"Ratio variability (<Z, count>=Y)" : int,
##LFQ/spectronaut##
"consecutive data points" : int,
"summed MS/MS counts" : int,
},
"0/1 normalized data - mean" : df - mean of all datapoints,
"0/1 normalized data" : df - individual cluster,
"Distances to the median profile" : df - individual cluster,
"Manhattan distances" : df - individual cluster,
"Dynamic Range": df - individual cluster,
"Overview table" : df - individual cluster,
                ## if the user performs the Misclassification Analysis before downloading the dictionary AnalysedDatasets.json ##
{"Misclassification Analysis": {
"True: ER" : {
"Recall": int,
"FDR": int,
"Precision": int,
"F1": int
}
"True: NPC" : {...}
...
"Summary": {
"Total - Recall": int,
"Membrane - Recall" : int,
"Av per organelle - Recall": int,
"Median per organelle - Recall" : int,
"Av precision organelles" : int,
"Av F1 organelles" : int,
"Av F1 all clusters" : int,
}
}
}
}
Returns:
self:
df_01_filtered_combined: df, "Fraction" is unstacked; "Experiment", "Gene names", "Map", "Exp_Map" are stacked
df_distance_comp: df, no index, column names: "Gene names", "Cluster", "Protein IDs", "Compartment", "Experiment", "Map", "Exp_Map", "distance"
"distance": Manhattan distances for each individual protein of the specified clusters (see self.markerproteins) are stored
df_quantity_pr_pg_combined: df, no index, column names: "filtering", "type", "number of protein groups", "number of profiles",
"data completeness of profiles", "Experiment"
df_dynamicRange_combined: df, no index, column names: "Max", "Min", "Dynamic Range", "Cluster", "Experiment"
unique_proteins_total: dict, key: Experiment name, value: unique protein (groups)
exp_map_names: list of unique Exp_Map - fusions e.g. LFQ_Map1
exp_names: list of unique Experiment names - e.g. LFQ
"""
json_dict = self.json_dict
        # add experiments that are not stored in AnalysedDatasets.json for comparison
#try:
#if len(SpatialDataSet.analysed_datasets_dict.keys())>=1:
# json_dict.update(SpatialDataSet.analysed_datasets_dict)
##except:
#else:
# pass
self.analysis_parameters_total = {}
unique_proteins_total = {}
df_01_combined = pd.DataFrame()
for exp_name in json_dict.keys():
for data_type in json_dict[exp_name].keys():
if data_type == "0/1 normalized data":
df_01_toadd = pd.read_json(json_dict[exp_name][data_type])
df_01_toadd.set_index(["Gene names", "Protein IDs", "Compartment"], inplace=True)
if "Sequence" in df_01_toadd.columns:
df_01_toadd.set_index(["Sequence"], inplace=True, append=True)
df_01_toadd.drop([col for col in df_01_toadd.columns if not col.startswith("normalized profile")], inplace=True)
df_01_toadd.columns = pd.MultiIndex.from_tuples([el.split("?") for el in df_01_toadd.columns], names=["Set", "Map", "Fraction"])
df_01_toadd.rename(columns = {"normalized profile":exp_name}, inplace=True)
df_01_toadd.set_index(pd.Series(["?".join([str(i) for i in el]) for el in df_01_toadd.index.values], name="join"), append=True, inplace=True)
if len(df_01_combined) == 0:
df_01_combined = df_01_toadd.copy()
else:
df_01_combined = pd.concat([df_01_combined,df_01_toadd], sort=False, axis=1)
elif data_type == "quantity: profiles/protein groups" and exp_name == list(json_dict.keys())[0]:
df_quantity_pr_pg_combined = pd.read_json(json_dict[exp_name][data_type])
df_quantity_pr_pg_combined["Experiment"] = exp_name
elif data_type == "quantity: profiles/protein groups" and exp_name != list(json_dict.keys())[0]:
df_quantity_pr_pg_toadd = pd.read_json(json_dict[exp_name][data_type])
df_quantity_pr_pg_toadd["Experiment"] = exp_name
df_quantity_pr_pg_combined = pd.concat([df_quantity_pr_pg_combined, df_quantity_pr_pg_toadd])
elif data_type == "Manhattan distances" and exp_name == list(json_dict.keys())[0]:
df_distances_combined = pd.read_json(json_dict[exp_name][data_type])
df_distances_combined = df_distances_combined.set_index(["Map", "Gene names", "Cluster", "Protein IDs", "Compartment"]).copy()
if "Sequence" in df_distances_combined.columns:
df_distances_combined.set_index(["Sequence"], inplace=True, append=True)
df_distances_combined = df_distances_combined[["distance"]].unstack(["Map"])
df_distances_combined.rename(columns = {"distance":exp_name}, inplace=True)
elif data_type == "Manhattan distances" and exp_name != list(json_dict.keys())[0]:
df_distances_toadd = pd.read_json(json_dict[exp_name][data_type])
df_distances_toadd = df_distances_toadd.set_index(["Map", "Gene names", "Cluster", "Protein IDs", "Compartment"]).copy()
if "Sequence" in df_distances_toadd.columns:
df_distances_toadd.set_index(["Sequence"], inplace=True, append=True)
df_distances_toadd = df_distances_toadd[["distance"]].unstack(["Map"])
df_distances_toadd.rename(columns = {"distance":exp_name}, inplace=True)
df_distances_combined = pd.concat([df_distances_combined, df_distances_toadd], axis=1)#, join="inner")
elif data_type == "Dynamic Range" and exp_name == list(json_dict.keys())[0]:
df_dynamicRange_combined = pd.read_json(json_dict[exp_name][data_type])
df_dynamicRange_combined["Experiment"] = exp_name
elif data_type == "Dynamic Range" and exp_name != list(json_dict.keys())[0]:
df_dynamicRange_toadd = pd.read_json(json_dict[exp_name][data_type])
df_dynamicRange_toadd["Experiment"] = exp_name
df_dynamicRange_combined = pd.concat([df_dynamicRange_combined, df_dynamicRange_toadd])
# if data_type == "Overview table" and exp_name == list(json_dict.keys())[0]:
# #convert into dataframe
# df_distanceOverview_combined = pd.read_json(json_dict[exp_name][data_type])
# df_distanceOverview_combined["Experiment"] = exp_name
# df_distanceOverview_combined = df_distanceOverview_combined.set_index(["Map", "Cluster", "Experiment"]).unstack(["Cluster"])
#
# elif data_type == "Overview table" and exp_name != list(json_dict.keys())[0]:
# df_distanceOverview_toadd = pd.read_json(json_dict[exp_name][data_type])
# df_distanceOverview_toadd["Experiment"] = exp_name
# df_distanceOverview_toadd = df_distanceOverview_toadd.set_index(["Map", "Cluster", "Experiment"]).unstack(["Cluster"])
# #dataframes will be concatenated, only proteins/Profiles that are in both df will be retained
# df_distanceOverview_combined = pd.concat([df_distanceOverview_combined, df_distanceOverview_toadd])
elif data_type == "Unique Proteins":
unique_proteins_total[exp_name] = json_dict[exp_name][data_type]
elif data_type == "Analysis parameters":
self.analysis_parameters_total[exp_name] = json_dict[exp_name][data_type]
#try:
# for paramters in json_dict[exp_name][data_type].keys():
# if paramters=="acquisition":
# acquisition_loaded.append(json_dict[exp_name][data_type][paramters])
# #elif parameters=="Non valid profiles":
#except:
# continue
#
df_01_combined = df_01_combined.droplevel("join", axis=0)
#filter for consistently quantified proteins (they have to be in all fractions and all maps)
#df_01_filtered_combined = df_01_mean_combined.dropna()
df_01_combined.columns.names = ["Experiment", "Map", "Fraction"]
#reframe it to make it ready for PCA
df_01_filtered_combined = df_01_combined.stack(["Experiment", "Map"]).dropna(axis=0)
#df_01_filtered_combined = df_01_combined.stack(["Experiment"]).dropna(axis=1)
df_01_filtered_combined = df_01_filtered_combined.div(df_01_filtered_combined.sum(axis=1), axis=0)
#df_01_filtered_combined = df_01_combined.copy()
#df_01_filtered_combined.columns.names = ["Experiment", "Fraction", "Map"]
## Replace protein IDs by the unifying protein ID across experiments
#comparison_IDs = pd.Series([split_ids_uniprot(el) for el in df_01_filtered_combined.index.get_level_values("Protein IDs")],
# name="Protein IDs")
#df_01_filtered_combined.index = df_01_filtered_combined.index.droplevel("Protein IDs")
#df_01_filtered_combined.set_index(comparison_IDs, append=True, inplace=True)
##reframe it to make it ready for PCA | dropna: to make sure, that you do consider only fractions that are in all experiments
#df_01_filtered_combined = df_01_filtered_combined.stack(["Experiment", "Map"]).swaplevel(0,1, axis=0).dropna(axis=1)
index_ExpMap = df_01_filtered_combined.index.get_level_values("Experiment")+"_"+df_01_filtered_combined.index.get_level_values("Map")
index_ExpMap.name = "Exp_Map"
df_01_filtered_combined.set_index(index_ExpMap, append=True, inplace=True)
df_distances_combined.columns.names = ["Experiment", "Map"]
series = df_distances_combined.stack(["Experiment", "Map"])
series.name = "distance"
df_distance_comp = series.to_frame()
#fuse Experiment and Map into one column = "Exp_Map"
index_dist_ExpMap = df_distance_comp.index.get_level_values("Experiment")+"_"+df_distance_comp.index.get_level_values("Map")
index_dist_ExpMap.name = "Exp_Map"
df_distance_comp.set_index(index_dist_ExpMap, append=True, inplace=True)
#new
#self.df_distance_comp2 = df_distance_comp.copy()
df_distance_comp.reset_index(level=['Protein IDs'], inplace=True)
df_distance_comp["Protein IDs"] = df_distance_comp["Protein IDs"].str.split(";", expand=True)[0]
df_distance_comp = df_distance_comp.set_index("Protein IDs", append=True).unstack(["Experiment", "Exp_Map", "Map"]).dropna().stack(["Experiment", "Exp_Map", "Map"]).reset_index()
#df_distance_comp.reset_index(inplace=True)
self.unique_proteins_total = unique_proteins_total
self.exp_names = list(df_01_filtered_combined.index.get_level_values("Experiment").unique())
self.exp_map_names = list(index_dist_ExpMap.unique())
self.df_01_filtered_combined = df_01_filtered_combined
#self.df_01_mean_filtered_combined = df_01_mean_filtered_combined
self.df_quantity_pr_pg_combined = df_quantity_pr_pg_combined
self.df_dynamicRange_combined = df_dynamicRange_combined
self.df_distance_comp = df_distance_comp
try:
organism = json_dict[list(json_dict.keys())[0]]["Analysis parameters"]['organism']
except:
organism = "Homo sapiens - Uniprot"
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(organism)))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
self.clusters_for_ranking = self.markerproteins.keys()
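    # Usage sketch (hypothetical file path; json_dict is expected to hold the contents of AnalysedDatasets.json):
    #   import json
    #   comp = SpatialDataSetComparison(ref_exp="Exp2")
    #   with open("AnalysedDatasets.json") as f:
    #       comp.json_dict = json.load(f)
    #   comp.read_jsonFile()
    #   comp.exp_names, comp.exp_map_names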
def perform_pca_comparison(self):
"""
PCA will be performed, using logarithmized data.
Args:
self:
df_01_filtered_combined: df, which contains 0/1 normalized data for each map - for all experiments
columns: Fractions, e.g. "03K", "06K", "12K", "24K", "80K"
index: "Protein IDs", "Gene names", "Compartment", "Experiment", "Map", "Exp_Map"
df_01_mean_filtered_combined: df, which contains (global) 0/1 normalized data across all maps (mean) - for all experiments and for all protein IDs,
that are consistent throughout all experiments
columns: Fractions, e.g. "03K", "06K", "12K", "24K", "80K"
index: "Gene names", "Protein IDs", "Compartment", "Experiment"
Returns:
self:
df_pca_for_plotting: PCA processed dataframe
index: "Experiment", "Gene names", "Map", "Exp_Map"
columns: "PC1", "PC2", "PC3"
contains only marker genes, that are consistent throughout all maps / experiments
df_global_pca: PCA processed dataframe
index: "Gene names", "Protein IDs", "Compartment", "Experiment",
columns: "PC1", "PC2", "PC3"
contains all protein IDs, that are consistent throughout all experiments
"""
markerproteins = self.markerproteins.copy()
#df_01_filtered_combined = self.df_01_filtered_combined
#df_01_filtered_combined = self.df_01_filtered_combined
df_mean = pd.DataFrame()
for exp in self.exp_names:
df_exp = self.df_01_filtered_combined.stack("Fraction").unstack(["Experiment", "Map","Exp_Map"])[exp].mean(axis=1).to_frame(name=exp)
df_mean = pd.concat([df_mean, df_exp], axis=1)
df_mean = df_mean.rename_axis("Experiment", axis="columns").stack("Experiment").unstack("Fraction")
pca = PCA(n_components=3)
df_pca = pd.DataFrame(pca.fit_transform(df_mean))
df_pca.columns = ["PC1", "PC2", "PC3"]
df_pca.index = df_mean.index
try:
markerproteins["PSMA subunits"] = [item for sublist in [re.findall("PSMA.*",p) for p in markerproteins["Proteasome"]] for item in sublist]
markerproteins["PSMB subunits"] = [item for sublist in [re.findall("PSMB.*",p) for p in markerproteins["Proteasome"]] for item in sublist]
del markerproteins["Proteasome"]
except:
pass
###only one df, make annotation at that time
df_cluster = pd.DataFrame([(k, i) for k, l in markerproteins.items() for i in l], columns=["Cluster", "Gene names"])
df_global_pca = df_pca.reset_index().merge(df_cluster, how="left", on="Gene names")
df_global_pca.Cluster.replace(np.NaN, "Undefined", inplace=True)
self.markerproteins_splitProteasome = markerproteins
self.df_pca = df_pca
self.df_global_pca = df_global_pca
def plot_pca_comparison(self, cluster_of_interest_comparison="Proteasome", multi_choice=["Exp1", "Exp2"]):
"""
A PCA plot for desired experiments (multi_choice) and 1 desired cluster is generated.
Either the maps for every single experiment are displayed individually or in a combined manner
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
multi_choice: list of experiment names
cluster_of_interest_comparison: string, protein cluster (key in markerproteins, e.g. "Proteasome")
df_pca: PCA processed dataframe
index: "Experiment", "Gene names", "Map", "Exp_Map"
columns: "PC1", "PC2", "PC3"
contains only marker genes, that are consistent throughout all maps / experiments
Returns:
pca_figure: PCA plot for a specified protein cluster.
"""
df_pca = self.df_pca.copy()
markerproteins = self.markerproteins
try:
df_setofproteins_PCA = pd.DataFrame()
for map_or_exp in multi_choice:
for marker in markerproteins[cluster_of_interest_comparison]:
try:
plot_try_pca = df_pca.xs((marker, map_or_exp), level=["Gene names", "Experiment"], drop_level=False)
except KeyError:
continue
df_setofproteins_PCA = df_setofproteins_PCA.append(plot_try_pca)
df_setofproteins_PCA.reset_index(inplace=True)
df_setofproteins_PCA = df_setofproteins_PCA.assign(Experiment_lexicographic_sort=pd.Categorical(df_setofproteins_PCA["Experiment"], categories=multi_choice,
ordered=True))
df_setofproteins_PCA.sort_values("Experiment_lexicographic_sort", inplace=True)
pca_figure = px.scatter_3d(df_setofproteins_PCA,
x="PC1",
y="PC2",
z="PC3",
color="Experiment",
template="simple_white",
hover_data=["Gene names"]
)
pca_figure.update_layout(autosize=False,
width=700,
height=500,
title="PCA plot for <br>the protein cluster: {}".format(cluster_of_interest_comparison),
template="simple_white"
)
return pca_figure
except:
return "This protein cluster was not identified in all experiments"
def plot_global_pca_comparison(self, cluster_of_interest_comparison="Proteasome", x_PCA="PC1", y_PCA="PC3",
markerset_or_cluster=False, multi_choice=["Exp1", "Exp2"]):
""""
PCA plot will be generated
Args:
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
multi_choice: list of experiment names
css_color: list of colors
df_global_pca: PCA processed dataframe
index: "Gene names", "Protein IDs", "Compartment", "Experiment",
columns: "PC1", "PC2", "PC3"
contains all protein IDs, that are consistent throughout all experiments
Returns:
pca_figure: global PCA plot, clusters based on the markerset based (df_organellarMarkerSet) are color coded.
"""
df_global_pca_exp = self.df_global_pca.loc[self.df_global_pca["Experiment"].isin(multi_choice)]
df_global_pca_exp.reset_index(inplace=True)
compartments = list(SpatialDataSet.df_organellarMarkerSet["Compartment"].unique())
compartment_color = dict(zip(compartments, self.css_color))
compartment_color["Selection"] = "black"
compartment_color["undefined"] = "lightgrey"
compartments.insert(0, "undefined")
compartments.insert(len(compartments), "Selection")
cluster = self.markerproteins_splitProteasome.keys()
cluster_color = dict(zip(cluster, self.css_color))
cluster_color["Undefined"] = "lightgrey"
if markerset_or_cluster == True:
df_global_pca = df_global_pca_exp[df_global_pca_exp.Cluster!="Undefined"].sort_values(by="Cluster")
df_global_pca = df_global_pca_exp[df_global_pca_exp.Cluster=="Undefined"].append(df_global_pca)
else:
for i in self.markerproteins[cluster_of_interest_comparison]:
df_global_pca_exp.loc[df_global_pca_exp["Gene names"] == i, "Compartment"] = "Selection"
df_global_pca = df_global_pca_exp.assign(Compartment_lexicographic_sort = pd.Categorical(df_global_pca_exp["Compartment"],
categories=[x for x in compartments],
ordered=True))
df_global_pca.sort_values(["Compartment_lexicographic_sort", "Experiment"], inplace=True)
fig_global_pca = px.scatter(data_frame=df_global_pca,
x=x_PCA,
y=y_PCA,
color="Compartment" if markerset_or_cluster == False else "Cluster",
color_discrete_map=compartment_color if markerset_or_cluster == False else cluster_color,
title="Protein subcellular localization by PCA",
hover_data=["Protein IDs", "Gene names", "Compartment"],
facet_col="Experiment",
facet_col_wrap=2,
opacity=0.9,
template="simple_white"
)
fig_global_pca.update_layout(autosize=False,
width=1800 if markerset_or_cluster == False else 1600,
height=400*(int(len(multi_choice) / 2) + (len(multi_choice) % 2 > 0)),
template="simple_white"
)
return fig_global_pca
def get_marker_proteins(self, experiments, cluster):
df_in = self.df_01_filtered_combined.copy()
markers = self.markerproteins[cluster]
# retrieve marker proteins
df_cluster = pd.DataFrame()
for marker in markers:
try:
df_p = df_in.xs(marker, level="Gene names", axis=0, drop_level=False)
            except KeyError:
continue
df_cluster = df_cluster.append(df_p)
if len(df_cluster) == 0:
return df_cluster
# filter for all selected experiments
df_cluster = df_cluster.droplevel("Exp_Map", axis=0)
df_cluster = df_cluster.unstack(["Experiment", "Map"])
if any([el not in df_cluster.columns.get_level_values("Experiment") for el in experiments]):
return pd.DataFrame()
drop_experiments = [el for el in df_cluster.columns.get_level_values("Experiment") if el not in experiments]
if len(drop_experiments) > 0:
df_cluster.drop([el for el in df_cluster.columns.get_level_values("Experiment") if el not in experiments],
level="Experiment", axis=1, inplace=True)
df_cluster.dropna(inplace=True)
if len(df_cluster) == 0:
return df_cluster
df_cluster.set_index(pd.Index(np.repeat(cluster, len(df_cluster)), name="Cluster"), append=True, inplace=True)
return df_cluster
def calc_cluster_distances(self, df_cluster, complex_profile=np.median, distance_measure="manhattan"):
df_distances = pd.DataFrame()
# loop over experiments
experiments = set(df_cluster.columns.get_level_values("Experiment"))
for exp in experiments:
df_exp = df_cluster.xs(exp, level="Experiment", axis=1)
ref_profile = pd.DataFrame(df_exp.apply(complex_profile, axis=0, result_type="expand")).T
# loop over maps
maps = set(df_exp.columns.get_level_values("Map"))
for m in maps:
if distance_measure == "manhattan":
d_m = pw.manhattan_distances(df_exp.xs(m, level="Map", axis=1), ref_profile.xs(m, level="Map", axis=1))
else:
raise ValueError(distance_measure)
d_m = pd.DataFrame(d_m, columns=[(exp, m)], index=df_exp.index)
df_distances = pd.concat([df_distances, d_m], axis=1)
df_distances.columns = pd.MultiIndex.from_tuples(df_distances.columns, names=["Experiment", "Map"])
return df_distances
def calc_biological_precision(self, experiments=None, clusters=None):
"""
Method to calculate the distance table for assessing biological precision
"""
df_distances = pd.DataFrame()
if experiments is None:
experiments = self.exp_names
if clusters is None:
clusters = self.markerproteins.keys()
for cluster in clusters:
df_cluster = self.get_marker_proteins(experiments, cluster)
if len(df_cluster) == 0:
continue
dists_cluster = self.calc_cluster_distances(df_cluster)
df_distances = df_distances.append(dists_cluster)
df_distances = df_distances.stack(["Experiment", "Map"]).reset_index()\
.sort_values(["Experiment","Gene names"]).rename({0: "distance"}, axis=1)
df_distances.insert(0, "Exp_Map", ["_".join([e,m]) for e,m in zip(df_distances["Experiment"], df_distances["Map"])])
self.df_distance_comp = df_distances
return df_distances
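    # Usage sketch (after read_jsonFile; the defaults cover all experiments and all marker clusters):
    #   df_dist = comp.calc_biological_precision(experiments=["Exp1", "Exp2"], clusters=["Proteasome"])
    #   df_dist.groupby("Exp_Map")["distance"].median()   # per experiment/map cluster tightness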
def get_complex_coverage(self, min_n=5):
full_coverage = {}
for complx in self.markerproteins.keys():
df = self.get_marker_proteins(self.exp_names, complx)
if len(df) >= min_n:
full_coverage[complx] = len(df)
partial_coverage = {}
for exp in self.exp_names:
for complx in self.markerproteins.keys():
if complx in full_coverage.keys():
continue
df = self.get_marker_proteins([exp], complx)
#print(df)
if complx in partial_coverage.keys():
partial_coverage[complx].append(len(df))
else:
partial_coverage[complx] = [len(df)]
no_coverage = {}
for k in partial_coverage.keys():
if all([el < min_n for el in partial_coverage[k]]):
no_coverage[k] = partial_coverage[k]
for k in no_coverage.keys():
del partial_coverage[k]
self.coverage_lists = [full_coverage, partial_coverage, no_coverage]
return full_coverage, partial_coverage, no_coverage
def distance_boxplot_comparison(self, cluster_of_interest_comparison="Proteasome", collapse_maps=False, multi_choice=["Exp1", "Exp2"]):
"""
A box plot for desired experiments (multi_choice) and 1 desired cluster is generated displaying the distribution of the e.g.
Manhattan distance. Either the maps for every single experiment are displayed individually or in a combined manner.
Args:
self:
multi_choice: list of experiment names
collapse_maps: boolean
cluster_of_interest_comparison: string, protein cluster (key in markerproteins, e.g. "Proteasome")
map_names: individual map names are stored as an index
df_distance_comp: df_distance_comp: no index, column names: "Gene names", "Cluster", "Protein IDs", "Compartment", "Experiment", "Map",
"Exp_Map", "distance"
"distance": Manhattan distances for each individual protein of the specified clusters (see self.markerproteins) are stored
Returns:
distance_boxplot_figure: boxplot. Along the x-axis the maps, along the y-axis the distances are shown
"""
        # an error message is already displayed if no experiments are selected, that is why: return ""
        if len(multi_choice) < 1:
            return ""
df_distance_comp = self.df_distance_comp.copy()
        # set categorical column, allowing lexicographic sorting
        df_distance_comp["Experiment_lexicographic_sort"] = pd.Categorical(df_distance_comp["Experiment"], categories=multi_choice, ordered=True)
"""
Python source code to extract job listings from JobStreet Malaysia (jobstreet.com.my)
"""
from functools import total_ordering
from jobstreet.config import General, Authentication, Location
import pandas as pd
import requests
import webbrowser as web
import urllib.parse as urlparse
from urllib.parse import urlencode
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import dateutil.relativedelta as rd
import math
import mechanicalsoup
import logging as logger
import os
clear = lambda: os.system('cls') #on Windows System
# TODO - Advance criteria
# For logging purpose
logger.basicConfig(level=logger.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
)
urllib3_logger = logger.getLogger('urllib3')
urllib3_logger.setLevel(logger.CRITICAL)
class JobStreetExtractor:
"""
Extractor for getting job dataset from jobstreet malaysia
"""
__chrome_path__ = General.CHROME_PATH.value
__base_url__ = General.JOBSTREET_URL.value
# Mapping values to required Jobstreet parameter
# https://www.jobstreet.com.my/en/job-search/job-vacancy.php?key=Software&area=2&location=51200&position=3%2C4&job-type=5&experience-min=03&experience-max=-1&salary=6%2C000
# &salary-max=7%2C000&classified=1&salary-option=on&job-posted=0&src=1&ojs=4
# key
# area
# location
# position
# job-type : 5,10,16
# experience-min
# experience-max
# salary
# salary-max
# classified
# salary-option
# job-posted
# src
# ojs
# sort
# order
# pg
def __authenticate__(self):
login_url = Authentication.JOBSTREET_LOGIN_URL.value
browser = mechanicalsoup.StatefulBrowser()
browser.open(login_url)
browser.select_form('#login')
browser['login_id'] = Authentication.JOBSTREET_USERNAME.value
browser['password'] = Authentication.JOBSTREET_PASSWORD.value
browser.submit_selected()
return browser
def __scraping__(self, keyword=None, location=None, minSalary=None, maxSalary=None, minExperience=None, maxExperience=None):
# login
        browser = self.__authenticate__()
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
# construct filter criteria
filter_criteria = {}
if keyword is not None:
filter_criteria.update({'key': keyword })
if location is not None:
filter_criteria.update({'location' : location.value })
if minSalary is not None:
filter_criteria.update({'salary' : minSalary })
if maxSalary is not None:
filter_criteria.update({'salary-max' : maxSalary })
if minExperience is not None:
filter_criteria.update({'experience-min' : minExperience })
if maxExperience is not None:
filter_criteria.update({'experience-max' : maxExperience })
# filter_criteria = {
# 'key':'Software',
# 'area': '2',
# 'location':'51200',
# 'position':'3,4',
# 'job-type':'5',
# 'salary':'6000',
# 'salary-max':'7000',
# 'classified':'1',
# 'salary-option':'on',
# 'job-posted':'0',
# 'src':'1',
# 'ojs':'4',
# }
page_url = self.__base_url__
url_parts = list(urlparse.urlparse(page_url))
final_df = pd.DataFrame()
# test to get number of pages
page_criteria = {'pg': str(1)}
filter_criteria.update(page_criteria)
url_parts[4] = urlencode(filter_criteria)
page_url = urlparse.urlunparse(url_parts)
response = browser.open(page_url)
# get total lists
total_list = BeautifulSoup(response.content, "html.parser").find("span", class_="pagination-result-count").string
pages = 1
if total_list is not None:
logger.info(str(total_list))
total_list = total_list[total_list.find("of")+len("of"):total_list.rfind("jobs")]
total_list = total_list.strip().replace(',', '')
logger.info("Attempt to parse " + str(total_list) + " jobs at most")
pages = math.ceil(int(total_list) / 40) # 40 is item per page
# To prevent over-scraping
if General.PAGE_THRESHOLD.value != -1 and General.PAGE_THRESHOLD.value < pages :
pages = General.PAGE_THRESHOLD.value
for page in range(1, pages + 1):
job_titles = []
job_urls = []
com_names = []
com_urls = []
locations = []
salaries = []
descriptions = []
page_criteria = {'pg': str(page)}
filter_criteria.update(page_criteria)
url_parts[4] = urlencode(filter_criteria)
page_url = urlparse.urlunparse(url_parts)
logger.info("Processing Page " + str(page) + " : " + page_url)
response = browser.open(page_url)
if response.status_code != 200:
raise ConnectionError("Cannot connect to " + page_url)
# Get each job card
raw_listing = BeautifulSoup(response.content, "html.parser").find_all("div",
{
'id' : lambda value: value and value.startswith("job_ad")
})
# For each job card, get job informations
for element in raw_listing:
# Get job general information
job_el = element.find("a", {'class' : lambda value: value and value.startswith("position-title-link")})
job_titles.append(job_el.get('data-job-title'))
job_urls.append(job_el.get('href'))
# Get company information
com_el = element.find("a", {'id' : lambda value: value and value.startswith("company_name")})
if com_el is None:
com_el = element.find("span", {'id': lambda value: value and value.startswith("company_name")})
com_names.append(com_el.string)
com_urls.append(None)
else:
com_names.append(com_el.find('span').string)
com_urls.append(com_el.get('href'))
# Get location information
loc_el = element.find("li", {'class' : 'job-location'})
locations.append(loc_el.get('title'))
sal_el = element.find("li", {'id' : 'job_salary'})
# Get salary information
                if sal_el:
                    font = sal_el.find("font")
                    if font:
                        salaries.append(font.string)
                    else:
                        salaries.append(None)
                else:
                    salaries.append(None)
# Get job description
des_el = element.find("ul", {'id' : lambda value: value and value.startswith("job_desc_detail")}).find("li",recursive=False)
if des_el:
descriptions.append(des_el.string)
else:
descriptions.append(None)
df = pd.concat([pd.Series(job_titles),
pd.Series(job_urls),
pd.Series(com_names),
pd.Series(com_urls),
pd.Series(locations),
                            pd.Series(salaries),
                            pd.Series(descriptions)], axis=1)
                final_df = pd.concat([final_df, df], ignore_index=True)
            return final_df
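# Usage sketch (illustrative; assumes valid JobStreet credentials in jobstreet.config and network access):
#   extractor = JobStreetExtractor()
#   df_jobs = extractor.__scraping__(keyword="Software", minSalary=6000, maxSalary=7000)
#   df_jobs.head()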
"""
Written by <NAME>, UC Berkeley/ Lawrence Berkeley National Labs, NSDS Lab
<NAME>, UC Berkeley
This code is intended to create and implement structure supervised classification of coarsely
segmented trial behavior from the ReachMaster experimental system.
Functions are designed to work with a classifier of your choice.
Operates on a single block.
Edited: 9/14/2021
Requires a folder "DataFrames" with all kinematic (kin) and experiment (exp) data files
"""
import argparse
import os
import matplotlib.pyplot as plt
import sklearn
from scipy import ndimage
import Classification_Utils as CU
import pandas as pd
import numpy as np
import h5py
import random
import joblib # for saving sklearn models
from imblearn.over_sampling import SMOTE # for adjusting class imbalances
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler
from imblearn.pipeline import Pipeline as imblearn_Pipeline
from collections import Counter
# classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV, train_test_split, GridSearchCV, cross_validate, cross_val_score
from sklearn.pipeline import make_pipeline, Pipeline
# from imblearn.pipeline import Pipeline as imblearnPipeline
from sklearn.feature_selection import SelectKBest # feature selection
from sklearn.feature_selection import f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.neural_network import MLPClassifier
# set global random seed for reproducibility #
random.seed(246810)
np.random.seed(246810)
# Create folder in CWD to save data and plots #
current_directory = os.getcwd()
folder_name = 'ClassifyTrials'
final_directory = os.path.join(current_directory, folder_name)
if not os.path.exists(final_directory):
os.makedirs(final_directory)
class ReachClassifier:
# set random set for reproducibility
random.seed(246810)
np.random.seed(246810)
def __init__(self, model=None):
self.model = model
self.X = None
self.y = None
self.X_train = None
self.y_train = None
self.X_val = None
self.y_val = None
self.fs = None
def set_model(self, data):
self.model = data
def set_X(self, data):
self.X = data
def set_y(self, data):
self.y = data
def set_X_train(self, data):
self.X_train = data
def set_y_train(self, data):
self.y_train = data
def set_X_val(self, data):
self.X_val = data
def set_y_val(self, data):
self.y_val = data
def set_fs(self, data):
self.fs = data
def fit(self, X, y):
"""
Fits model to data.
Args:
X: features
y: labels
Returns: None
"""
self.model.fit(X, y)
def predict(self, X):
"""
Returns trained model predictions.
Args:
X: features
y: labels
Returns: preds
"""
return self.model.predict(X)
@staticmethod
def partition(X, y):
"""
Partitions data.
Args:
X: features
y: labels
Returns: X_train, X_val, y_train, y_val
"""
# partition into validation set
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
return X_train, X_val, y_train, y_val
@staticmethod
def evaluate(model, X, y):
"""
Performs 5-fold cross-validation and returns accuracy.
Args:
model: sklearn model
X: features
y: labels
Returns: avg_train_accuracy, avg_test_accuracy
"""
print("Cross validation:")
cv_results = cross_validate(model, X, y, cv=5, return_train_score=True)
train_results = cv_results['train_score']
test_results = cv_results['test_score']
avg_train_accuracy = sum(train_results) / len(train_results)
avg_test_accuracy = sum(test_results) / len(test_results)
print('averaged train accuracy:', avg_train_accuracy)
print('averaged validation accuracy:', avg_test_accuracy)
return avg_train_accuracy, avg_test_accuracy
@staticmethod
def adjust_class_imbalance(X, y):
"""
Adjusts for class imbalance.
Object to over-sample the minority class(es) by picking samples at random with replacement.
The dataset is transformed, first by oversampling the minority class, then undersampling the majority class.
Returns: new samples
References: https://machinelearningmastery.com/smote-oversampling-for-imbalanced-classification/
"""
oversampler = SMOTE(random_state=42)
# undersampler = RandomUnderSampler(random_state=42)
steps = [('o', oversampler)] # , ('u', undersampler)]
pipeline = imblearn_Pipeline(steps=steps)
X_res, y_res = pipeline.fit_resample(X, y)
return X_res, y_res
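    # Minimal sketch on a toy 9:1 imbalanced set — SMOTE synthesizes minority samples until the classes match
    # (illustrative only):
    #   import numpy as np
    #   X_toy = np.random.rand(100, 4)
    #   y_toy = np.array([0] * 90 + [1] * 10)
    #   X_res, y_res = ReachClassifier.adjust_class_imbalance(X_toy, y_toy)
    #   Counter(y_res)   # Counter({0: 90, 1: 90})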
@staticmethod
def hyperparameter_tuning(X_train, X_val, y_train, y_val, model, param_grid, fullGridSearch=False):
"""
Performs hyperparameter tuning and returns best trained model.
Args:
model: sklearn
param_grid: grid of models and hyperparameters
fullGridSearch: True to run exhaustive param search, False runs RandomizedSearchCV
Returns:
tuned model
parameters found through search
accuracy of tuned model
Reference: https://towardsdatascience.com/hyperparameter-tuning-the-random-forest-in-python-using-scikit-learn-28d2aa77dd74
"""
# Use the random grid to search for best hyperparameters
if fullGridSearch:
# Instantiate the grid search model
grid_search = GridSearchCV(estimator=model, param_grid=param_grid,
cv=3, n_jobs=-1, verbose=2)
else:
# Random search of parameters, using 3 fold cross validation,
# search across 100 different combinations, and use all available cores
grid_search = RandomizedSearchCV(estimator=model, param_distributions=param_grid, n_iter=2, cv=5,
random_state=42, verbose=2, n_jobs=-1)
# Fit the random search model
grid_search.fit(X_train, y_train)
base_model = RandomForestClassifier()
base_model.fit(X_train, y_train)
base_train_accuracy, base_test_accuracy = ReachClassifier.evaluate(base_model, X_val, y_val)
best_grid = grid_search
best_model = grid_search.best_estimator_
best_train_accuracy, best_test_accuracy = ReachClassifier.evaluate(best_model, X_val, y_val)
print('Improvement % of', (100 * (best_test_accuracy - base_test_accuracy) / base_test_accuracy))
return best_model, best_grid.best_params_, best_test_accuracy
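    # Example shape of `param_grid` for the StandardScaler + RandomForest pipeline built in train_and_validate
    # (illustrative values; keys must be prefixed with the 'classifier' step name):
    #   param_grid = {
    #       'classifier__n_estimators': [100, 300, 500],
    #       'classifier__max_depth': [None, 10, 30],
    #       'classifier__min_samples_leaf': [1, 2, 4],
    #   }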
@staticmethod
def mean_df(df):
"""
Maps np.mean to all cells in df. For generating features.
Args:
df: (df)
Returns: df with mean of each cell as its values
"""
mean_df = df.applymap(np.mean)
return mean_df
@staticmethod
def do_feature_selection(X, y, k):
"""
Defines the feature selection and applies the feature selection procedure to the dataset.
Fit to data, then transform it.
Args:
k: top number of features to select
Returns: (array shape trials x k features) subset of the selected input features and feature estimator
references: https://machinelearningmastery.com/feature-selection-with-numerical-input-data/
"""
# configure to select a subset of features
fs = SelectKBest(score_func=f_classif, k=k)
# learn relationship from training data
fs.fit(X, y)
# transform train input data
X_train_fs = fs.transform(X)
return X_train_fs, fs
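    # Usage sketch (keeps the k features with the highest ANOVA F-scores):
    #   X_fs, fs = ReachClassifier.do_feature_selection(X, y, k=10)
    #   X_fs.shape    # (n_trials, 10)
    #   fs.scores_    # per-feature F-statistics, visualized by plot_features below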
@staticmethod
def plot_features(fs, X):
"""
Plots and saves feature importances.
Returns: None
"""
for i in range(len(fs.scores_)):
print('Feature %d: %f' % (i, fs.scores_[i]))
# plot the scores
# x = [i for i in range(len(fs.scores_))]
x = X.columns
plt.bar(x, fs.scores_)
# rotate x axis to avoid overlap
plt.xticks(rotation=45)
plt.yticks(rotation=90)
plt.title("Input Features vs. Feature Importance")
plt.ylabel("Mutual Information Feature Importance")
plt.xlabel("Input Features")
plt.savefig(f'{folder_name}/feat_importance.png')
@staticmethod
def pre_classify(X, y, k=10):
"""
Partitions, adjusts class imbalance, and performs feature selection.
Args:
X: features
y: labels
k: (int) number of features to select
Returns: data ready for ML classification
"""
# adjust class imbalance
X_res, y_res = ReachClassifier.adjust_class_imbalance(X, y)
# feat selection
X_selected, fs = ReachClassifier.do_feature_selection(X_res, y_res, k)
return X_selected, y_res, fs
@staticmethod
def train_and_validate(X, y, param_grid, save=True, filename=None):
"""
Trains and Validates.
Args:
X: features
y: labels
param_grid: model and hyperparameters to search over
save: (bool) True to save model
filename: (str) name of model to save as
Returns: trained model, train model's CV score
"""
# partition
X_train, X_val, y_train, y_val = ReachClassifier.partition(X, y)
# hyperparameter and model tuning
base_model = Pipeline(steps=[('standardscaler', StandardScaler()),
('classifier', RandomForestClassifier())])
best_model, best_params_, best_test_accuracy = ReachClassifier.hyperparameter_tuning(
X_train, X_val, y_train, y_val, base_model, param_grid, fullGridSearch=False)
# fit and validate
best_model.fit(X_train, y_train)
_, val_score = ReachClassifier.evaluate(best_model, X_val, y_val)
# fit on all training data
best_model.fit(X, y)
# save model
if save:
joblib.dump(best_model, f"{filename}.joblib")
# print("MODEL SCORE", best_model.score(X_val_selected, y_val))
print("BEST MODEL", best_model)
print("CV SCORE", val_score)
return best_model, val_score
class ClassificationHierarchy:
random.seed(246810)
np.random.seed(246810)
def __init__(self):
pass
def split(self, preds, X, y, onesGoLeft=True):
"""
Splits X and y based on predictions.
Args:
preds: (list of ints) predictions of ones and zeros
X: features
y: labels
onesGoLeft: (bool) True for labels with prediction 1 to be on LHS.
Returns: split X, y data
"""
row_mask = list(map(bool, preds)) # True for 1, False otherwise
negate_row_mask = ~np.array(row_mask) # True for 0, False otherwise
if onesGoLeft:
X_left = X[row_mask]
y_left = y[row_mask]
X_right = X[negate_row_mask]
y_right = y[negate_row_mask]
else:
X_right = X[row_mask]
y_right = y[row_mask]
X_left = X[negate_row_mask]
y_left = y[negate_row_mask]
return X_left, y_left, X_right, y_right
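    # Worked example (illustrative): with preds = [1, 0, 1] and onesGoLeft=True,
    #   X_left, y_left   receive rows 0 and 2 (predicted 1)
    #   X_right, y_right receive row 1 (predicted 0)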
def run_hierarchy(self, X, y, param_grid, models, save_models):
"""
Makes predictions through the whole classification hierarchy.
Args:
X: features
y: labels (Trial Type Num Reaches Which Hand)
param_grid: grid
models: (list) list of trained models or None
save_models: (bool) True to save
Returns:
"""
# load models
# model_0, model_1, model_2 = None, None, None
# if models:
# model_0 = joblib.load(models[0])
# model_1 = joblib.load(models[1])
# model_2 = joblib.load(models[2])
# TRIAL TYPE
classifier = ReachClassifier()
y_0 = y['Trial Type'].values # 0 for not null
y_0 = CU.onehot_nulls(y_0)
model_0, val_score_0 = self.fit(classifier, X, y_0, param_grid, save_models,
f'{folder_name}/TrialTypeModel')
# SPLIT
# X_null, y_null, X_NotNull, y_NotNull = self.split(preds_0, X, y, onesGoLeft=True) # 1 if null, 0 if real trial
# NUM REACHES
y_1 = y['Num Reaches'].values
y_1 = CU.onehot_num_reaches(y_1) # 0 if <1, 1 if > 1 reaches
classifier = ReachClassifier()
model_1, val_score_1 = self.fit(classifier, X, y_1, param_grid, save_models,
f'{folder_name}/NumReachesModel')
# SPLIT
# X_greater, y_greater, X_less, y_less = self.split(preds_1, X_NotNull, y_NotNull, onesGoLeft=True) # 0 if <1, 1 if > 1 reaches
# WHICH HAND
classifier = ReachClassifier()
y_2 = y['Which Hand'].values # # classify 0 as r/l
y_2 = CU.hand_type_onehot(y_2)
model_2, val_score_2 = self.fit(classifier, X, y_2, param_grid, save_models,
f'{folder_name}/WhichHandModel')
# X_bi, y_bi, X_rl, y_rl = self.split(preds_2, X_less, y_less, onesGoLeft=True) # classify 0 as r/l, 1 or non r/l
return [val_score_0, val_score_1, val_score_2]
def fit(self, classifier, X, y, param_grid, save, filename):
"""
Trains, validates, and/or makes predictions.
Args:
classifier: ReachClassifier object
X: features
y: labels
param_grid: grid
save: (bool) True to save
filename: (str) file name to save model as
Returns: trained model and its validation score
"""
# adjust class imbalance, feature selection
X_selected, y_res, fs = classifier.pre_classify(X, y)
# train and validate
assert (y is not None)
best_model, val_score = classifier.train_and_validate(X_selected, y_res, param_grid, save=save,
filename=filename)
return best_model, val_score
def run_hierarchy_pretrained(self, X, y, models):
"""
Makes predictions through the whole classification hierarchy.
Args:
X: features
y: labels (Trial Type Num Reaches Which Hand)
models: (list of str) list of trained models
Returns: list of validation accuracies
"""
# load models
model_0 = joblib.load(models[0])
model_1 = joblib.load(models[1])
model_2 = joblib.load(models[2])
# TRIAL TYPE
classifier = ReachClassifier()
y_0 = y['Trial Type'].values # 0 for not null
y_0 = CU.onehot_nulls(y_0)
val_score_0 = self.predict(X, y_0, model_0)
# SPLIT
# X_null, y_null, X_NotNull, y_NotNull = self.split(preds_0, X, y, onesGoLeft=True) # 1 if null, 0 if real trial
# NUM REACHES
y_1 = y['Num Reaches'].values
y_1 = CU.onehot_num_reaches(y_1) # 0 if <1, 1 if > 1 reaches
classifier = ReachClassifier()
val_score_1 = self.predict(X, y_1, model_1)
# SPLIT
# X_greater, y_greater, X_less, y_less = self.split(preds_1, X_NotNull, y_NotNull, onesGoLeft=True) # 0 if <1, 1 if > 1 reaches
# WHICH HAND
classifier = ReachClassifier()
y_2 = y['Which Hand'].values # # classify 0 as r/l
y_2 = CU.hand_type_onehot(y_2)
val_score_2 = self.predict(X, y_2, model_2)
# X_bi, y_bi, X_rl, y_rl = self.split(preds_2, X_less, y_less, onesGoLeft=True) # classify 0 as r/l, 1 or non r/l
return [val_score_0, val_score_1, val_score_2]
def predict(self, X, y, model):
k = 5  # number of features to select; should match the k used when training the model
X_selected, fs = ReachClassifier.do_feature_selection(X, y, k)
_, val_score = ReachClassifier.evaluate(model, X_selected, y)
return val_score
def trace_datapoint(self, X, arr=[]):
""" Q3.2
for a data point from the spam dataset, prints splits and thresholds
as it is classified down the tree.
"""
pass
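# Usage sketch (hedged) for the hierarchy. X is a feature df and y is assumed to
# be a df holding the 'Trial Type', 'Num Reaches' and 'Which Hand' label columns
# referenced above:
#   hierarchy = ClassificationHierarchy()
#   scores = hierarchy.run_hierarchy(X, y, param_grid, models=None,
#                                    save_models=False)
#   # scores -> [trial_type_score, num_reaches_score, which_hand_score]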
class MakeFeatures:
# Operates on a single trial.
pos_names = ['Handle', 'Back Handle', 'Nose',
'Left Shoulder', 'Left Forearm', 'Left Wrist', 'Left Palm', 'Left Index Base', 'Left Index Tip',
'Left Middle Base', 'Left Middle Tip', 'Left Third Base',
'Left Third Tip', 'Left Fourth Finger Base', 'Left Fourth Finger Tip',
'Right Shoulder', 'Right Forearm', 'Right Wrist', 'Right Palm', 'Right Index Base',
'Right Index Tip', 'Right Middle Base', 'Right Middle Tip', 'Right Third Base',
'Right Third Tip', 'Right Fourth Finger Base', 'Right Fourth Finger Tip']
def __init__(self, trial_arr):
# partition coords and probabilities
self.num_bodyparts = 27
self.num_coords = 3
self.split_index = self.num_bodyparts * self.num_coords # 27 bodyparts * 3 XYZ coordinates for each = 81
self.coords = trial_arr[:self.split_index] # all XYZ coords of all bodyparts (81 rows of first half of array)
self.prob = trial_arr[self.split_index:] # all probability columns (81 rows of second half of array)
# display(coords, prob)
def calc_position(self):
# calculate position of each bodypart (x+y+z/3)
positions = [] # 2D array with rows are bodyparts, cols are frame nums
for i in np.arange(0, len(self.coords), self.num_coords): # for every bodypart
X = self.coords[i]
Y = self.coords[i + 1]
Z = self.coords[i + 2]
pos = (X + Y + Z) / self.num_coords # 1D array
positions.append(pos)
assert (len(positions) == self.num_bodyparts)
return positions
def calc_velocity_speed(self, time):
"""
Time is sliced from exp block 'time' column
"""
# calculate velocity for each XYZ bodypart (x1-x0/t0-t1)
velocities = [] # 2D array with rows are XYZ bodyparts, cols are frame nums
for i in np.arange(0, self.split_index, self.num_coords): # for every bodypart
X = self.coords[i]
Y = self.coords[i + 1]
Z = self.coords[i + 2]
for arr in [X, Y, Z]:
vel = []
for j in np.arange(len(arr) - 1):
x_0 = arr[j]
x_1 = arr[j + 1]
t_0 = time[j]
t_1 = time[j + 1]
vel.append((x_1 - x_0) / (t_1 - t_0))
velocities.append(vel)
assert (len(velocities) == self.split_index)
# aggregate a per-bodypart speed measure from the XYZ velocity components
speeds = []  # 1D list, one aggregate speed value per bodypart
for i in np.arange(0, self.split_index, self.num_coords):
x_vel = velocities[i]
y_vel = velocities[i + 1]
z_vel = velocities[i + 2]
x_squared = np.dot(x_vel, x_vel)
y_squared = np.dot(y_vel, y_vel)
z_squared = np.dot(z_vel, z_vel)
speed = (x_squared + y_squared + z_squared) / 3  # scalar aggregate over all frames
speeds.append(speed)
assert (len(speeds) == self.num_bodyparts)
return velocities, speeds
@staticmethod
def calc_all(trial_arr, time):
# Calculate
f = MakeFeatures(trial_arr)
positions = f.calc_position()
velocities, speeds = f.calc_velocity_speed(time)
# take mean & median of each bodypart for 2D arrays
mean_vel = np.mean(velocities, axis=1) # len = 81
median_vel = np.median(velocities, axis=1)
mean_pos = np.mean(positions, axis=1) # len = 27
median_pos = np.median(positions, axis=1)
# Create df
# concat all arrays
speeds.extend(mean_pos)
speeds.extend(median_pos)
speeds.extend(mean_vel)
speeds.extend(median_vel)
# create col names
col_names = [bodypart + ' speed' for bodypart in f.pos_names]
col_names.extend([bodypart + ' mean pos' for bodypart in f.pos_names])
col_names.extend([bodypart + ' median pos' for bodypart in f.pos_names])
xyz_pos_names = ([bodypart + ' X' for bodypart in f.pos_names]
+ [bodypart + ' Y' for bodypart in f.pos_names]
+ [bodypart + ' Z' for bodypart in f.pos_names])
col_names.extend([bodypart + ' mean vel' for bodypart in xyz_pos_names])
col_names.extend([bodypart + ' median vel' for bodypart in xyz_pos_names])
# create df
df = pd.DataFrame([speeds], columns=col_names)
return df
@staticmethod
def make_block_features(trials, times):
df = pd.DataFrame()
for i in range(len(trials)):
# take trial
trial = trials[i]
time = times[i]
trial_arr = trial.values # convert df to array where rows are frame numbers, cols are bodyparts
trial_arr = trial_arr.T # array with rows are XYZ bodyparts, cols are frame nums
df = pd.concat([df, MakeFeatures.calc_all(trial_arr, time)])
df.reset_index(drop=True, inplace=True)
# rows are trials, cols are features
return df
@staticmethod
def match_labels(df, vec_label):
# create mask of labeled trials
labeled_trials_mask = []
for i, label in enumerate(vec_label):
label_trial_num = int(label[0])
labeled_trials_mask.append(label_trial_num)
return df.T[labeled_trials_mask].T
@staticmethod
def sel_feat_by_keyword(df):
"""
reference: https://towardsdatascience.com/interesting-ways-to-select-pandas-dataframe-columns-b29b82bbfb33
"""
return df.loc[:, [('Palm' in i) or ('Wrist' in i) for i in df.columns]]
@staticmethod
def randomize_feat(df):
return df.sample(n=len(df), replace=False, axis=0, random_state=42) # shuffles rows w.o repl
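# Usage sketch (hedged) for MakeFeatures; `trials` and `times` are assumed to
# come from Preprocessor.split_trial below, and `vec_labels` from
# CU.make_vectorized_labels:
#   feat_df = MakeFeatures.make_block_features(trials, times)
#   feat_df = MakeFeatures.match_labels(feat_df, vec_labels)
#   wrist_palm_df = MakeFeatures.sel_feat_by_keyword(feat_df)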
class Preprocessor:
def __init__(self):
"""
Trial-izes data into a ML compatible format.
"""
self.kin_data = None
self.exp_data = None
self.label = None # usage: CU.make_vectorized_labels(label)
self.kin_block = None
self.exp_block = None
self.all_exp_blocks = []
self.all_kin_blocks = []
# kin block
self.wv = None
self.window_length = None
self.pre = None
# ML dfs
self.formatted_kin_block = None # kinematic feature df
self.formatted_exp_block = None # robot feature df
def set_kin_data(self, data):
self.kin_data = data
def set_exp_data(self, data):
self.exp_data = data
def set_kin_block(self, data):
self.kin_block = data
self.format_kin_block()
def set_formatted_kin_block(self, data):
self.formatted_kin_block = data
def set_exp_block(self, data):
self.exp_block = data
def set_formatted_exp_block(self, data):
self.formatted_exp_block = data
def set_label(self, data):
self.label = data
def set_wv(self, data):
self.wv = data
def set_window_length(self, data):
self.window_length = data
def set_pre(self, data):
self.pre = data
@staticmethod
def load_data(filename, file_type='pkl'):
"""
Loads FILENAME as pandas DataFrame.
Args:
filename: (str) path to file to load
file_type: (str) file type to load
Returns: (df) pandas DataFrame
"""
assert file_type == 'pkl' or file_type == 'h5' or file_type == 'csv', f'{file_type} not a valid file type'
if file_type == 'pkl':
return pd.read_pickle(filename)
elif file_type == 'h5':
# get h5 key
with h5py.File(filename, "r") as f:
key = list(f.keys())[0]
return pd.read_hdf(filename, key)
elif file_type == 'csv':
return pd.read_csv(filename)
@staticmethod
def save_data(df, filename, file_type='csv'):
"""
Saves FILENAME.
Args:
df: (df) to save
filename: (str) path to file
file_type: (str) file type
Returns: None
"""
assert file_type == 'csv' or file_type == 'pkl' or file_type == 'h5', f'{file_type} not a valid file type'
if file_type == 'csv':
df.to_csv(filename)
if file_type == 'pkl':
df.to_pickle(filename)
if file_type == 'h5':
df.to_hdf(filename, key='df')
@staticmethod
def get_single_block(df, date, session, rat, save_as=None, format='exp'):
"""
Returns DataFrame from data with matching rat, date, session.
Args:
df: (df) DataFrame with all blocks
date: (str) date
session: (str) session number
rat: (str) rat name
save_as: (bool) True to save as csv file, else default None
format: (str) specifies which type of block to retrieve (kin or exp)
Returns: new_df: (df) with specified rat, date, session
"""
new_df = pd.DataFrame()
if format == 'exp':
rr = df.loc[df['Date'] == date]
rr = rr.loc[rr['S'] == session]
new_df = rr.loc[rr['rat'] == rat]
elif format == 'kin': # kin case
for block in df:
if isinstance(block, pd.DataFrame): # handles missing blocks in df
index = block.columns[0]
if rat == index[0] and session == index[1] and date == index[2]:
new_df = pd.DataFrame(block)
assert (len(new_df.index) != 0), "block does not exist in data!"
if save_as:
Preprocessor.save_data(new_df, save_as, file_type='pkl')
return new_df
@staticmethod
def apply_median_filter(df, wv=5):
"""
Applies a multidimensional median filter to DF columns.
Args:
df: (df)
wv: (int) kernel size for the median filter applied to the positional data (default 5)
Returns: Filtered df. Has the same shape as input.
"""
# iterate across columns
for (columnName, columnData) in df.items():
# Apply median filter to column array values (bodypart, pos or prob)
df[columnName] = ndimage.median_filter(columnData.values, size=wv)
return df
@staticmethod
def stack(df):
"""
Reshapes DF. Stack the prescribed level(s) from columns to index.
Args:
df: (df)
Returns: stacked df
"""
df_out = df.stack()
df_out.index = df_out.index.map('{0[1]}_{0[0]}'.format)
if isinstance(df_out, pd.Series):
df_out = df_out.to_frame()
return df_out
def format_kin_block(self):
"""
Removes rat ID levels of a block df and applies median filter to column values.
Sets formatted_kin_block to (df) two level multi-index df with filtered values.
Returns: None
"""
# rm ID levels
index = self.kin_block.columns[0]
rm_levels_df = self.kin_block[index[0]][index[1]][index[2]][index[3]]
# filter bodypart columns
filtered_df = Preprocessor.apply_median_filter(rm_levels_df, wv=self.wv)
# update attribute
self.set_formatted_kin_block(filtered_df)
@staticmethod
def split_trial(formatted_kin_block, exp_block, window_length, pre):
"""
Partitions kinematic data into trials.
Args:
formatted_kin_block: (df) formatted kin block
exp_block: (df)
window_length (int): trial splitting window length, the number of frames to load data from (default 250)
Set to 4-500. 900 is too long.
pre: int, pre cut off before a trial starts, the number of frames to load data from before start time
For trial splitting, set to 10. 50 is too long. (default 10)
Returns: trials: (list of dfs) of length number of trials with index trial number
"""
assert (window_length > pre), "invalid slice!"
starting_frames = exp_block['r_start'].values[0]
trials = []
times = []
# iterate over starting frames
for frame_num in starting_frames:
start = frame_num - pre
# negative indices case
if (frame_num - pre) <= 0:
start = 0
# slice trials
trials.append(formatted_kin_block.loc[start:frame_num + window_length])
times.append(
exp_block['time'][0][start:frame_num + window_length + 1]) # plus 1 to adjust size diff with trial size
return trials, times
@staticmethod
def trialize_kin_blocks(formatted_kin_block, times):
"""
Returns a list of one column dfs, each representing a trial
Args:
formatted_kin_block: (list of dfs) split trial data
times: (list of arrays of ints) sliced time from exp block
Returns: ftrials: (list of one column dfs)
"""
# iterate over trials
ftrials = []
for trial in formatted_kin_block:
# match bodypart names
trial_size = len(trial.index)
trial.index = np.arange(trial_size)
# reshape df into one column for one trial
formatted_trial = Preprocessor.stack(Preprocessor.stack(trial))
ftrials.append(formatted_trial)
return ftrials
@staticmethod
def match_kin_to_label(formatted_kin_block, label):
"""
Selects labeled trials and matches them to their labels.
Args:
formatted_kin_block: (list of one column dfs) trialized data
label: (list of lists) vectorized labels
Returns: labeled_trials: (list of one row dfs) matched to labels
Note:
If a trial is not labeled, the trial is dropped and unused.
Trial numbers are zero-indexed.
"""
assert (len(label) <= len(formatted_kin_block)), \
f"More labels {len(label)} than trials {len(formatted_kin_block)}!"
# iterate over labels and trials
labeled_trials = []
for i, label in enumerate(label):
label_trial_num = int(label[0])
trialized_df = formatted_kin_block[label_trial_num] # trial nums are 0-indexed
# rename column of block df to trial num
trialized_df.columns = [label_trial_num]
# transpose so each row represents a trial
trialized_df = trialized_df.T
labeled_trials.append(trialized_df)
return labeled_trials
@staticmethod
def create_kin_feat_df(formatted_kin_block):
"""
Appends all formatted trials into a single DataFrame.
Args:
formatted_kin_block: list of formatted dfs
Returns: df: (df) where row represents trial num and columns are features.
"""
df = formatted_kin_block[0]
for trial in formatted_kin_block[1:]:
df = pd.concat([df, trial], ignore_index=True)
return df
def make_kin_feat_df(self):
"""
Given a kinematic block df, returns a ML ready feature df
Returns: (df) where row represents trial num and columns are features.
"""
trials, times = Preprocessor.split_trial(self.kin_block, self.exp_block, self.window_length, self.pre)
ftrials = Preprocessor.trialize_kin_blocks(trials, times)
labeled_trials = Preprocessor.match_kin_to_label(ftrials, self.label)
df = Preprocessor.create_kin_feat_df(labeled_trials)
self.set_formatted_kin_block(df)
return df
def make_kin_psv_feat_df(self, randomize=False):
"""
Returns: feature df of position, speed, and velocity
"""
trials, times = Preprocessor.split_trial(self.kin_block, self.exp_block, self.window_length, self.pre)
df = MakeFeatures.make_block_features(trials, times)
df = MakeFeatures.match_labels(df, self.label)
ret_df = MakeFeatures.sel_feat_by_keyword(df) # select just wrist and palms
if randomize:
return MakeFeatures.randomize_feat(ret_df)
return ret_df
@staticmethod
def match_exp_to_label(exp_feat_df, label):
"""
Selects labeled trials and matches them to their labels.
Args:
exp_feat_df: (df) exp df
label: (list of lists) vectorized labels
Returns: masked_exp_feat_df: (df) exp feature df matched with labels
Note:
If a trial is not labeled, the trial is dropped and unused.
Trial numbers are zero-indexed.
"""
assert (len(label) <= len(exp_feat_df)), \
f"More labels {len(label)} than trials {len(exp_feat_df)}!"
# match to labels
labeled_trial_nums = []
for i, label in enumerate(label):
labeled_trial_nums.append(int(label[0]))
# apply mask
masked_exp_feat_df = exp_feat_df.iloc[labeled_trial_nums]
return masked_exp_feat_df
def make_exp_feat_df(self):
"""
Given a robot block df, returns a ML ready feature df
Returns: (df) where row represents trial num and columns are features.
"""
# create exp features
start_frames = self.exp_block['r_start'].values[0]
exp_features = CU.import_experiment_features(self.exp_block, start_frames, self.window_length, self.pre)
hot_vector = CU.onehot(self.exp_block) # unused
exp_feat_df = CU.import_experiment_features_to_df(exp_features)
# match and expand
masked_exp_feat_df = Preprocessor.match_exp_to_label(exp_feat_df, self.label)
# update attribute
self.set_formatted_exp_block(masked_exp_feat_df)
return self.formatted_exp_block
@staticmethod
def concat(dfs, row=True):
"""
Concats a list of dataframes row or col-wise
Args:
dfs: (list of dfs) to concat
row: (bool) True to concat by row
Returns: new df
"""
assert (len(dfs) >= 2), "Must concat at least 2 dfs!"
if row:
df_0 = dfs[0]
for df in dfs[1:]:
assert (df_0.shape[1] == df.shape[1]), f'{df_0.shape} {df.shape} cols must match!'
df_0 = pd.concat([df_0, df], axis=0)
else:
df_0 = dfs[0]
for df in dfs[1:]:
assert (df_0.shape[0] == df.shape[0]), f'{df_0.shape} {df.shape} rows must match!'
df_0 = pd.concat([df_0, df], axis=1)
return df_0
def make_ml_feat_labels(self, kin_block, exp_block, label,
et, el, window_length=250, pre=10, wv=5):
"""
Returns ml feature and label arrays.
Args:
kin_block: (df)
exp_block: (df)
label: (list of list)
et: int, coordinate change variable
Will take the positional coordinates and put them into the robot reference frame.
el: int, coordinate change variable
Will take the positional coordinates and put them into the robot reference frame.
window_length (int): trial splitting window length, the number of frames to load data from (default 250)
Set to 4-500. 900 is too long.
pre: int, pre cut off before a trial starts, the number of frames to load data from before start time
For trial splitting, set to 10. 50 is too long. (default 10)
wv: (int) kernel size for the median filter applied to the positional data
Notes:
labels and blocks must match!
hot_vector: (array) one hot array of robot block data of length num trials
exp_features: (list) experimental features with shape (Num trials X Features X pre+window_length)
"""
# init instance attributes
self.set_exp_block(exp_block)
self.set_wv(wv) # must be set before kin block
self.set_window_length(window_length)
self.set_pre(pre)
self.set_kin_block(kin_block)
# vectorize label
vectorized_label, _ = CU.make_vectorized_labels(label)
self.set_label(vectorized_label)
# create kin features
# kin_feat_df = self.make_kin_feat_df()
kin_feat_df = self.make_kin_psv_feat_df() # todo randomize=True to change features
# create exp features
exp_feat_df = self.make_exp_feat_df()
return kin_feat_df, exp_feat_df
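# End-to-end usage sketch (hedged); file paths, `label`, and the `et`/`el`
# values are placeholders:
#   p = Preprocessor()
#   kin_block = p.load_data('kin_block.pkl')
#   exp_block = p.load_data('exp_block.pkl')
#   kin_feat_df, exp_feat_df = p.make_ml_feat_labels(kin_block, exp_block, label,
#                                                    et=0, el=0, window_length=250,
#                                                    pre=10, wv=5)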
def main_run_all():
# LOAD DATA
preprocessor = Preprocessor()
# Define data paths
tkdf_16 = preprocessor.load_data('DataFrames/tkdf16_f.pkl')
tkdf_15 = preprocessor.load_data('DataFrames/3D_positions_RM15_f.pkl')
tkdf_14 = preprocessor.load_data('DataFrames/3D_positions_RM14_f.pkl')
tkdf_13 = preprocessor.load_data('DataFrames/3D_positions_RM13.pkl') # not _f version
tkdf_12 = preprocessor.load_data('DataFrames/3D_positions_RM12.pkl')
tkdf_11 = preprocessor.load_data('DataFrames/3D_positions_RM11.pkl')
tkdf_10 = preprocessor.load_data('DataFrames/3D_positions_RM10.pkl')
tkdf_9 = preprocessor.load_data('DataFrames/3D_positions_RM9.pkl')
RM16_expdf = preprocessor.load_data('DataFrames/RM16_expdf.pickle')
RM15_expdf = preprocessor.load_data('DataFrames/RM15_expdf.pickle')
RM14_expdf = preprocessor.load_data('DataFrames/RM14_expdf.pickle')
RM13_expdf = preprocessor.load_data('DataFrames/RM13_expdf.pickle')
RM12_expdf = preprocessor.load_data('DataFrames/RM12_expdf.pickle')
RM11_expdf = preprocessor.load_data('DataFrames/RM11_expdf.pickle')
RM10_expdf = preprocessor.load_data('DataFrames/RM10_expdf.pickle')
RM9_expdf = preprocessor.load_data('DataFrames/RM9_expdf.pickle')
# GET and SAVE BLOCKS
# (df, date, session, rat, save_as=None, format='exp')
exp_lst = [
preprocessor.get_single_block(RM16_expdf, '0190917', 'S1', '09172019', format='exp',
save_as=f'{folder_name}/exp_rm16_9_17_s1.pkl'),
preprocessor.get_single_block(RM16_expdf, '0190918', 'S1', '09182019', format='exp',
save_as=f'{folder_name}/exp_rm16_9_18_s1.pkl'),
preprocessor.get_single_block(RM16_expdf, '0190917', 'S2', '09172019', format='exp',
save_as=f'{folder_name}/exp_rm16_9_17_s2.pkl'),
preprocessor.get_single_block(RM16_expdf, '0190920', 'S3', '09202019', format='exp',
save_as=f'{folder_name}/exp_rm16_9_20_s3.pkl'),
preprocessor.get_single_block(RM16_expdf, '0190919', 'S3', '09192019', format='exp',
save_as=f'{folder_name}/exp_rm16_9_19_s3.pkl'),
preprocessor.get_single_block(RM15_expdf, '0190925', 'S3', '09252019', format='exp', # date sess rat
save_as=f'{folder_name}/exp_rm15_9_25_s3.pkl'),
preprocessor.get_single_block(RM15_expdf, '0190917', 'S4', '09172019', format='exp',
save_as=f'{folder_name}/exp_rm15_9_17_s4.pkl'),
preprocessor.get_single_block(RM14_expdf, '0190920', 'S1', '09202019', format='exp',
save_as=f'{folder_name}/exp_rm14_9_20_s1.pkl'),
preprocessor.get_single_block(RM14_expdf, '0190918', 'S2', '09182019', format='exp',
save_as=f'{folder_name}/exp_rm14_9_18_s2.pkl'),
preprocessor.get_single_block(RM13_expdf, '190920_', 'S3', '09202019', format='exp',
save_as=f'{folder_name}/exp_rm13_9_20_s3.pkl'), # adjusted date
preprocessor.get_single_block(RM12_expdf, '0190919', 'S1', '09192019', format='exp',
save_as=f'{folder_name}/exp_rm12_9_19_s1.pkl'),
preprocessor.get_single_block(RM11_expdf, '0190918', 'S4', '09182019', format='exp',
save_as=f'{folder_name}/exp_rm11_9_18_s4.pkl'),
preprocessor.get_single_block(RM10_expdf, '0190917', 'S2', '09172019', format='exp',
save_as=f'{folder_name}/exp_rm10_9_17_s2.pkl'),
preprocessor.get_single_block(RM9_expdf, '190919_', 'S3', '09192019', format='exp', # adjusted date
save_as=f'{folder_name}/exp_rm9_9_19_s3.pkl')
]
kin_lst = [
preprocessor.get_single_block(tkdf_16, '0190917', 'S1', '09172019', format='kin',
save_as=f'{folder_name}/kin_rm16_9_17_s1.pkl'),
preprocessor.get_single_block(tkdf_16, '0190918', 'S1', '09182019', format='kin',
save_as=f'{folder_name}/kin_rm16_9_18_s1.pkl'),
preprocessor.get_single_block(tkdf_16, '0190917', 'S2', '09172019', format='kin',
save_as=f'{folder_name}/kin_rm16_9_17_s2.pkl'),
preprocessor.get_single_block(tkdf_16, '0190920', 'S3', '09202019', format='kin',
save_as=f'{folder_name}/kin_rm16_9_20_s3.pkl'),
preprocessor.get_single_block(tkdf_16, '0190919', 'S3', '09192019', format='kin',
save_as=f'{folder_name}/kin_rm16_9_19_s3.pkl'),
preprocessor.get_single_block(tkdf_15, '0190925', 'S3', '09252019', format='kin',
save_as=f'{folder_name}/kin_rm15_9_25_s3.pkl'),
preprocessor.get_single_block(tkdf_15, '0190917', 'S4', '09172019', format='kin',
save_as=f'{folder_name}/kin_rm15_9_17_s4.pkl'),
preprocessor.get_single_block(tkdf_14, '0190920', 'S1', '09202019', format='kin',
save_as=f'{folder_name}/kin_rm14_9_20_s1.pkl'),
preprocessor.get_single_block(tkdf_14, '0190918', 'S2', '09182019', format='kin',
save_as=f'{folder_name}/kin_rm14_9_18_s2.pkl'),
preprocessor.get_single_block(tkdf_13, '190920_', 'S3', '09202019', format='kin', # adjusted date
save_as=f'{folder_name}/kin_rm13_9_20_s3.pkl'),
preprocessor.get_single_block(tkdf_12, '0190919', 'S1', '09192019', format='kin',
save_as=f'{folder_name}/kin_rm12_9_19_s1.pkl'),
preprocessor.get_single_block(tkdf_11, '0190918', 'S4', '09182019', format='kin',
save_as=f'{folder_name}/kin_rm11_9_18_s4.pkl'),
preprocessor.get_single_block(tkdf_10, '0190917', 'S2', '09172019', format='kin',
save_as=f'{folder_name}/kin_rm10_9_17_s2.pkl'),
preprocessor.get_single_block(tkdf_9, '190919_', 'S3', '09192019', format='kin', # adjusted date
save_as=f'{folder_name}/kin_rm9_9_19_s3.pkl')
]
"""# CREATE FEAT and LABEL DFS
kin_dfs = []
exp_dfs = []
label_dfs = []
for i in range(len(kin_lst)):
kin_block = kin_lst[i]
exp_block = exp_lst[i]
label = labels[i]
kin_feat_df, exp_feat_df = preprocessor.make_ml_feat_labels(kin_block, exp_block,
label, et, el,
window_length, pre,
wv)
# Check for NaNs and replace with zeros
if kin_feat_df.isnull().values.any():
print(f"{i}th Kin Block contains Nan!")
for column in kin_feat_df:
if kin_feat_df[column].isnull().values.any():
print(f"Kin '{kin_feat_df[column]}' contains NaN and replaced with 0!")
kin_feat_df.fillna(0)
if exp_feat_df.isnull().values.any():
print(f"{i}th Exp Block contains Nan!")
for column in kin_feat_df:
if exp_feat_df[column].isnull().values.any():
print(f" Exp '{exp_feat_df[column]}' contains NaN and replaced with 0!")
exp_feat_df.fillna(0)
# append
vec_labels, _ = CU.make_vectorized_labels(label)
label_df = CU.make_vectorized_labels_to_df(vec_labels)
label_dfs.append(label_df)
kin_dfs.append(kin_feat_df)
exp_dfs.append(exp_feat_df)
# concat
all_kin_features = Preprocessor.concat(kin_dfs, row=True)
all_exp_features = Preprocessor.concat(exp_dfs, row=True)
all_label_dfs = Preprocessor.concat(label_dfs, row=True)
# save ML dfs
Preprocessor.save_data(all_kin_features, f'{folder_name}/kin_feat.pkl', file_type='pkl')
Preprocessor.save_data(all_exp_features, f'{folder_name}/exp_feat.pkl', file_type='pkl')
Preprocessor.save_data(all_label_dfs, f'{folder_name}/label_dfs.pkl', file_type='pkl')
"""
def create_features():
# NEWEST
# GET SAVED BLOCKS
# (df, date, session, rat, save_as=None, format='exp')
exp_lst = [
[f'{folder_name}/exp_rm16_9_17_s1.pkl',
f'{folder_name}/exp_rm16_9_18_s1.pkl',
f'{folder_name}/exp_rm16_9_17_s2.pkl',
f'{folder_name}/exp_rm16_9_20_s3.pkl',
f'{folder_name}/exp_rm16_9_19_s3.pkl'],
[f'{folder_name}/exp_rm15_9_25_s3.pkl',
f'{folder_name}/exp_rm15_9_17_s4.pkl'],
[f'{folder_name}/exp_rm14_9_20_s1.pkl',
f'{folder_name}/exp_rm14_9_18_s2.pkl'],
[f'{folder_name}/exp_rm13_9_20_s3.pkl'],
[f'{folder_name}/exp_rm12_9_19_s1.pkl'],
[f'{folder_name}/exp_rm11_9_18_s4.pkl'],
[f'{folder_name}/exp_rm10_9_17_s2.pkl'],
[f'{folder_name}/exp_rm9_9_19_s3.pkl']
]
kin_lst = [
[f'{folder_name}/kin_rm16_9_17_s1.pkl',
f'{folder_name}/kin_rm16_9_18_s1.pkl',
f'{folder_name}/kin_rm16_9_17_s2.pkl',
f'{folder_name}/kin_rm16_9_20_s3.pkl',
f'{folder_name}/kin_rm16_9_19_s3.pkl'],
[f'{folder_name}/kin_rm15_9_25_s3.pkl',
f'{folder_name}/kin_rm15_9_17_s4.pkl'],
[f'{folder_name}/kin_rm14_9_20_s1.pkl',
f'{folder_name}/kin_rm14_9_18_s2.pkl'],
[f'{folder_name}/kin_rm13_9_20_s3.pkl'],
[f'{folder_name}/kin_rm12_9_19_s1.pkl'],
[f'{folder_name}/kin_rm11_9_18_s4.pkl'],
[f'{folder_name}/kin_rm10_9_17_s2.pkl'],
[f'{folder_name}/kin_rm9_9_19_s3.pkl']
]
#Append paths
block_paths = [
[['17', 'S1', 'RM16'],
['18', 'S1', 'RM16'],
['17', 'S2', 'RM16'],
['20', 'S3', 'RM16'],
['19', 'S3', 'RM16']],
[['25', 'S3', 'RM15'],
['17', 'S4', 'RM15']],
[['20', 'S1', 'RM14'],
['18', 'S2', 'RM14']],
[['20', 'S3', 'RM13']],
[['19', 'S1', 'RM12']],
[['18', 'S4', 'RM11']],
[['17', 'S2', 'RM10']],
[['19', 'S3', 'RM9']],
]
# CREATE FEAT and LABEL DFS
feat_dfs = []
for i in range(len(block_paths)): # for each rat
for j in range(len(block_paths[i])): # for each trial
kin_data = Preprocessor.load_data(kin_lst[i][j])
exp_data = Preprocessor.load_data(exp_lst[i][j])
date, session, rat = block_paths[i][j]
# Run ReachUtils
R = CU.ReachUtils(rat, date, session, exp_data, kin_data, 's') # init
print("saving")
data = R.create_and_save_classification_features()
print("SAVED block")
# append
feat_dfs.append(data)
# save ML dfs
Preprocessor.save_data(pd.DataFrame(feat_dfs), f'{folder_name}/feat_dfs.pkl', file_type='pkl')
def create_labels():
# NEWEST
# GET SAVED BLOCKS
# (df, date, session, rat, save_as=None, format='exp')
# Append paths
block_paths = [
[['17', 'S1', 'RM16'],
['18', 'S1', 'RM16'],
['17', 'S2', 'RM16'],
['20', 'S3', 'RM16'],
['19', 'S3', 'RM16']],
[['25', 'S3', 'RM15'],
['17', 'S4', 'RM15']],
[['20', 'S1', 'RM14'],
['18', 'S2', 'RM14']],
[['20', 'S3', 'RM13']],
[['19', 'S1', 'RM12']],
[['18', 'S4', 'RM11']],
[['17', 'S2', 'RM10']],
[['19', 'S3', 'RM9']],
]
# CREATE FEAT and LABEL DFS
label_dfs = []
for i in range(len(block_paths)): # for each rat
for j in range(len(block_paths[i])): # for each trial
label = labels[i][j]
# append
print(block_paths[i][j])
vec_labels, _ = CU.make_vectorized_labels(label)
label_df = CU.make_vectorized_labels_to_df(vec_labels)
label_dfs.append(label_df)
# save ML dfs
Preprocessor.save_data(pd.DataFrame(label_dfs), f'{folder_name}/label_dfs.pkl', file_type='pkl')
# -*- coding: utf-8 -*-
import pandas
import copy
_pd2hc_kind = {
"bar": "column",
"barh": "bar",
"area": "area",
"line": "line",
"pie": "pie"
}
def pd2hc_kind(kind):
if kind not in _pd2hc_kind:
raise ValueError("%(kind)s plots are not yet supported" % locals())
return _pd2hc_kind[kind]
_pd2hc_linestyle = {
"-": "Solid",
"--": "Dash",
"-.": "DashDot",
":": "Dot"
}
def pd2hc_linestyle(linestyle):
if linestyle not in _pd2hc_linestyle:
raise ValueError("%(linestyle)s linestyles are not yet supported" % locals())
return _pd2hc_linestyle[linestyle]
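# Examples of the two mapping helpers above:
#   pd2hc_kind("barh")      # -> "bar"
#   pd2hc_linestyle("--")   # -> "Dash"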
def json_encode(obj):
return pandas.io.json.dumps(obj)
def serialize(df, output_type="javascript", chart_type="default", *args, **kwargs):
def serialize_chart(df, output, *args, **kwargs):
output["chart"] = {}
if 'render_to' in kwargs:
output['chart']['renderTo'] = kwargs['render_to']
if "figsize" in kwargs:
output["chart"]["width"] = kwargs["figsize"][0]
output["chart"]["height"] = kwargs["figsize"][1]
if "kind" in kwargs:
output["chart"]["type"] = pd2hc_kind(kwargs["kind"])
if kwargs.get('polar'):
output['chart']['polar'] = True
def serialize_colors(df, output, *args, **kwargs):
pass
def serialize_credits(df, output, *args, **kwargs):
pass
def serialize_data(df, output, *args, **kwargs):
pass
def serialize_drilldown(df, output, *args, **kwargs):
pass
def serialize_exporting(df, output, *args, **kwargs):
pass
def serialize_labels(df, output, *args, **kwargs):
pass
def serialize_legend(df, output, *args, **kwargs):
output["legend"] = {
"enabled": kwargs.get("legend", True)
}
def serialize_loading(df, output, *args, **kwargs):
pass
def serialize_navigation(df, output, *args, **kwargs):
pass
def serialize_noData(df, output, *args, **kwargs):
pass
def serialize_pane(df, output, *args, **kwargs):
pass
def serialize_plotOptions(df, output, *args, **kwargs):
pass
def serialize_series(df, output, *args, **kwargs):
def is_secondary(c, **kwargs):
return c in kwargs.get("secondary_y", [])
if kwargs.get('sort_columns'):
df = df.sort_index()
series = df.to_dict('series')
output["series"] = []
for name, data in series.items():
if df[name].dtype.kind in "biufc":
sec = is_secondary(name, **kwargs)
d = {
"name": name if not sec or not kwargs.get("mark_right", True) else name + " (right)",
"yAxis": int(sec),
"data": list(zip(df.index, data.values.tolist()))
}
if kwargs.get('polar'):
d['data'] = [v for k, v in d['data']]
if kwargs.get("kind") == "area" and kwargs.get("stacked", True):
d["stacking"] = 'normal'
if kwargs.get("style"):
d["dashStyle"] = pd2hc_linestyle(kwargs["style"].get(name, "-"))
output["series"].append(d)
output['series'].sort(key=lambda s: s['name'])
def serialize_subtitle(df, output, *args, **kwargs):
pass
def serialize_title(df, output, *args, **kwargs):
if "title" in kwargs:
output["title"] = {"text": kwargs["title"]}
def serialize_tooltip(df, output, *args, **kwargs):
if 'tooltip' in kwargs:
output['tooltip'] = kwargs['tooltip']
def serialize_xAxis(df, output, *args, **kwargs):
output["xAxis"] = {}
if df.index.name:
output["xAxis"]["title"] = {"text": df.index.name}
if df.index.dtype.kind in "M":
output["xAxis"]["type"] = "datetime"
if df.index.dtype.kind == 'O':
output['xAxis']['categories'] = sorted(list(df.index)) if kwargs.get('sort_columns') else list(df.index)
if kwargs.get("grid"):
output["xAxis"]["gridLineWidth"] = 1
output["xAxis"]["gridLineDashStyle"] = "Dot"
if kwargs.get("loglog") or kwargs.get("logx"):
output["xAxis"]["type"] = 'logarithmic'
if "xlim" in kwargs:
output["xAxis"]["min"] = kwargs["xlim"][0]
output["xAxis"]["max"] = kwargs["xlim"][1]
if "rot" in kwargs:
output["xAxis"]["labels"] = {"rotation": kwargs["rot"]}
if "fontsize" in kwargs:
output["xAxis"].setdefault("labels", {})["style"] = {"fontSize": kwargs["fontsize"]}
if "xticks" in kwargs:
output["xAxis"]["tickPositions"] = kwargs["xticks"]
def serialize_yAxis(df, output, *args, **kwargs):
yAxis = {}
if kwargs.get("grid"):
yAxis["gridLineWidth"] = 1
yAxis["gridLineDashStyle"] = "Dot"
if kwargs.get("loglog") or kwargs.get("logy"):
yAxis["type"] = 'logarithmic'
if "ylim" in kwargs:
yAxis["min"] = kwargs["ylim"][0]
yAxis["max"] = kwargs["ylim"][1]
if "rot" in kwargs:
yAxis["labels"] = {"rotation": kwargs["rot"]}
if "fontsize" in kwargs:
yAxis.setdefault("labels", {})["style"] = {"fontSize": kwargs["fontsize"]}
if "yticks" in kwargs:
yAxis["tickPositions"] = kwargs["yticks"]
output["yAxis"] = [yAxis]
if kwargs.get("secondary_y"):
yAxis2 = copy.deepcopy(yAxis)
yAxis2["opposite"] = True
output["yAxis"].append(yAxis2)
def serialize_zoom(df, output, *args, **kwargs):
if "zoom" in kwargs:
if kwargs["zoom"] not in ("x", "y", "xy"):
raise ValueError("zoom must be in ('x', 'y', 'xy')")
output["chart"]["zoomType"] = kwargs["zoom"]
output = {}
df_copy = copy.deepcopy(df)
if "x" in kwargs:
df_copy.index = df_copy.pop(kwargs["x"])
if kwargs.get("use_index", True) is False:
df_copy = df_copy.reset_index()
if "y" in kwargs:
df_copy = pandas.DataFrame(df_copy, columns=kwargs["y"])
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 21 11:33:22 2019
@author: babin
"""
import os
import datetime
from Bio import SeqIO
from Bio.SeqUtils import GC
from Bio import Entrez
from Bio.Blast import NCBIWWW
from Bio.Blast import NCBIXML
from Bio.SeqIO.QualityIO import FastqGeneralIterator
from Bio.SeqIO.FastaIO import SimpleFastaParser # low level fast fasta parser
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import vcf
from time import sleep, time
import matplotlib.pyplot as plt
import seaborn as sns
from math import log2
from http.client import IncompleteRead
from socket import gaierror
from urllib.error import HTTPError
import pandas as pd
sns.set()
def _get_current_time():
time_stamp = datetime.datetime.fromtimestamp(
time()).strftime('%Y-%m-%d %H:%M:%S')
return time_stamp
def _format_time_stamp(time_stamp):
days, day_time = time_stamp.split(" ")
day_time = day_time.split(":")
day_time = "_".join(day_time)
time_stamp = days + "_time-" + day_time
return time_stamp
def _load_from_genbank(f_obj, seq_id, rettype):
handle = Entrez.efetch(db="nucleotide", id=seq_id, rettype=rettype, retmode="text")
fetched = handle.read()
f_obj.write(fetched)
def fetch_seq(ids, seq_format="fasta", sep=False):
"""downloads sequences from nucleotide database
by id nums and saves them in genbank format
----------
ids : str or list of str
sequence genbank id or list of ids
seq_format : str
gb - genbank files
fasta (by default) - fasta files
sep : bool
False - download a bunch of sequences as one file
True - download a bunch of sequences as separate files
"""
# your email in here
Entrez.email = ""
count = 0
if type(ids) == str:
with open("downloaded_" + ids + "." + seq_format, "w") as f_obj:
_load_from_genbank(f_obj, ids, seq_format)
print("a sequence " + ids + " was downloaded")
elif type(ids) == list:
if sep:
for i in ids:
with open("downloaded_" + i + "." + seq_format, "w") as f_obj:
_load_from_genbank(f_obj, i, seq_format)
count += 1
sleep(0.5)
print("a total of %s sequences were downloaded" %count)
else:
time_stamp = _get_current_time()
time_stamp = _format_time_stamp(time_stamp)
with open("downloaded_bunch_" + time_stamp + "." + seq_format, "w") as f_obj:
for i in ids:
_load_from_genbank(f_obj, i, seq_format)
count += 1
sleep(0.5)
print("a total of %s sequences were downloaded" %count)
else:
print("invalid ids parameter type")
def _fetch_blast_results(record, e_thresh, hits):
result_handle = NCBIWWW.qblast("blastn", "nt", record.seq, hitlist_size=hits)
blast_record = NCBIXML.read(result_handle)
blast_results_record = []
for alignt in blast_record.alignments:
for hsp in alignt.hsps:
if hsp.expect < e_thresh:
blast_results_record.append([record.id, alignt.title, str(alignt.length), str(hsp.expect)])
return blast_results_record
def blast_fasta(query, e_thresh=0.1, hits=1):
"""blast records from a fasta file
writes results into the tab-delimited txt file
Parameters:
-----------
query: str
path to the input file
e_thresh: float
e-value blast threshold
hits: int
a number of hits to return, 1 by default
"""
fasta = SeqIO.parse(query, "fasta")
blast_results_total = []
for record in fasta:
try:
blast_results_record = _fetch_blast_results(record, e_thresh, hits)
for res in blast_results_record:
blast_results_total.append(res)
time_stamp = _get_current_time()
print(record.id, " blasted at: ", time_stamp)
except IncompleteRead as e:
print("Network problem: ", e, "Second and final attempt is under way...")
blast_results_record = _fetch_blast_results(record, e_thresh, hits)
for res in blast_results_record:
blast_results_total.append(res)
time_stamp = _get_current_time()
print(record.id, " blasted at: ", time_stamp)
except gaierror as e:
print("some other problem, 'gaierror': ", e)
except HTTPError as e:
print("urllib.error.HTTPError: ", e)
df = pd.DataFrame(blast_results_total, columns=["record_id", "hit_name", "hit_length", "e_value"])
df.to_csv("blast_results.csv", sep="\t")
print("job done. the results are in {0}".format(os.path.abspath("blast_results.csv")))
def _get_id_length_gc(file):
ids_len_and_gc = []
records = SeqIO.parse(file, "fasta")
num_records = 0
for rec in records:
ids_len_and_gc.append((rec.id, len(rec.seq), GC(rec.seq)))
num_records += 1
return num_records, ids_len_and_gc
def _show_fasta_info(file, num_records, ids_len_and_gc):
print("file '{0}' contains {1} sequences".format(file, num_records))
print("", "sequence id", "length", "GC%", sep="\t")
for counter, value in enumerate(ids_len_and_gc, 1):
print(counter, value[0], value[1], round(value[2], 2), sep="\t")
print("------------------------------------")
def fasta_info(path_to=False):
"""prints out information about fasta files:
number of sequences in the file, sequence id numbers,
lengths of sequences and GC content
without arguments takes as an input
all fasta files in the current dir
Parameters
----------
path_to_fasta : str or list
path to input file, or list of paths
"""
fasta_extensions = ["fa", "fas", "fasta"]
if type(path_to) == str:
num_records, len_and_gc = _get_id_length_gc(path_to)
_show_fasta_info(path_to, num_records, len_and_gc)
elif type(path_to) == list:
for path in path_to:
num_records, len_and_gc = _get_id_length_gc(path)
_show_fasta_info(path, num_records, len_and_gc)
else:
current_dir_content = os.listdir()
for f in current_dir_content:
if f.rsplit(".", 1)[-1] in fasta_extensions:
num_records, ids_len_and_gc = _get_id_length_gc(f)
_show_fasta_info(f, num_records, ids_len_and_gc)
def _get_fastq_num_records(path_to):
with open(path_to) as in_handle:
total_reads = 0
reads_ids = []
for title, seq, qual in FastqGeneralIterator(in_handle):
total_reads += 1
reads_ids.append(title.split(" ")[0])
num_uniq_reads = len(set(reads_ids))
return total_reads, num_uniq_reads
def _show_fastq_info(f, total_reads, num_uniq_reads):
print("file {0} contains:".format(f))
print("{0} total reads".format(total_reads))
print("{0} unique reads ids".format(num_uniq_reads))
print("--------------------------")
def fastq_info(path_to=False):
"""prints out information about fastq files:
number of sequences in the file,
and number of unique ids in the file
without arguments takes as an input
all fastq files in the current dir
Parameters
----------
path_to_fasta : str or list
path to input file, or list of paths
"""
if type(path_to) == str:
total_reads, num_uniq_reads = _get_fastq_num_records(path_to)
_show_fastq_info(path_to, total_reads, num_uniq_reads)
elif type(path_to) == list:
for path in path_to:
total_reads, num_uniq_reads = _get_fastq_num_records(path)
_show_fastq_info(path, total_reads, num_uniq_reads)
else:
current_dir_content = os.listdir()
for f in current_dir_content:
if f.rsplit(".", 1)[-1] == "fastq":
total_reads, num_uniq_reads = _get_fastq_num_records(f)
_show_fastq_info(f, total_reads, num_uniq_reads)
def split_fasta(path_to, path_out=False):
"""splits fasta file containing several
sequences into the corresponding number of
fasta files.
Parameters:
----------
path_to : str
path to the input file
path_out : str
path to output dir
"""
if path_out:
if not os.path.exists(path_out):
os.mkdir(path_out)
for record in SeqIO.parse(path_to, "fasta"):
SeqIO.write(record, path_out + record.id + ".fasta", "fasta")
print("file {0} was splitted. the results are in the {1}".format(path_to, path_out))
else:
for record in SeqIO.parse(path_to, "fasta"):
SeqIO.write(record, record.id + ".fasta", "fasta")
print("file {0} was splitted. the results are in the {1}".format(path_to, os.getcwd()))
def _cat_fasta_records(file):
cat_seq = ""
for record in SeqIO.parse(file, "fasta"):
cat_seq += record
return cat_seq
def cat_fasta_seq(path_to, fas_name="cat_seq.fasta", fas_id="cat_seq", fas_descr=""):
"""concatenates sequences from fasta files
into one long sequence. takes one multifasta
or several fasta files as an input
Parameters:
----------
path_to : str or list
path to input file or files
fas_name : str, optional
name of the fasta file
fas_id : str, optional
id of the concatenated sequence
fas_descr : str, optional
description of the fasta sequence
"""
if type(path_to) == str:
cat_seq = _cat_fasta_records(path_to)
elif type(path_to) == list:
cat_seq = ""
for file in path_to:
cat_seq += _cat_fasta_records(file)
cat_seq.id = fas_id
cat_seq.description = fas_descr
SeqIO.write(cat_seq, fas_name, "fasta")
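# Usage sketch (hedged); file names are placeholders:
#   cat_fasta_seq(["contig_1.fasta", "contig_2.fasta"],
#                 fas_name="assembly_cat.fasta", fas_id="assembly")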
def plot_contigs_cover_gc(path_to):
"""takes spades assembler output which is fasta
file containing contigs, and
creates two plots:
1. distribution of GC content in contigs
2. GC content vs log2 coverage depth
Parameters:
-----------
path_to : str
path to input file
"""
container = []
for seq_record in SeqIO.parse(path_to, "fasta"):
entry = (seq_record.id, GC(seq_record.seq))
container.append(entry)
gc = [x[1] for x in container]
fig = plt.figure()
sns.distplot(gc, hist=False, kde_kws={"shade":True})
plt.title("GC_distribution")
plt.xlabel("GC content, %")
plt.savefig("contigs_GC_distribution.jpeg", format="jpeg")
plt.close(fig)
coverage = []
for el in container:
cov = el[0].split("_")[-1]
coverage.append(float(cov))
cov_log2 = [log2(x) for x in coverage]
fig1 = plt.figure(figsize=(10, 8))
plt.scatter(gc, cov_log2, s=5)
plt.xlabel("GC content, %")
plt.ylabel("log2 coverage depth")
plt.title("coverage of the contigs vs GC content", fontsize=15)
plt.savefig("GC_content_vs_contigs_coverage.jpeg", format="jpeg")
plt.close(fig1)
def count_indels(vcf_file, min_depth=10, verbose=True):
"""counts indels in vcf file
----------------
vcf_file: str
input vcf
min_depth: int
minimum depth in favour of indel, 10 by default
verbose: bool
True - prints information about the variants
False - keeps silent
"""
vcf_reader = vcf.Reader(open(vcf_file, 'r'))
counter = 0
if verbose:
for record in vcf_reader:
if "INDEL" not in record.INFO.keys():
continue
elif record.INFO["DP4"][2] + record.INFO["DP4"][3] < min_depth:
continue
else:
print("chromosome: %s, position: %s, ref: %s, indel variant: %s" \
% (record.CHROM, record.POS, record.REF, record.ALT ))
print("depth at position: %s" % record.INFO["DP"])
print("reads supporting reference: %d" %(record.INFO["DP4"][0] + record.INFO["DP4"][1]))
print("reads supporting indel variant: %d" %(record.INFO["DP4"][2] + record.INFO["DP4"][3]))
print("==========================================================================")
counter += 1
else:
for record in vcf_reader:
if "INDEL" not in record.INFO.keys():
continue
elif record.INFO["DP4"][2] + record.INFO["DP4"][3] < min_depth:
continue
else:
counter += 1
print("total number of indels %s" %counter)
def count_snps(vcf_file, min_depth=10, verbose=True):
"""counts SNPs in vcf file
----------------
vcf_file: str
input vcf
min_depth: int
minimum depth in favour of SNPs, 10 by default
verbose: bool
True - prints information about the variants
False - keeps silent
"""
vcf_reader = vcf.Reader(open(vcf_file, 'r'))
counter = 0
if verbose:
for record in vcf_reader:
if "INDEL" in record.INFO.keys():
continue
elif record.INFO["DP4"][2] + record.INFO["DP4"][3] < min_depth:
continue
else:
print("chromosome: %s, position: %s, ref: %s, snp variant: %s" \
% (record.CHROM, record.POS, record.REF, record.ALT ))
print("depth at position: %s" % record.INFO["DP"])
print("reads supporting reference: %d" %(record.INFO["DP4"][0] + record.INFO["DP4"][1]))
print("reads supporting snp variant: %d" %(record.INFO["DP4"][2] + record.INFO["DP4"][3]))
print("==========================================================================")
counter += 1
else:
for record in vcf_reader:
if "INDEL" in record.INFO.keys():
continue
elif record.INFO["DP4"][2] + record.INFO["DP4"][3] < min_depth:
continue
else:
counter += 1
print("total number of SNPs %s" %counter)
def vcf_to_df(vcf_file, min_depth=10, var_type="snp"):
"""creates pandas dataframe from the vcf file data
----------------
vcf_file: str
input vcf
min_depth: int
minimum depth in favour of variant, 10 by default
var_type: str
"snp" - collect SNP variants (default)
"indel" - collect indel variants
"""
vcf_reader = vcf.Reader(open(vcf_file, 'r'))
vcf_data ={"chrom": [], "pos": [], "ref": [], "var": [], "total_depth": [],
"depth_ref": [], "depth_var": []}
if var_type == "snp":
for record in vcf_reader:
if "INDEL" in record.INFO.keys():
continue
elif record.INFO["DP4"][2] + record.INFO["DP4"][3] < min_depth:
continue
else:
vcf_data["chrom"].append(record.CHROM)
vcf_data["pos"].append(record.POS)
vcf_data["ref"].append(record.REF)
vcf_data["var"].append(record.ALT)
vcf_data["total_depth"].append(record.INFO["DP"])
vcf_data["depth_ref"].append(record.INFO["DP4"][0] + record.INFO["DP4"][1])
vcf_data["depth_var"].append(record.INFO["DP4"][2] + record.INFO["DP4"][3])
elif var_type == "indel":
for record in vcf_reader:
if "INDEL" not in record.INFO.keys():
continue
elif record.INFO["DP4"][2] + record.INFO["DP4"][3] < min_depth:
continue
else:
vcf_data["chrom"].append(record.CHROM)
vcf_data["pos"].append(record.POS)
vcf_data["ref"].append(record.REF)
vcf_data["var"].append(record.ALT)
vcf_data["total_depth"].append(record.INFO["DP"])
vcf_data["depth_ref"].append(record.INFO["DP4"][0] + record.INFO["DP4"][1])
vcf_data["depth_var"].append(record.INFO["DP4"][2] + record.INFO["DP4"][3])
else:
print("var_type arg not valid")
df = pd.DataFrame.from_dict(vcf_data)
return df
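# Usage sketch (hedged); the file name is a placeholder:
#   snp_df = vcf_to_df("sample.vcf", min_depth=10, var_type="snp")
#   snp_df.to_csv("snps.csv", sep="\t", index=False)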
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
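# Illustration (hedged) of the helper above: given a result indexed only by the
# observed grouper combinations, it reindexes onto the full categories product,
# e.g.
#   cat1 = Categorical(["a", "b"], categories=["a", "b", "z"])
#   cat2 = Categorical(["c", "d"], categories=["c", "d", "y"])
#   full = cartesian_product_for_groupers(result, [cat1, cat2], ["A", "B"])
#   # full has 3 x 3 = 9 rows, missing combinations filled with fill_value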
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
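
# Illustrative sketch, not from the original suite: contrasts the result index under
# observed=True and observed=False for two categorical groupers. Assumes the same
# module-level imports as the surrounding tests (Categorical, DataFrame).
def _sketch_observed_index_size():
    cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"])
    cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"])
    df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
    compact = df.groupby(["A", "B"], observed=True)["values"].sum()   # 4 rows
    expanded = df.groupby(["A", "B"], observed=False)["values"].sum() # 3 * 3 = 9 rows
    return compact, expanded
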
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
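
# Illustrative sketch, not from the original test: the reason the test above pins
# observed=True is that observed=False re-expands the result index to the cartesian
# product of the categorical's categories with the other keys, which is prohibitive
# for high-cardinality groupers. A scaled-down illustration, assuming the same
# module-level imports (Categorical, DataFrame):
def _sketch_observed_avoids_expansion():
    df = DataFrame(
        {
            "cat": Categorical(["x", "y", "x", "y"], categories=["x", "y", "z"]),
            "int_id": [1, 2, 3, 4],
            "foo": 0,
        }
    )
    compact = df.groupby(["cat", "int_id"], observed=True).count()    # 4 rows
    expanded = df.groupby(["cat", "int_id"], observed=False).count()  # 3 * 4 = 12 rows
    return len(compact), len(expanded)
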
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
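
# Illustrative sketch, not from the original suite: .groups maps each group label to
# the row labels it contains; with observed=False, unobserved categories map to an
# empty Index. Assumes Categorical and DataFrame are imported as in the surrounding
# tests.
def _sketch_groups_mapping():
    cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
    df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
    return df.groupby("cat", observed=False).groups  # {"a": [0, 2], "b": [], "c": [1]}
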
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
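
# Illustrative sketch, not from the original suite: rows whose categorical key is NaN
# are dropped from the grouping regardless of `observed`. Assumes the module-level
# imports used by the surrounding tests (Categorical, DataFrame, np).
def _sketch_nan_key_rows_dropped():
    s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
    df = DataFrame({"s1": s1, "s2": [1, 2, 3, 4]})
    # Only the rows with s1 == "a" contribute, so first() returns 2 for group "a".
    return df.groupby("s1", observed=True)["s2"].first()
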
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
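
# Illustrative sketch, not from the original suite: setting a categorical column as
# the index and grouping by level=0 is equivalent to grouping by the column itself,
# and with observed=False the result index carries every category. Assumes the same
# module-level imports as above (Categorical, DataFrame).
def _sketch_groupby_categorical_index_level():
    cats = Categorical.from_codes([0, 1, 1, 0], ["low", "high"], ordered=True)
    df = DataFrame({"x": [1, 2, 3, 4], "cats": cats})
    by_level = df.set_index("cats").groupby(level=0, observed=False).sum()
    by_column = df.groupby("cats", observed=False).sum()
    return by_level.equals(by_column)  # True
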
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
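
# Illustrative sketch, not from the original suite: the usual fix for the length
# mismatch above is to cut the full series (NaN stays NaN), so the grouper and the
# grouped axis have equal length. Assumes pd, np and Series are imported as in the
# surrounding tests.
def _sketch_bins_equal_len():
    series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
    bins = pd.cut(series, 4)  # same length as `series`, NaN where the input is NaN
    return series.groupby(bins, observed=False).mean()
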
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
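
# Illustrative sketch, not from the original suite: with a categorical key,
# sort=True orders the result by the categories' order, while sort=False keeps the
# order of first appearance (unobserved categories are appended when observed=False).
# Assumes Categorical and DataFrame are imported as above.
def _sketch_sort_vs_appearance_order():
    df = DataFrame({"A": Categorical(list("ba"), categories=list("abc"))})
    by_category_order = df.groupby("A", sort=True, observed=False).size()   # a, b, c
    by_appearance = df.groupby("A", sort=False, observed=False).size()      # b, a, c
    return by_category_order, by_appearance
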
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = Categorical(["first", "second", "third", "fourth"], ordered=True)
df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = DataFrame(
{"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
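
# Illustrative sketch, not from the original suite: min/max inside a groupby respect
# the categorical ordering (here the inferred, lexically sorted categories) and the
# result keeps the ordered categorical dtype. Assumes Categorical and DataFrame are
# imported as in the surrounding tests.
def _sketch_ordered_min():
    c = Categorical(["first", "second", "third", "fourth"], ordered=True)
    df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
    # payload -2 holds {"second", "fourth"} -> "fourth"; -1 holds {"first", "third"} -> "first"
    return df.groupby("payload")["col"].min()
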
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
# GH-9614
    # test the fix for grouping on an all-None key, which used to
    # coerce the categorical dtype to float
df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})
result = df.groupby("A").first()["B"]
expected = Series(
Categorical([], categories=["test", "train"]),
index=Series([], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
def test_sort():
    # https://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
res = df.groupby(["value_group"], observed=False)["value_group"].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame(
[
["(7.5, 10]", 10, 10],
["(7.5, 10]", 8, 20],
["(2.5, 5]", 5, 30],
["(5, 7.5]", 6, 40],
["(2.5, 5]", 4, 50],
["(0, 2.5]", 1, 60],
["(5, 7.5]", 7, 70],
],
columns=["range", "foo", "bar"],
)
df["range"] = Categorical(df["range"], ordered=True)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range", ordered=True
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
col = "range"
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
    # when the categories are ordered, the groups follow the categories' order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
df["range"] = Categorical(df["range"], ordered=False)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range"
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
index = CategoricalIndex(
["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
categories=["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
name="range",
)
expected_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=["foo", "bar"]
)
col = "range"
    # this is an unordered categorical, but sorting by it is still allowed
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
    # use the same data as test_groupby_sort_categorical, whose categories
    # correspond to datetime.month
df = DataFrame(
{
"dt": [
datetime(2011, 7, 1),
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 2, 1),
datetime(2011, 1, 1),
datetime(2011, 5, 1),
],
"foo": [10, 8, 5, 6, 4, 1, 7],
"bar": [10, 20, 30, 40, 50, 60, 70],
},
columns=["dt", "foo", "bar"],
)
# ordered=True
df["dt"] = Categorical(df["dt"], ordered=True)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt", ordered=True)
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(
index, categories=index, name="dt", ordered=True
)
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
    # when the categories are ordered, the groups follow the categories' order
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=False, observed=False).first()
)
# ordered = False
df["dt"] = Categorical(df["dt"], ordered=False)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt")
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(index, categories=index, name="dt")
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
tm.assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first()
)
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 0 by default
result = df.groupby("A", observed=False).B.sum()
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
    expected = Series([3, 1, 0], expected_idx, name="B")
    tm.assert_series_equal(result, expected)