"""
Various post-processing steps of batches of simulations.
All steps work incrementally, detecting which sims have already been processed and which haven't.
All steps load and save the batch multiple times.
"""
import logging
from tqdm.auto import tqdm as pbar
import numpy as np
import pandas as pd
import gc
from tctx.util import spike_trains as spt, conn_check
from tctx.analysis import simbatch as sb
from tctx.analysis import simstats
from tctx.analysis import sequences as sqs
from tctx.analysis import extent as ext
from tctx.analysis import traversed_connectivity as tc
from tctx.analysis import order_entropy as oe
from tctx.analysis import branches
def clean_slate(batch_full):
"""Return a brand new clean copy of the batch"""
batch_full = batch_full.copy_clean_reg().add_cols(batch_full.reg[['full_path', 'sim_idx']])
batch_full.register_raw()
return batch_full
def register_raw(working_filename):
"""register raw results as store access"""
batch_full = sb.SimBatch.load(working_filename)
missing_raw = batch_full.subsection(batch_full.get_missing('cells_raw_path'))
if len(missing_raw) > 0:
missing_raw.register_raw()
batch_full = batch_full.add_cols(missing_raw.reg[[
'cells_raw_idx', 'cells_raw_path',
'spikes_raw_idx', 'spikes_raw_path',
'conns_raw_idx', 'conns_raw_path',
]])
batch_full.save_registry(working_filename)
def extract_simstats(working_filename):
"""add columns to the registry indicating firing rates and other simulation stats"""
batch_full = sb.SimBatch.load(working_filename)
for sbatch in batch_full.iter_missing_chunks('cell_hz_induction_total'):
ssts = simstats.extract_simstats(
sbatch.reg,
batch_full.stores['cells_raw'],
batch_full.stores['spikes_raw'],
)
batch_full = batch_full.add_cols(ssts)
batch_full.save_registry(working_filename)
def extract_followers(working_filename, res_folder, mask_col=None, exec_mode=None):
batch_full = sb.SimBatch.load(working_filename)
new_cols = [
'e_foll_count', 'e_foll_gids',
'i_foll_count', 'i_foll_gids',
'cells_idx', 'cells_path',
'ewins_idx', 'ewins_path',
'spikes_idx', 'spikes_path',
]
if mask_col is None:
chunks = batch_full.iter_missing_chunks('spikes_path')
else:
chunks = batch_full.subsection(batch_full.reg[mask_col]).iter_missing_chunks('spikes_path')
for sbatch in chunks:
updated_batch = sqs.compute_sequence_details_batch(
sbatch,
batch_folder=str(sb.abs_path(res_folder)),
exec_mode=exec_mode,
)
batch_full = batch_full.add_cols(updated_batch.reg[new_cols])
batch_full.save_registry(working_filename)
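# A hedged sketch of how the incremental steps above can be chained; the exact
# call order is an assumption based on their data dependencies (raw stores must
# be registered before simulation stats and followers can be extracted).
def _example_postprocessing(working_filename, res_folder):
    register_raw(working_filename)
    extract_simstats(working_filename)
    extract_followers(working_filename, res_folder)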
def _extract_sim_foll_cells(batch, sim_gid):
foll_gids = (
batch.reg.loc[sim_gid, 'e_foll_gids'] +
batch.reg.loc[sim_gid, 'i_foll_gids']
)
cells = batch.stores['cells'][sim_gid]
folls = cells.loc[list(foll_gids) + [batch.reg.loc[sim_gid, 'targeted_gid']]]
centered = spt.center_cells(
folls['x'], folls['y'],
folls.loc[folls['is_targeted'], ['x', 'y']].iloc[0],
batch.reg.loc[sim_gid, 'side_um']
).add_suffix('_centered')
folls = pd.concat([folls.drop(['x', 'y'], axis=1), centered], axis=1)
spikes = batch.stores['spikes'][sim_gid]
sb.CAT.add_cats_spikes(spikes)
spikes = spikes[spikes['gid'].isin(folls.index)]
delays = spikes[spikes['cat'] == 'effect'].sort_values(['gid', 'delay_in_window'])
first_spikes = delays.groupby(['gid', 'win_idx'])['delay_in_window'].min()
timing = pd.DataFrame({
'mean_delay': delays.groupby('gid')['delay_in_window'].mean(),
'median_delay': delays.groupby('gid')['delay_in_window'].median(),
'mean_first_delay': first_spikes.groupby('gid').mean(),
'median_first_delay': first_spikes.groupby('gid').median(),
})
folls = pd.concat([folls, timing], axis=1)
return folls
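# A small self-contained sketch of the per-gid delay statistics computed in
# _extract_sim_foll_cells, on a synthetic spikes frame (column names mirror the
# real ones, the values are made up for illustration).
def _demo_first_spike_delays():
    spikes = pd.DataFrame({
        'gid': [1, 1, 1, 2, 2],
        'win_idx': [0, 0, 1, 0, 1],
        'delay_in_window': [5.0, 9.0, 4.0, 7.0, 6.0],
    })
    # first spike per cell (gid) and window, then averaged per cell
    first_spikes = spikes.groupby(['gid', 'win_idx'])['delay_in_window'].min()
    return pd.DataFrame({
        'mean_delay': spikes.groupby('gid')['delay_in_window'].mean(),
        'mean_first_delay': first_spikes.groupby('gid').mean(),
    })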
def extract_all_foll_cells(working_filename, output_path):
batch_full = sb.SimBatch.load(working_filename)
for sbatch in batch_full.sel(instance_label='original').iter_missing_chunks('foll_cells_path', chunk_size=16):
saved = {}
for sim_gid in sbatch.reg.index:
folls = _extract_sim_foll_cells(sbatch, sim_gid)
sb.CAT.remove_cats_cells(folls)
saved[sim_gid] = (str(output_path), f's{sim_gid:06d}')
folls.to_hdf(*saved[sim_gid])
name = 'foll_cells'
saved = pd.DataFrame.from_dict(saved, orient='index', columns=[f'{name}_path', f'{name}_idx'])
batch_full = batch_full.add_cols(saved)
batch_full.save_registry(working_filename)
def extract_net_foll_conns(working_filename, output_path, instance_label='original'):
"""
This can take a lot of time (~5 hours for 300 nets).
"""
batch_full = sb.SimBatch.load(working_filename)
if 'net_foll_conns_path' not in batch_full.reg.columns:
batch_full.reg['net_foll_conns_path'] = np.nan
if 'net_foll_conns_idx' not in batch_full.reg.columns:
batch_full.reg['net_foll_conns_idx'] = np.nan
grouped = batch_full.sel(instance_label=instance_label).reg.groupby([
'instance_path', 'instance_idx', 'targeted_gid']).groups.items()
to_extract = []
for ((instance_path, instance_idx, targeted_gid), sim_gids) in grouped:
if batch_full.reg.loc[sim_gids, 'net_foll_conns_path'].isna().any():
if batch_full.reg.loc[sim_gids, 'net_foll_conns_path'].notna().any():
logging.warning('Overriding existing net folls for sims')
batch_full.reg.loc[sim_gids, 'net_foll_conns_idx'] = np.nan
batch_full.reg.loc[sim_gids, 'net_foll_conns_path'] = np.nan
to_extract.append((targeted_gid, sim_gids))
print(f'{len(to_extract)} nets missing')
def get_new_key():
existing = [int(k[1:]) for k in batch_full.reg['net_foll_conns_idx'].dropna().unique()]
if len(existing) > 0:
index = max(existing) + 1
else:
index = 0
        return f'c{index:06d}'
for targeted_gid, sim_gids in pbar(to_extract):
important_cell_gids = np.unique(np.append(
targeted_gid,
np.concatenate(batch_full.reg.loc[sim_gids, ['e_foll_gids', 'i_foll_gids']].values.ravel())
))
all_conns = batch_full.stores['conns_raw'][sim_gids[0]]
mask = (
all_conns['source'].isin(important_cell_gids)
& all_conns['target'].isin(important_cell_gids)
)
net_conns = all_conns[mask].copy()
sb.CAT.remove_cats_conns(net_conns)
output_key = get_new_key()
net_conns.to_hdf(output_path, output_key, format='fixed')
partial = {}
for sim_gid in sim_gids:
partial[sim_gid] = str(output_path), str(output_key)
partial = pd.DataFrame.from_dict(partial, orient='index', columns=['net_foll_conns_path', 'net_foll_conns_idx'])
batch_full = batch_full.add_cols(partial)
batch_full.save_registry(working_filename)
batch_full.stores['conns_raw'].empty_cache()
gc.collect()
def extract_str_foll_conns(working_filename, output_path, strength_thresh=15, mask_col=None):
batch_full = sb.SimBatch.load(working_filename)
name = 'str_foll_conn'
for col in f'{name}_path', f'{name}_idx':
if col not in batch_full.reg.columns:
batch_full.reg[col] = np.nan
if mask_col is None:
sel_gids = batch_full.reg.index
else:
sel_gids = batch_full.subsection(batch_full.reg[mask_col]).reg.index
missing = batch_full.subsection(sel_gids).get_missing(f'{name}_path')
instance_key_cols = ['instance_path', 'instance_idx']
for instance_key, sims in pbar(batch_full.reg.loc[missing].groupby(instance_key_cols), desc='net'):
for sim_gid in pbar(sims.index):
all_conns = batch_full.stores['conns_raw'][sims.index[0]]
targeted_gid = sims.loc[sim_gid, 'targeted_gid']
important_cell_gids = np.unique(
(targeted_gid,)
+ sims.loc[sim_gid, 'e_foll_gids']
+ sims.loc[sim_gid, 'i_foll_gids']
)
mask = all_conns['source'].isin(important_cell_gids) & all_conns['target'].isin(important_cell_gids)
mask &= (all_conns['weight'] >= strength_thresh)
str_foll_conns = all_conns[mask].copy()
sb.CAT.remove_cats_conns(str_foll_conns)
output_key = f's{sim_gid:06d}'
str_foll_conns.to_hdf(output_path, output_key, format='fixed')
batch_full.reg.loc[sim_gid, f'{name}_path'] = output_path
batch_full.reg.loc[sim_gid, f'{name}_idx'] = output_key
batch_full.save_registry(working_filename)
batch_full.stores['conns_raw'].empty_cache()
gc.collect()
def extract_extents(working_filename):
batch_full = sb.SimBatch.load(working_filename)
for ei_type in 'ei':
batch_sel = batch_full.subsection(batch_full.reg[f'{ei_type}_foll_count'] > 0).sel(instance_label='original')
for sbatch in batch_sel.iter_missing_chunks(f'{ei_type}_furthest_follower_distance', chunk_size=16):
for sim_gid in sbatch.reg.index:
extent = ext.extract_extent(batch_full, sim_gid)
for k, v in extent.items():
batch_full.reg.loc[sim_gid, k] = v
batch_full.save_registry(working_filename)
def extract_all_jumps(working_filename, output_path, mask_col='uniform_sampling_sub'):
batch_full = sb.SimBatch.load(working_filename)
name = 'jumps'
cols = [f'{name}_path', f'{name}_idx']
if mask_col is None:
sel_gids = batch_full.reg.index
else:
sel_gids = batch_full.subsection(batch_full.reg[mask_col]).reg.index
missing = batch_full.subsection(sel_gids).get_missing(cols[0])
grouped = batch_full.reg.loc[missing].groupby(['instance_path', 'instance_idx', 'targeted_gid']).groups
for _, sim_gids in pbar(grouped.items(), desc='instance'):
batch_full.stores['conns_raw'].empty_cache()
gc.collect()
saved = {}
conns = batch_full.stores['conns_raw'][sim_gids[0]]
sb.CAT.add_cats_conns(conns)
for sim_gid in pbar(sim_gids, desc='sims'):
spikes = batch_full.stores['spikes'][sim_gid]
sb.CAT.add_cats_spikes(spikes)
jumps = tc.extract_spike_jumps(spikes, conns)
sb.CAT.remove_cats_conns(jumps)
saved[sim_gid] = (str(output_path), f's{sim_gid:06d}')
jumps.to_hdf(*saved[sim_gid], format='fixed')
        saved = pd.DataFrame.from_dict(saved, orient='index', columns=cols)
import numpy as np
import pandas as pd
from time import time
from utils.preprocessing import DfInfo
from utils.preprocessing import inverse_dummy
from alibi_cf.wrappers import AlibiBinaryPredictWrapper, AlibiBinaryNNPredictWrapper
from alibi.explainers import CounterFactual
'''
Acronym:
dt -> Decision Tree
rfc -> Random Forest Classifier
nn -> Neural Network
ohe -> One-hot encoding format
'''
class Recorder:
pass
def get_cat_vars_info(cat_feature_names, train_df):
'''
Get information of categorical columns (one-hot).
'''
# Extract information of categorical features for alibi counterfactual prototype.
cat_vars_idx_info = []
for cat_col in cat_feature_names:
num_unique_v = len(
[col for col in train_df.columns if col.startswith(f"{cat_col}_")])
first_index = min([list(train_df.columns).index(col)
for col in train_df.columns if col.startswith(f"{cat_col}_")])
cat_vars_idx_info.append({
"col": cat_col,
"num_unique_v": num_unique_v,
"first_index": first_index
})
    # Encode the information into the required format. { first_idx: num_unique_values }
cat_vars_ohe = {}
for idx_info in cat_vars_idx_info:
cat_vars_ohe[idx_info['first_index']] = idx_info['num_unique_v']
return cat_vars_idx_info, cat_vars_ohe
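# A minimal sketch of the {first_index: num_unique_values} encoding built by
# get_cat_vars_info, on a toy one-hot frame (the column names are hypothetical).
def _demo_cat_vars_info():
    toy_train_df = pd.DataFrame({
        'age': [25, 37],
        'color_red': [1, 0],
        'color_blue': [0, 1],
        'color_green': [0, 0],
    })
    idx_info, cat_vars_ohe = get_cat_vars_info(['color'], toy_train_df)
    # cat_vars_ohe == {1: 3}: the one-hot block for 'color' starts at column
    # index 1 and spans 3 columns.
    return idx_info, cat_vars_ohe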
def alibi_wrap_models(models, output_int):
'''
    Wrap the models to meet the requirements of Alibi.
'''
return {
'dt': AlibiBinaryPredictWrapper(models['dt'], output_int=output_int),
'rfc': AlibiBinaryPredictWrapper(models['rfc'], output_int=output_int),
'nn': AlibiBinaryNNPredictWrapper(models['nn'], output_int=output_int),
}
def get_watcher_cfs(wrapped_models, feature_range, X_train, max_iters):
'''
    Get the CF generator (Watcher-style CounterFactual).
    More information on CounterFactual -> (`https://docs.seldon.io/projects/alibi/en/latest/api/alibi.explainers.html?highlight=CounterFactualProto#alibi.explainers.CounterFactualProto`)
'''
watcher_cfs = {}
for k in wrapped_models.keys():
watcher_cfs[k] = CounterFactual(
wrapped_models[k].predict_proba,
X_train[0].reshape(1, -1).shape,
feature_range=feature_range,
max_iter=max_iters,
)
return watcher_cfs
def generate_watcher_result(
df_info: DfInfo,
train_df,
models,
num_instances,
num_cf_per_instance,
X_train, X_test, y_test,
max_iters=1000,
models_to_run=['dt', 'rfc', 'nn'],
output_int=True
):
    '''
    Generate counterfactuals using the Watcher-style CounterFactual explainer.
    This counterfactual generating algorithm supports categorical features and numerical columns.
    [`df_info`] -> DfInfo instance containing all the data information required for generating counterfactuals.
    [`train_df`] -> Data frame containing training data. (One-hot encoded format)
    [`models`] -> Dictionary of models (usually containing (1) dt (Decision Tree), (2) rfc (Random Forest), (3) nn (Neural Network)).
    [`num_instances`] -> Number of instances to generate counterfactuals for. The instances are taken from the test set. For example,
    if `num_instances = 20`, the first 20 instances in the test set will be used for generating the counterfactuals.
    [`num_cf_per_instance`] -> Number of counterfactuals to generate for each instance. If `num_cf_per_instance = 5`, this function will
    run five times for each instance to search for its counterfactual. Therefore, if you have `num_instances = 20, num_cf_per_instance = 5`, 100 searches
    will be conducted. (Note: there is no promise that 100 counterfactuals will be found.)
    [`X_train, X_test, y_test`] -> Training and test data.
    [`max_iters`] -> Max iterations to run when searching for a single counterfactual. It is a parameter of the counterfactual explainer. (`https://docs.seldon.io/projects/alibi/en/latest/api/alibi.explainers.html?highlight=CounterFactualProto#alibi.explainers.CounterFactualProto`)
    '''
# Get all categorical columns names that are not label column.
cat_feature_names = [
col for col in df_info.categorical_cols if col != df_info.target_name]
    # Get one-hot encoding information (the Alibi algorithm needs it to recognise categorical columns; otherwise they are treated as numerical columns).
# _, cat_vars_ohe = get_cat_vars_info(cat_feature_names, train_df)
# Get wrapped models to meet the input and output of Alibi algorithms.
wrapped_models = alibi_wrap_models(models, output_int)
Recorder.wrapped_models = wrapped_models
    # Since we use a min-max scaler and one-hot encoding, we can constrain the feature range to [0, 1]
feature_range = (np.ones((1, len(df_info.feature_names))),
np.zeros((1, len(df_info.feature_names))))
# Get counterfactual generator instance.
watcher_cfs = get_watcher_cfs(
wrapped_models, feature_range, X_train, max_iters)
Recorder.watcher_cfs = watcher_cfs
# Initialise the result dictionary.(It will be the return value.)
results = {}
# Loop through every models (dt, rfc, nn)
for k in models_to_run:
        # Initialise the result for the classifier (predicting model).
results[k] = []
print(f"Finding counterfactual for {k}")
        # Loop through the first `num_instances` instances in the test set.
for idx, instance in enumerate(X_test[0:num_instances]):
print(f"instance {idx}")
            # Reshape the input instance from a 1D array to a 2D array (which the predictive model accepts).
example = instance.reshape(1, -1)
            # Conduct the search multiple (num_cf_per_instance) times for a single instance.
for num_cf in range(num_cf_per_instance):
print(f"CF {num_cf}")
start_t = time()
exp = watcher_cfs[k].explain(example)
end_t = time()
# Calculate the running time.
running_time = end_t - start_t
# Get the prediction from original predictive model in a human-understandable format.
if k == 'nn':
                    # nn returns a float in [0, 1], so we need to define a threshold for it (usually 0.5 for most classifiers).
prediction = df_info.target_label_encoder.inverse_transform(
(models[k].predict(example)[0] > 0.5).astype(int))[0]
else:
# dt and rfc return int {1, 0}, so we don't need to define a threshold to get the final prediction.
prediction = df_info.target_label_encoder.inverse_transform(
models[k].predict(example))[0]
# Checking if cf is found for this iteration.
                if (exp.cf is not None) and (len(exp.cf) > 0):
print("Found CF")
# Change the found CF from ohe format to original format.
cf = inverse_dummy(pd.DataFrame(
exp.cf['X'], columns=df_info.ohe_feature_names), df_info.cat_to_ohe_cat)
# Change the predicted value to the label we understand.
cf.loc[0, df_info.target_name] = df_info.target_label_encoder.inverse_transform([
exp.cf['class']])[0]
else:
print("CF not found")
cf = None
# Change the found input from ohe format to original format.
input_df = inverse_dummy(pd.DataFrame(
example, columns=df_info.ohe_feature_names), df_info.cat_to_ohe_cat)
input_df.loc[0, df_info.target_name] = prediction
results[k].append({
"input": input_df,
"cf": cf,
"running_time": running_time,
"ground_truth": df_info.target_label_encoder.inverse_transform([y_test[idx]])[0],
"prediction": prediction,
})
return results
def process_result(results, df_info):
'''
Process the result dictionary to construct data frames for each (dt, rfc, nn).
'''
results_df = {}
# Loop through ['dt', 'rfc', 'nn']
for k in results.keys():
all_data = []
for i in range(len(results[k])):
            final_df = pd.DataFrame([{}])
import pandas as pd
import numpy as np
import copy
import re
import string
# Note: this requires nltk.download() first as described in the README.
# from nltk.book import *
from nltk.corpus import stopwords
from nltk.tokenize import TreebankWordTokenizer
from collections import Counter, OrderedDict
from sklearn.model_selection import train_test_split
from app.lib.utils.jsonl import jsonl_to_df
"""
Sources:
Loading JSONL: https://medium.com/@galea/how-to-love-jsonl-using-json-line-format-in-your-workflow-b6884f65175b
NLTK Reference: http://www.nltk.org/book/ch01.html
NLTK word counter reference: https://www.strehle.de/tim/weblog/archives/2015/09/03/1569
"""
class WordTokenizer(object):
def __init__(self):
pass
def _user_grouper(self, filename):
# For each unique user, join all tweets into one tweet row in the new df.
db_cols = ['search_query', 'id_str', 'full_text', 'created_at', 'favorite_count', 'username', 'user_description']
tweets_df = jsonl_to_df(filename, db_cols)
users = list(tweets_df['username'].unique())
tweets_by_user_df = pd.DataFrame(columns=['username', 'user_description', 'tweets'])
# Iterate through all users.
for i, user in enumerate(users):
trunc_df = tweets_df[tweets_df['username'] == user]
user_description = trunc_df['user_description'].tolist()[0]
            joined_tweets = ' '.join(trunc_df["full_text"])
            tweets_by_user_df = tweets_by_user_df.append({'username': user, 'user_description': user_description, 'tweets': joined_tweets}, ignore_index=True)
# Return the data frame with one row per user, tweets concatenated into one string.
return tweets_by_user_df
def _parse_doc(self, text):
text = text.lower()
text = re.sub(r'&(.)+', "", text) # no & references
text = re.sub(r'pct', 'percent', text) # replace pct abreviation
text = re.sub(r"[^\w\d'\s]+", '', text) # no punct except single quote
text = re.sub(r'[^\x00-\x7f]', r'', text) # no non-ASCII strings
# Omit words that are all digits
if text.isdigit():
text = ""
# # Get rid of escape codes
# for code in codelist:
# text = re.sub(code, ' ', text)
# Replace multiple spacess with one space
        text = re.sub(r'\s+', ' ', text)
return text
def _parse_words(self, text):
# split document into individual words
tokens = text.split()
re_punc = re.compile('[%s]' % re.escape(string.punctuation))
# remove punctuation from each word
tokens = [re_punc.sub('', w) for w in tokens]
# remove remaining tokens that are not alphabetic
tokens = [word for word in tokens if word.isalpha()]
# filter out tokens that are one or two characters long
tokens = [word for word in tokens if len(word) > 2]
# filter out tokens that are more than twenty characters long
tokens = [word for word in tokens if len(word) < 21]
# recreate the document string from parsed words
text = ''
for token in tokens:
text = text + ' ' + token
return tokens, text
def _get_train_test_data(self, filename, only_known=True):
# Get df, and list of all users' tweets.
tweets_by_user_df = self._user_grouper(filename)
# Get user classes
db_cols = ['class', 'user_description', 'username']
user_class_df = jsonl_to_df('users', db_cols)
user_class_df = user_class_df[['username', 'class']]
        tagged_df = pd.merge(tweets_by_user_df, user_class_df, left_on='username', right_on='username')
# *****************************************************************************
# Copyright (c) 2019-2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import itertools
import os
import platform
import string
import unittest
from copy import deepcopy
from itertools import product
import numpy as np
import pandas as pd
from numba.core.errors import TypingError
from sdc.hiframes.rolling import supported_rolling_funcs
from sdc.tests.test_base import TestCase
from sdc.tests.test_series import gen_frand_array
from sdc.tests.test_utils import (count_array_REPs, count_parfor_REPs,
skip_numba_jit, skip_sdc_jit,
test_global_input_data_float64)
LONG_TEST = (int(os.environ['SDC_LONG_ROLLING_TEST']) != 0
if 'SDC_LONG_ROLLING_TEST' in os.environ else False)
test_funcs = ('mean', 'max',)
if LONG_TEST:
# all functions except apply, cov, corr
test_funcs = supported_rolling_funcs[:-3]
def rolling_std_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).std(ddof)
def rolling_var_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).var(ddof)
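# A hedged example of exercising the usecase helpers above directly against
# pandas (no jitting involved); the data values are arbitrary.
def _example_rolling_usecases():
    series = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
    std_result = rolling_std_usecase(series, window=3, min_periods=1, ddof=1)
    var_result = rolling_var_usecase(series, window=3, min_periods=1, ddof=1)
    return std_result, var_result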
class TestRolling(TestCase):
@skip_numba_jit
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = self.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@skip_numba_jit
def test_fixed1(self):
# test sequentially with manually created dfs
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed2(self):
# test sequentially with generated dfs
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_apply1(self):
# test sequentially with manually created dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed_apply2(self):
# test sequentially with generated dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_parallel1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).sum()
return R.B.sum()
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_fixed_parallel_apply1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).apply(lambda a: a.sum())
return R.B.sum()
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_variable1(self):
# test sequentially with manually created dfs
df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
'time': [pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:05'),
pd.Timestamp('20130101 09:00:06')]})
df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
'time': [pd.Timestamp('20130101 09:00:01'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:04'),
pd.Timestamp('20130101 09:00:09')]})
wins = ('2s',)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
# XXX: skipping min/max for this test since the behavior of Pandas
# is inconsistent: it assigns NaN to last output instead of 4!
if func_name not in ('min', 'max'):
pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
@skip_numba_jit
def test_variable2(self):
# test sequentially with generated dfs
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
sizes = (1, 2, 10, 11, 121, 1000)
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
time = pd.date_range(start='1/1/2018', periods=n, freq='s')
df = pd.DataFrame({'B': np.arange(n), 'time': time})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
def test_variable_apply1(self):
# test sequentially with manually created dfs
df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
'time': [pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:05'),
pd.Timestamp('20130101 09:00:06')]})
df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
'time': [pd.Timestamp('20130101 09:00:01'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:04'),
pd.Timestamp('20130101 09:00:09')]})
wins = ('2s',)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# all functions except apply
for w in wins:
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
@skip_numba_jit
def test_variable_apply2(self):
# test sequentially with generated dfs
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# TODO: this crashes on Travis (3 process config) with size 1
sizes = (2, 10, 11, 121, 1000)
# all functions except apply
for w in wins:
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
time = pd.date_range(start='1/1/2018', periods=n, freq='s')
df = pd.DataFrame({'B': np.arange(n), 'time': time})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
@unittest.skipIf(platform.system() == 'Windows', "ValueError: time must be monotonic")
def test_variable_parallel1(self):
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# XXX: Pandas returns time = [np.nan] for size==1 for some reason
sizes = (2, 10, 11, 121, 1000)
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(n):\n"
func_text += " df = pd.DataFrame({'B': np.arange(n), 'time': "
func_text += " pd.DatetimeIndex(np.arange(n) * 1000000000)})\n"
func_text += " res = df.rolling('{}', on='time').{}()\n".format(w, func_name)
func_text += " return res.B.sum()\n"
loc_vars = {}
exec(func_text, {'pd': pd, 'np': np}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
@unittest.skipIf(platform.system() == 'Windows', "ValueError: time must be monotonic")
def test_variable_apply_parallel1(self):
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# XXX: Pandas returns time = [np.nan] for size==1 for some reason
sizes = (2, 10, 11, 121, 1000)
# all functions except apply
for w in wins:
func_text = "def test_impl(n):\n"
func_text += " df = pd.DataFrame({'B': np.arange(n), 'time': "
func_text += " pd.DatetimeIndex(np.arange(n) * 1000000000)})\n"
func_text += " res = df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
func_text += " return res.B.sum()\n"
loc_vars = {}
exec(func_text, {'pd': pd, 'np': np}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_series_fixed1(self):
# test series rolling functions
# all functions except apply
S1 = pd.Series([0, 1, 2, np.nan, 4])
        S2 = pd.Series([0, 1, 2, -2, 4])
# from IPython.core.display import display, HTML
# display(HTML("<style>.container { width:100% !important; }</style>"))
_ = None
import argparse
import json as J
import os
import shutil
import tempfile
import joblib
import mlflow
import functools as F
from importlib import reload as rl
import copy
import pandas as pd
import numpy as np
import scipy.stats
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path
from collections import Counter as C
from sklearn.metrics import accuracy_score
from pylab import ma, cm
from sklearn.utils import Bunch
from sklearn.preprocessing import LabelEncoder
import lightgbm
from tqdm import tqdm
from pymfe.mfe import MFE
import src.models as M
import src.mstream as MS
import src.aux as A
np.random.seed(42)
# https://stackoverflow.com/questions/4971269/
from matplotlib.cm import get_cmap
n = "Accent"
cmap = get_cmap(n) # type: matplotlib.colors.ListedColormap
colors = cmap.colors # type: list
PATH = Path(tempfile.mkdtemp())
os.makedirs(PATH/'png')
os.makedirs(PATH/'csv')
os.makedirs(PATH/'joblib')
par = argparse.ArgumentParser()
par.add_argument('--base', type=str, help='Database to use', default='elec2')
par.add_argument('--nrows', type=int, help='How many samples will be used at most', default=30_000)
par.add_argument('--train', type=int, help='Size of train set', default=300)
par.add_argument('--horizon', type=int, help='Size of horizon set', default=0)
par.add_argument('--test', type=int, help='Size of test window', default=10)
# par.add_argument('--metric', help='Metric to use on base models')
par.add_argument('--metabase_initial_size', type=int, help='Size of initial metabase', default=410)
par.add_argument('--online_size', type=int, help='How many metaexamples to test (online phase)', default=100)
par.add_argument('--offline_size', type=int, help='How many metaexamples to test (offline phase)', default=100)
par.add_argument('--meta_retrain_interval', type=int, help='How many new metaexamples until the metamodel is retrained', default=1)
par.add_argument('--base_retrain_interval', type=int, help='How many new base examples until the base models are retrained', default=10)
par.add_argument('--meta_train_window', type=int, help='How many metaexamples to train on', default=300)
par.add_argument('--gamma', type=int,
                 help='Batch size. Zero means predicting one algorithm for the whole window', default=0)
par.add_argument('--is_incremental', type=int, help='To use or not the incremental metamodel', default=0)
par.add_argument('--reverse_models', type=int, help='To use or not reverse models order', default=0)
par.add_argument('--supress_warning', type=int, help='Whether to supress warnings', default=1)
par.add_argument('--choice', type=str, help='Which model is preferred when a tie happens', default='NysSvm')
par.add_argument('--tune', type=int, help='Whether or not to fine tune base models', default=1)
args, rest = par.parse_known_args()
params = Bunch(**args.__dict__)
print(*params.items(), sep='\n')
if args.supress_warning:
A.IGNORE_WARNING()
del args.supress_warning
# args.online_size = 2000
# args.meta_retrain_interval = 1
# args.is_incremental = 0
labelizer = F.partial(A.biggest_labelizer_arbitrary, choice=args.choice)
joblib.dump(labelizer, PATH/'joblib'/'labelizer.joblib')
BASE=Path('csv')
# mapa = {ex.name: ex.experiment_id for ex in mlflow.list_experiments()}
EXPERIMENT_NAME = f'{args.base}_meta'
exp = mlflow.get_experiment_by_name(EXPERIMENT_NAME)
if not exp:
print(f"Criando experimento {EXPERIMENT_NAME} pela primeira vez")
experiment_id = mlflow.create_experiment(name=EXPERIMENT_NAME)
else:
experiment_id = exp.experiment_id
run = mlflow.start_run(experiment_id=experiment_id)
mlflow.log_params(args.__dict__)
META_MODEL='LgbCustomSkWrapper'
MODELS=[
'NysSvm',
'Rf',
]
mlflow.set_tag('meta_model', META_MODEL)
METRIC='acc'
META_METRICS=['acc', 'kappa_custom', 'geometric_mean']
META_RETRAIN_INTERVAL=args.meta_retrain_interval
MT_TRAIN_FEATURES = [
"best_node","elite_nn","linear_discr",
"naive_bayes","one_nn","random_node","worst_node",
"can_cor","cor", "cov","g_mean",
"gravity","h_mean","iq_range","kurtosis",
"lh_trace",
"mad",
"max","mean",
"median",
"min",
"nr_cor_attr","nr_disc","nr_norm","nr_outliers",
"p_trace","range","roy_root","sd","sd_ratio",
"skewness","sparsity","t_mean","var","w_lambda"
]
MT_HOR_FEATURES = []
MT_TEST_FEATURES = []
HP_GRID_LIS = [
{"svm__C": [1,10,100],
"nys__kernel": ['poly', 'rbf', 'sigmoid']
},
{ "max_depth": [3, 5, None],
"n_estimators": [100, 200, 300],
"min_samples_split": scipy.stats.randint(2, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]
}
]
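# A minimal sketch of the kind of base model the first grid above targets.
# It assumes 'NysSvm' in src.models is a sklearn Pipeline with steps named
# 'nys' (Nystroem feature map) and 'svm' (a linear SVM); the step names are
# inferred from the 'nys__'/'svm__' prefixes and are an assumption here.
def _example_nys_svm_pipeline():
    from sklearn.pipeline import Pipeline
    from sklearn.kernel_approximation import Nystroem
    from sklearn.svm import LinearSVC
    return Pipeline([
        ('nys', Nystroem(kernel='rbf')),  # 'nys__kernel' is tuned over ['poly', 'rbf', 'sigmoid']
        ('svm', LinearSVC(C=1)),          # 'svm__C' is tuned over [1, 10, 100]
    ])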
HP_META_MODEL = {
'boosting_type': 'dart',
'learning_rate': 0.01,
'tree_learner': 'feature',
'metric': 'multi_error,multi_logloss',
'objective': 'multiclassova',
'num_class': len(MODELS),
'is_unbalance': True,
'verbose': -1,
'seed': 42,
}
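# A hedged sketch of how HP_META_MODEL could drive a plain LightGBM training
# call. The project actually wraps this in M.CLF['LgbCustomSkWrapper'], so the
# direct lightgbm.train() usage below is only an illustrative assumption;
# labels are expected as integers in [0, num_class).
def _example_meta_model_train(meta_x, meta_y, num_boost_round=100):
    dtrain = lightgbm.Dataset(np.asarray(meta_x), label=np.asarray(meta_y))
    return lightgbm.train(HP_META_MODEL, dtrain, num_boost_round=num_boost_round)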
if args.reverse_models:
print("reversing..")
MODELS = MODELS[::-1]
HP_GRID_LIS = HP_GRID_LIS[::-1]
mlflow.set_tag('models', MODELS)
mlflow.set_tag('strategy', 'incremental' if args.is_incremental else 'nao-incremental')
mlflow.set_tag('meta-retreinamento', args.meta_retrain_interval)
joblib.dump(HP_META_MODEL, PATH/'joblib'/'hp_meta.joblib')
mlflow.log_params(A.prefixify(HP_META_MODEL, 'metaHp'))
df = pd.read_csv( BASE / f'{args.base}.csv', nrows=args.nrows)
X, y = df.iloc[:, 1:-1].fillna(0), df.iloc[:, -1]
lbe = LabelEncoder()
yt = lbe.fit_transform(y)
# runinfo_lis = mlflow.list_run_infos(EXPERIMENT_ID)
# df_lis = []
# for rinfo in runinfo_lis:
# try:
# df_lis.append(pd.read_csv(
# 'mlruns/{}/{}/artifacts/metabase.csv'.format(
# EXPERIMENT_ID, rinfo.run_id),
# index_col=False)
# )
# except:
# pass
# df_cache = pd.concat(df_lis, axis=1)
# class CacheMtF:
# def extractor(self, df_cache, prefix='tr'):
# test_cols = [i for i in df_cache.columns
# if i.startswith(prefix)]
# df = df_cache[test_cols]
# df = df.rename(lambda x: '_'.join(x.split('_')[1:]), axis=1)
# for mtf in df.apply(lambda x: x.to_dict(), axis=1):
# yield mtf
# def __init__(self, df_cache, prefix):
# self.generator = self.extractor(df_cache, prefix)
# def __call__(self, *args, **kwargs):
# return next(self.generator)
# train_extractor = CacheMtF(df_cache, 'tr')
# test_extractor = CacheMtF(df_cache, 'tes')
rl(M)
rl(A)
train_extractor = F.partial(A.su_extractor, ext=MFE(
features=MT_TRAIN_FEATURES,
random_state=42,
))
horizon_extractor = lambda x: {}
test_extractor = lambda x: {}
meta_model = M.CLF[META_MODEL](
fit_params=HP_META_MODEL,
classes=[m for m in MODELS],
)
models = [
Bunch(name=n, model=M.CLF[n]())
for n in MODELS
]
opt_params = A.random_params.copy()
opt_params['cv'] = args.test
def fun(model, x, y, retrain_window = META_RETRAIN_INTERVAL):
x = x[-retrain_window:]
y = y[-retrain_window:]
model.fit(x, y, incremental=True)
return model
incremental_trainer = fun if args.is_incremental else None
if args.tune:
print("SOME TUNING...")
optmize_data = args.metabase_initial_size * args.test + args.train
for m, hp in zip(models, HP_GRID_LIS):
A.random_tuner(
model=m.model,
params=hp,
opt_params=opt_params,
X=X[:optmize_data], y=yt[:optmize_data],
)
else:
print("NO TUNING AT ALL")
for m in models:
mlflow.sklearn.log_model(m.model, m.name)
#
# - Note: it makes sense to run this once with everything and then just load the result (uses disk space, saves time)
METABASE_INITIAL_SIZE=args.metabase_initial_size
init_params = dict(
meta_model=meta_model,
base_models=models,
base_tuners=[],
train_extractor=train_extractor,
horizon_extractor=horizon_extractor,
test_extractor=test_extractor,
labelizer=labelizer,
scorer=A.accuracy_score,
meta_retrain_interval=META_RETRAIN_INTERVAL,
is_incremental=args.is_incremental,
    incremental_trainer=incremental_trainer,  # FIXME: stop being sloppy here
)
fit_params = dict(
X=X,
Y=yt,
meta_window=args.meta_train_window,
train=args.train,
horizon=args.horizon,
test=args.test,
metabase_initial_size=METABASE_INITIAL_SIZE,
)
rl(MS)
FT_HISTORY = []
ms = MS.MetaStream(**init_params)
ms.fit(**fit_params, verbose=True, skip_tune=True);
FT_HISTORY.append(meta_model.lgb.feature_importance())
# Backup so the online phase can be restarted without regenerating the metabase
meta_x = ms.meta_x.copy()
meta_y = ms.meta_y.copy()
nxtr, nytr = ms.next_x_train.copy(), ms.next_y_train.copy()
nxhr, nyhr = ms.next_y_horizon.copy(), ms.next_y_horizon.copy()
cached_metafeatures = ms.cached_metafeatures.copy()
base_evals = ms._base_evals.copy()
stream = copy.deepcopy(ms.current_stream)
counter_labels = copy.deepcopy(ms._counter_labels)
# # To test with the implementation of
# rl(MS)
# FT_HISTORY = []
# ms2 = MS.MetaStream(**init_params)
# ms2.fit(**fit_params, verbose=True, skip_tune=True);
# # FT_HISTORY.append(meta_model.lgb.feature_importance())
rl(M)
rl(A)
mmetrics_fun = [M.METRICS_CLF[met] for met in META_METRICS]
off_meta_eval = []
off_preds = []
off_targets = []
print("FASE OFFLINE")
mm = M.CLF[META_MODEL](
fit_params=HP_META_MODEL,
classes=[m for m in MODELS],
)
train_idx_lis, test_idx_lis = A.TimeSeriesCVWindows(
n=args.offline_size, train=args.train, test=args.test
)
df_meta_x = pd.DataFrame(ms.meta_x)
fnames = df_meta_x.columns
meta_x_off = df_meta_x.values
meta_y_off =pd.Series(ms.meta_y).values
for (train_idx, test_idx) in tqdm(zip(train_idx_lis, test_idx_lis)):
xtrain, ytrain = meta_x_off[train_idx], meta_y_off[train_idx]
xtest, ytest = meta_x_off[test_idx], meta_y_off[test_idx]
mm.fit(pd.DataFrame(xtrain, columns=fnames), ytrain)
predictions = mm.predict(xtest)
off_preds.append(predictions)
off_targets.append(ytest)
off_meta_eval.append(
[m(y_true=ytest,
y_pred=mm.label_encoder.inverse_transform(predictions))
for m in mmetrics_fun]
)
del fnames, df_meta_x, meta_x_off, meta_y_off, mm
print("FIM FASE OFFLINE")
print("gamma:", args.gamma)
FT_HISTORY = []
lis = []
true_lis = []
online_size = args.online_size
predict_lis = []
processed = ms._processed
print("INÍCIO FASE ONLINE")
for i in tqdm(range(1, online_size+1)):
if not ms.current_stream.has_more_samples():
print(f"Acabaram os dados no índice {i}")
break
xtest, ytest = (
i.tolist() for i in ms.current_stream.next_sample(args.test)
)
    # Prediction (meta level)
pred=ms.predict(xtest, sel=args.gamma)
lis.extend(pred)
pre_dict = {
'true': np.array(ytest),
}
    # Prediction (base level)
for m in models:
pre_dict[m.name] = m.model.predict(
xtest
)
predict_lis.append(pre_dict)
try:
ms.update_stream(
xtest,
ytest,
sel=args.gamma,
base_retrain=True,
# verbose=True,
)
true_lis.append(ms.meta_y[-1])
except Exception as e:
print("Acabaram-se os generators")
raise e
break
FT_HISTORY.append(meta_model.lgb.feature_importance())
df_fti = pd.DataFrame(FT_HISTORY, columns=ms.meta_model.lgb.feature_name())
df_fti.to_csv(PATH/'csv'/'df_fti.csv', index=False,)
# Reason: lgbm works with numbers, so we recover the model name
# using the inverse transform
lis = ms.meta_model.label_encoder.inverse_transform(
lis
)
joblib.dump(lis, PATH/'joblib'/'meta_predicts.joblib')
joblib.dump(ms.meta_y, PATH/'joblib'/'meta_labels.joblib')
joblib.dump(ms._base_evals, PATH/'joblib'/'base_evals.joblib')
print("FIM FASE ONLINE")
print("DAQUI PARA BAIXO SÃO APENAS DUMPS E PLOTS")
df_base_online_predict = pd.DataFrame(predict_lis)
aux = df_base_online_predict.apply(
lambda x: [accuracy_score(i, x[0]) for i in x],
axis=1,
)
df_online_scores = pd.DataFrame(aux.to_list(), columns=df_base_online_predict.columns)
df_online_scores.to_csv(PATH/'csv'/'df_online_scores.csv', index=False)
def log_meta_offline_metrics(off_meta_eval):
def inner(s):
        mean, std = np.round(np.mean(s), 3), np.round(np.std(s), 3)
res = f"{s.name.capitalize().ljust(16)}: {mean:.3} ± {std:.3}"
print(res)
return mean, std
df_offline_meta_eval = pd.DataFrame(off_meta_eval, columns=META_METRICS)
mts = df_offline_meta_eval.apply(inner)
mts.index = ['mean', 'std']
mts.to_csv(PATH/'csv'/'meta_offline_metrics.csv', index='index')
    # To read it back:
# pd.read_csv('offline_metrics.csv', index_col=0)
return mts
log_meta_offline_metrics(off_meta_eval)
def log_meta_online_metrics(y_true, y_pred):
mp = dict()
for mtr, mtr_name in zip(mmetrics_fun, META_METRICS):
mp[mtr_name] = (np.round(mtr(y_true=y_true, y_pred=y_pred), 3))
mp = pd.Series(mp)
joblib.dump(mp, PATH/'joblib'/'meta_online_metrics.joblib', )
    # To read it back:
# pd.read_csv('offline_metrics.csv', index_col=0)
return mp
mp = log_meta_online_metrics(true_lis, lis)
def plot_offline_performance(colors, PATH, MODELS, off_labels, df_off_scores):
# df_off_scores = pd.DataFrame(base_evals)
# off_labels = pd.Series(labels)
f, (ax1, ax2) = plt.subplots(1, 2, dpi=200, figsize=(13, 6.6))
    f.suptitle('OFFline phase', c='r', fontweight='bold', fontsize='x-large')
A.plot_pie(off_labels, colors=colors, ax=ax1);
def mean_std(x):
return f"{np.mean(x):.3} ± {np.std(x):.3}"
ax1.legend(df_off_scores.apply(mean_std), loc='lower right');
    ax1.set_title('Absolute performance \n(tie = model that comes first in the model list)',)
# df_off_evals = pd.DataFrame(base_evals)
df_label_acc = df_off_scores.apply(lambda x: A.biggest_labelizer_draw(x.to_dict()), axis=1)
df_off_labels = pd.DataFrame(
df_label_acc.to_list(), columns=['label', 'acc']
)
A.plot_pie(df_off_labels.label, colors=colors, ax=ax2, autopct='%.1f')
    ax2.set_title('Distribution and performance broken down by best case');
def mean_std2(x):
return f"{x.mean()[0]:.3} ± {x.std()[0]:.3}"
ax2.legend(
[mean_std2(df_off_labels[df_off_labels.label == c]) for c in MODELS + ['draw']] [::-1]
, loc='upper right');
plt.savefig(PATH/'png'/'initial-metabase-performance.png');
off_labels = pd.Series(ms.meta_y[:METABASE_INITIAL_SIZE])
vc=off_labels.value_counts()
DEFAULT = vc.index[vc.argmax()]
mlflow.set_tag('default', DEFAULT)
del vc
df_off_scores = pd.DataFrame(ms._base_evals[:METABASE_INITIAL_SIZE])
plot_offline_performance(
colors, PATH, MODELS, off_labels, df_off_scores,
)
joblib.dump(ms.meta_y, PATH/'joblib'/'all_meta_y.joblib')
joblib.dump(ms._base_evals, PATH/'joblib'/'all_base_evals.joblib')
df_off = pd.DataFrame(ms._base_evals[:METABASE_INITIAL_SIZE])
import pandas as pd
import numpy as np
import pytest
import re
import tubular
import tubular.testing.helpers as h
import tubular.testing.test_data as data_generators_p
import input_checker
from input_checker._version import __version__
from input_checker.checker import InputChecker
from input_checker.exceptions import InputCheckerError
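# A hedged usage sketch of InputChecker, mirroring how the tests below call it
# (the column roles follow the frame returned by data_generators_p.create_df_2()).
def _example_input_checker_usage():
    df = data_generators_p.create_df_2()
    checker = InputChecker(
        columns=["a", "b", "c"],
        numerical_columns=["a"],
        categorical_columns=["b", "c"],
    )
    checker.fit(df)
    return checker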
class TestInit(object):
"""Tests for InputChecker.init()."""
def test_super_init_called(self, mocker):
"""Test that init calls BaseTransformer.init."""
expected_call_args = {0: {"args": (), "kwargs": {"columns": ["a", "b"]}}}
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "__init__", expected_call_args
):
InputChecker(columns=["a", "b"])
def test_inheritance(self):
"""Test that InputChecker inherits from tubular.base.BaseTransformer."""
x = InputChecker()
h.assert_inheritance(x, tubular.base.BaseTransformer)
def test_arguments(self):
"""Test that InputChecker init has expected arguments."""
h.test_function_arguments(
func=InputChecker.__init__,
expected_arguments=[
"self",
"columns",
"categorical_columns",
"numerical_columns",
"datetime_columns",
"skip_infer_columns",
],
expected_default_values=(None, None, None, None, None),
)
def test_version_attribute(self):
"""Test that __version__ attribute takes expected value."""
x = InputChecker(columns=["a"])
h.assert_equal_dispatch(
expected=__version__,
actual=x.version_,
msg="__version__ attribute",
)
def test_columns_attributes_generated(self):
"""Test all columns attributes are saved with InputChecker init"""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b"],
datetime_columns=["d"],
skip_infer_columns=["c"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert hasattr(x, "columns") is True, "columns attribute not present after init"
assert (
hasattr(x, "numerical_columns") is True
), "numerical_columns attribute not present after init"
assert (
hasattr(x, "categorical_columns") is True
), "categorical_columns attribute not present after init"
assert (
hasattr(x, "datetime_columns") is True
), "datetime_columns attribute not present after init"
assert (
hasattr(x, "skip_infer_columns") is True
), "skip_infer_columns attribute not present after init"
def test_check_type_called(self, mocker):
"""Test all check type is called by the init method."""
spy = mocker.spy(input_checker.checker.InputChecker, "_check_type")
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b"],
datetime_columns=["d"],
skip_infer_columns=["c"],
)
assert (
spy.call_count == 5
), "unexpected number of calls to InputChecker._check_type with init"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
call_3_args = spy.call_args_list[3]
call_3_pos_args = call_3_args[0]
call_4_args = spy.call_args_list[4]
call_4_pos_args = call_4_args[0]
expected_pos_args_0 = (
x,
["a", "b", "c", "d"],
"input columns",
[list, type(None), str],
)
expected_pos_args_1 = (
x,
["b"],
"categorical columns",
[list, str, type(None)],
)
expected_pos_args_2 = (
x,
["a"],
"numerical columns",
[list, dict, str, type(None)],
)
expected_pos_args_3 = (
x,
["d"],
"datetime columns",
[list, dict, str, type(None)],
)
expected_pos_args_4 = (
x,
["c"],
"skip infer columns",
[list, type(None)],
)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _check_type call for columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _check_type call for categorical columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _check_type call for numerical columns argument"
assert (
expected_pos_args_3 == call_3_pos_args
), "positional args unexpected in _check_type call for datetime columns argument"
assert (
expected_pos_args_4 == call_4_pos_args
), "positional args unexpected in _check_type call for skip infer columns argument"
def test_check_is_string_value_called(self, mocker):
"""Test all check string is called by the init method when option set to infer."""
spy = mocker.spy(input_checker.checker.InputChecker, "_is_string_value")
x = InputChecker(
numerical_columns="infer",
categorical_columns="infer",
datetime_columns="infer",
)
assert (
spy.call_count == 3
), "unexpected number of calls to InputChecker._is_string_value with init"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
expected_pos_args_0 = (x, x.categorical_columns, "categorical columns", "infer")
expected_pos_args_1 = (x, x.numerical_columns, "numerical columns", "infer")
expected_pos_args_2 = (x, x.datetime_columns, "datetime columns", "infer")
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _is_string_value call for numerical columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _is_string_value call for categorical columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _is_string_value call for categorical columns argument"
def test_check_is_empty_called(self, mocker):
"""Test all check is empty is called by the init method."""
spy = mocker.spy(input_checker.checker.InputChecker, "_is_empty")
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
assert (
spy.call_count == 4
), "unexpected number of calls to InputChecker._is_empty with init"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
call_3_args = spy.call_args_list[3]
call_3_pos_args = call_3_args[0]
expected_pos_args_0 = (x, "input columns", ["a", "b", "c", "d"])
expected_pos_args_1 = (x, "categorical columns", ["b", "c"])
expected_pos_args_2 = (x, "numerical columns", ["a"])
expected_pos_args_3 = (x, "datetime columns", ["d"])
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _is_empty call for categorical columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _is_empty call for numerical columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _is_empty call for numerical columns argument"
assert (
expected_pos_args_3 == call_3_pos_args
), "positional args unexpected in _is_empty call for numerical columns argument"
def test_check_is_listed_in_columns_called(self, mocker):
spy = mocker.spy(input_checker.checker.InputChecker, "_is_listed_in_columns")
InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker._is_listed_in_columns with init"
class TestConsolidateInputs(object):
def test_arguments(self):
"""Test that _consolidate_inputs has expected arguments."""
h.test_function_arguments(
func=InputChecker._consolidate_inputs,
expected_arguments=["self", "X"],
expected_default_values=None,
)
def test_infer_datetime_columns(self):
"""Test that _consolidate_inputs infers the correct datetime columns"""
x = InputChecker(datetime_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["e"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08-04-2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert x.datetime_columns == [
"d",
"e",
], "infer datetime not finding correct columns"
def test_infer_datetime_dict(self):
"""Test that _consolidate_inputs infers the correct datetime dict"""
x = InputChecker(datetime_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
        assert (
            x.datetime_dict["d"]["maximum"] is False
        ), "infer datetime not specifying maximum value check as false"
        assert (
            x.datetime_dict["d"]["minimum"] is True
        ), "infer datetime not specifying minimum value check as true"
def test_infer_categorical_columns(self):
"""Test that _consolidate_inputs infers the correct categorical columns"""
x = InputChecker(categorical_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = [True, True, False, True, True, False, np.nan]
df["d"] = df["d"].astype("bool")
x.fit(df)
assert x.categorical_columns == [
"b",
"c",
"d",
], "infer categorical not finding correct columns"
def test_infer_numerical_columns(self):
"""Test that _consolidate_inputs infers the correct numerical columns"""
x = InputChecker(numerical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert x.numerical_columns == [
"a"
], "infer numerical not finding correct columns"
def test_infer_numerical_skips_infer_columns(self):
"""Test that _consolidate_inputs skips right columns when inferring numerical"""
x = InputChecker(numerical_columns="infer", skip_infer_columns=["a"])
df = data_generators_p.create_df_2()
df["d"] = df["a"]
x.fit(df)
assert x.numerical_columns == [
"d"
], "infer numerical not finding correct columns when skipping infer columns"
def test_infer_categorical_skips_infer_columns(self):
"""Test that _consolidate_inputs skips right columns when inferring categorical"""
x = InputChecker(categorical_columns="infer", skip_infer_columns=["b"])
df = data_generators_p.create_df_2()
x.fit(df)
assert x.categorical_columns == [
"c"
], "infer categorical not finding correct columns when skipping infer columns"
def test_infer_datetime_skips_infer_columns(self):
"""Test that _consolidate_inputs skips right columns when inferring datetime"""
x = InputChecker(datetime_columns="infer", skip_infer_columns=["d"])
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["a"] = df["d"]
x.fit(df)
assert x.datetime_columns == [
"a"
], "infer datetime not finding correct columns when skipping infer columns"
def test_infer_numerical_dict(self):
"""Test that _consolidate_inputs infers the correct numerical dict"""
x = InputChecker(numerical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert (
x.numerical_dict["a"]["maximum"] is True
), "infer numerical not specifying maximum value check as true"
assert (
x.numerical_dict["a"]["minimum"] is True
), "infer numerical not specifying minimum value check as true"
def test_datetime_type(self):
"""Test that datetime columns is a list after calling _consolidate_inputs"""
x = InputChecker(datetime_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
type(x.datetime_columns) is list
), f"incorrect datetime_columns type returned from _consolidate_inputs - expected: list but got: {type(x.datetime_columns)} "
def test_categorical_type(self):
"""Test that categorical columns is a list after calling _consolidate_inputs"""
x = InputChecker(categorical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert (
type(x.categorical_columns) is list
), f"incorrect categorical_columns type returned from _consolidate_inputs - expected: list but got: {type(x.categorical_columns)} "
def test_numerical_type(self):
"""Test that numerical columns and dict are a list and dict after calling _consolidate_inputs"""
x = InputChecker(numerical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert (
type(x.numerical_columns) is list
), f"incorrect numerical_columns type returned from _consolidate_inputs - expected: list but got: {type(x.numerical_columns)} "
assert (
type(x.numerical_dict) is dict
), f"incorrect numerical_dict type returned from _consolidate_inputs - expected: dict but got: {type(x.numerical_dict)} "
def test_check_is_subset_called(self, mocker):
"""Test all check _is_subset is called by the _consolidate_inputs method."""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["c"],
datetime_columns=["d"],
skip_infer_columns=["b"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
spy = mocker.spy(input_checker.checker.InputChecker, "_is_subset")
x.fit(df)
assert (
spy.call_count == 5
), "unexpected number of calls to InputChecker._is_subset with _consolidate_inputs"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
call_3_args = spy.call_args_list[3]
call_3_pos_args = call_3_args[0]
call_4_args = spy.call_args_list[4]
call_4_pos_args = call_4_args[0]
expected_pos_args_0 = (x, "skip infer columns", ["b"], df)
expected_pos_args_1 = (x, "input columns", ["a", "b", "c", "d"], df)
expected_pos_args_2 = (x, "categorical columns", ["c"], df)
expected_pos_args_3 = (x, "numerical columns", ["a"], df)
expected_pos_args_4 = (x, "datetime columns", ["d"], df)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _is_subset call for skip_infer_columns columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _is_subset call for input columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _is_subset call for categorical columns argument"
assert (
expected_pos_args_3 == call_3_pos_args
), "positional args unexpected in _is_subset call for numerical columns argument"
assert (
expected_pos_args_4 == call_4_pos_args
), "positional args unexpected in _is_subset call for datetime columns argument"
class TestFitTypeChecker(object):
"""Tests for InputChecker._fit_type_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_type_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_type_checker, expected_arguments=["self", "X"]
)
def test_no_column_classes_before_fit(self):
"""Test column_classes is not present before fit called"""
x = InputChecker()
assert (
hasattr(x, "column_classes") is False
), "column_classes attribute present before fit"
def test_column_classes_after_fit(self):
"""Test column_classes is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
assert hasattr(
x, "column_classes"
), "column_classes attribute not present after fit"
def test_correct_columns_classes(self):
"""Test fit type checker saves types for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(columns=["a"])
x.fit(df)
assert list(x.column_classes.keys()) == [
"a"
], f"incorrect values returned from _fit_value_checker - expected: ['a'] but got: {list(x.column_classes.keys())}"
def test_correct_classes_identified(self):
"""Test fit type checker identifies correct classes is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
x.column_classes["a"] == "float64"
), f"incorrect type returned from _fit_type_checker for column 'a' - expected: float64 but got: {x.column_classes['a']}"
assert (
x.column_classes["b"] == "object"
), f"incorrect type returned from _fit_type_checker for column 'b' - expected: object but got: {x.column_classes['b']}"
assert (
x.column_classes["c"] == "category"
), f"incorrect type returned from _fit_type_checker for column 'c' - expected: category but got: {x.column_classes['c']}"
assert (
x.column_classes["d"] == "datetime64[ns]"
), f"incorrect type returned from _fit_type_checker for column 'd' - expected: datetime64[ns] but got: {x.column_classes['d']}"
class TestFitNullChecker(object):
"""Tests for InputChecker._fit_null_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_null_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_null_checker, expected_arguments=["self", "X"]
)
def test_no_expected_values_before_fit(self):
"""Test null_map is not present before fit called"""
x = InputChecker()
assert hasattr(x, "null_map") is False, "null_map attribute present before fit"
def test_expected_values_after_fit(self):
"""Test null_map is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
assert hasattr(x, "null_map"), "null_map attribute not present after fit"
def test_correct_columns_nulls(self):
"""Test fit nulls checker saves map for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(columns=["a"])
x.fit(df)
assert list(x.null_map.keys()) == [
"a"
], f"incorrect values returned from _fit_null_checker - expected: ['a'] but got: {list(x.null_map.keys())}"
def test_correct_classes_identified(self):
"""Test fit null checker identifies correct columns with nulls after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
df["b"] = df["b"].fillna("a")
x.fit(df)
assert (
x.null_map["a"] == 1
), f"incorrect values returned from _fit_null_checker - expected: 1 but got: {x.null_map['a']}"
assert (
x.null_map["b"] == 0
), f"incorrect values returned from _fit_null_checker - expected: 0 but got: {x.null_map['b']}"
assert (
x.null_map["c"] == 1
), f"incorrect values returned from _fit_null_checker - expected: 1 but got: {x.null_map['c']}"
class TestFitValueChecker(object):
"""Tests for InputChecker._fit_value_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_value_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_value_checker, expected_arguments=["self", "X"]
)
def test_no_expected_values_before_fit(self):
"""Test expected_values is not present before fit called"""
x = InputChecker(categorical_columns=["b", "c"])
assert (
hasattr(x, "expected_values") is False
), "expected_values attribute present before fit"
def test_expected_values_after_fit(self):
"""Test expected_values is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
assert hasattr(
x, "expected_values"
), "expected_values attribute not present after fit"
def test_correct_columns_map(self):
"""Test fit value checker saves levels for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
assert list(x.expected_values.keys()) == [
"b",
"c",
], f"incorrect values returned from _fit_value_checker - expected: ['b', 'c'] but got: {list(x.expected_values.keys())}"
def test_correct_values_identified(self):
"""Test fit value checker identifies corrcet levels after fit called"""
df = data_generators_p.create_df_2()
df["d"] = [True, True, False, True, True, False, np.nan]
df["d"] = df["d"].astype("bool")
x = InputChecker(categorical_columns=["b", "c", "d"])
x.fit(df)
assert x.expected_values["b"] == [
"a",
"b",
"c",
"d",
"e",
"f",
np.nan,
], f"incorrect values returned from _fit_value_checker - expected: ['a', 'b', 'c', 'd', 'e', 'f', np.nan] but got: {x.expected_values['b']}"
assert x.expected_values["c"] == [
"a",
"b",
"c",
"d",
"e",
"f",
np.nan,
], f"incorrect values returned from _fit_value_checker - expected: ['a', 'b', 'c', 'd', 'e', 'f', np.nan] but got: {x.expected_values['c']}"
assert x.expected_values["d"] == [
True,
False,
], f"incorrect values returned from _fit_value_checker - expected: [True, False, np.nan] but got: {x.expected_values['d']}"
class TestFitNumericalChecker(object):
"""Tests for InputChecker._fit_numerical_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_numerical_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_numerical_checker, expected_arguments=["self", "X"]
)
def test_no_expected_values_before_fit(self):
"""Test numerical_values is not present before fit called"""
x = InputChecker()
assert (
hasattr(x, "numerical_values") is False
), "numerical_values attribute present before fit"
def test_expected_values_after_fit(self):
"""Test numerical_values is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
assert hasattr(
x, "numerical_values"
), "numerical_values attribute not present after fit"
def test_correct_columns_num_values(self):
"""Test fit numerical checker saves values for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
assert list(x.numerical_values.keys()) == [
"a"
], f"incorrect values returned from numerical_values - expected: ['a'] but got: {list(x.numerical_values.keys())}"
def test_correct_numerical_values_identified(self):
"""Test fit numerical checker identifies correct range values after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
assert (
x.numerical_values["a"]["maximum"] == 6
), f"incorrect values returned from _fit_numerical_checker - expected: 1 but got: {x.numerical_values['a']['maximum']}"
assert (
x.numerical_values["a"]["minimum"] == 1
), f"incorrect values returned from _fit_numerical_checker - expected: 0 but got: {x.numerical_values['a']['minimum']}"
def test_correct_numerical_values_identified_dict(self):
"""Test fit numerical checker identifies correct range values after fit called when inputting a dictionary"""
df = data_generators_p.create_df_2()
numerical_dict = {}
numerical_dict["a"] = {}
numerical_dict["a"]["maximum"] = True
numerical_dict["a"]["minimum"] = False
x = InputChecker(numerical_columns=numerical_dict)
x.fit(df)
assert (
x.numerical_values["a"]["maximum"] == 6
), f"incorrect values returned from _fit_numerical_checker - expected: 1 but got: {x.numerical_values['a']['maximum']}"
assert (
x.numerical_values["a"]["minimum"] is None
), f"incorrect values returned from _fit_numerical_checker - expected: None but got: {x.numerical_values['a']['minimum']}"
class TestFitDatetimeChecker(object):
"""Tests for InputChecker._fit_datetime_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_value_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_datetime_checker, expected_arguments=["self", "X"]
)
def test_no_datetime_values_before_fit(self):
"""Test expected_values is not present before fit called"""
x = InputChecker(datetime_columns=["b", "c"])
assert (
hasattr(x, "datetime_values") is False
), "datetime_values attribute present before fit"
def test_datetime_values_after_fit(self):
"""Test datetime_values is present after fit called"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["e"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08-04-2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d", "e"])
x.fit(df)
assert hasattr(
x, "datetime_values"
), "datetime_values attribute not present after fit"
def test_correct_columns_map(self):
"""Test fit datetime checker saves minimum dates for correct columns after fit called"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["e"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08-04-2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d", "e"])
x.fit(df)
assert list(x.datetime_values.keys()) == [
"d",
"e",
], f"incorrect values returned from _fit_datetime_checker - expected: ['d', 'e'] but got: {list(x.datetime_values.keys())} "
def test_correct_datetime_values_identified(self):
"""Test fit datetime checker identifies correct minimum bound after fit called"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
expected_min_d = pd.to_datetime("15/10/2018").date()
actual_min_d = x.datetime_values["d"]["minimum"]
actual_max_d = x.datetime_values["d"]["maximum"]
assert (
actual_min_d == expected_min_d
), f"incorrect values returned from _fit_datetime_checker - expected: {expected_min_d}, but got: {actual_min_d}"
assert (
actual_max_d is None
), f"incorrect values returned from _fit_datetime_checker - expected: None, but got: {actual_max_d}"
def test_correct_datetime_values_identified_dict(self):
"""Test fit datetime checker identifies correct range values after fit called when inputting a dictionary"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
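# the dict form of datetime_columns switches on both the minimum and maximum bound checks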
datetime_dict = {"d": {"maximum": True, "minimum": True}}
x = InputChecker(datetime_columns=datetime_dict)
x.fit(df)
expected_min_d = pd.to_datetime("15/10/2018").date()
expected_max_d = pd.to_datetime("01/02/2021").date()
actual_min_d = x.datetime_values["d"]["minimum"]
actual_max_d = x.datetime_values["d"]["maximum"]
assert (
actual_min_d == expected_min_d
), f"incorrect values returned from _fit_datetime_checker - expected: {expected_min_d}, but got: {actual_min_d}"
assert (
actual_max_d == expected_max_d
), f"incorrect values returned from _fit_datetime_checker - expected: {expected_max_d}, but got: {actual_max_d}"
class TestFit(object):
"""Tests for InputChecker.fit()."""
def test_arguments(self):
"""Test that InputChecker fit has expected arguments."""
h.test_function_arguments(
func=InputChecker.fit,
expected_arguments=["self", "X", "y"],
expected_default_values=(None,),
)
def test_super_fit_called(self, mocker):
"""Test that BaseTransformer fit called."""
expected_call_args = {
0: {"args": (data_generators_p.create_df_2(), None), "kwargs": {}}
}
df = data_generators_p.create_df_2()
x = InputChecker(columns=["a"])
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "fit", expected_call_args
):
x.fit(df)
def test_all_columns_selected(self):
"""Test fit selects all columns when columns parameter set to None"""
df = data_generators_p.create_df_2()
x = InputChecker(columns=None)
assert (
x.columns is None
), f"incorrect columns attribute before fit when columns parameter set to None - expected: None but got: {x.columns}"
x.fit(df)
assert x.columns == [
"a",
"b",
"c",
], f"incorrect columns identified when columns parameter set to None - expected: ['a', 'b', 'c'] but got: {x.columns}"
def test_fit_returns_self(self):
"""Test fit returns self?"""
df = data_generators_p.create_df_2()
x = InputChecker()
x_fitted = x.fit(df)
assert x_fitted is x, "Returned value from InputChecker.fit not as expected."
def test_no_optional_calls_fit(self):
"""Test numerical_values and expected_values is not present after fit if parameters set to None"""
x = InputChecker(
numerical_columns=None, categorical_columns=None, datetime_columns=None
)
df = data_generators_p.create_df_2()
x.fit(df)
assert (
hasattr(x, "numerical_values") is False
), "numerical_values attribute present with numerical_columns set to None"
assert (
hasattr(x, "expected_values") is False
), "expected_values attribute present with categorical_columns set to None"
assert (
hasattr(x, "datetime_values") is False
), "datetime_values attribute present with datetime_columns set to None"
def test_compulsory_checks_generated_with_no_optional_calls_fit(self):
"""Test null_map and column_classes are present after fit when optional parameters set to None"""
x = InputChecker(
numerical_columns=None, categorical_columns=None, datetime_columns=None
)
df = data_generators_p.create_df_2()
x.fit(df)
assert (
hasattr(x, "null_map") is True
), "null_map attribute not present when optional checks set to None"
assert (
hasattr(x, "column_classes") is True
), "column_classes attribute not present when optional checks set to None"
def test_all_checks_generated(self):
"""Test all checks are generated when all optional parameters set"""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
hasattr(x, "numerical_values") is True
), "numerical_values attribute not present after fit with numerical_columns set"
assert (
hasattr(x, "expected_values") is True
), "expected_values attribute not present after fit with categorical_columns set"
assert (
hasattr(x, "datetime_values") is True
), "expected_values attribute not present after fit with datetime_columns set"
assert (
hasattr(x, "null_map") is True
), "null_map attribute not present after fit"
assert (
hasattr(x, "column_classes") is True
), "column_classes attribute not present after fit"
def test_check_df_is_empty_called(self, mocker):
"""Test check is df empty is called by the fit method."""
x = InputChecker(
columns=["a", "b", "c"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
)
df = data_generators_p.create_df_2()
spy = mocker.spy(input_checker.checker.InputChecker, "_df_is_empty")
x.fit(df)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker._df_is_empty with fit"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
expected_pos_args_0 = (x, "input dataframe", df)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _df_is_empty call for dataframe argument"
class TestTransformTypeChecker(object):
"""Tests for InputChecker._transform_type_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_type_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_type_checker,
expected_arguments=["self", "X", "batch_mode"],
expected_default_values=(False,),
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["column_classes"],), "kwargs": {}}}
x = InputChecker()
df = data_generators_p.create_df_2()
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_type_checker(df)
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_type_checker returns results dictionary"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
type_checker_failed_checks = x._transform_type_checker(df)
assert isinstance(
type_checker_failed_checks, dict
), f"incorrect type results type identified - expected: dict but got: {type(type_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_type_checker passes all the checks on the training dataframe"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
type_checker_failed_checks = x._transform_type_checker(df)
assert (
type_checker_failed_checks == {}
), f"Type checker found failed tests - {list(type_checker_failed_checks.keys())}"
def test_transform_passes_column_all_nulls(self):
"""Test _transform_type_checker passes all the checks on the training dataframe when a column contains only nulls"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
df["c"] = np.nan
type_checker_failed_checks = x._transform_type_checker(df)
assert (
type_checker_failed_checks == {}
), f"Type checker found failed tests - {list(type_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_type_checker captures a failed check"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
exp_type = df["a"].dtypes
df.loc[5, "a"] = "a"
type_checker_failed_checks = x._transform_type_checker(df)
assert (
type_checker_failed_checks["a"]["actual"] == df["a"].dtypes
), f"incorrect values saved to type_checker_failed_checks actual types - expected: {df['a'].dtypes} but got: {type_checker_failed_checks['a']['actual']}"
assert (
type_checker_failed_checks["a"]["expected"] == exp_type
), f"incorrect values saved to type_checker_failed_checks expected types - expected: {exp_type} but got: {type_checker_failed_checks['a']['expected']}"
def test_transform_passes_batch_mode(self):
"""Test _transform_type_checker passes all the checks on the training dataframe"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
type_checker_failed_checks = x._transform_type_checker(df, batch_mode=True)
assert (
type_checker_failed_checks == {}
), f"Type checker found failed tests - {list(type_checker_failed_checks.keys())}"
def test_transform_captures_failed_test_batch_mode(self):
"""Test _transform_type_checker handles mixed types"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker()
x.fit(df)
exp_type = df["a"].dtypes
df.loc[5, "a"] = "a"
df.loc[1, "d"] = "a"
df.loc[3, "b"] = 1
type_checker_failed_checks = x._transform_type_checker(df, batch_mode=True)
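# in batch mode each failing column records the offending row indexes, the per-row actual type names and the expected type name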
expected_output = {
"a": {"idxs": [5], "actual": {5: "str"}, "expected": "float"},
"b": {"idxs": [3], "actual": {3: "int"}, "expected": "str"},
"d": {"idxs": [1], "actual": {1: "str"}, "expected": "Timestamp"},
}
for k, v in expected_output.items():
assert (
k in type_checker_failed_checks.keys()
), f"expected column {k} in type_checker_failed_checks output"
assert (
type(type_checker_failed_checks[k]) == dict
), f"expected dict for column {k} in type_checker_failed_checks output"
for sub_k, sub_v in expected_output[k].items():
assert (
sub_k in type_checker_failed_checks[k].keys()
), f"expected {sub_k} as dict key in type_checker_failed_checks output"
assert (
sub_v == type_checker_failed_checks[k][sub_k]
), f"expected {sub_v} as value for {sub_k} in column {k} output of type_checker_failed_checks output"
class TestTransformNullChecker(object):
"""Tests for InputChecker._transform_null_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_null_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_null_checker, expected_arguments=["self", "X"]
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["null_map"],), "kwargs": {}}}
x = InputChecker()
df = data_generators_p.create_df_2()
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_null_checker(df)
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_null_checker returns results dictionary"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
null_checker_failed_checks = x._transform_null_checker(df)
assert isinstance(
null_checker_failed_checks, dict
), f"incorrect null results type identified - expected: dict but got: {type(null_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_null_checker passes all the checks on the training dataframe"""
df = data_generators_p.create_df_2()
df["b"] = df["b"].fillna("a")
x = InputChecker()
x.fit(df)
null_checker_failed_checks = x._transform_null_checker(df)
assert (
null_checker_failed_checks == {}
), f"Null checker found failed tests - {list(null_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_null_checker captures a failed check"""
df = data_generators_p.create_df_2()
df["b"] = df["b"].fillna("a")
x = InputChecker()
x.fit(df)
df.loc[5, "b"] = np.nan
null_checker_failed_checks = x._transform_null_checker(df)
assert null_checker_failed_checks["b"] == [
5
], f"incorrect values saved to value_checker_failed_checks - expected: [5] but got: {null_checker_failed_checks['b']}"
class TestTransformNumericalChecker(object):
"""Tests for InputChecker._transform_numerical_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_numerical_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_numerical_checker,
expected_arguments=["self", "X", "type_fails", "batch_mode"],
expected_default_values=(
{},
False,
),
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["numerical_values"],), "kwargs": {}}}
x = InputChecker(numerical_columns=["a"])
df = data_generators_p.create_df_2()
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_numerical_checker(df, {})
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_numerical_checker returns results dictionary"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
assert isinstance(
numerical_checker_failed_checks, dict
), f"incorrect numerical results type identified - expected: dict but got: {type(numerical_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_numerical_checker passes all the numerical checks on the training dataframe"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
assert (
numerical_checker_failed_checks == {}
), f"Numerical checker found failed tests - {list(numerical_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_numerical_checker captures a failed check"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
df.loc[0, "a"] = -1
df.loc[5, "a"] = 7
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
expected_max = {5: 7.0}
expected_min = {0: -1.0}
assert (
numerical_checker_failed_checks["a"]["maximum"] == expected_max
), f"incorrect values saved to numerical_checker_failed_checks - expected: {expected_max} but got: {numerical_checker_failed_checks['a']['maximum']}"
assert (
numerical_checker_failed_checks["a"]["minimum"] == expected_min
), f"incorrect values saved to numerical_checker_failed_checks - expected: {expected_min} but got: {numerical_checker_failed_checks['a']['minimum']}"
def test_transform_captures_failed_test_only_maximum(self):
"""Test _transform_numerical_checker captures a failed check when the check includes a maximum value but no minimum value"""
df = data_generators_p.create_df_2()
numerical_dict = {}
numerical_dict["a"] = {}
numerical_dict["a"]["maximum"] = True
numerical_dict["a"]["minimum"] = False
x = InputChecker(numerical_columns=numerical_dict)
x.fit(df)
df.loc[0, "a"] = -1
df.loc[5, "a"] = 7
expected_max = {5: 7.0}
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
assert (
numerical_checker_failed_checks["a"]["maximum"] == expected_max
), f"incorrect values saved to numerical_checker_failed_checks - expected: {expected_max} but got: {numerical_checker_failed_checks['a']['maximum']}"
assert (
"minimum" not in numerical_checker_failed_checks["a"]
), "No minimum value results expected given input the numerical dict"
def test_transform_captures_failed_test_only_minimum(self):
"""Test _transform_numerical_checker captures a failed check when the check includes a minimum value but no maximum value"""
df = data_generators_p.create_df_2()
numerical_dict = {}
numerical_dict["a"] = {}
numerical_dict["a"]["maximum"] = False
numerical_dict["a"]["minimum"] = True
x = InputChecker(numerical_columns=numerical_dict)
x.fit(df)
df.loc[0, "a"] = -1
df.loc[5, "a"] = 7
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
expected_min = {0: -1.0}
assert (
numerical_checker_failed_checks["a"]["minimum"] == expected_min
), f"incorrect values saved to numerical_checker_failed_checks - expected: {expected_min} but got: {numerical_checker_failed_checks['a']['minimum']}"
assert (
"maximum" not in numerical_checker_failed_checks["a"]
), "No maximum value results expected given input the numerical dict"
def test_transform_skips_failed_type_checks_batch_mode(self):
"""Test _transform_numerical_checker skips checks for rows which aren't numerical
when operating in batch mode"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
df.loc[4, "a"] = "z"
df.loc[1, "a"] = 1
df.loc[2, "a"] = 100
type_fails_dict = {
"a": {"idxs": [1, 4], "actual": {1: "int", 4: "str"}, "expected": "float"}
}
expected_output = {"a": {"max idxs": [2], "maximum": {2: 100}}}
numerical_checker_failed_checks = x._transform_numerical_checker(
df, type_fails_dict, batch_mode=True
)
h.assert_equal_dispatch(
actual=numerical_checker_failed_checks,
expected=expected_output,
msg="rows failing type check have not been removed by _transform_numerical_checker",
)
def test_transform_skips_failed_type_checks(self):
"""Test _transform_numerical_checker skips checks for columns which aren't numerical
when not operating in batch mode"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
# Case 1: check will not be performed as column a is not numerical
df_test = pd.DataFrame({"a": ["z", "zz", "zzz"]})
type_fails_dict = {
"a": {"actual": df_test["a"].dtypes, "expected": df["a"].dtypes}
}
numerical_checker_failed_checks = x._transform_numerical_checker(
df_test, type_fails_dict, batch_mode=False
)
h.assert_equal_dispatch(
actual=numerical_checker_failed_checks,
expected={},
msg="rows failing type check have not been removed by _transform_numerical_checker",
)
# Case 2: column a should still be checked because, even though the dtype does not
# match (int != float), the column is still numerical
df_test2 = pd.DataFrame({"a": [5, 3, 222]})
type_fails_dict2 = {
"a": {"actual": df_test2["a"].dtypes, "expected": df["a"].dtypes}
}
numerical_checker_failed_checks2 = x._transform_numerical_checker(
df_test2, type_fails_dict2, batch_mode=False
)
h.assert_equal_dispatch(
actual=numerical_checker_failed_checks2,
expected={"a": {"max idxs": [2], "maximum": {2: 222}}},
msg="rows failing type check have not been removed by _transform_numerical_checker",
)
class TestTransformValueChecker(object):
"""Tests for InputChecker._transform_value_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_value_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_value_checker, expected_arguments=["self", "X"]
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["expected_values"],), "kwargs": {}}}
x = InputChecker(categorical_columns=["b", "c"])
df = data_generators_p.create_df_2()
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_value_checker(df)
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_value_checker returns results dictionary"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
value_checker_failed_checks = x._transform_value_checker(df)
assert isinstance(
value_checker_failed_checks, dict
), f"incorrect numerical results type identified - expected: dict but got: {type(value_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_value_checker passes all the categorical checks on the training dataframe"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
value_checker_failed_checks = x._transform_value_checker(df)
assert (
value_checker_failed_checks == {}
), f"Categorical checker found failed tests - {list(value_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_value_checker captures a failed check"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
df.loc[5, "b"] = "u"
value_checker_failed_checks = x._transform_value_checker(df)
assert value_checker_failed_checks["b"]["values"] == [
"u"
], f"incorrect values saved to value_checker_failed_checks - expected: ['u'] but got: {value_checker_failed_checks['b']['values']}"
assert value_checker_failed_checks["b"]["idxs"] == [
5
], f"incorrect values saved to value_checker_failed_checks - expected: [5] but got: {value_checker_failed_checks['b']['idxs']}"
class TestTransformDatetimeChecker(object):
"""Tests for InputChecker._transform_datetime_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_datetime_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_datetime_checker,
expected_arguments=["self", "X", "type_fails", "batch_mode"],
expected_default_values=(
{},
False,
),
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["datetime_values"],), "kwargs": {}}}
x = InputChecker(datetime_columns=["d"])
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.nan,
]
)
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_datetime_checker(df, {})
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_datetime_checker returns results dictionary"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.nan,
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
datetime_checker_failed_checks = x._transform_datetime_checker(df, {})
assert isinstance(
datetime_checker_failed_checks, dict
), f"incorrect datetime results type identified - expected: dict but got: {type(datetime_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_datetime_checker passes all the numerical checks on the training dataframe"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.nan,
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
datetime_checker_failed_checks = x._transform_datetime_checker(df, {})
assert (
datetime_checker_failed_checks == {}
), f"Datetime checker found failed tests - {list(datetime_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_datetime_checker captures a failed check"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.nan,
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
outliers_1 = pd.to_datetime("15/09/2017", utc=False)
outliers_2 = pd.to_datetime("13/09/2017", utc=False)
df.loc[0, "d"] = outliers_1
df.loc[1, "d"] = outliers_2
datetime_checker_failed_checks = x._transform_datetime_checker(df, {})
results = datetime_checker_failed_checks["d"]["minimum"]
assert results[0] == outliers_1, (
f"incorrect values saved to datetime_checker_failed_checks - "
f"expected: {outliers_1} but got: {results[0]} "
)
assert results[1] == outliers_2, (
f"incorrect values saved to datetime_checker_failed_checks - "
f"expected: {outliers_2} but got: {results[1]} "
)
def test_transform_captures_failed_test_both_minimum_and_maximum(self):
"""Test _transform_datetime_checker captures a failed check when the check includes a maximum value and a
minimum value"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
datetime_dict = {"d": {"maximum": True, "minimum": True}}
x = InputChecker(datetime_columns=datetime_dict)
x.fit(df)
lower_outliers = pd.to_datetime("15/09/2017", utc=False)
upper_outliers = pd.to_datetime("20/01/2021", utc=False)
df.loc[0, "d"] = lower_outliers
df.loc[5, "d"] = upper_outliers
datetime_checker_failed_checks = x._transform_datetime_checker(df, {})
expected_min = {0: lower_outliers}
expected_max = {5: upper_outliers}
assert datetime_checker_failed_checks["d"]["maximum"] == expected_max, (
f"incorrect values saved to "
f"datetime_checker_failed_checks - "
f"expected: {expected_max} but got: "
f"{datetime_checker_failed_checks['d']['maximum']} "
)
assert datetime_checker_failed_checks["d"]["minimum"] == expected_min, (
f"incorrect values saved to "
f"datetime_checker_failed_checks - "
f"expected: {expected_min} but got: "
f"{datetime_checker_failed_checks['d']['minimum']} "
)
def test_transform_skips_failed_type_checks_batch_mode(self):
"""Test _transform_datetime_checker skips checks for rows which aren't datetime type
when operating in batch mode"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
df.loc[3, "d"] = 1
df.loc[4, "d"] = "z"
df.loc[5, "d"] = pd.to_datetime("20/09/2011", utc=False)
type_fails_dict = {
"d": {
"idxs": [3, 4],
"actual": {3: "int", 4: "str"},
"expected": "Timestamp",
}
}
datetime_checker_failed_checks = x._transform_datetime_checker(
df, type_fails_dict, batch_mode=True
)
h.assert_equal_dispatch(
actual=datetime_checker_failed_checks,
expected={
"d": {
"minimum": {5: pd.to_datetime("20/09/2011", utc=False)},
"min idxs": [5],
}
},
msg="rows failing type check have not been removed by _transform_datetime_checker",
)
def test_transform_skips_failed_type_checks(self):
"""Test _transform_datetime_checker skips checks for columns which aren't datetime
when not operating in batch mode"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
df_test = pd.DataFrame({"d": ["z", "zz", "zzz"]})
type_fails_dict = {
"d": {"actual": df_test["d"].dtypes, "expected": df["d"].dtypes}
}
datetime_checker_failed_checks = x._transform_datetime_checker(
df_test, type_fails_dict, batch_mode=False
)
h.assert_equal_dispatch(
actual=datetime_checker_failed_checks,
expected={},
msg="rows failing type check have not been removed by _transform_datetime_checker",
)
class TestTransform(object):
"""Tests for InputChecker.transform()."""
def test_arguments(self):
"""Test that transform has expected arguments."""
h.test_function_arguments(
func=InputChecker.transform,
expected_arguments=["self", "X", "batch_mode"],
expected_default_values=(False,),
)
def test_super_transform_called(self, mocker):
"""Test super transform is called by the transform method."""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
spy = mocker.spy(tubular.base.BaseTransformer, "transform")
df = x.transform(df)
assert (
spy.call_count == 1
), "unexpected number of calls to tubular.base.BaseTransformer.transform with transform"
def test_transform_returns_df(self):
"""Test fit returns df"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker()
x.fit(df)
df_transformed = x.transform(df)
assert df_transformed.equals(
df
), "Returned value from InputChecker.transform not as expected."
def test_batch_mode_transform_returns_df(self):
"""Test fit returns df"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker()
x.fit(df)
df_transformed, bad_df = x.transform(df, batch_mode=True)
assert df_transformed.equals(
df
), "Returned value from InputChecker.transform not as expected."
h.assert_equal_dispatch(
expected=df,
actual=df_transformed,
msg="Returned df of passed rows from InputChecker.transform not as expected.",
)
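# with no failing rows the returned bad_df should be empty but still carry the original columns plus "failed_checks"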
h.assert_equal_dispatch(
expected=pd.DataFrame(
columns=df.columns.values.tolist() + ["failed_checks"]
),
actual=bad_df,
msg="Returned df of failed rows from InputChecker.transform not as expected.",
)
def test_check_df_is_empty_called(self, mocker):
"""Test check is df empty is called by the transform method."""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
spy = mocker.spy(input_checker.checker.InputChecker, "_df_is_empty")
df = x.transform(df)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker._df_is_empty with transform"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
expected_pos_args_0 = (x, "scoring dataframe", df)
h.assert_equal_dispatch(
expected=expected_pos_args_0,
actual=call_0_pos_args,
msg="positional args unexpected in _df_is_empty call for scoring dataframe argument",
)
def test_non_optional_transforms_always_called(self, mocker):
"""Test non-optional checks are called by the transform method irrespective of categorical_columns,
numerical_columns & datetime_columns values."""
x = InputChecker(
numerical_columns=None, categorical_columns=None, datetime_columns=None
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
spy_null = mocker.spy(
input_checker.checker.InputChecker, "_transform_null_checker"
)
spy_type = mocker.spy(
input_checker.checker.InputChecker, "_transform_type_checker"
)
df = x.transform(df)
assert spy_null.call_count == 1, (
"unexpected number of calls to _transform_null_checker with transform when numerical_columns and "
"categorical_columns set to None "
)
assert spy_type.call_count == 1, (
"unexpected number of calls to _transform_type_checker with transform when numerical_columns and "
"categorical_columns set to None "
)
def test_optional_transforms_not_called(self, mocker):
"""Test optional checks are not called by the transform method."""
x = InputChecker(
numerical_columns=None, categorical_columns=None, datetime_columns=None
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
spy_numerical = mocker.spy(
input_checker.checker.InputChecker, "_transform_numerical_checker"
)
spy_categorical = mocker.spy(
input_checker.checker.InputChecker, "_transform_value_checker"
)
spy_datetime = mocker.spy(
input_checker.checker.InputChecker, "_transform_datetime_checker"
)
df = x.transform(df)
assert (
spy_numerical.call_count == 0
), "unexpected number of calls to _transform_numerical_checker with transform when numerical_columns set to None"
assert (
spy_categorical.call_count == 0
), "unexpected number of calls to _transform_value_checker with transform when categorical_columns set to None"
assert (
spy_datetime.call_count == 0
), "unexpected number of calls to _transform_datetime_checker with transform when datetime_columns set to None"
def test_raise_exception_if_checks_fail_called_no_optionals(self, mocker):
"""Test raise exception is called by the transform method when categorical, numerical_& datetime columns set
to None."""
x = InputChecker()
df = data_generators_p.create_df_2()
x.fit(df)
spy = mocker.spy(
input_checker.checker.InputChecker, "raise_exception_if_checks_fail"
)
df = x.transform(df)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker.raise_exception_if_checks_fail with transform"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
value_failed_checks = {}
numerical_failed_checks = {}
datetime_failed_checks = {}
type_failed_checks = x._transform_type_checker(df)
null_failed_checks = x._transform_null_checker(df)
expected_pos_args_0 = (
x,
type_failed_checks,
null_failed_checks,
value_failed_checks,
numerical_failed_checks,
datetime_failed_checks,
)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in raise_exception_if_checks_fail call in transform method"
def test_raise_exception_if_checks_fail_called_all_checks(self, mocker):
"""Test raise exception is called by the transform method when categorical_columns and numerical_columns set
to None."""
x = InputChecker(
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
spy = mocker.spy(
input_checker.checker.InputChecker, "raise_exception_if_checks_fail"
)
df = x.transform(df)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker.raise_exception_if_checks_fail with transform"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
value_failed_checks = x._transform_value_checker(df)
numerical_failed_checks = x._transform_numerical_checker(df)
datetime_failed_checks = x._transform_datetime_checker(df)
type_failed_checks = x._transform_type_checker(df)
null_failed_checks = x._transform_null_checker(df)
expected_pos_args_0 = (
x,
type_failed_checks,
null_failed_checks,
value_failed_checks,
numerical_failed_checks,
datetime_failed_checks,
)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in raise_exception_if_checks_fail call in transform method"
def test_separate_passes_and_fails_called_no_optionals(self, mocker):
"""Test raise exception is called by the transform method when categorical, numerical_& datetime columns set
to None."""
x = InputChecker()
df = data_generators_p.create_df_2()
orig_df = df.copy(deep=True)
x.fit(df)
spy = mocker.spy(
input_checker.checker.InputChecker, "separate_passes_and_fails"
)
df, bad_df = x.transform(df, batch_mode=True)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker.separate_passes_and_fails with transform"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
value_failed_checks = {}
numerical_failed_checks = {}
datetime_failed_checks = {}
type_failed_checks = x._transform_type_checker(df)
null_failed_checks = x._transform_null_checker(df)
expected_pos_args_0 = (
x,
type_failed_checks,
null_failed_checks,
value_failed_checks,
numerical_failed_checks,
datetime_failed_checks,
orig_df,
)
h.assert_equal_dispatch(
expected=expected_pos_args_0,
actual=call_0_pos_args,
msg="positional args unexpected in separate_passes_and_fails call in transform method",
)
def test_separate_passes_and_fails_called_all_checks(self, mocker):
"""Test raise exception is called by the transform method when categorical_columns and numerical_columns set
to None."""
x = InputChecker(
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
orig_df = df.copy(deep=True)
x.fit(df)
spy = mocker.spy(
input_checker.checker.InputChecker, "separate_passes_and_fails"
)
df, bad_df = x.transform(df, batch_mode=True)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker.separate_passes_and_fails with transform"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
value_failed_checks = x._transform_value_checker(df)
numerical_failed_checks = x._transform_numerical_checker(df)
datetime_failed_checks = x._transform_datetime_checker(df)
type_failed_checks = x._transform_type_checker(df)
null_failed_checks = x._transform_null_checker(df)
expected_pos_args_0 = (
x,
type_failed_checks,
null_failed_checks,
value_failed_checks,
numerical_failed_checks,
datetime_failed_checks,
orig_df,
)
h.assert_equal_dispatch(
expected=expected_pos_args_0,
actual=call_0_pos_args,
msg="positional args unexpected in separate_passes_and_fails call in transform method",
)
class TestRaiseExceptionIfChecksFail(object):
"""Tests for InputChecker.raise_exception_if_checks_fail()."""
def test_arguments(self):
"""Test that raise_exception_if_checks_fail has expected arguments."""
h.test_function_arguments(
func=InputChecker.raise_exception_if_checks_fail,
expected_arguments=[
"self",
"type_failed_checks",
"null_failed_checks",
"value_failed_checks",
"numerical_failed_checks",
"datetime_failed_checks",
],
expected_default_values=None,
)
def test_no_failed_checks_before_transform(self):
"""Test validation_failed_checks is not present before transform"""
x = InputChecker()
df = data_generators_p.create_df_2()
x.fit(df)
assert (
hasattr(x, "validation_failed_checks") is False
), "validation_failed_checks attribute present before transform"
def test_validation_failed_checks_saved(self):
"""Test raise_exception_if_checks_fail saves the validation results"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
df = x.transform(df)
assert (
hasattr(x, "validation_failed_checks") is True
), "validation_failed_checks attribute not present after transform"
assert isinstance(
x.validation_failed_checks, dict
), f"incorrect validation results type identified - expected: dict but got: {type(x.validation_failed_checks)}"
def test_correct_validation_failed_checks(self):
"""Test raise_exception_if_checks_fail saves and prints the correct error message"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
df = x.transform(df)
assert isinstance(
x.validation_failed_checks["Failed type checks"], dict
), f"incorrect type validation results type identified - expected: dict but got: {type(x.validation_failed_checks['Failed type checks'])}"
assert isinstance(
x.validation_failed_checks["Failed null checks"], dict
), f"incorrect null validation results type identified - expected: dict but got: {type(x.validation_failed_checks['Failed null checks'])}"
assert isinstance(
x.validation_failed_checks["Failed categorical checks"], dict
), f"incorrect categorical validation results type identified - expected: dict but got: {type(x.validation_failed_checks['Failed categorical checks'])}"
assert isinstance(
x.validation_failed_checks["Failed numerical checks"], dict
), f"incorrect numerical validation results type identified - expected: dict but got: {type(x.validation_failed_checks['Failed numerical checks'])}"
assert isinstance(
x.validation_failed_checks["Failed datetime checks"], dict
), f"incorrect datetime validation results type identified - expected: dict but got: {type(x.validation_failed_checks['Failed datetime checks'])}"
assert isinstance(
x.validation_failed_checks["Exception message"], str
), f"incorrect exception message type identified - expected: str but got: {type(x.validation_failed_checks['Exception message'])}"
def test_input_checker_error_raised_type(self):
"""Test InputCheckerError is raised if type test fails"""
x = InputChecker()
df = data_generators_p.create_df_2()
x.fit(df)
df.loc[5, "a"] = "a"
with pytest.raises(InputCheckerError):
df = x.transform(df)
def test_input_checker_error_raised_nulls(self):
"""Test InputCheckerError is raised if null test fails"""
x = InputChecker()
df = data_generators_p.create_df_2()
df["b"] = df["b"].fillna("a")
x = InputChecker()
x.fit(df)
df.loc[5, "b"] = np.nan
with pytest.raises(InputCheckerError):
df = x.transform(df)
def test_input_checker_error_raised_categorical(self):
"""Test InputCheckerError is raised if categorical test fails"""
x = InputChecker(categorical_columns=["b"])
df = data_generators_p.create_df_2()
x.fit(df)
df.loc[5, "b"] = "u"
with pytest.raises(InputCheckerError):
df = x.transform(df)
def test_input_checker_error_raised_numerical(self):
"""Test InputCheckerError is raised if numerical test fails"""
x = InputChecker(numerical_columns=["a"])
df = data_generators_p.create_df_2()
x.fit(df)
df.loc[0, "a"] = -1
with pytest.raises(InputCheckerError):
df = x.transform(df)
def test_input_checker_error_raised_datetime(self):
"""Test InputCheckerError is raised if datetime test fails"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.nan,
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
outliers_1 = pd.to_datetime("15/09/2017")
outliers_2 = pd.to_datetime("13/09/2017")
df.loc[0, "d"] = outliers_1
df.loc[1, "d"] = outliers_2
with pytest.raises(InputCheckerError):
df = x.transform(df)
def test_validation_failed_checks_correctly_stores_fails(self):
"""Test correct data is saved in validation_failed_checks after a failed check exception"""
x = InputChecker()
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.nan,
]
)
df["b"] = df["b"].fillna("a")
x.fit(df)
df.loc[0, "a"] = -1
df.loc[4, "b"] = "u"
df.loc[5, "b"] = np.nan
df["c"] = [True, True, False, True, True, False, np.nan]
df["c"] = df["c"].astype("bool")
df.loc[0, "d"] = | pd.to_datetime("15/09/2017") | pandas.to_datetime |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Series,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
def test_where_unsafe_int(any_signed_int_numpy_dtype):
s = Series(np.arange(10), dtype=any_signed_int_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
expected = Series(
list(range(2, 7)) + list(range(5, 10)),
dtype=any_signed_int_numpy_dtype,
)
tm.assert_series_equal(s, expected)
def test_where_unsafe_float(float_numpy_dtype):
s = Series(np.arange(10), dtype=float_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
data = list(range(2, 7)) + list(range(5, 10))
expected = Series(data, dtype=float_numpy_dtype)
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize(
"dtype,expected_dtype",
[
(np.int8, np.float64),
(np.int16, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
(np.float32, np.float32),
(np.float64, np.float64),
],
)
def test_where_unsafe_upcast(dtype, expected_dtype):
# see gh-9743
s = Series(np.arange(10), dtype=dtype)
values = [2.5, 3.5, 4.5, 5.5, 6.5]
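# masked assignment of float values upcasts integer dtypes to float64 and leaves float dtypes unchanged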
mask = s < 5
expected = Series(values + list(range(5, 10)), dtype=expected_dtype)
s[mask] = values
tm.assert_series_equal(s, expected)
def test_where_unsafe():
# see gh-9731
s = Series(np.arange(10), dtype="int64")
values = [2.5, 3.5, 4.5, 5.5]
mask = s > 5
expected = Series(list(range(6)) + values, dtype="float64")
s[mask] = values
tm.assert_series_equal(s, expected)
# see gh-3235
s = Series(np.arange(10), dtype="int64")
mask = s < 5
s[mask] = range(2, 7)
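# assigning integers into an int64 Series keeps the int64 dtype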
expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
tm.assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype="int64")
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
tm.assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
msg = "cannot assign mismatch length to masked array"
with pytest.raises(ValueError, match=msg):
s[mask] = [5, 4, 3, 2, 1]
with pytest.raises(ValueError, match=msg):
s[mask] = [0] * 5
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
tm.assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
assert isna(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isna(s)]
expected = Series(np.nan, index=[9])
tm.assert_series_equal(result, expected)
def test_where():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
tm.assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
tm.assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert s.shape == rs.shape
assert rs is not s
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
tm.assert_series_equal(rs, expected)
expected = s2.abs()
expected.iloc[0] = s2[0]
rs = s2.where(cond[:3], -s2)
tm.assert_series_equal(rs, expected)
def test_where_non_keyword_deprecation():
# GH 41485
s = Series(range(5))
msg = (
"In a future version of pandas all arguments of "
"Series.where except for the arguments 'cond' "
"and 'other' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = s.where(s > 1, 10, False)
expected = Series([10, 10, 2, 3, 4])
tm.assert_series_equal(expected, result)
def test_where_error():
s = Series(np.random.randn(5))
cond = s > 0
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.where(1)
with pytest.raises(ValueError, match=msg):
s.where(cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
tm.assert_series_equal(s, expected)
# failures
msg = "cannot assign mismatch length to masked array"
with pytest.raises(ValueError, match=msg):
s[[True, False]] = [0, 2, 3]
msg = (
"NumPy boolean array indexing assignment cannot assign 0 input "
"values to the 1 output values where the mask is true"
)
with pytest.raises(ValueError, match=msg):
s[[True, False]] = []
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where_array_like(klass):
# see gh-15414
s = Series([1, 2, 3])
cond = [False, True, True]
expected = Series([np.nan, 2, 3])
result = s.where(klass(cond))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"cond",
[
[1, 0, 1],
Series([2, 5, 7]),
["True", "False", "True"],
[Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")],
],
)
def test_where_invalid_input(cond):
# see gh-15414: only boolean arrays accepted
s = Series([1, 2, 3])
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
s.where(cond)
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.where([True])
def test_where_ndframe_align():
msg = "Array conditional must be same shape as self"
s = Series([1, 2, 3])
cond = [True]
with pytest.raises(ValueError, match=msg):
s.where(cond)
expected = Series([1, np.nan, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
cond = np.array([False, True, False, True])
with pytest.raises(ValueError, match=msg):
s.where(cond)
expected = Series([np.nan, 2, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
def test_where_setitem_invalid():
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
msg = (
lambda x: f"cannot set using a {x} indexer with a "
"different length than the value"
)
# slice
s = Series(list("abc"))
with pytest.raises(ValueError, match=msg("slice")):
s[0:3] = list(range(27))
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
tm.assert_series_equal(s.astype(np.int64), expected)
# slice with step
s = Series(list("abcdef"))
with pytest.raises(ValueError, match=msg("slice")):
s[0:4:2] = list(range(27))
s = Series(list("abcdef"))
s[0:4:2] = list(range(2))
expected = Series([0, "b", 1, "d", "e", "f"])
tm.assert_series_equal(s, expected)
# neg slices
s = Series(list("abcdef"))
with pytest.raises(ValueError, match=msg("slice")):
s[:-1] = list(range(27))
s[-3:-1] = list(range(2))
expected = Series(["a", "b", "c", 0, 1, "f"])
tm.assert_series_equal(s, expected)
# list
s = Series(list("abc"))
with pytest.raises(ValueError, match=msg("list-like")):
s[[0, 1, 2]] = list(range(27))
s = Series(list("abc"))
with pytest.raises(ValueError, match=msg("list-like")):
s[[0, 1, 2]] = list(range(2))
# scalar
s = Series(list("abc"))
s[0] = list(range(10))
expected = Series([list(range(10)), "b", "c"])
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("size", range(2, 6))
@pytest.mark.parametrize(
"mask", [[True, False, False, False, False], [True, False], [False]]
)
@pytest.mark.parametrize(
"item", [2.0, np.nan, np.finfo(float).max, np.finfo(float).min]
)
# Test numpy arrays, lists and tuples as the input to be
# broadcast
@pytest.mark.parametrize(
"box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)]
)
def test_broadcast(size, mask, item, box):
selection = np.resize(mask, size)
data = np.arange(size, dtype=float)
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series(
[item if use_item else data[i] for i, use_item in enumerate(selection)]
)
s = Series(data)
s[selection] = box(item)
tm.assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, box(item))
tm.assert_series_equal(result, expected)
s = Series(data)
result = s.mask(selection, box(item))
tm.assert_series_equal(result, expected)
def test_where_inplace():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
tm.assert_series_equal(rs.dropna(), s[cond])
tm.assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
tm.assert_series_equal(rs, s.where(cond, -s))
def test_where_dups():
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = | Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2]) | pandas.Series |
import os
from os.path import join as pjoin
import re
import multiprocessing as mp
from multiprocessing import Pool
from Bio.Seq import Seq
from Bio import SeqIO, SeqFeature
from Bio.SeqRecord import SeqRecord
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
import time
import sqlite3 as sql
from collections import defaultdict
import gc
import sys
from gtr_utils import change_ToWorkingDirectory, make_OutputDirectory, merge_ManyFiles, multiprocesssing_Submission, generate_AssemblyId, chunks
def blast_Seed(assembly_id, query_file, blast_db_path, blast_hits_path):
'''
Run blastp with the seed query against the pre-built BLAST database for this assembly and write the hits to <assembly_id>.csv in blast_hits_path.
'''
full_db_path = pjoin(blast_db_path, assembly_id)
full_hits_path = pjoin(blast_hits_path, assembly_id+'.csv')
os.system('blastp -query {} -db {} -max_target_seqs 100 -evalue 1e-6 -outfmt "10 qseqid sseqid mismatch positive gaps ppos pident qcovs evalue bitscore qframe sframe sstart send slen qstart qend qlen" -num_threads 1 -out {}'.format(query_file, full_db_path, full_hits_path))
## write seed region gene content to file ##
def test_RegionOverlap(x1,x2,y1,y2):
'''
Asks: is the largest of the smallest distances less than or equal to the smallest of the largest distances?
Returns True or False
'''
return( max(x1,y1) <= min(x2,y2) )
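# Illustrative sketch (made-up coordinates): hits spanning (100, 500) and (400, 900)
# overlap because max(100, 400) = 400 <= min(500, 900) = 500, so
# test_RegionOverlap(100, 500, 400, 900) returns True, while
# test_RegionOverlap(100, 200, 300, 400) returns False.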
def filter_BlastHits(df, identity_cutoff, coverage_cutoff):
'''
Remove hits that do not meet identity and coverage cutoffs
'''
df = df[ (df['pident'] >= identity_cutoff ) & (df['qcovs'] >= coverage_cutoff) ]
return(df)
def extract_SeedRegions(assembly_id, upstream_search_length, downstream_search_length, identity_cutoff, coverage_cutoff, hits_cutoff):
'''
Check for overlaps in seed hits. When this occurs, pick the best hit for overlap.
Extract x basepairs upstream and downstream of a seed blast hit.
Write to sql database under table 'seed_regions'
'''
#--------- Troubleshooting --------#
# pd.set_option('display.max_columns', None)
# upstream_search_length, downstream_search_length, identity_cutoff, coverage_cutoff, hits_cutoff = 10000,10000,20,80, 1
# # UserInput_main_dir = '/projects/b1042/HartmannLab/alex/GeneGrouper_test/testbed/dataset1/test1'#'/Users/owlex/Dropbox/Documents/Northwestern/Hartmann_Lab/syntenease_project/gtr/testbed/dataset1/test1'
# UserInput_main_dir = '/Users/owlex/Dropbox/Documents/Northwestern/Hartmann_Lab/syntenease_project/gtr/testbed/dataset3/test1'
# UserInput_output_dir_name = pjoin(UserInput_main_dir,'pdua')
# os.chdir(UserInput_main_dir)
# conn = sql.connect(pjoin(UserInput_main_dir,'genomes.db')) # genome database holds all base info
# conn2 = sql.connect(pjoin(UserInput_output_dir_name,'seed_results.db')) # seed_results database holds all seed search specific info.
# assembly_id = 'GCF_009800085' #GCF_009800085_02009 dbscan_label 3 ,global_strand -1 has more
# assembly_id = 'GCF_000583895' #GCF_000583895_02788 dbscan_label 13 ,global_strand 1 has fewer
# assembly_id = 'GCF_001077835'
# assembly_id = 'GCF_000251025'
#--------- Troubleshooting --------#
try:
df_hits = pd.read_csv(pjoin('temp_blast_results',assembly_id+'.csv'),names=['qseqid','sseqid','mismatch', 'positive','gaps', 'ppos','pident','qcovs','evalue','bitscore','qframe','sframe','sstart', 'send', 'slen', 'qstart', 'qend', 'qlen'])
except:
return( | pd.DataFrame() | pandas.DataFrame |
import fiona
import numpy as np
import pandas as pd
from shapely.geometry import shape, MultiPolygon
from shapely import speedups
from matplotlib.collections import PatchCollection
from descartes import PolygonPatch
import pickle
if speedups.available:
speedups.enable()
# location of data
refDataCSV = 'data/EU-referendum-result-data.csv'
refDataNICSV = 'data/NIrefData.csv' # data collated from www.eoni.org.uk
# http://www.eoni.org.uk/getmedia/fc176a12-39ee-46b6-8587-68b5b948762d/EU-REFERENDUM-2016-NORTHERN-IRELAND-COUNT-
# TOTALS-DECLARATION
# http://www.eoni.org.uk/getmedia/c371ecda-c0b7-4914-83ca-50d38b5ca8ea/EU-REFERENDUM-2016-CONSTITUENCY-COUNT-TOTALS
# http://www.eoni.org.uk/getmedia/65f3947d-90bf-4b3b-aaf0-2e5fa760e706/EU-REFERENDUM-2016-CONSTITUENCY-TURNOUT_1
GBRshpfile = 'data/distorted/GBR/GBRremainPlusLeave.shp'
NIshpfile = 'data/distorted/NI/NIremainPlusLeave.shp' # NI shapefile from here:
# http://osni.spatial-ni.opendata.arcgis.com/datasets/563dc2ec3d9943428e3fe68966d40deb_3
GIBshpfile = 'data/GIB_adm/GIB_adm0.shp' # http://www.diva-gis.org
def makePolyLists(collection):
polyList = []
polyCount = []
for entry in collection:
if entry['geometry']['type'] == 'Polygon':
x = shape(entry['geometry'])
polyList.append(x)
polyCount.append(1)
if entry['geometry']['type'] == 'MultiPolygon':
x = shape(entry['geometry'])
polyCount.append(len(x))
for i, poly in enumerate(x):
polyList.append(x[i])
return polyList, polyCount
# convert and format csv data into dataframes
GBRdataDF = pd.read_csv(refDataCSV, sep=',', header=0, index_col=0) # create dataframe
GBRdataDF.drop(['Region_Code', 'ExpectedBallots', 'Votes_Cast', 'Valid_Votes', 'No_official_mark',
'Voting_for_both_answers', 'Writing_or_mark',
'Unmarked_or_void', 'Pct_Rejected'], axis=1, inplace=True) # drop unwanted bumf
GIBdataDF = GBRdataDF.loc[382] # Gibraltar data
NItotalDF = GBRdataDF.loc[381] # N.I. *as a whole* data
GBRdataDF.drop([381, 382], inplace=True) # removing N. Ireland and Gibraltar so is just GBR
NIdataDF = pd.read_csv(refDataNICSV, sep=',', header=0, index_col=0) # create dataframe
NIdataDF['Pct_Remain'] = (100.0 * NIdataDF.Remain)/(NIdataDF.Remain+NIdataDF.Leave)
NIdataDF['Pct_Leave'] = 100.0 - NIdataDF['Pct_Remain']
# create multipolygon objects from shapefiles
GBRcollection = fiona.open(GBRshpfile)
# GBRmp = MultiPolygon([shape(entry['geometry']) for entry in GBRcollection]) # create list of polygons from shapefile
GBRpolyList, GBRpolyCount = makePolyLists(GBRcollection)
GBRmp = MultiPolygon(GBRpolyList)
# map the referendum data to the areas in the shapefiles/multipolygon objects
areaProperties = []
for i, entry in enumerate(GBRcollection):
areaCode = entry['properties']['CODE']
turnOut = GBRdataDF[GBRdataDF['Area_Code'] == areaCode]['Pct_Turnout']
pctLeave = GBRdataDF[GBRdataDF['Area_Code'] == areaCode]['Pct_Leave']
pctRemain = GBRdataDF[GBRdataDF['Area_Code'] == areaCode]['Pct_Remain']
areaProperties.append([i, turnOut.values, pctLeave.values, pctRemain.values])
# referendum data reordered to match order of polygons
GBRpropertiesDF = | pd.DataFrame(areaProperties, columns=('id', 'Turnout', 'Leave', 'Remain')) | pandas.DataFrame |
# %%
"""
# Sentiment-based product recommendation system:
The following tasks are performed:
1. Data sourcing and sentiment analysis
2. Building a recommendation system
3. Recommending top 5 products
4. Deploying the end-to-end project with a user interface
"""
# %%
# Importing libraries
import numpy as np
import pandas as pd
import random
import pickle
import pylab
from numpy import *
import matplotlib.pyplot as plt
import seaborn as sns
import time
from wordcloud import WordCloud
from collections import Counter
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
import re
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import RandomizedSearchCV
from imblearn.over_sampling import SMOTE
from collections import Counter
from sklearn.metrics import f1_score, classification_report,precision_score,recall_score,confusion_matrix, roc_auc_score, roc_curve
from sklearn.metrics.pairwise import pairwise_distances
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
# %%
#df = pd.read_csv('/content/gdrive/MyDrive/sample30.csv')
df = pd.read_csv('sample30.csv')
# %%
"""
### Preprocessing:
Steps followed:
1. Handling null values
2. Preprocessing reviews text and visualization
"""
# %%
"""
#### 1. Handling null values:
Replace NaN values in reviews_title with an empty string and merge reviews_text and reviews_title into a single reviews column.
"""
# %%
df.isnull().sum()
# %%
# the target column has only one null value; the mapping below sends it to 0
# change positive to 1 and negative to 0
df['user_sentiment']= df['user_sentiment'].apply(lambda x:1 if x=='Positive' else 0)
# %%
# Replace nulls
df['reviews_title'].fillna('',inplace=True)
# %%
# merge reviews columns
df['reviews']=df['reviews_text']+df['reviews_title']
df.drop(['reviews_text','reviews_title'],axis=1,inplace=True)
df.head()
# %%
# df_clean -> cleaned columns for recommendation and sentiment models
df_clean = df[['name','reviews_username','reviews','reviews_rating','user_sentiment']]
df_clean.head()
# %%
df_clean.dropna(inplace=True)
# %%
df_clean.info()
# %%
"""
#### 2. Text preprocessing:
Convert the text to lowercase, then remove stop words and special characters.
"""
# %%
# function to convert text into lowercase, remove stopwords and special characters
def text_process(token):
tokens = word_tokenize(token)
words_lower = [word.lower() for word in tokens]
words_nostop = [word for word in words_lower if word not in stopwords.words('english')]
text = ' '.join(re.sub('[^a-zA-Z0-9]+', ' ', word) for word in words_nostop)
return text
# %%
# text preprocessing
df_clean['reviews'] = df_clean['reviews'].apply(lambda x:text_process(x))
# %%
"""
# Sentiment analysis:
The sentiment analysis model is built from the reviews given by users.
Steps followed:
1. Feature extraction using tf-idf
2. Handling imbalance
3. Build 3 ML models
"""
# %%
# dataframe for sentiment analysis
Review = df_clean[['name','reviews','user_sentiment']]
Review.head()
# %%
# splitting into test and train
X_train, X_test, y_train, y_test = train_test_split(Review['reviews'], Review['user_sentiment'],test_size=0.30, random_state=42)
# %%
X_train.shape
# %%
"""
#### 1. Feature extraction:
Used tf-idf vectorizer to extract features from text.
"""
# %%
# tf-idf
vectorizer= TfidfVectorizer(max_features=3000, lowercase=True, analyzer='word', stop_words= 'english')
tf_x_train = vectorizer.fit_transform(X_train).toarray()
tf_x_test = vectorizer.transform(X_test)
# %%
tf_x_train.shape
# %%
"""
#### 2. Handling imbalance:
Used SMOTE to handle class imbalance.
"""
# %%
# SMOTE
print('Before Sampling')
print(Counter(y_train))
sm = SMOTE(random_state=42)
X_train_sm ,y_train_sm = sm.fit_sample(tf_x_train,y_train)
print('After Sampling')
print(Counter(y_train_sm))
# %%
"""
#### 3. Model building:
"""
# %%
"""
#### Logistic Regression model:
"""
# %%
lr=LogisticRegression()
params={'C':[10, 1, 0.5, 0.1],'penalty':['l1','l2'],'class_weight':['balanced']}
# Create grid search using 4-fold cross validation
grid_search = GridSearchCV(lr, params, cv=4, scoring='roc_auc', n_jobs=-1)
grid_search.fit(X_train_sm, y_train_sm)
model_LR = grid_search.best_estimator_
model_LR.fit(X_train_sm, y_train_sm)
# %%
# Logitic model evalution
y_prob_test=model_LR.predict_proba(tf_x_test)
y_pred_test=model_LR.predict(tf_x_test)
print('Test Score:')
print('Confusion Matrix')
print('='*60)
print(confusion_matrix(y_test,y_pred_test),"\n")
print('Classification Report')
print('='*60)
print(classification_report(y_test,y_pred_test),"\n")
print('AUC-ROC=',roc_auc_score(y_test, y_prob_test[:,1]))
fpr_LR, tpr_LR, thresholds_LR = roc_curve(y_test, y_prob_test[:,1])
AUC_ROC_LR = roc_auc_score(y_test, y_prob_test[:,1])
# %%
# store tf-idf model
with open("tfidf_model.pkl", 'wb') as file:
pickle.dump(vectorizer, file)
# %%
# save logistic regression model
with open('LR_sentiment_model.pkl', 'wb') as file:
pickle.dump(model_LR, file)
# %%
# save the cleaned dataframe used by the sentiment model
with open('df_sentiment_model.pkl', 'wb') as file:
pickle.dump(df_clean, file)
# %%
"""
# Recommendation system:
"""
# %%
"""
The recommendation system is built from the user name, the product name and the review ratings.
"""
# %%
# create recommedation data frame
recomm_df = df_clean[['reviews_username','reviews_rating','name']]
recomm_df.head()
# %%
"""
**Create train and test set**
"""
# %%
# Test and Train split of the dataset
train, test = train_test_split(recomm_df, test_size=0.30, random_state=31)
# %%
# Pivot the train dataset into matrix format in which columns are products and the rows are user names.
df_pivot = train.pivot_table(
index='reviews_username',
columns='name',
values='reviews_rating'
).fillna(0)
df_pivot.head()
# %%
df_pivot.shape
# %%
"""
**Creating Dummy train and test**
In a recommendation system we do not want to recommend a product that the user has already rated or, in some cases, has already interacted with through a view, like, share or comment. To eliminate these products from the recommendation list, we use a 'dummy' data set.
"""
# %%
# Copy the train dataset into dummy_train
dummy_train = train.copy()
# %%
# Products not rated by the user are marked as 1 for prediction.
dummy_train['reviews_rating'] = dummy_train['reviews_rating'].apply(lambda x: 0 if x>=1 else 1)
# %%
# Convert the dummy train dataset into matrix format.
dummy_train = dummy_train.pivot_table(
index='reviews_username',
columns='name',
values='reviews_rating'
).fillna(1)
dummy_train.head()
# %%
dummy_train.shape
# %%
"""
#### User Similarity Matrix:
"""
# %%
"""
### Using adjusted Cosine similarity:
Here we keep the NaN values and compute each user's mean rating only over the products that the user has actually rated.
"""
# %%
df_pivot = train.pivot_table(
index='reviews_username',
columns='name',
values='reviews_rating'
)
df_pivot.head(3)
# %%
# Normalising each user's product ratings around a zero mean
mean = np.nanmean(df_pivot, axis=1)
df_subtracted = (df_pivot.T-mean).T
df_subtracted.head()
# %%
"""
#### Find cosine similarity:
Used pairwise distance to find similarity.
"""
# %%
# Creating the User Similarity Matrix using pairwise_distance function.
user_correlation = 1 - pairwise_distances(df_subtracted.fillna(0), metric='cosine')
user_correlation[np.isnan(user_correlation)] = 0
print(user_correlation)
# %%
"""
#### Prediction:
"""
# %%
# Ignore the correlation for values less than 0.
user_correlation[user_correlation<0]=0
user_correlation
# %%
"""
The rating predicted for a user is the correlation-weighted sum of all users' ratings for that product.
"""
# %%
user_predicted_ratings = np.dot(user_correlation, df_pivot.fillna(0))
user_predicted_ratings
# %%
user_predicted_ratings.shape
# %%
# user_final_rating -> this contains predicted ratings for products
user_final_rating = np.multiply(user_predicted_ratings,dummy_train)
user_final_rating.head()
# %%
"""
#### Find the top 5 recommendations for the *user*
"""
# %%
# Take the user ID as input [bob,00sab00]
#user_input = str(input("Enter your user name"))
user_input = str('00sab00') # for checking
# %%
# Recommended products for the selected user based on ratings
out_recommendation = user_final_rating.loc[user_input].sort_values(ascending=False)[:20]
out_recommendation
# %%
"""
#### Evaluation - User User
"""
# %%
# Find out the common users of test and train dataset.
common = test[test.reviews_username.isin(train.reviews_username)]
common.shape
# %%
# convert into the user-product matrix.
common_user_based_matrix = common.pivot_table(index='reviews_username', columns='name',
values='reviews_rating')
# %%
# Convert the user_correlation matrix into dataframe.
user_correlation_df = | pd.DataFrame(user_correlation) | pandas.DataFrame |
"""JFAが公開する試合情報を読み込んでCSV化
まずは、プリンス関東のデータを読み込む仕様として、今後パラメータ選択でいろいろなカテゴリを読みに行く作りに変更
年度指定もできるようにする。
"""
import re
import pandas as pd
import json
import requests
import argparse
from typing import Dict, Any
SCHEDULE_URL = 'URL'
CSV_FILENAME = 'CSV'
GROUP_NAMES = 'GROUP'
MATCHES_IN_SECTION = 'MATCHES_IN_SECTION'
COMPETITION_CONF = {
'Olympic': {
SCHEDULE_URL: 'https://www.jfa.jp/national_team/u24_2021/tokyo_olympic_2020/group{}/match/schedule.json',
CSV_FILENAME: '../docs/csv/2021_allmatch_result-Olympic_GS.csv',
GROUP_NAMES: ['A', 'B', 'C', 'D']
},
# 'ACL2021GL': {
# SCHEDULE_URL: 'http://www.jfa.jp/match/acl_2021/group{}/match/schedule.json',
# CSV_FILENAME: '../docs/csv/2021_allmatch_result-ACL_GL.csv',
# GROUP_NAMES: ['G', 'H', 'I', 'J']
# }, # no group information available for groups A-F
'PrinceKanto': {
SCHEDULE_URL: 'https://www.jfa.jp/match_47fa/103_kanto/takamado_jfa_u18_prince2021/match/schedule.json',
CSV_FILENAME: '../docs/csv/2021_allmatch_result-PrinceKanto.csv',
GROUP_NAMES: ['']
},
'PrincePremierE': {
SCHEDULE_URL: 'https://www.jfa.jp/match/takamado_jfa_u18_premier2021/east/match/schedule.json',
CSV_FILENAME: '../docs/csv/2021_allmatch_result-PrincePremierE.csv',
GROUP_NAMES: ['']
},
'PrincePremierW': {
SCHEDULE_URL: 'https://www.jfa.jp/match/takamado_jfa_u18_premier2021/west/match/schedule.json',
CSV_FILENAME: '../docs/csv/2021_allmatch_result-PrincePremierW.csv',
GROUP_NAMES: ['']
},
'WC2022AFC_F': {
SCHEDULE_URL: 'https://www.jfa.jp/national_team/samuraiblue/worldcup2022/final_q/group{}/match/schedule.json',
CSV_FILENAME: '../docs/csv/allmatch_result-wc2022afc_final.csv',
GROUP_NAMES: ['A', 'B'],
MATCHES_IN_SECTION: 3
}
}
SCHEDULE_CONTAINER_NAME = 'matchScheduleList'
SCHEDULE_LIST_NAME = 'matchSchedule'
SECTION_NO = re.compile(r'.*(\d+).*')
REPLACE_KEY_DICT = {
'match_date': 'matchDateJpn',
'section_no': 'matchTypeName',
'start_time': 'matchTimeJpn',
'stadium': 'venue',
'home_team': 'homeTeamName',
'away_team': 'awayTeamName',
'status': 'matchStatus',
'matchNumber': 'matchNumber'
}
SCORE_DATA_KEY_LIST = {
'home_goal': 'homeScore',
'away_goal': 'awayScore',
'extraTime': 'exMatch'
}
def read_match_json(_url: str) -> Dict[str, Any]:
"""指定したURLの試合リスト情報をjfaのJSON形式で返す
"""
print(f'access {_url}...')
return json.loads(requests.get(_url).text)
def read_match_df(_url: str, matches_in_section: int=None) -> pd.DataFrame:
"""各グループの試合リスト情報を自分たちのDataFrame形式で返す
JFA形式のJSONは、1試合の情報が下記のような内容
{'matchTypeName': '第1節',
'matchNumber': '1', # どうやら、Competitionで通しの番号
'matchDate': '2021/07/22', # 未使用
'matchDateJpn': '2021/07/22',
'matchDateWeek': '木', # 未使用
'matchTime': '20:00', # 未使用
'matchTimeJpn': '20:00',
'venue': '東京スタジアム',
'venueFullName': '東京/東京スタジアム', # 未使用
'homeTeamName': '日本',
'homeTeamQualificationDescription': '', # 未使用
'awayTeamName': '南アフリカ',
'awayTeamQualificationDescription': '', # 未使用
'score': {
'homeWinFlag': False, # 未使用
'awayWinFlag': False, # 未使用
'homeScore': '',
'awayScore': '',
'homeTeamScore1st': '', # 未使用 前半得点
'awayTeamScore1st': '', # 未使用 前半得点
'homeTeamScore2nd': '', # 未使用 後半得点
'awayTeamScore2nd': '', # 未使用 後半得点
'exMatch': False,
'homeTeamScore1ex': '', # 未使用 延長前半得点
'awayTeamScore1ex': '', # 未使用 延長前半得点
'homeTeamScore2ex': '', # 未使用 延長後半得点
'awayTeamScore2ex': '', # 未使用 延長後半得点
'homePKScore': '', # 未使用 PK得点
'awayPKScore': '' # 未使用 PK得点
},
'scorer': {
'homeScorer': [], # 未使用
'awayScorer': [] # 未使用
},
'matchStatus': '',
'officialReportURL': '' # 未使用
}
"""
match_list = read_match_json(_url)[SCHEDULE_CONTAINER_NAME][SCHEDULE_LIST_NAME]
# print(match_list)
result_list = []
match_index_dict = {}
for (_count, _match_data) in enumerate(match_list):
_row = {}
for (target_key, org_key) in REPLACE_KEY_DICT.items():
_row[target_key] = _match_data[org_key]
for (target_key, org_key) in SCORE_DATA_KEY_LIST.items():
_row[target_key] = _match_data['score'][org_key]
_regexp_result = SECTION_NO.match(_row['section_no'])
if _regexp_result:
section_no = _regexp_result[1]
elif matches_in_section is not None: # no section number given, but the matches per section is known, so compute it
section_no = int(_count / matches_in_section) + 1
else: # section number unknown
section_no = 0
_row['section_no'] = section_no
if section_no not in match_index_dict:
match_index_dict[section_no] = 1
else:
match_index_dict[section_no] += 1
_row['match_index_in_section'] = match_index_dict[section_no]
# In the U18 Takamado Cup Prince Kanto league, cancellation info is, for some reason, stored in 'venueFullName', so handle it provisionally here (the string '【中止】' means 'cancelled')
if '【中止】' in _match_data['venueFullName']:
print('Cancel Game## ' + _match_data['venueFullName'])
_row['status'] = '試合中止'
else:
print('No Cancel## ' + _match_data['venueFullName'])
result_list.append(_row)
return pd.DataFrame(result_list)
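# The returned DataFrame has one row per match, with the columns named in REPLACE_KEY_DICT
# and SCORE_DATA_KEY_LIST plus 'match_index_in_section' (i.e. match_date, section_no,
# start_time, stadium, home_team, away_team, status, matchNumber, home_goal, away_goal,
# extraTime, match_index_in_section).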
def read_group(competition: str) -> None:
"""指定された大会のグループ全体を読み込んでCSV化
"""
match_df = | pd.DataFrame() | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
import collections
import pytest
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from pandas.compat import StringIO, u
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, ensure_clean)
import pandas.util.testing as tm
from .common import TestData
class TestSeriesToCSV(TestData):
def read_csv(self, path, **kwargs):
params = dict(squeeze=True, index_col=0,
header=None, parse_dates=True)
params.update(**kwargs)
header = params.get("header")
out = pd.read_csv(path, **params)
if header is None:
out.name = out.index.name = None
return out
def test_from_csv_deprecation(self):
# see gh-17812
with ensure_clean() as path:
self.ts.to_csv(path)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
ts = self.read_csv(path)
depr_ts = Series.from_csv(path)
assert_series_equal(depr_ts, ts)
def test_from_csv(self):
with ensure_clean() as path:
self.ts.to_csv(path)
ts = self.read_csv(path)
assert_series_equal(self.ts, ts, check_names=False)
assert ts.name is None
assert ts.index.name is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
depr_ts = Series.from_csv(path)
assert_series_equal(depr_ts, ts)
# see gh-10483
self.ts.to_csv(path, header=True)
ts_h = self.read_csv(path, header=0)
assert ts_h.name == "ts"
self.series.to_csv(path)
series = self.read_csv(path)
| assert_series_equal(self.series, series, check_names=False) | pandas.util.testing.assert_series_equal |
import argparse
import math
import json
from tqdm import tqdm
from nltk.tag import pos_tag
import pandas as pd
import networkx as nx
import torch
import config
def get_relevant_tokens(word_count_path, threshold):
d = pd.read_csv(word_count_path, sep='\t', header=None, quotechar=None, quoting=3)
d.columns = ['token', 'count']
d = d.loc[d['count'] > threshold]
return d.token.tolist()
def prune_dt(input_dt_edges_path, relevant_tokens, output_dt_edges_path):
d = pd.read_csv(input_dt_edges_path, sep='\t', header=None, quotechar=None, quoting=3)
d.columns = ['word1', 'word2', 'weight']
d = d.loc[d['word1'].isin(relevant_tokens) & d['word2'].isin(relevant_tokens)]
d.to_csv(output_dt_edges_path, sep='\t', index=False, header=None, quotechar=None, quoting=3)
def update_POS_tags(input_DT_path, output_DT_path):
d = pd.read_csv(input_DT_path, sep='\t', header=None, quotechar=None, quoting=3)
d.columns = ['word1', 'word2', 'weight']
def replace_POS(e):
# https://cs.nyu.edu/grishman/jet/guide/PennPOS.html
d = {'NP': 'NNP', 'NPS': 'NNPS', 'PP': 'PRP', 'PP$': 'PRP$'}
word, pos = e.rsplit(config.DT_token_pos_delimiter, 1)
if(pos in d.keys()):
return f'{word}{config.DT_token_pos_delimiter}{d[pos]}'
else:
return f'{word}{config.DT_token_pos_delimiter}{pos}'
d.word1 = d.word1.apply(lambda x: replace_POS(x))
d.word2 = d.word2.apply(lambda x: replace_POS(x))
d.to_csv(output_DT_path, sep='\t', index=False, header=None, quotechar=None, quoting=3)
def load_DT(DT_edges_path=config.prune_DT_edges_path):
df = pd.read_csv(DT_edges_path, header=None, sep='\t', quotechar=None, quoting=3)
df.columns = ['word1', 'word2', 'weight']
G = nx.from_pandas_edgelist(df, 'word1', 'word2', 'weight')
print('Loaded the DT networkx graph')
return G
def edge_weight_u_v(DT_G, node1, node2):
try:
# ensure that shortest path over self-loops are not computed
shortest_path_length = nx.algorithms.shortest_paths.generic.shortest_path_length(G=DT_G, source=node1, target=node2, weight=None)
score = math.exp((-1) * (config.path_lambda) * (shortest_path_length - 1))
path_exists = True
except nx.exception.NodeNotFound:
score = -1
path_exists = False
except nx.exception.NetworkXNoPath:
score = -1
path_exists = False
return path_exists, score
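# Illustrative values (assuming config.path_lambda = 0.5): a direct DT edge
# (shortest_path_length = 1) scores exp(0) = 1.0, a two-hop path scores exp(-0.5) ~= 0.61,
# and node pairs with no path (or unknown nodes) return (False, -1).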
def setup_graph_edges(DT_G, sentence):
space_tokenized_sentence = sentence.split()
if(config.is_en):
pos_tagged_space_tokenized_sentence = [token + config.DT_token_pos_delimiter + tag for (token, tag) in pos_tag(space_tokenized_sentence)]
else:
# no POS tagger used in the non english DT
pos_tagged_space_tokenized_sentence = space_tokenized_sentence
assert(len(pos_tagged_space_tokenized_sentence) == len(space_tokenized_sentence))
# to ensure that every graph has edges - setup the mandatory self-loops
_edge_index = [[i, i] for i in range(len(space_tokenized_sentence))]
_edge_attr = [[1] for _ in _edge_index]
for i in range(len(space_tokenized_sentence)):
for j in range(i+1, len(space_tokenized_sentence)):
assert(i != j)
path_exists, edge_weight = edge_weight_u_v(DT_G, pos_tagged_space_tokenized_sentence[i], pos_tagged_space_tokenized_sentence[j])
if(path_exists):
_edge_index.append([i, j])
_edge_attr.append([edge_weight])
_edge_index.append([j, i])
_edge_attr.append([edge_weight])
edge_index = torch.LongTensor(_edge_index).to(config.device)
edge_index = torch.transpose(edge_index, 0, 1)
# shape(edge_index) = [2, num_edges]
edge_attr = torch.FloatTensor(_edge_attr).to(config.device)
# shape(edge_attr) = [num_edges, 1]
return edge_index, edge_attr
def get_sentences_encoded_dict(tokenizer, sentences, max_length):
assert(len(sentences) == 1 or len(sentences) == 2)
if(len(sentences) == 1):
encoded_dict = tokenizer.encode_plus(sentences[0], add_special_tokens=True, max_length=max_length, truncation=True, padding='max_length', return_attention_mask=True, return_tensors='pt')
elif(len(sentences) == 2):
encoded_dict = tokenizer.encode_plus(sentences[0], sentences[1], add_special_tokens=True, max_length=max_length, truncation=True, padding='max_length', return_attention_mask=True, return_tensors='pt')
input_ids = encoded_dict['input_ids'][0].to(config.device)
if(config.lm_model_name.startswith('roberta') or config.lm_model_name.startswith('xlm-roberta')):
token_type_ids = torch.zeros_like(input_ids)
else:
token_type_ids = encoded_dict['token_type_ids'][0].to(config.device)
attention_mask = encoded_dict['attention_mask'][0].to(config.device)
return input_ids, token_type_ids, attention_mask
def get_label_embedding(label, label_dict):
assert(label in label_dict)
vec = torch.zeros(len(label_dict), dtype=torch.float, device=config.device)
vec[label_dict[label]] = 1
vec = torch.unsqueeze(vec, 0)
# shape(vec) = [1, len(label_dict)]
return vec
def get_score_embedding(score):
vec = torch.tensor([score], dtype=torch.float).unsqueeze(0).to(config.device)
# shape(vec) = [1, 1]
return vec
def get_WiC_data_frame(WiC_data_path, WiC_gold_path):
df_data = | pd.read_csv(WiC_data_path, header=None, sep='\t') | pandas.read_csv |
"""
Obtains category distributions for included and excluded patients.
"""
from click import *
from logging import *
import pandas as pd
@command()
@option("--all-input", required=True, help="the CSV file to read all diagnoses from")
@option(
"--included-input",
required=True,
help="the CSV file to read diagnoses for included patients from",
)
@option("--output", required=True, help="the CSV file to write counts to")
def main(all_input, included_input, output):
basicConfig(level=DEBUG)
# Load data.
info("Loading all diagnoses")
X_all = pd.read_csv(all_input, index_col="subject_id").query(
"diagnosis != 'Withdrawn'"
)
debug(f"Result: {X_all.shape}")
info("Loading diagnoses for included patients")
X_included = | pd.read_csv(included_input, index_col="subject_id") | pandas.read_csv |
import codecs
import json
import os
from collections import OrderedDict
import numpy as np
import pandas as pd
import pyshac.config.hyperparameters as hp
# compatible with both Python 2 and 3
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def flatten_parameters(params):
"""
Takes an OrderedDict or a list of lists, and flattens it into a
list containing the items.
# Arguments:
params (OrderedDict | list of lists): The parameters that were
provided by the engine, either as an OrderedDict or a list
of list representation.
# Returns:
a flattened python list containing just the sampled values.
"""
if isinstance(params, OrderedDict):
params = list(params.values())
params = [item
for sublist in params
for item in sublist]
return params
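# Example usage of flatten_parameters (hypothetical sampled values):
# >>> flatten_parameters(OrderedDict([('h1', [1.0]), ('h2', ['relu', 0.5])]))
# [1.0, 'relu', 0.5]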
class Dataset(object):
"""Dataset manager for the engines.
Holds the samples and their associated evaluated values in a format
that can be serialized / restored as well as encoded / decoded for
training.
# Arguments:
parameter_list (hp.HyperParameterList | list | None): A python list
of Hyper Parameters, or a HyperParameterList that has been built.
Can also be None, if the parameters are to be assigned later.
basedir (str): The base directory where the data of the engine
will be stored.
"""
def __init__(self, parameter_list=None, basedir='shac'):
if not isinstance(parameter_list, hp.HyperParameterList):
if type(parameter_list) == list or type(parameter_list) == tuple:
parameter_list = hp.HyperParameterList(parameter_list)
self._parameters = parameter_list
self.X = []
self.Y = []
self.size = 0
self.basedir = basedir
self._prepare_dir()
def add_sample(self, parameters, value):
"""
Adds a single row of data to the dataset.
Each row contains the hyper parameter configuration as well as its associated
evaluation measure.
# Arguments:
parameters (list): A list of hyper parameters that have been sampled
value (float): The evaluation measure for the above sample.
"""
self.X.append(parameters)
self.Y.append(value)
self.size += 1
def clear(self):
"""
Removes all the data of the dataset.
"""
self.X = []
self.Y = []
self.size = 0
def encode_dataset(self, X=None, Y=None, objective='max'):
"""
Encode the entire dataset such that discrete hyper parameters are mapped
to integer indices and continuous valued hyper paramters are left alone.
# Arguments
X (list | np.ndarray | None): The input list of samples. Can be None,
in which case it defaults to the internal samples.
Y (list | np.ndarray | None): The input list of evaluation measures.
Can be None, in which case it defaults to the internal evaluation
values.
objective (str): Whether to maximize or minimize the
value of the labels.
# Raises:
ValueError: If `objective` is not in [`max`, `min`]
# Returns:
A tuple of numpy arrays (np.ndarray, np.ndarray)
"""
if objective not in ['max', 'min']:
raise ValueError("Objective must be in `max` or `min`")
if X is None:
X = self.X
if Y is None:
Y = self.Y
encoded_X = []
for x in X:
ex = self._parameters.encode(x)
encoded_X.append(ex)
encoded_X = np.array(encoded_X)
y = np.array(Y)
median = np.median(y)
encoded_Y = np.sign(y - median)
if objective == 'max':
encoded_Y = np.where(encoded_Y <= 0., 0.0, 1.0)
else:
encoded_Y = np.where(encoded_Y >= 0., 0.0, 1.0)
return encoded_X, encoded_Y
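# Illustration of the label encoding above (toy scores, objective='max'):
# Y = [0.1, 0.5, 0.9, 0.7] has median 0.6, so sign(Y - median) is [-1, -1, +1, +1]
# and the encoded labels become [0, 0, 1, 1] (above-median samples are the positives).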
def decode_dataset(self, X=None):
"""
Decode the input samples such that discrete hyper parameters are mapped
to their original values and continuous valued hyper paramters are left alone.
# Arguments:
X (np.ndarray | None): The input list of encoded samples. Can be None,
in which case it defaults to the internal samples, which are encoded
and then decoded.
# Returns:
np.ndarray
"""
if X is None:
X, _ = self.encode_dataset(self.X)
decoded_X = []
for x in X:
dx = self._parameters.decode(x)
decoded_X.append(dx)
decoded_X = np.array(decoded_X, dtype=np.object)
return decoded_X
def save_dataset(self):
"""
Serializes the entire dataset into a CSV file saved at the path
provided by `data_path`. Also saves the parameters (list of hyper parameters).
# Raises:
ValueError: If trying to save a dataset when its parameters have not been
set.
"""
if self._parameters is None:
raise ValueError("Cannot save a dataset whose parameters have not been set !")
print("Serializing dataset...")
x, y = self.get_dataset()
name_list = self._parameters.get_parameter_names() + ['scores']
y = y.reshape((-1, 1))
dataset = np.concatenate((x, y), axis=-1)
# serialize the data
df = pd.DataFrame(dataset, columns=name_list)
df.to_csv(self.data_path, encoding='utf-8', index=True, index_label='id')
# serialize the parameters
param_config = self._parameters.get_config()
with codecs.open(self.parameter_path, 'w', encoding='utf-8') as f:
json.dump(param_config, f, indent=4)
print("Serialization of dataset done !")
def restore_dataset(self):
"""
Restores the entire dataset from a CSV file saved at the path provided by
`data_path`. Also loads the parameters (list of hyperparameters).
# Raises:
FileNotFoundError: If the dataset is not at the provided path.
"""
print("Deserializing dataset...")
df = | pd.read_csv(self.data_path, header=0, encoding='utf-8') | pandas.read_csv |
# Copyright © 2019 <NAME>
"""
Tests for the variable/column cleaning with variable dropping on equality.
"""
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import unittest
# Tests for:
from ...row_filter import RowFilter
class CleanDropIfEqualTests(unittest.TestCase):
"""
Tests for the ``preprocess._clean_variables`` module dropping rows based on "=="
"""
@staticmethod
def test_drop_if_equal_1():
"""
Test that no rows are dropped if equality conditions are not met.
"""
_table_1 = DataFrame({"a": [1.0, 2.0, 3.0], "b": [2.0, 3.0, 4.0]})
_cleanings = [{"operator": "drop_if_equal", "columns": ["a"], "value": 5.0}]
_rf = RowFilter(_table_1)
_rf.filter(_cleanings)
assert_frame_equal(_table_1, _rf.frame)
@staticmethod
def test_drop_if_equal_2():
"""
Test that a single row is dropped
"""
_table_2 = DataFrame({"a": [1.0, 2.0, 3.0], "b": [2.0, 3.0, 4.0]})
_cleanings = [{"operator": "drop_if_equal", "columns": ["a"], "value": 1.0}]
_expected = DataFrame({"a": [2.0, 3.0], "b": [3.0, 4.0]})
_rf = RowFilter(_table_2)
_rf.filter(_cleanings)
# TODO: There might be a bug in how pandas checks indexes, this is a hack:
_expected.index = _rf.frame.index
assert_frame_equal(_expected, _rf.frame)
@staticmethod
def test_drop_if_equal_3():
"""
Test that no rows are dropped even if conditions would be met in a different row.
"""
_table_3 = DataFrame({"a": [1.0, 2.0, 3.0], "b": [2.0, 3.0, 4.0]})
_cleanings = [{"operator": "drop_if_equal", "columns": ["b"], "value": 1.0}]
_rf = RowFilter(_table_3)
_rf.filter(_cleanings)
assert_frame_equal(_table_3, _rf.frame)
@staticmethod
def test_drop_if_equal_4():
"""
Test that a single row is dropped when data type is a string.
"""
_table_4 = | DataFrame({"a": ["A", "B"], "b": ["C", "D"], "c": ["E", "F"]}) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import argparse
import json
import numpy as np
import pandas as pd
from tqdm import tqdm
from dde.data import get_db_mols, str_to_mol
from dde.predictor import Predictor
def parse_command_line_arguments():
"""
Parse the command-line arguments being passed to RMG Py. This uses the
:mod:`argparse` module, which ensures that the command-line arguments are
sensible, parses them, and returns them.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data', metavar='FILE',
help='A file specifying which datasets to test on. Alternatively, a space-separated .csv file'
' with InChI/SMILES and output(s) in the first and subsequent columns, respectively.')
parser.add_argument('-o', '--out_file', metavar='FILE',
help='If specified, write results for each molecule to this file')
parser.add_argument('-i', '--input', metavar='FILE',
help='Path to predictor input file')
parser.add_argument('-w', '--weights', metavar='H5',
help='Path to model weights')
parser.add_argument('-a', '--architecture', metavar='JSON',
help='Path to model architecture (necessary if using uncertainty)')
parser.add_argument('-ms', '--mean_and_std', metavar='NPZ',
help='Saved mean and standard deviation. '
'Should be loaded alongside weights if output was normalized during training')
return parser.parse_args()
################################################################################
def read_datasets_file(datasets_file_path):
"""
This method specifies which datasets to use for validation
"""
datasets = []
with open(datasets_file_path, 'r') as f_in:
for line in f_in:
line = line.strip()
if line and not line.startswith('#'):
host, db, table = [token.strip() for token in line.split('.')]
datasets.append((host, db, table))
return datasets
def prepare_data(host, db_name, collection_name, prediction_task="Hf298(kcal/mol)"):
# load validation data
db_mols = get_db_mols(host, db_name, collection_name)
smiles_list = []
ys = []
# decide what the prediction task is
if prediction_task not in ["Hf298(kcal/mol)", "S298(cal/mol/K)", "Cp(cal/mol/K)"]:
raise NotImplementedError("Prediction task: {0} not supported yet!".format(prediction_task))
for i, db_mol in enumerate(db_mols):
smiles = str(db_mol["SMILES_input"])
if prediction_task != "Cp(cal/mol/K)":
y = float(db_mol[prediction_task])
else:
Cp300 = float(db_mol["Cp300(cal/mol/K)"])
Cp400 = float(db_mol["Cp400(cal/mol/K)"])
Cp500 = float(db_mol["Cp500(cal/mol/K)"])
Cp600 = float(db_mol["Cp600(cal/mol/K)"])
Cp800 = float(db_mol["Cp800(cal/mol/K)"])
Cp1000 = float(db_mol["Cp1000(cal/mol/K)"])
Cp1500 = float(db_mol["Cp1500(cal/mol/K)"])
y = np.array([Cp300, Cp400, Cp500, Cp600, Cp800, Cp1000, Cp1500])
smiles_list.append(smiles)
ys.append(y)
return smiles_list, ys
def prepare_predictor(input_file, weights_file=None, model_file=None, mean_and_std_file=None):
predictor = Predictor()
predictor.load_input(input_file)
if model_file is not None:
predictor.load_architecture(model_file)
predictor.load_parameters(param_path=weights_file, mean_and_std_path=mean_and_std_file)
return predictor
def make_predictions(predictor, id_list, uncertainty=False):
results = []
for ident in tqdm(id_list):
mol = str_to_mol(ident)
result = predictor.predict(mol, sigma=uncertainty)
results.append(result)
return results
def evaluate(id_list, ys, results, prediction_task="Hf298(kcal/mol)", uncertainty=False):
result_df = pd.DataFrame(index=id_list)
result_df[prediction_task+"_true"] = pd.Series(ys, index=result_df.index)
if uncertainty:
ys_pred, uncertainties = zip(*results)
result_df[prediction_task+"_uncertainty"] = pd.Series(uncertainties, index=result_df.index)
else:
ys_pred = results
result_df[prediction_task+"_pred"] = pd.Series(ys_pred, index=result_df.index)
diff = abs(result_df[prediction_task+"_true"]-result_df[prediction_task+"_pred"])
sqe = diff ** 2.0
# if the prediction task is Cp
# since it has 7 values
# we'll average them for evaluation
if prediction_task == 'Cp(cal/mol/K)':
diff = [np.average(d) for d in diff]
sqe = [np.average(s) for s in sqe]
if uncertainty:
us = result_df[prediction_task+"_uncertainty"]
aveu = [np.average(u) for u in us]
result_df[prediction_task+"_uncertainties"] = us
result_df[prediction_task+"_uncertainty"] = pd.Series(aveu, index=result_df.index)
result_df[prediction_task+"_diff"] = | pd.Series(diff, index=result_df.index) | pandas.Series |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
def test_type_check(self, errors):
# see gh-11776
df = pd.DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
error_ctx = pytest.raises(TypeError, match="1-d array")
with error_ctx:
to_numeric(df, **kwargs)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetime_like(self, tz_naive_fixture):
idx = pd.date_range("20130101", periods=3,
tz=tz_naive_fixture, name="xxx")
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name="xxx"))
res = pd.to_numeric(pd.Series(idx, name="xxx"))
tm.assert_series_equal(res, pd.Series(idx.asi8, name="xxx"))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# TODO: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = pd.Series([[10.0, 2], 1.0, 'apple'])
res = pd.to_numeric(s, errors='coerce')
tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
res = pd.to_numeric(s, errors='ignore')
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
with pytest.raises(TypeError, match="Invalid object type"):
pd.to_numeric(s)
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
def test_downcast_basic(self, data):
# see gh-13352
invalid_downcast = "unsigned-integer"
msg = "invalid downcasting method provided"
with pytest.raises(ValueError, match=msg):
pd.to_numeric(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
# Basic function tests.
res = pd.to_numeric(data)
tm.assert_numpy_array_equal(res, expected)
res = pd.to_numeric(data, downcast=None)
tm.assert_numpy_array_equal(res, expected)
# Basic dtype support.
smallest_uint_dtype = np.dtype(np.typecodes["UnsignedInteger"][0])
# Support below np.float32 is rare and far between.
float_32_char = np.dtype(np.float32).char
smallest_float_dtype = float_32_char
expected = np.array([1, 2, 3], dtype=smallest_uint_dtype)
res = pd.to_numeric(data, downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_float_dtype)
res = pd.to_numeric(data, downcast="float")
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("signed_downcast", ["integer", "signed"])
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
def test_signed_downcast(self, data, signed_downcast):
# see gh-13352
smallest_int_dtype = np.dtype(np.typecodes["Integer"][0])
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
res = pd.to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
def test_ignore_downcast_invalid_data(self):
# If we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter.
data = ["foo", 2, 3]
expected = np.array(data, dtype=object)
res = pd.to_numeric(data, errors="ignore",
downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
def test_ignore_downcast_neg_to_unsigned(self):
# Cannot cast to an unsigned integer
# because we have a negative number.
data = ["-1", 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data, downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"])
@pytest.mark.parametrize("data,expected", [
(["1.1", 2, 3],
np.array([1.1, 2, 3], dtype=np.float64)),
([10000.0, 20000, 3000, 40000.36, 50000, 50000.00],
np.array([10000.0, 20000, 3000,
40000.36, 50000, 50000.00], dtype=np.float64))
])
def test_ignore_downcast_cannot_convert_float(
self, data, expected, downcast):
# Cannot cast to an integer (signed or unsigned)
# because we have a float number.
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("downcast,expected_dtype", [
("integer", np.int16),
("signed", np.int16),
("unsigned", np.uint16)
])
def test_downcast_not8bit(self, downcast, expected_dtype):
# the smallest integer dtype need not be np.(u)int8
data = ["256", 257, 258]
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("dtype,downcast,min_max", [
("int8", "integer", [iinfo(np.int8).min,
iinfo(np.int8).max]),
("int16", "integer", [iinfo(np.int16).min,
iinfo(np.int16).max]),
('int32', "integer", [iinfo(np.int32).min,
iinfo(np.int32).max]),
('int64', "integer", [iinfo(np.int64).min,
iinfo(np.int64).max]),
('uint8', "unsigned", [iinfo(np.uint8).min,
iinfo(np.uint8).max]),
('uint16', "unsigned", [iinfo(np.uint16).min,
iinfo(np.uint16).max]),
('uint32', "unsigned", [iinfo(np.uint32).min,
iinfo(np.uint32).max]),
('uint64', "unsigned", [iinfo(np.uint64).min,
iinfo(np.uint64).max]),
('int16', "integer", [iinfo(np.int8).min,
iinfo(np.int8).max + 1]),
('int32', "integer", [iinfo(np.int16).min,
iinfo(np.int16).max + 1]),
('int64', "integer", [iinfo(np.int32).min,
iinfo(np.int32).max + 1]),
('int16', "integer", [iinfo(np.int8).min - 1,
iinfo(np.int16).max]),
('int32', "integer", [iinfo(np.int16).min - 1,
iinfo(np.int32).max]),
('int64', "integer", [iinfo(np.int32).min - 1,
iinfo(np.int64).max]),
('uint16', "unsigned", [iinfo(np.uint8).min,
iinfo(np.uint8).max + 1]),
('uint32', "unsigned", [iinfo(np.uint16).min,
iinfo(np.uint16).max + 1]),
('uint64', "unsigned", [iinfo(np.uint32).min,
iinfo(np.uint32).max + 1])
])
def test_downcast_limits(self, dtype, downcast, min_max):
# see gh-14404: test the limits of each downcast.
series = pd.to_numeric(pd.Series(min_max), downcast=downcast)
assert series.dtype == dtype
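# Editor's note: illustrative sketch (not part of the original tests) of the
# limit behaviour checked by test_downcast_limits: downcasting picks the
# smallest dtype that can hold both the min and the max of the data.
def _demo_downcast_limits():
    import numpy as np
    import pandas as pd
    s = pd.Series([np.iinfo(np.int8).min, np.iinfo(np.int8).max])
    assert pd.to_numeric(s, downcast="integer").dtype == np.int8
    s = pd.Series([np.iinfo(np.int8).min - 1, np.iinfo(np.int8).max])
    assert pd.to_numeric(s, downcast="integer").dtype == np.int16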
def test_coerce_uint64_conflict(self):
# see gh-17007 and gh-17125
#
# Still returns float despite the uint64-nan conflict,
# which would normally force the casting to object.
df = pd.DataFrame({"a": [200, 300, "", "NaN", 30000000000000000000]})
expected = pd.Series([200, 300, np.nan, np.nan,
30000000000000000000], dtype=float, name="a")
result = to_numeric(df["a"], errors="coerce")
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
import pyautogui
from pathlib import Path
import time
import pandas as pd
from selenium.webdriver.common.keys import Keys
class ExcelManager:
SCORES_LIST = [750,500,250,100,100,100,100,100,100,100,100,100]
# class-level constant: the scores awarded per leaderboard placement
def __init__(self,df,leaderboard_df,link=None,booster=1):
self.df = df
self.leaderboard_df = leaderboard_df
self.booster = booster
# self.answer = answer
# self.link = link
@classmethod
def read_excel_file(cls,path,path2):
"""
read_excel_file(cls, path, path2)
Description:
Reads the workbook at `path` and the leaderboard workbook at `path2` into DataFrames and uses them to construct an ExcelManager instance; this classmethod acts as an alternative constructor for the class.
Parameters:
path (str):
path to the main workbook
path2 (str):
path to the leaderboard workbook
"""
# wb = load_workbook(path)
# ws = wb.active
# for row in range(rowstart,rowend):
# for column in range(colstart,colend):
# col = get_column_letter(column)
# print(ws["{}{}".format(col,row)].value)
# print("=====================================")
df = | pd.read_excel(path) | pandas.read_excel |
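# Editor's note: hypothetical usage sketch for ExcelManager.read_excel_file,
# added for illustration only. The file names are placeholders, and it assumes
# the classmethod finishes by constructing and returning cls(df, leaderboard_df)
# as its docstring describes.
def _demo_excelmanager_usage():
    manager = ExcelManager.read_excel_file("questions.xlsx", "leaderboard.xlsx")
    return manager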
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equivalent to f[col][row]...
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, then ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
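# Editor's note: illustrative sketch (not part of the original tests) of the
# scalar accessors exercised above: .at is label-based, .iat is positional.
def _demo_at_iat():
    import pandas as pd
    df = pd.DataFrame({"A": [10, 20, 30]}, index=["x", "y", "z"])
    assert df.at["y", "A"] == 20   # label-based scalar access
    assert df.iat[1, 0] == 20      # position-based scalar access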
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
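# Editor's note: illustrative sketch (not part of the original tests) of the
# bounds behaviour above: out-of-bounds .iloc slices are truncated, while a
# single out-of-bounds position raises IndexError.
def _demo_iloc_bounds():
    import pandas as pd
    s = pd.Series(range(5))
    assert s.iloc[3:100].tolist() == [3, 4]  # slice past the end is truncated
    try:
        s.iloc[100]
    except IndexError:
        pass  # single out-of-bounds positional indexer raises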
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
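# Editor's note: illustrative sketch (not part of the original tests) of the
# label vs. positional slicing semantics compared above: .loc label slices
# include the endpoint, .iloc positional slices exclude it.
def _demo_loc_vs_iloc_slicing():
    import pandas as pd
    s = pd.Series([1, 2, 3, 4], index=list("abcd"))
    assert s.loc["a":"c"].tolist() == [1, 2, 3]  # endpoint included
    assert s.iloc[0:2].tolist() == [1, 2]        # endpoint excluded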
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
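# Editor's note: illustrative sketch (not part of the original tests) of the
# GH 3053 behaviour above: on an integer index, .loc slices by label and
# includes both endpoints.
def _demo_loc_integer_label_slice():
    import pandas as pd
    s = pd.Series([10, 20, 30], index=[6, 7, 8])
    assert s.loc[6:8].tolist() == [10, 20, 30]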
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fall back
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fall back
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, i.e. column 'test' is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
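# Editor's note: illustrative sketch (not part of the original tests) of the
# recommended alternative to the chained assignment used above: a single
# .loc call sets the values without triggering the chained-assignment check.
def _demo_avoid_chained_assignment():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({"a": list(range(4)), "b": np.nan})
    df.loc[[1, 3], "b"] = [100, -100]
    assert df.loc[1, "b"] == 100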
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set were not consistent with
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
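        # `expected` maps (mask index choice, accessor) -> outcome: either the binary string of the
        # masked 'nums' sum, or the start of the error message the lookup is expected to raise.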
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
        # these are going to raise because we are non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
        # these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
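        # NOTE: .loc with labels missing from the index reindexes and fills NaN here, as long as at
        # least one requested label exists; an all-missing list raises KeyError (checked further below)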
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
| tm.assert_series_equal(result, expected, check_index_type=True) | pandas.util.testing.assert_series_equal |
"""This module contains tests for recoding"""
from unittest import TestCase
import datetime
import pandas as pd
from kernel.recoding import recode, recode_dates, recode_ordinal, recode_nominal, recode_range
from kernel.util import reduce_string
class TestStringGeneralization(TestCase):
"""Class containing tests for string generalization"""
def test_generalization(self):
postcode = 'NE9 5YE'
generalized = reduce_string(postcode)
self.assertNotEqual(postcode, generalized)
def test_single_step_generalization(self):
postcode_1 = 'HP2 7PW'
postcode_2 = 'HP2 7PF'
generalized_1 = reduce_string(postcode_1)
generalized_2 = reduce_string(postcode_2)
self.assertNotEqual(postcode_1, postcode_2)
self.assertEqual(generalized_1, generalized_2)
def test_multistep_generalization(self):
postcode_1 = 'HP2 7PW'
postcode_2 = 'HP2 4DY'
number_of_generalization_steps = 0
while(postcode_1 != postcode_2):
if (len(postcode_1) > len(postcode_2)):
postcode_1 = reduce_string(postcode_1)
else:
postcode_2 = reduce_string(postcode_2)
number_of_generalization_steps = number_of_generalization_steps + 1
self.assertEqual(postcode_1, postcode_2)
self.assertEqual(number_of_generalization_steps, 6)
def test_total_generalization(self):
postcode_1 = 'HP2 7PW'
postcode_2 = 'CF470JD'
number_of_generalization_steps = 0
while(postcode_1 != postcode_2):
if (len(postcode_1) > len(postcode_2)):
postcode_1 = reduce_string(postcode_1)
else:
postcode_2 = reduce_string(postcode_2)
number_of_generalization_steps = number_of_generalization_steps + 1
self.assertEqual(postcode_1, postcode_2)
self.assertEqual(number_of_generalization_steps, 14)
self.assertEqual(postcode_1, '*')
class TestRangeGeneralization(TestCase):
"""Class containing tests for range generalization"""
def test_range_of_ints_generalization(self):
numbers = [2, 5, 27, 12, 3]
generalized = recode_range(pd.Series(numbers))
self.assertIsInstance(generalized, range)
self.assertEqual(generalized, range(2, 28))
def test_range_of_floats_generalization(self):
numbers = [8.7, 4.12, 27.3, 18]
generalized = recode_range(pd.Series(numbers))
self.assertIsInstance(generalized, range)
self.assertEqual(generalized, range(4, 29))
class TestDateGeneralization(TestCase):
"""Class containing tests for date generalization"""
def test_time_generalization(self):
date_1 = datetime.datetime(2020, 9, 28, 12, 32, 00)
date_2 = datetime.datetime(2020, 9, 28, 15, 27, 48)
series = pd.Series([date_1, date_2])
generalized = recode_dates(series)
self.assertEqual(generalized, datetime.datetime(2020, 9, 28))
def test_day_generalization(self):
date_1 = datetime.datetime(2020, 9, 27, 12, 32, 00)
date_2 = datetime.datetime(2020, 9, 28, 15, 27, 48)
series = pd.Series([date_1, date_2])
generalized = recode_dates(series)
self.assertEqual(generalized.to_timestamp(), datetime.datetime(2020, 9, 1))
def test_month_generalization(self):
date_1 = datetime.datetime(2020, 10, 27, 12, 32, 00)
date_2 = datetime.datetime(2020, 9, 28, 15, 27, 48)
series = pd.Series([date_1, date_2])
generalized = recode_dates(series)
self.assertEqual(generalized.to_timestamp(), datetime.datetime(2020, 1, 1))
def test_year_generalization(self):
date_1 = datetime.datetime(2021, 10, 27, 12, 32, 00)
date_2 = datetime.datetime(2020, 9, 28, 15, 27, 48)
series = pd.Series([date_1, date_2])
generalized = recode_dates(series)
self.assertEqual(generalized, range(2020, 2022))
class TestOrdinalGeneralization(TestCase):
"""Class containing tests for ordinal generalization"""
def test_ordinal_generalization_raises_exception(self):
categories = ['A', 'B', 'C']
values = ['A', 'A', 'A']
series = pd.Series( | pd.Categorical(values, categories, ordered=False) | pandas.Categorical |
# ## Experimental coinbase api
#
import cbpro
public_client = cbpro.PublicClient()
# How to get trading pairs
import pandas as pd
# ## Ticker symbols
products = public_client.get_products()
pd.DataFrame(products)
# ## Order book
# Best current order: each entry is in `price`, `size`, `num-orders`
best = public_client.get_product_order_book('BTC-USD')
pd.DataFrame(best)
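# Hedged example (not part of the original notebook): the level-1 book above stores the best
# bid/ask as [price, size, num-orders] string triples, so the spread and mid-price follow directly.
best_bid = float(best['bids'][0][0])
best_ask = float(best['asks'][0][0])
spread = best_ask - best_bid
mid_price = (best_bid + best_ask) / 2
spread, mid_price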
# Top 50
best50 = public_client.get_product_order_book('BTC-USD', level=2)
pd.DataFrame(best50).head()
btc_tick = public_client.get_product_ticker('BTC-USD')
pd.Series(btc_tick)
# ## Historic rates
btc_hist = public_client.get_product_historic_rates('BTC-USD')
df = pd.DataFrame(btc_hist, columns = ['time', 'low', 'high', 'open', 'close', 'volume']).set_index('time')
df.index = | pd.to_datetime(df.index, unit='s') | pandas.to_datetime |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import pyparsing
from mpl_toolkits import mplot3d
def linegraph(type, dims, component_names, colors_list):
'''
Plot the explained variance across dimensions using the model results
@params:
type: Type of reduction method, pca or autoencoders
dims: Number of dimensions
component_names: Gas/particulate list
colors_list: List of colors to be used
'''
num_of_comp = list(range(2,dims+1)) # TODO: delete this later
num_of_dims = list(range(2, dims+1))
# Plot results
plt.figure(figsize=(12,10))
for i, component in enumerate(component_names):
# AE or PCA
if type == 'ae':
#file_name = f'{f_name}{component}_metrics.csv'
file_name = f'/home/nick/github_repos/Pollution-Autoencoders/data/model_metrics/ae/{component}_metrics.csv'
plt_title = 'Autoencoder Reduced Representation of Air Pollutants'
elif type == 'pca':
file_name = f'/home/nick/github_repos/Pollution-Autoencoders/data/model_metrics/pca/{component}_metrics'
plt_title = 'PCA Reduced Representation of Air Pollutants'
else:
print('Type must be "ae" or "pca"')
quit()
# Read in model results
model = pd.read_csv(filepath_or_buffer=file_name)
variance = model['variance']
r2 = model['r2']
# sns
ax = sns.lineplot(x=num_of_dims, y=variance[:dims-1], linewidth=1, label=f'{component}', color=colors_list[i])
sns.lineplot(x=num_of_dims, y=r2[:dims-1], linewidth=5, linestyle = '-.', marker = 'H', label=f'{component}', color=colors_list[i])
sns.despine()
plt.rcParams.update({'font.size': 22})
plt.tick_params(axis='both', which='major', labelsize=28)
ax.set_xlabel('Dimension', fontsize='large')
ax.set_ylabel('% Explained Variance', fontsize='large')
plt.title(plt_title)
#plt.plot(num_of_comp, variance[:dims-1], label = '{}'.format(component), linestyle = '-', marker = '+', color = colors_list[i])
#plt.plot(num_of_comp, r2[:dims-1], linestyle = '-.', marker = 'H', color = colors_list[i])
#plt.rcParams.update({'font.size': 22})
#plt.tick_params(axis='both', which='major', labelsize=28)
#plt.xlabel('Dimension')
#plt.ylabel('% Explained Variance')
#plt.xlabel('')
#plt.ylabel('')
#plt.title(plt_title)
plt.ylim([0,1])
plt.legend()
plt.show()
def metrics_comparison(type, dims, component_names, colors_list):
'''
Plot the explained variance across dimensions using the model results
@params:
type: Type of reduction method, pca or autoencoders
dims: Number of dimensions
component_names: Gas/particulate list
colors_list: List of colors to be used
'''
num_of_comp = list(range(2,dims+1)) # TODO: delete this later
num_of_dims = list(range(1, dims+1))
# Plot results
plt.figure(figsize=(12,10))
for i, component in enumerate(component_names):
# AE or PCA
if type == 'ae':
            high_file = f'/home/nick/github_repos/Pollution-Autoencoders/data/model_metrics/ae/best_worst/{component}_best_metrics.csv'
low_file = f'/home/nick/github_repos/Pollution-Autoencoders/data/model_metrics/ae/best_worst/{component}_worst_metrics.csv'
plt_title = 'Autoencoder Reduced Representation of Air Pollutants'
elif type == 'pca':
high_file = f'/home/nick/github_repos/Pollution-Autoencoders/data/model_metrics/pca/best_worst/{component}_best_metrics.csv'
low_file = f'/home/nick/github_repos/Pollution-Autoencoders/data/model_metrics/pca/best_worst/{component}_worst_metrics.csv'
plt_title = 'PCA Reduced Representation of Air Pollutants'
else:
print('Type must be "ae" or "pca"')
quit()
# Read in model results
best = pd.read_csv(filepath_or_buffer=high_file)
worst = pd.read_csv(filepath_or_buffer=low_file)
# Best and worst scores read in
best_variance = best['variance']
best_r2 = best['r2']
worst_variance = worst['variance']
worst_r2 = worst['r2']
# Plots
plt.plot(num_of_comp, best_variance[:dims-1], label = f'{component} high variance', linestyle = '-', marker = '+', color = 'green')
plt.plot(num_of_comp, best_r2[:dims-1], linestyle = '-.', marker = 'H', color = 'green')
plt.plot(num_of_comp, worst_variance[:dims-1], label = f'{component} low variance', linestyle = '-', marker = '+', color = 'red')
plt.plot(num_of_comp, worst_r2[:dims-1], linestyle = '-.', marker = 'H', color = 'red')
plt.rcParams.update({'font.size': 22})
plt.tick_params(axis='both', which='major', labelsize=28)
plt.xlabel('Dimension')
plt.ylabel('% Explained Variance')
plt.title('High-low variance scores')
plt.ylim([0,1])
plt.legend()
plt.show()
def heatmap(component):
# Read in component data and create an annotation list of cities
    data = pd.read_csv('/home/nick/github_repos/Pollution-Autoencoders/data/data_clean/{}_data_clean.csv'.format(component))
df = pd.DataFrame(data=data)
annotations = df['city']
# Read in whitelist of cities to graph
wlist_data = pd.read_csv(filepath_or_buffer='/home/nick/github_repos/Pollution-Autoencoders/data/other/outliers_whitelist.csv')
wlist = pd.DataFrame(data=wlist_data[:10])
# Remove lat/lon
    df.drop(['lat', 'lon'], axis=1, inplace=True)
# Time series data, city labels, and values list
ts_list = []
cities_list = []
val_list = []
total = 0
# Loop through and find indexes of each city in data
for i in range(len(annotations)):
for w in range(len(wlist)):
# Check if current city is in the whitelist
if annotations.iloc[i] == wlist.iloc[w][0]:
# Average for seven days
for c in range(1, len(df.columns)):
                    total += df.iloc[i][c]
if (c%7 == 0):
weekly = round(total/7, 5)
ts_list.append(weekly)
total = 0
# Update value and city name
val_list.append(ts_list)
cities_list.append(annotations.iloc[i])
# Clear list for next iteration
ts_list = []
# Plot figure
plt.rcParams.update({'font.size': 18})
fig, ax = plt.subplots(figsize=(15,14))
heat = ax.imshow(val_list, cmap='plasma')
# Set axis width
ax.set_xticks(np.arange(0, 27, 2)) # 27 weeks
ax.set_yticks(np.arange(len(cities_list)))
ax.set_yticklabels(cities_list)
fig.colorbar(heat)
plt.show()
def correlation(X_data_file, Y_data_file, component):
SIZE = 190
# Read in normalized data
norm_data = | pd.read_csv(filepath_or_buffer=X_data_file) | pandas.read_csv |
"""
生成微信视频号比赛训练集/测试集的tfrecord文件
训练集: date_ = 8-13(生成特征需要开7天的窗口)
测试集: date_ = 14
特征:
user侧:
userid: 用户id
u_read_comment_7d_sum: 近7天查看评论次数
u_like_7d_sum: 近7天点赞次数
u_click_avatar_7d_sum: 近7天点击头像次数
u_favorite_7d_sum: 近7天收藏次数
u_forward_7d_sum: 近7天转发次数
u_comment_7d_sum: 近7天评论次数
u_follow_7d_sum: 近7天关注次数
his_read_comment_7d_seq: 近7天查看评论序列, 最长50个
device: 设备类型
item侧:
feedid: feedid
i_read_comment_7d_sum: 近7天被查看评论次数
i_like_7d_sum: 近7天被点赞次数
i_click_avatar_7d_sum: 近7天被点击头像次数
i_favorite_7d_sum: 近7天被收藏次数
i_forward_7d_sum: 近7天被转发次数
i_comment_7d_sum: 近7天被评论次数
i_follow_7d_sum: 近7天经由此feedid, 作者被关注次数
videoplayseconds: feed时长
authorid: 作者id
bgm_song_id: 背景音乐id
bgm_singer_id: 背景音乐歌手id
manual_tag_list: 人工标注的分类标签
交叉侧:(过于稀疏且耗费资源, 暂时只考虑第一个)
c_user_author_read_comment_7d_sum: user对当前item作者的查看评论次数
c_user_author_like_7d_sum: user对当前item作者的点赞次数
c_user_author_click_avatar_7d_sum: user对当前item作者的点击头像次数
c_user_author_favorite_7d_sum: user对当前item作者的收藏次数
c_user_author_forward_7d_sum: user对当前item作者的转发次数
c_user_author_comment_7d_sum: user对当前item作者的评论次数
c_user_author_follow_7d_sum: user对当前item作者的关注次数
"""
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
tqdm.pandas(desc='pandas bar')
from collections import Counter
ACTION_COLUMN_LIST = ["read_comment", "like", "click_avatar", "forward", "comment", "follow", "favorite"]
END_DAY = 14
class DataGenerator:
"""生成微信视频号训练集/测试集的tfrecord文件"""
def __init__(self, dataset_dir: str = './', out_path: str = './'):
"""
Args:
            dataset_dir: path to the folder that contains the raw data files
            out_path: output folder for the tfrecord files and the categorical-feature vocabulary files
"""
self.dataset_dir = dataset_dir
self.out_path = out_path
self.dense_features = [
"videoplayseconds",
"u_read_comment_7d_sum",
"u_like_7d_sum",
"u_click_avatar_7d_sum",
"u_forward_7d_sum",
"u_comment_7d_sum",
"u_follow_7d_sum",
"u_favorite_7d_sum",
"i_read_comment_7d_sum",
"i_like_7d_sum",
"i_click_avatar_7d_sum",
"i_forward_7d_sum",
"i_comment_7d_sum",
"i_follow_7d_sum",
"i_favorite_7d_sum",
"c_user_author_read_comment_7d_sum",
]
self.category_featurs = [
"userid",
"feedid",
"device",
"authorid",
"bgm_song_id",
"bgm_singer_id",
]
self.seq_features = ["his_read_comment_7d_seq", "manual_tag_list"]
self.labels = [
"read_comment",
"comment",
"like",
"click_avatar",
"forward",
"follow",
"favorite",
]
        # Folder for the categorical-feature vocabulary files
self.vocab_dir = os.path.join(self.out_path, 'vocabulary')
        # Folder for the feature shards
self.features_dir = os.path.join(self.out_path, 'features')
        # Folder for the intermediate dataframes
self.dataframe_dir = os.path.join(self.out_path, 'dataframe')
        # Folder for the tfrecord files
self.tfrecord_dir = os.path.join(self.out_path, 'tfrecord')
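        # The DATAFRAME_ALREADY sentinel file marks that the intermediate artifacts were generated on a
        # previous run; if it is missing, run the full feature/dataframe/tfrecord generation pipeline.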
if not os.path.exists(os.path.join(self.dataframe_dir, 'DATAFRAME_ALREADY')):
self._load_data()
self._preprocess()
self._generate_vocabulary_file()
self._generate_features()
self._generate_dataframe()
self._generate_tfrecord()
def _load_data(self):
"""读入数据"""
self.user_action = pd.read_csv(self.dataset_dir + 'user_action.csv')
self.feed_info = pd.read_csv(self.dataset_dir + 'feed_info.csv',
usecols=["feedid", "authorid", "videoplayseconds", "bgm_song_id", "bgm_singer_id",
"manual_tag_list"])
def _preprocess(self):
"""数据预处理,把所有类别变量取值前面都加上前缀"""
self.feed_info['feedid'] = self.feed_info['feedid'].astype(str)
self.feed_info['authorid'] = self.feed_info['authorid'].astype(str)
        # When an int column contains missing values, pd.read_csv casts it to float, so use the nullable extension type instead of int
self.feed_info['bgm_song_id'] = self.feed_info['bgm_song_id'].astype( | pd.Int16Dtype() | pandas.Int16Dtype |
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#import stationary_block_bootstrap as sbb
import pandas as pd
import numpy as np
import scipy.stats
import numpy
import time
import random
#import state_variables
import os
import scipy.stats
import sklearn.feature_selection
import matplotlib.gridspec as gridspec
import copy
from argotools.config import *
from argotools.forecastlib.handlers import *
from argotools.forecastlib.functions import *
import argotools.forecastlib.stationary_block_bootstrap as sbb
from argotools.dataFormatter import *
import seaborn as sns
import matplotlib.ticker as mticker
import math
from matplotlib.ticker import MaxNLocator,IndexFormatter, FormatStrFormatter
class OutputVis:
# Variables : top_n = 3, ranking_metric = 'rmse', ranking_season ='ALL_PERIOD', preds (vector/PD containing all predictions), metrics (matrix/PD containing all metrics),
# Load predictions and csvs from file,
# get name of models, number of models, name of metrics, table variable names (season1, season2... allPeriod).
# Get RANKING METRIC or all models in the file. Check if theres more than one first.
# FUNC STATISTICS BETWEEN THE MODELS : MEAN, VARIANCE, BEST MODEL, WORST MODEL
# figure 1 : Time-series, error and percent error
# figure 2: metric / plot
def __init__(self, folder_dir=None, ids=None, overview_folder='_overview'):
# Loading tables and files
if folder_dir is None:
            print('WARNING! No main folder directory specified. Add it as an attribute or '
                  'specify it on every function call that requires it.')
self.folder_main = folder_dir
self.ids = ids
self.overview_folder = overview_folder
print('Visualizer initialized')
# imported VARS
def plot_SEC(self, series_filepath=None, coeff_filepath=None, target_name='ILI', models=None, color_dict=None, start_period=None, end_period=None, alpha_dict=None, output_filename=None, ext='png', mode='save', n_coeff=20, cmap_color='RdBu_r', error_type='Error', vmin=-1, vmax=1, font_path=None):
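        # Plots the target series and model estimates (top), the absolute errors per model (middle),
        # and a heatmap of the n_coeff coefficients with the largest mean values (bottom).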
if font_path:
from matplotlib import font_manager
prop = font_manager.FontProperties(fname=font_path)
if color_dict is None:
color_dict = dict(zip(models, [tuple(np.random.random(3)) for mod in models]))
if alpha_dict is None:
alpha_dict = dict(zip(models, [1 for mod in models]))
series_df = pd.read_csv(series_filepath, index_col=0)
coeff_df = pd.read_csv(coeff_filepath, index_col=0)
if start_period is None:
start_period = series_df.index[0]
if end_period is None:
end_period = series_df.index[-1]
series_df = series_df[start_period:end_period]
coeff_df = coeff_df[start_period:end_period]
target = series_df[target_name].values
series = {}
errors = {}
for mod in models:
series[mod] = series_df[mod].values
errors[mod] = np.abs(target - series[mod])
indices = list(series_df[target_name].index.values)
#plotting target
f, axarr = plt.subplots(3,2, gridspec_kw = {'height_ratios':[2,1,3], 'width_ratios':[16,1]})
axarr[0,0].fill_between(x=list(range(len(indices))),y1=target, facecolor='gray', alpha=0.5, label=target_name)
#plotting series
for mod in models:
axarr[0,0].plot(series[mod], label=mod, color=color_dict[mod], alpha=alpha_dict[mod])
axarr[1,0].plot(errors[mod], color=color_dict[mod], alpha=alpha_dict[mod])
if n_coeff is None:
n_coeff = coeff_df.shape[1]
means = coeff_df.mean(axis=0)
coeff_names = list(coeff_df)
ordered_names = [ name for v, name in sorted(zip(means, coeff_names), key=lambda x: x[0], reverse=True)]
coeff_df = coeff_df[ordered_names[:n_coeff]]
sns.heatmap(coeff_df.T, vmin=vmin, vmax=vmax, cmap=cmap_color, center=None, \
robust=False, annot=None, fmt='.2g', annot_kws=None, linewidths=0,\
linecolor='white', cbar=True, cbar_kws=None, cbar_ax=axarr[2,1], square=False,\
xticklabels='auto', yticklabels=True, mask=None, ax=axarr[2,0])
plt.gcf().set_size_inches([10, int(n_coeff/2)])
plt.sca(axarr[0,0])
plt.legend(frameon=False, ncol=len(models))
plt.xlim([0, len(indices)])
plt.ylim(bottom=0)
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(6))
plt.gca().set_xticklabels([])
plt.grid(linestyle = 'dotted', linewidth = .6)
plt.sca(axarr[1,0])
plt.xlim([0, len(indices)])
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(6))
plt.gca().set_xticklabels([])
plt.grid(linestyle = 'dotted', linewidth = .6)
plt.sca(axarr[0,1])
plt.axis('off')
plt.sca(axarr[1,1])
plt.axis('off')
plt.sca(axarr[2,0])
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(6))
plt.gca().set_yticklabels(ordered_names[:n_coeff], fontproperties=prop)
# STYLE
axarr[0,0].spines['right'].set_visible(False)
axarr[0,0].spines['top'].set_visible(False)
axarr[1,0].spines['right'].set_visible(False)
axarr[1,0].spines['top'].set_visible(False)
axarr[0,0].set_ylabel(target_name)
axarr[1,0].set_ylabel(error_type)
plt.subplots_adjust(left=.2, bottom=.1, right=.95, top=.9, wspace=.05, hspace=.20)
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
                output_filename = '{0}_SEC'.format(target_name)
                plt.savefig('{0}/{1}.{2}'.format(self.folder_main, output_filename, ext), format=ext)
else:
plt.savefig(output_filename+'.{0}'.format(ext), format=ext)
plt.close()
def plot_coefficients(self, id_=None, model=None, coefficients_filepath=None, cmap_color='RdBu_r',\
n_coeff=None, filename='_coefficients.csv', output_filename=None, ext='png', mode='show'):
if coefficients_filepath:
coefficients = pd.read_csv(coefficients_filepath, index_col=0)
else:
coefficients = pd.read_csv('{0}/{1}/{2}'.format(self.folder_main, id_, model), index_col=0)
        coefficients = coefficients.fillna(0)
if n_coeff is None:
n_coeff = coefficients.shape[1]
means = coefficients.mean(axis=0)
coeff_names = list(coefficients)
ordered_names = [ name for v, name in sorted(zip(means, coeff_names), key=lambda x: x[0], reverse=True)]
coefficients = coefficients[ordered_names[:n_coeff]]
sns.heatmap(coefficients.T, vmin=None, vmax=None, cmap=cmap_color, center=None, \
robust=False, annot=None, fmt='.2g', annot_kws=None, linewidths=0,\
linecolor='white', cbar=True, cbar_kws=None, cbar_ax=None, square=False,\
xticklabels='auto', yticklabels=True, mask=None, ax=None)
plt.gcf().set_size_inches([10, int(n_coeff/3)])
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
output_filename = '{0}_coefficients'.format(model)
                plt.savefig('{0}/{1}/{2}.{3}'.format(self.folder_main, id_, output_filename, ext), format=ext)
else:
plt.savefig(output_filename+'.{0}'.format(ext), format=ext)
plt.close()
    def inter_group_lollipop_comparison(self, ids_dict, path_dict, metric, period, models, benchmark, color_dict=None, alpha_dict=None, metric_filename='metrics.csv', bar_separation_multiplier=1.5, mode='show', output_filename='LollipopTest', plot_domain=None, ext='png'):
"""
Plots the ratio of the metric score for each of the models against a benchmark in a lollipop plot to compare between experiments.
Parameters
__________
ids_dict: dict
Dictionary containing the list of ids for each experiment
path_dict: dict
Dictionary containing the path to the experiment folders (must coincide with the keys of ids_dict)
metric: str
String containing the name of the metric to look for in the predictions file
period: str
Column name containing the values to plot
models: List, optional (default None)
String list containing the names of the models to plot
benchmark: str
The name within "models" which will serve as the benchmark
color_dict : dict
Dictionary containing specific colors for the models to plot
metric_filename : str, optional (default metrics.csv)
mode : str, optional (default is 'save')
If 'save', then function saves plot on the id_ specific folder.
if 'show, then function plots and used plt.show to pop up the plot real time'
alpha_dict : dict, optional (default is None)
dictionary specifying the opacity of the bars in the plot (alpha argument in matplotlib).
If set to None, then all opacities are set to 1
output_filename : str, optional (default is None)
If set to None, output_filename is set metricname_barplot
ext : str, optional (default is png)
            Extension format used to save the barplot.
plot_domain : list, optional (default is [0,1])
list of two integers that sets the limits in the plot (plt.xlim)
bar_separation_multiplier : float, optional (default is 1)
Parameter that functions as multiplier for the separion between bars in the plot.
if set to 1, then bars are plotted in locations 1,2,3... if set to 2, then 2,4,6, etc
"""
fig, axarr = plt.subplots(len(ids_dict.keys()),1)
axes = axarr.ravel()
if color_dict is None:
color_dict = dict(zip(models, ['b']*len(models)))
if alpha_dict is None:
alpha_dict = dict(zip(models, [1]*len(models)))
for i, (experiment, folder_main) in enumerate(path_dict.items()):
plt.sca(axes[i])
ids = ids_dict[experiment]
values_dict = dict(zip(models, [[] for mod in models]))
min_val = float('inf')
max_val = float('-inf')
indices = []
overview_path = '{0}/{1}'.format(folder_main, '_overview')
            for k, id_ in enumerate(ids):
                indices.append(k*bar_separation_multiplier)
id_path = '{0}/{1}'.format(folder_main, id_)
df = pd.read_csv('{0}/{1}'.format(id_path, metric_filename))
df = df[df['METRIC']==metric]
for j, mod in enumerate(models):
ratio = copy.copy(df[df['MODEL']==mod][period].values[0]/df[df['MODEL']==benchmark][period].values[0])
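                    # Error-style metrics are "lower is better", so invert the ratio so that values
                    # above 1 consistently mean the model outperforms the benchmark.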
if metric in ['ERROR', 'RMSE', 'NRMSE', 'MAPE']:
ratio=(1/ratio)
values_dict[mod].append(ratio)
if ratio < min_val:
min_val = ratio
if ratio > max_val:
max_val = ratio
bar_width = 1/len(models)
indices = np.array(indices)
            for m, mod in enumerate(models):
                heights = values_dict[mod]
                bar_positions = indices + bar_width*m
(markers, stemlines, baseline) = plt.stem(bar_positions, heights, linefmt='--')
plt.setp(markers, marker='o', markersize=7, color=color_dict[mod], alpha=alpha_dict[mod], label=mod)
plt.setp(stemlines, color=color_dict[mod], linewidth=1)
plt.setp(baseline, visible=False)
# Black line
plt.plot([0,bar_positions[-1]], [1,1],'--',color='.6', alpha=.6)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
if experiment == 'State':
ids = [id_[-3:] for id_ in ids]
plt.xticks(indices+bar_width*((len(models)-1)/2), ids)
plt.ylim([min_val*.95, max_val*1.05])
plt.xlim([-.3, bar_positions[-1]+.3])
if i == 0:
axes[i].legend(frameon=False, ncol=len(models))
plt.title('{0} barplot'.format(metric))
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
output_filename = '{0}_barplot'.format(metric)
plt.gcf().set_size_inches([6,15])
plt.savefig('{0}/{1}.{2}'.format(overview_path, output_filename, ext), format=ext)
plt.close()
    def group_lollipop_ratio(self, ids, metric, period, models, benchmark, folder_main=None, color_dict=None, alpha_dict=None, metric_filename='metrics.csv', bar_separation_multiplier=1.5, mode='show', output_filename='LollipopTest', plot_domain=None, ext='png'):
"""
Plots the ratio of the metric score for each of the models against a benchmark in a lollipop plot.
Parameters
__________
        ids: list
            List of string identifiers for the regions to plot
metric: str
String containing the name of the metric to look for in the predictions file
period: str
Column name containing the values to plot
models: List, optional (default None)
String list containing the names of the models to plot
benchmark: str
The name within "models" which will serve as the benchmark
color_dict : dict
Dictionary containing specific colors for the models to plot
metric_filename : str, optional (default metrics.csv)
mode : str, optional (default is 'save')
If 'save', then function saves plot on the id_ specific folder.
if 'show, then function plots and used plt.show to pop up the plot real time'
alpha_dict : dict, optional (default is None)
dictionary specifying the opacity of the bars in the plot (alpha argument in matplotlib).
If set to None, then all opacities are set to 1
output_filename : str, optional (default is None)
If set to None, output_filename is set metricname_barplot
ext : str, optional (default is png)
            Extension format used to save the barplot.
plot_domain : list, optional (default is [0,1])
list of two integers that sets the limits in the plot (plt.xlim)
bar_separation_multiplier : float, optional (default is 1)
Parameter that functions as multiplier for the separion between bars in the plot.
if set to 1, then bars are plotted in locations 1,2,3... if set to 2, then 2,4,6, etc
"""
if color_dict is None:
color_dict = dict(zip(models, ['b']*len(models)))
if alpha_dict is None:
alpha_dict = dict(zip(models, [1]*len(models)))
if folder_main is None:
folder_main = self.folder_main
values_dict = dict(zip(models, [[] for mod in models]))
min_val = float('inf')
max_val = float('-inf')
indices = []
overview_path = '{0}/{1}'.format(folder_main, '_overview')
for i, id_ in enumerate(ids):
indices.append(i*bar_separation_multiplier)
id_path = '{0}/{1}'.format(folder_main, id_)
df = pd.read_csv('{0}/{1}'.format(id_path, metric_filename))
df = df[df['METRIC']==metric]
for j, mod in enumerate(models):
ratio = copy.copy(df[df['MODEL']==mod][period].values[0]/df[df['MODEL']==benchmark][period].values[0])
if metric in ['ERROR', 'RMSE', 'NRMSE', 'MAPE']:
ratio=(1/ratio)
values_dict[mod].append(ratio)
if ratio < min_val:
min_val = ratio
if ratio > max_val:
max_val = ratio
bar_width = 1/len(models)
indices = np.array(indices)
for i, mod in enumerate(models):
heights = values_dict[mod]
bar_positions = indices + bar_width*i
(markers, stemlines, baseline) = plt.stem(bar_positions, heights, linefmt='--')
plt.setp(markers, marker='o', markersize=7, color=color_dict[mod], alpha=alpha_dict[mod], label=mod)
plt.setp(stemlines, color=color_dict[mod], linewidth=1)
plt.setp(baseline, visible=False)
# Black line
plt.plot([0,bar_positions[-1]], [1,1],'--',color='.6', alpha=.6)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.title('{0} barplot'.format(metric))
plt.xticks(indices+bar_width*((len(models)-1)/2), ids)
plt.ylim([min_val*.95, max_val*1.05])
plt.xlim([-.3, bar_positions[-1]+.3])
plt.legend(frameon=False, ncol=len(models))
if plot_domain:
plt.xlim(plot_domain)
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
output_filename = '{0}_barplot'.format(metric)
plt.gcf().set_size_inches([6,15])
plt.savefig('{0}/{1}.{2}'.format(overview_path, output_filename, ext), format=ext)
plt.close()
def inter_season_analysis(self,ids, main_folders, periods, series_names, metric = 'RMSE', filename='metrics_condensed.csv', output_filename='season_analysis', color_dict=None, alpha_dict=None, mode='save', ext='png'):
'''
Performs seasonal analysis of data based on periods decided from the user.
The top part of the plot shows violin plots (https://seaborn.pydata.org/generated/seaborn.violinplot.html)
        and displays the model's metric scores in a boxplot/distribution scheme.
        Bottom part shows a heatmap representing the distribution of rankings along all periods, i.e. if each
        timeseries case contains 4 periods and there are 4 cases, the total number of periods is 4*4 = 16.
        Each period has a metric for each model. inter_season_analysis compares this metric within each period
        and ranks the models from first to nth place; each place adds a +1 count to the heatmap cell at the
        column representing the model and the row representing the rank.
__________
ids : dict
The dict of lists containing the identifiers for the regions.
main_folders : dict
The path to the experiments. Dictionary keys have to be consistent with the ids keys
periods : list
list containing the periods (should be available within the metrics table)
filename : str
String containing the filename to read the series from (using pandas).
start_period : str,
timeseries Pandas dataframe starting index.
end_period : str
timeseries ending index in the pandas dataframe.
n_col : int, optional (default is one)
series_names : list, optional (default is None)
Names of the timeseries to plot. If set to None, then model plots all of them.
output_filename : str, optional (default is series)
Name of the graphics file containing the plots.
color_dict : dict
Dictionary containing specific colors for the models to plot.
mode : str, optional (default is 'save')
If 'save', then function saves plot on the id_ specific folder.
if 'show, then function plots and used plt.show to pop up the plot real time'.
alpha_dict : dict, optional (default is None)
dictionary specifying the opacity of the bars in the plot (alpha argument in matplotlib).
If set to None, then all opacities are set to 1.
ext : str, optional (default is png)
            Extension format used to save the graphics file.
        n_col defines the number of columns of the plotting array; the function organizes the plots
        in n_col columns and as many rows as needed.
        '''
default_colors = ['royalblue', 'darkorange', 'forestgreen', 'firebrick']
if color_dict is None:
color_dict = dict(zip(series_names, default_colors[0:len(series_names)]))
score_periods = {}
ranks = {}
for title, ids_ in ids.items():
metrics_df = pd.read_csv(main_folders[title] + '/_overview/'+ filename)
score_periods[title] = []
ranks[title] = getRanks(metrics_df, metric, ids_, series_names, periods)
for mod in series_names:
score_periods[title].append(get_all_periods(metrics_df, mod, metric, periods))
score_periods[title] = pd.DataFrame(np.hstack(score_periods[title]), columns=series_names)
f, axarr = plt.subplots(2, len(ids.keys()))
axes = axarr.ravel()
places_dict = get_places(ranks, series_names)
places = ['4th', '3rd', '2nd', '1st']
for i, title in enumerate(ids.keys()):
places_list = places_dict[title]
sns.violinplot(data=score_periods[title], ax=axes[i], cut=0, inner='box')
'''
sns.heatmap(data=ranks[metric], ax=axes[i+len(ids.keys())], cmap='Reds', cbar=False, annot=True)
axes[i+len(ids.keys())].set_yticklabels(['1th', '2th', '3th', '4th', '5th'], rotation='horizontal')
axes[i+len(ids.keys())].set_xticklabels(series_names, rotation='horizontal')
'''
print(title, i)
for j, ord_list in enumerate(reversed(places_list)):
for (mod, height) in ord_list:
axes[i+len(ids.keys())].barh(j, height, color=color_dict[mod])
plt.sca(axes[i+len(ids.keys())])
plt.yticks(range(len(places_list)), places)
axes[i].set_title(title)
axes[i].set_xticklabels(series_names)
if i == 0:
axes[i+len(ids.keys())].set_xlabel('No. of States')
elif i == 1:
axes[i+len(ids.keys())].set_xlabel('No. of Regions')
elif i == 2:
axes[i+len(ids.keys())].set_xlabel('No. of Countries')
if i == 0:
axes[i].set_ylabel('{0}'.format(metric))
axes[+len(ids.keys())].set_ylabel('Ranking Proportions')
if mode == 'show':
plt.show()
if mode == 'save':
plt.gcf().set_size_inches([9, 5])
plt.subplots_adjust(left=.1, bottom=.12, right=.97, top=.91, wspace=.25, hspace=.20)
plt.savefig('{0}/{1}/{2}.{3}'.format(self.folder_main, OVERVIEW_FOLDER, output_filename,ext),fmt=ext)
plt.close()
return
def group_seriesbars(self, ids=None, start_period=None, end_period=None, series_names=None, folder_dir=None, metric_filename='metrics.csv', preds_filename='preds.csv', output_filename='series', color_dict=None, alpha_dict=None, mode='show', ext='png', n_col=1, width_ratios=[6,1], metric=None, metric_period=None, target_name=None):
default_colors = ['g', 'b', 'r', 'indigo']
default_linewidths = [1.5,1.4,1.6,1]
'''
        Gathers information from all regions and does a group plot using matplotlib, along with a barplot showing a metric.
        Regions are ordered based on the original ordering from the ids list, from left to right, top to bottom.
Parameters
__________
ids : list
The list containing the identifiers for the regions.
preds_filename : str
String containing the preds_filename to read the series from (using pandas).
start_period : str,
timeseries Pandas dataframe starting indices.
end_period : str
timeseries ending indices in the pandas dataframe.
n_col : int, optional (default is one)
series_names : list, optional (default is None)
Names of the timeseries to plot. If set to None, then model plots all of them.
output_preds_filename : str, optional (default is series)
Name of the graphics file containing the plots.
color_dict : dict
Dictionary containing specific colors for the models to plot.
mode : str, optional (default is 'save')
If 'save', then function saves plot on the id_ specific folder.
if 'show, then function plots and used plt.show to pop up the plot real time'.
alpha_dict : dict, optional (default is None)
dictionary specifying the opacity of the bars in the plot (alpha argument in matplotlib).
If set to None, then all opacities are set to 1.
ext : str, optional (default is png)
            Extension format used to save the graphics file.
        n_col defines the number of columns of the plotting array; the function organizes the plots
        in n_col columns and as many rows as needed.
        '''
if not ids:
ids = self.ids
if folder_dir is None:
folder_dir = self.folder_main
n_ids = len(ids)
n_rows = math.ceil(n_ids/n_col)
fig, axarr = plt.subplots(n_rows,n_col*2, gridspec_kw = {'width_ratios':width_ratios*n_col})
axes = axarr.ravel()
if color_dict is None:
color_dict = {}
for i, mod in enumerate(series_names):
color_dict[mod] = default_colors[i]
if alpha_dict is None:
alpha_dict = {}
for i, mod in enumerate(series_names):
alpha_dict[mod] = .8
for i, id_ in enumerate(ids):
df = pd.read_csv('{0}/{1}/{2}'.format(folder_dir, id_, preds_filename), index_col=[0])
metric_df = pd.read_csv('{0}/{1}/{2}'.format(folder_dir, id_, metric_filename))
series = []
indices = copy.copy(df[start_period:end_period].index.values)
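            # Shorten the index strings (presumably 'YYYY-MM-DD' dates) to 'YY-MM' for compact tick labels.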
for kk in range(np.size(indices)):
v = indices[kk][2:7]
indices[kk] = v
col_names = list(df)
if target_name:
zeros=np.zeros(np.size(df[start_period:end_period][target_name].values))
curve_max = np.amax(np.size(df[start_period:end_period][target_name].values))
#axes[i*2].plot(df[start_period:end_period][target_name].values, label=target_name, linewidth=.1)
axes[i*2].fill_between(x=list(range(len(indices))),y1=df[start_period:end_period][target_name].values, facecolor='gray', alpha=0.5, label=target_name)
for k, col in enumerate(series_names):
if col in col_names:
# create top panel
axes[i*2].plot(df[start_period:end_period][col].values, label=col, linewidth=default_linewidths[k])
else:
print('WARNING! {0} not in {1} timeseries list'.format(col, id_))
if color_dict:
for j, l in enumerate(axes[i*2].get_lines()):
l.set_color(color_dict[series_names[j]])
if alpha_dict:
for j, l in enumerate(axes[i*2].get_lines()):
l.set_alpha(alpha_dict[series_names[j]])
######
metric_df = metric_df[metric_df['METRIC']==metric][['MODEL', metric_period]]
bar_width = .5
hs = []
for k, mod in enumerate(series_names):
heights = metric_df[metric_df['MODEL'] == mod][metric_period].values
bar_positions = k
rects = axes[i*2+1].bar(bar_positions, heights, bar_width, label=mod, color=color_dict[mod], alpha=alpha_dict[mod])
hs.append(copy.copy(heights))
max_height = np.amax(hs)
min_height = np.amin(hs)
axes[i*2+1].set_ylim([min_height*.90, max_height*1.1])
axes[i*2+1].set_yticks([min_height, max_height])
axes[i*2+1].yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
#####
if i == 0:
if target_name:
n_cols = len(series_names)+1
else:
n_cols = len(series_names)
axes[i*2].legend(ncol=n_cols, frameon=False, loc='upper left', \
bbox_to_anchor=(.0,1.20))
axes[i*2].text(.10,.9, id_, weight = 'bold', horizontalalignment='left', transform=axes[i*2].transAxes)
#axes[i*2+1].yaxis.set_major_locator(mticker.MaxNLocator(2))
axes[i*2].yaxis.set_major_locator(mticker.MaxNLocator(2))
axes[i*2+1].set_xticks([])
# SPINES
axes[i*2].spines['top'].set_visible(False)
axes[i*2].spines['right'].set_visible(False)
#axes[i*2].spines['left'].set_visible(False)
yticks=axes[i*2].get_yticks()
ylim = axes[i*2].get_ylim()
axes[i*2].spines['left'].set_bounds(0,yticks[2])
axes[i*2+1].spines['left'].set_bounds(min_height,max_height)
axes[i*2].set_ylim(0,ylim[1])
axes[i*2+1].spines['top'].set_visible(False)
axes[i*2+1].spines['right'].set_visible(False)
#axes[i*2+1].spines['left'].set_visible(False)
if i == 0:
plt.ylabel('Estimates')
if i > n_col*(n_rows - 1)-1:
axes[i*2].set_xlabel('Date')
plt.sca(axes[i*2])
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(4))
xticks = axes[i*2].get_xticks()
axes[i*2].spines['bottom'].set_bounds(xticks[1], xticks[-2])
else:
plt.sca(axes[i*2])
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(4))
xticks = axes[i*2].get_xticks()
axes[i*2].spines['bottom'].set_bounds(xticks[1], xticks[-2])
#axes[i*2].set_xticklabels([])
if i < np.size(axes)/2-1:
for j in range(i+1,int(np.size(axes)/2)):
axes[j*2+1].spines['top'].set_visible(False)
axes[j*2+1].spines['right'].set_visible(False)
axes[j*2+1].spines['left'].set_visible(False)
axes[j*2+1].spines['bottom'].set_visible(False)
axes[j*2].spines['top'].set_visible(False)
axes[j*2].spines['right'].set_visible(False)
axes[j*2].spines['left'].set_visible(False)
axes[j*2].spines['bottom'].set_visible(False)
axes[j*2].set_yticks([])
axes[j*2].set_xticks([])
axes[j*2+1].set_yticks([])
axes[j*2+1].set_xticks([])
axes[j*2].set_title('')
axes[j*2+1].set_title('')
plt.subplots_adjust(left=.03, bottom=.05, right=.99, top=.95, wspace=.25, hspace=.15)
if mode == 'show':
plt.show()
plt.close()
if mode == 'save':
fig.set_size_inches([7*n_col,2.5*n_rows])
            plt.savefig('{0}/{1}/{2}.{3}'.format(folder_dir, OVERVIEW_FOLDER, output_filename, ext), format=ext)
plt.close()
def rank_ids_by_metric(self, ids, models, period, metric='RMSE', reverse=False, metric_filename='metrics.csv'):
        '''
        rank_ids_by_metric compares the performance of the two models specified in the models list
        on the selected metric. For each ID it computes the ratio models[0]/models[1] (inverted for
        error metrics such as RMSE, NRMSE, ERROR and MAPE, so that a larger ratio always means
        models[0] performed better) and returns the IDs ordered by that ratio.
        Parameters
        __________
        ids : list
            List of strings containing the region identifiers to rank.
        models : list
            A list of two models to compare.
        period : str
            Specifies the period of the metric.
        metric : str, optional (default is RMSE)
            The metric to use as comparison.
        reverse : Boolean, optional (default is False)
            If False, orders in increasing order. If set to True, orders in decreasing order.
        metric_filename : str, optional (default is 'metrics.csv')
        Returns
        _______
        ids = An ordered list of IDs based on the results of the comparison
        '''
metric_values = []
for id_ in ids:
metric_df = pd.read_csv('{0}/{1}/{2}'.format(self.folder_main, id_, metric_filename))
mod0_val = metric_df[ (metric_df['METRIC'] == metric) & (metric_df['MODEL'] == models[0])][period].values
mod1_val = metric_df[(metric_df['METRIC'] == metric) & (metric_df['MODEL'] == models[1])][period].values
ratio = mod0_val/mod1_val
if metric in ['RMSE', 'NRMSE', 'ERROR', 'MAPE']:
ratio = 1/ratio
metric_values.append(copy.copy(ratio))
ord_values = []
ord_ids = []
for id_, val in sorted(zip(ids, metric_values), key = lambda x : x[1], reverse=reverse):
ord_values.append(val)
ord_ids.append(id_)
return ord_ids
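    # Usage sketch (hypothetical ids/models/period names; adjust to your own metrics files):
    # ranked = self.rank_ids_by_metric(ids=['region_A', 'region_B'],
    #                                  models=['MODEL_0', 'MODEL_1'],
    #                                  period='WHOLE_PERIOD', metric='RMSE')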
def group_weekly_winner(self, ids=None, cmap='BuPu', models=None, start_period=None, end_period=None, output_filename='weekly_winners', folder_main=None, filename='preds.csv', mode='show', ext='png'):
"""
        For each ID, chooses the weekly winner out of the models list in a prediction file and plots
        all of them together in a heatmap.
Parameters
__________
ids : list
The list containing the identifiers for the regions.
filename : str
String containing the filename to read the series from (using pandas).
start_period : str,
timeseries Pandas dataframe starting index.
end_period : str
timeseries ending index in the pandas dataframe.
output_filename : str, optional (default is series)
Name of the graphics file containing the plots.
        mode : str, optional (default is 'show')
            If 'save', the function saves the plot in the overview folder.
            If 'show', the function uses plt.show to display the plot interactively.
        ext : str, optional (default is png)
            Extension format used to save the graphics file.
cmap : str, optional (default is 'BuPu')
colormap style to display in the plot. List of colormaps is provided by Matplotlib.
        folder_main : str, optional (default is None)
Path to folder with data. If None, uses default class attribute.
"""
if folder_main is None:
folder_main = self.folder_main
#Getting winners in each id
winners_dict = {}
ind = list(range(len(models)))
map_dict =dict(zip(models, ind))
for i, id_ in enumerate(ids):
df = pd.read_csv('{0}/{1}/{2}'.format(folder_main, id_, filename), index_col=[0])
if i == 0:
if start_period is None:
start_period = df.index[0]
if end_period is None:
end_period = df.index[-1]
df = df[start_period:end_period]
winners = get_winners_from_df(df, models=models)
winners=winners.replace({"winners" : map_dict })
winners_dict[id_] = winners['winners'].values
index = df[start_period:end_period].index.values
winners_df = pd.DataFrame(winners_dict, index=index)
        ax = sns.heatmap(data=winners_df.transpose(), cmap=cmap, linewidths=.6, yticklabels=True, cbar_kws={"ticks": ind})
ax.collections[0].colorbar.ax.set_yticklabels(models)
#plt.matshow(winners_df.transpose(), origin='lower', aspect='auto', cmap='BuPu')
#cb = plt.colorbar(orientation='vertical', ticks=ind, shrink=.5)
#cb.ax.set_yticklabels(models)
#plt.xticks(range(len(index)),index, rotation=45)
#plt.gca().xaxis.set_major_formatter(IndexFormatter(index))
#plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(6))
if mode == 'show':
plt.show()
plt.close()
if mode == 'save':
plt.gcf().set_size_inches([10,6])
plt.subplots_adjust(left=.10, bottom = .15, right = 1, top=.95, wspace=.20, hspace=.20)
            plt.savefig('{0}/{1}/{2}.{3}'.format(self.folder_main, self.overview_folder, output_filename, ext), format=ext)
plt.close()
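    # Usage sketch (hypothetical ids/models; each prediction file is expected to contain
    # one column per model listed in `models`):
    # self.group_weekly_winner(ids=['region_A', 'region_B'],
    #                          models=['MODEL_0', 'MODEL_1'], mode='save')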
def plot_series(self,folder_dir=None, id_=None, filename=None, output_filename='series', series_names=None, color_dict=None, alpha_dict=None, start_period=None, end_period=None, mode='save', ext='png', add_weekly_winner=False, winner_models=None):
if folder_dir is None:
folder_dir = self.folder_main
if filename is None:
filename = ID_PREDS
df = pd.read_csv('{0}/{1}/{2}'.format(self.folder_main, id_, filename), index_col=[0])
if start_period is None:
start_period = df.index[0]
if end_period is None:
end_period = df.index[-2]
series = []
index = df.index.values
if add_weekly_winner:
n_rows = 2
gridspec_kw = {'height_ratios':[6,1]}
else:
n_rows = 1
gridspec_kw = None
        fig, axes = plt.subplots(n_rows, 1, gridspec_kw=gridspec_kw)
        if n_rows == 1:
            axes = [axes]  # keep axes indexable when only the top panel is drawn
col_names = list(df)
if series_names is None:
series_names = col_names
for col in series_names:
# create top panel
axes[0].plot(df[start_period:end_period][col].values, label=col)
#a = ax.plot_date(x=dates, y=ILI) # fmt="-",color='.20', linewidth=3.2, label='ILI', alpha = 1)
if color_dict:
for i, l in enumerate(axes[0].get_lines()):
l.set_color(color_dict[series_names[i]])
if alpha_dict:
for i, l in enumerate(axes[0].get_lines()):
l.set_alpha(alpha_dict[series_names[i]])
if add_weekly_winner:
winners = get_winners_from_df(df, models=winner_models)
ind = list(range(len(winner_models)))
map_dict =dict(zip(winner_models, ind))
winners=winners.replace({"winners" : map_dict })
im = axes[1].matshow(winners['winners'].values.reshape([1,-1]), origin='lower', aspect='auto', cmap='BuPu')
cb = plt.colorbar(im, ax=axes[1], orientation='horizontal', ticks=ind)
cb.ax.set_xticklabels(winner_models)
axes[0].legend(ncol=len(series_names), frameon=False)
axes[0].set_title('{0}'.format(id_))
axes[0].set_ylabel('Estimates')
axes[0].set_xlabel('Index')
axes[0].spines['top'].set_visible(False)
axes[0].spines['right'].set_visible(False)
plt.xticks(range(len(index)),index, rotation=45)
axes[0].xaxis.set_major_formatter(IndexFormatter(index))
axes[0].xaxis.set_major_locator(mticker.MaxNLocator(6))
axes[1].set_xticks([])
axes[1].set_yticks([])
axes[0].autoscale(enable=True, axis='x', tight=True)
#plt.locator_params(nbins=8)
if mode == 'show':
plt.show()
plt.close()
if mode == 'save':
fig.set_size_inches([10,5])
            plt.savefig('{0}/{1}/{2}.{3}'.format(self.folder_main, id_, output_filename, ext), format=ext)
plt.close()
def season_analysis(self, ids, periods, series_names, folder_main=None, metrics = ['PEARSON', 'NRMSE'], filename='metrics_condensed.csv', output_filename='season_analysis', color_dict=None, alpha_dict=None, mode='save', ext='png'):
        '''
        Gathers information from all regions and produces a group plot using matplotlib.
        Regions are ordered based on the original ordering of the ids list, left to right, top to bottom.
        Parameters
        __________
        ids : list
            The list containing the identifiers for the regions.
        periods : list
            List containing the periods (should be available within the metrics table).
        series_names : list
            Names of the models to include in the plot.
        folder_main : str, optional (default is None)
            Path to folder with data. If None, uses default class attribute.
        metrics : list, optional (default is ['PEARSON', 'NRMSE'])
            Metrics to display in the plot.
        filename : str, optional (default is 'metrics_condensed.csv')
            String containing the filename to read the metrics from (using pandas).
        output_filename : str, optional (default is 'season_analysis')
            Name of the graphics file containing the plots.
        color_dict : dict
            Dictionary containing specific colors for the models to plot.
        alpha_dict : dict, optional (default is None)
            Dictionary specifying the opacity of the bars in the plot (alpha argument in matplotlib).
            If set to None, then all opacities are set to 1.
        mode : str, optional (default is 'save')
            If 'save', the function saves the plot in the overview folder.
            If 'show', the function uses plt.show to display the plot interactively.
        ext : str, optional (default is png)
            Extension format used to save the graphics file.
        '''
if not folder_main:
folder_main = self.folder_main
        metrics_df = pd.read_csv(folder_main + '/_overview/' + filename)
import pandas as pd
from utilities import MODES
class Evaluator:
def __init__(self, data):
self.data = data # dictionary
def get_result(self,form):
mode = form['Mode']
d1 = form['Date1']
d2 = form['Date2']
s1 = form['Loc1'].split(':')[0]
s2 = form['Loc2'].split(':')[0]
if mode == MODES[1]:
return 'The AQI is ' + self.__get_aqi(s1,d1,d2)
elif mode == MODES[2]:
return self.__get_similarity(d1,d2)
elif mode == MODES[3]:
return self.__get_charData(s1,d1,d2)
return self.__get_comparison(s1,s2,d1,d2)
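    # Example of the form dictionary handled by get_result (values are hypothetical;
    # MODES comes from utilities):
    # form = {'Mode': MODES[1], 'Date1': '2020-01-01', 'Date2': '2020-01-07',
    #         'Loc1': 'Sensor0:Name', 'Loc2': 'Sensor1:Name'}
    # Evaluator(data).get_result(form)  # -> 'The AQI is ...'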
def __get_charData(self, sensor, dateI, dateF):
        # check whether one or two dates were provided (single date -> use the preceding day)
if dateF == 'empty':
start_date = pd.to_datetime(dateI) - pd.Timedelta("1 days")
end_date = pd.to_datetime(dateI)
else:
start_date = pd.to_datetime(dateI)
end_date = pd.to_datetime(dateF)
data = self.data[sensor]
mask = (data['Timestamp'] >= start_date) & (data['Timestamp'] <= end_date) & (data['SensorID'] == sensor)
data = data.loc[mask][['AttributeID', 'Value']] # Dataframe of 2 columns Attribute ID, value
        # compute the mean value for each gas
data = data.groupby('AttributeID', axis=0).mean().reset_index()
        data = data.sort_values(by='AttributeID')
data = data.assign(
Description=['µg/m3 nitrogen dioxide content', 'µg/m3 ozone content', 'µg/m3 fine particles content',
'µg/m3 sulfur dioxide content'])
return data # struct: AttributeID,Description,Value,unit,
def __get_similarity(self, dateI, dateF):
sens_list = 'Sensor0,Sensor1,Sensor2,Sensor3,Sensor4,Sensor5,Sensor6,Sensor7,Sensor8,Sensor9'.split(',')
data = {'Sensor':[],'String':[]}
out = {}
out_str = ''
for s in sens_list:
data['Sensor'].append(s)
data['String'].append( str((self.__get_aqi(s,dateI,dateF,all_aqi_val=True)) ))
        data = pd.DataFrame(data, columns=['Sensor', 'String'])
# The test is referenced from https://hdbscan.readthedocs.io/en/latest/performance_and_scalability.html
import time
import hdbscan
import warnings
import sklearn.cluster
import scipy.cluster
import sklearn.datasets
import numpy as np
import pandas as pd
import seaborn as sns
from numpy.linalg import norm
from classix.aggregation_test import aggregate
from classix import CLASSIX
from quickshift.QuickshiftPP import *
from sklearn import metrics
import matplotlib.pyplot as plt
from threadpoolctl import threadpool_limits
np.random.seed(0)
def benchmark_algorithm_tdim(dataset_dimensions, cluster_function, function_args, function_kwds,
dataset_size=10000, dataset_n_clusters=10, max_time=45, sample_size=10, algorithm=None):
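    # Returns three long-format DataFrames with columns ['x', 'y'] (one row per
    # dimension/sample pair): wall-clock time in seconds, adjusted Rand index,
    # and adjusted mutual information.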
# Initialize the result with NaNs so that any unfilled entries
# will be considered NULL when we convert to a pandas dataframe at the end
result_time = np.nan * np.ones((len(dataset_dimensions), sample_size))
result_ar = np.nan * np.ones((len(dataset_dimensions), sample_size))
result_ami = np.nan * np.ones((len(dataset_dimensions), sample_size))
for index, dimension in enumerate(dataset_dimensions):
for s in range(sample_size):
# Use sklearns make_blobs to generate a random dataset with specified size
# dimension and number of clusters
# set cluster_std=0.1 to ensure clustering rely less on tuning parameters.
data, labels = sklearn.datasets.make_blobs(n_samples=dataset_size,
n_features=dimension,
centers=dataset_n_clusters,
cluster_std=1)
# Start the clustering with a timer
start_time = time.time()
cluster_function.fit(data, *function_args, **function_kwds)
time_taken = time.time() - start_time
if algorithm == "Quickshift++":
preds = cluster_function.memberships
else:
preds = cluster_function.labels_
# print("labels num:", len(np.unique(preds)))
ar = metrics.adjusted_rand_score(labels, preds)
ami = metrics.adjusted_mutual_info_score(labels, preds)
# If we are taking more than max_time then abort -- we don't
# want to spend excessive time on slow algorithms
            if time_taken > max_time:  # Luckily, this does not happen in our experiments.
result_time[index, s] = time_taken
result_ar[index, s] = ar
result_ami[index, s] = ami
return pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_time.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_ar.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_ami.flatten()]).T, columns=['x','y'])
else:
result_time[index, s] = time_taken
result_ar[index, s] = ar
result_ami[index, s] = ami
# Return the result as a dataframe for easier handling with seaborn afterwards
return pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_time.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_ar.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_ami.flatten()]).T, columns=['x','y'])
def benchmark_algorithm_tsize(dataset_sizes, cluster_function, function_args, function_kwds,
dataset_dimension=10, dataset_n_clusters=10, max_time=45, sample_size=10, algorithm=None):
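    # Returns three long-format DataFrames with columns ['x', 'y'] (one row per
    # size/sample pair): wall-clock time in seconds, adjusted Rand index, and
    # adjusted mutual information.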
# Initialize the result with NaNs so that any unfilled entries
# will be considered NULL when we convert to a pandas dataframe at the end
result_time = np.nan * np.ones((len(dataset_sizes), sample_size))
result_ar = np.nan * np.ones((len(dataset_sizes), sample_size))
result_ami = np.nan * np.ones((len(dataset_sizes), sample_size))
for index, size in enumerate(dataset_sizes):
for s in range(sample_size):
# Use sklearns make_blobs to generate a random dataset with specified size
# dimension and number of clusters
# set cluster_std=0.1 to ensure clustering rely less on tuning parameters.
data, labels = sklearn.datasets.make_blobs(n_samples=size,
n_features=dataset_dimension,
centers=dataset_n_clusters,
cluster_std=1)
# Start the clustering with a timer
start_time = time.time()
cluster_function.fit(data, *function_args, **function_kwds)
time_taken = time.time() - start_time
if algorithm == "Quickshift++":
preds = cluster_function.memberships
else:
preds = cluster_function.labels_
# print("labels num:", len(np.unique(preds)))
ar = metrics.adjusted_rand_score(labels, preds)
ami = metrics.adjusted_mutual_info_score(labels, preds)
# If we are taking more than max_time then abort -- we don't
# want to spend excessive time on slow algorithms
            if time_taken > max_time:  # Luckily, this does not happen in our experiments.
result_time[index, s] = time_taken
result_ar[index, s] = ar
result_ami[index, s] = ami
return pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_time.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_ar.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_ami.flatten()]).T, columns=['x','y'])
else:
result_time[index, s] = time_taken
result_ar[index, s] = ar
result_ami[index, s] = ami
# Return the result as a dataframe for easier handling with seaborn afterwards
return pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_time.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_ar.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_ami.flatten()]).T, columns=['x','y'])
def rn_gaussian_dim():
warnings.filterwarnings("ignore")
sns.set_context('poster')
sns.set_palette('Paired', 10)
sns.set_color_codes()
dataset_dimensions = np.hstack([np.arange(1, 11) * 10])
np.random.seed(0)
with threadpool_limits(limits=1, user_api='blas'):
k_means = sklearn.cluster.KMeans(n_clusters=10, init='k-means++')
k_means_time, k_means_ar, k_means_ami = benchmark_algorithm_tdim(dataset_dimensions, k_means, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=10, min_samples=1, n_jobs=1, algorithm='ball_tree')
dbscan_btree_time, dbscan_btree_ar, dbscan_btree_ami = benchmark_algorithm_tdim(dataset_dimensions, dbscan, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=10, min_samples=1, n_jobs=1, algorithm='kd_tree')
dbscan_kdtree_time, dbscan_kdtree_ar, dbscan_kdtree_ami = benchmark_algorithm_tdim(dataset_dimensions, dbscan, (), {})
hdbscan_ = hdbscan.HDBSCAN(algorithm='best', core_dist_n_jobs=1)
hdbscan_time, hdbscan_ar, hdbscan_ami = benchmark_algorithm_tdim(dataset_dimensions, hdbscan_, (), {})
classix = CLASSIX(sorting='pca', radius=0.3, minPts=5, group_merging='distance', verbose=0)
classix_time, classix_ar, classix_ami = benchmark_algorithm_tdim(dataset_dimensions, classix, (), {})
quicks = QuickshiftPP(k=20, beta=0.7)
quicks_time, quicks_ar, quicks_ami = benchmark_algorithm_tdim(dataset_dimensions, quicks, (), {}, algorithm='Quickshift++')
k_means_time.to_csv("results/exp1/gd_kmeans_time.csv",index=False)
dbscan_kdtree_time.to_csv("results/exp1/gd_dbscan_kdtree_time.csv",index=False)
dbscan_btree_time.to_csv("results/exp1/gd_dbscan_btree_time.csv",index=False)
hdbscan_time.to_csv("results/exp1/gd_hdbscan_time.csv",index=False)
classix_time.to_csv("results/exp1/gd_classix_time.csv",index=False)
quicks_time.to_csv("results/exp1/gd_quicks_time.csv",index=False)
k_means_ar.to_csv("results/exp1/gd_kmeans_ar.csv",index=False)
dbscan_kdtree_ar.to_csv("results/exp1/gd_dbscan_kdtree_ar.csv",index=False)
dbscan_btree_ar.to_csv("results/exp1/gd_dbscan_btree_ar.csv",index=False)
hdbscan_ar.to_csv("results/exp1/gd_hdbscan_ar.csv",index=False)
classix_ar.to_csv("results/exp1/gd_classix_ar.csv",index=False)
quicks_ar.to_csv("results/exp1/gd_quicks_ar.csv",index=False)
def rn_gaussian_size():
warnings.filterwarnings("ignore")
sns.set_context('poster')
sns.set_palette('Paired', 10)
sns.set_color_codes()
np.random.seed(0)
dataset_sizes = np.hstack([np.arange(1, 11) * 5000])
np.random.seed(0)
with threadpool_limits(limits=1, user_api='blas'):
k_means = sklearn.cluster.KMeans(n_clusters=10, init='k-means++')
k_means_time, k_means_ar, k_means_ami = benchmark_algorithm_tsize(dataset_sizes, k_means, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=3, min_samples=1, n_jobs=1, algorithm='ball_tree')
dbscan_btree_time, dbscan_btree_ar, dbscan_btree_ami = benchmark_algorithm_tsize(dataset_sizes, dbscan, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=3, min_samples=1, n_jobs=1, algorithm='kd_tree')
dbscan_kdtree_time, dbscan_kdtree_ar, dbscan_kdtree_ami = benchmark_algorithm_tsize(dataset_sizes, dbscan, (), {})
hdbscan_ = hdbscan.HDBSCAN(algorithm='best', core_dist_n_jobs=1)
hdbscan_time, hdbscan_ar, hdbscan_ami = benchmark_algorithm_tsize(dataset_sizes, hdbscan_, (), {})
classix = CLASSIX(sorting='pca', radius=0.3, minPts=5, group_merging='distance', verbose=0)
classix_time, classix_ar, classix_ami = benchmark_algorithm_tsize(dataset_sizes, classix, (), {})
quicks = QuickshiftPP(k=20, beta=0.7)
quicks_time, quicks_ar, quicks_ami = benchmark_algorithm_tsize(dataset_sizes, quicks, (), {}, algorithm='Quickshift++')
k_means_time.to_csv("results/exp1/gs_kmeans_time.csv",index=False)
dbscan_kdtree_time.to_csv("results/exp1/gs_dbscan_kdtree_time.csv",index=False)
dbscan_btree_time.to_csv("results/exp1/gs_dbscan_btree_time.csv",index=False)
hdbscan_time.to_csv("results/exp1/gs_hdbscan_time.csv",index=False)
classix_time.to_csv("results/exp1/gs_classix_time.csv",index=False)
quicks_time.to_csv("results/exp1/gs_quicks_time.csv",index=False)
k_means_ar.to_csv("results/exp1/gs_kmeans_ar.csv",index=False)
dbscan_kdtree_ar.to_csv("results/exp1/gs_dbscan_kdtree_ar.csv",index=False)
dbscan_btree_ar.to_csv("results/exp1/gs_dbscan_btree_ar.csv",index=False)
hdbscan_ar.to_csv("results/exp1/gs_hdbscan_ar.csv",index=False)
classix_ar.to_csv("results/exp1/gs_classix_ar.csv",index=False)
quicks_ar.to_csv("results/exp1/gs_quicks_ar.csv",index=False)
def run_gassian_plot():
# -------------------------------dim
k_means_time = pd.read_csv("results/exp1/gd_kmeans_time.csv")
dbscan_kdtree_time = pd.read_csv("results/exp1/gd_dbscan_kdtree_time.csv")
dbscan_btree_time = pd.read_csv("results/exp1/gd_dbscan_btree_time.csv")
hdbscan_time = pd.read_csv("results/exp1/gd_hdbscan_time.csv")
classix_time = pd.read_csv("results/exp1/gd_classix_time.csv")
quicks_time = pd.read_csv("results/exp1/gd_quicks_time.csv")
k_means_ar = pd.read_csv("results/exp1/gd_kmeans_ar.csv")
dbscan_kdtree_ar = pd.read_csv("results/exp1/gd_dbscan_kdtree_ar.csv")
dbscan_btree_ar = pd.read_csv("results/exp1/gd_dbscan_btree_ar.csv")
hdbscan_ar = | pd.read_csv("results/exp1/gd_hdbscan_ar.csv") | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# ## Aim: Collect and combine relevant mouse expression data from the Stemformatics data portal
#
# Link: https://www.stemformatics.org/workbench/download_multiple_datasets.
#
# Stemformatics is an established gene expression data portal containing over 420 public gene expression datasets derived from microarray, RNA sequencing and single cell profiling technologies. It includes curated ‘collections’ of data relevant to cell reprogramming, as well as hematopoiesis and leukaemia.
#
# ### Samples
#
# Set the serch field to 'species' and use 'Mus musculus' as search key.
#
# ### Processing steps
#
# - Sample selection
# - Combine selected datasets based on platforms
# - Combine all selected datasets
# In[1]:
import pandas as pd
import numpy as np
import atlas
import handler
import requests
# In[2]:
# inspect the samples metadata
samples = pd.read_csv('/Users/monica/Downloads/export_metadata_samples_v7.2.4.tsv', sep='\t', index_col=2)
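# In[ ]:
# Minimal sketch of the sample-selection step (the 'species' column name is an
# assumption based on the search field described above; adjust it to the actual
# metadata schema before running):
# mouse_samples = samples[samples['species'] == 'Mus musculus']
# mouse_samples.head()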
########################
# Author: <NAME> #
########################
import pandas as pd
import argparse
def get_arguments():
'''
argparse object initialization and reading input and output file paths.
input files: new_comments_preprocessed (-i1), old_comments_preprocessed.csv (-i2),
comments_to_flag.txt (-i3)
output file: final_merged_comments.csv (-o)
'''
parser = argparse.ArgumentParser(description='csv file identifying duplicates between new and old comments')
parser.add_argument('--new_comments_csv', '-i1', type=str, dest='new_comments_preprocessed', action='store',
#default='../../Sample_Resources/Sample_Comments_CSVs/new_comments_preprocessed.csv',
default=r'/Users/vkolhatk/Data/GnM_CSVs/intermediate_csvs/new_comments_preprocessed.csv',
help="the input csv file for new_comments generated from preprocessing script")
parser.add_argument('--old_comments_csv', '-i2', type=str, dest='old_comments_preprocessed', action='store',
#default='../../Sample_Resources/Sample_Comments_CSVs/old_comments_preprocessed.csv',
default=r'/Users/vkolhatk/Data/GnM_CSVs/intermediate_csvs/old_comments_preprocessed_all_cols.csv',
help="the input csv file for old_comments generated from preprocessing script")
parser.add_argument('--comments_to_flag', '-i3', type=str, dest='comments_to_flag', action='store',
#default='../../Sample_Resources/Sample_Comments_CSVs/comments_to_flag.txt',
default=r'/Users/vkolhatk/Data/GnM_CSVs/intermediate_csvs/comments_to_flag.txt',
help="the input txt file generated from duplicate_filter.py containing comment_counters to flag")
parser.add_argument('--final_csv', '-o', type=str, dest='final_csv', action='store',
#default='../../Sample_Resources/Sample_Comments_CSVs/final_merged_comments.csv',
default=r'/Users/vkolhatk/Data/GnM_CSVs/intermediate_csvs/final_merged_comments.csv',
help="the output file containing both source1 and source2 comments")
args = parser.parse_args()
return args
def merge_sources(args):
'''
    Given an argparse object, this function builds a dictionary whose keys are the comment_counters present
    in comments_to_flag.txt and whose values are {'exact_match':[<comment_counters>],'similar':[<comment_counters>]},
    assigned based on the weighted score and token sort score
Concatenates the dataframes of csvs from both the sources (new and old comments)
Adds a column 'flag' and populates it with the values of main_dict keys (where key is comment_counter)
:param args: argparse object containing the input and output file paths as attributes
'''
flag_list = []
with open(args.comments_to_flag,'r') as flag:
for f in flag.readlines():
flag_list.append(f.strip().split(','))
main_dict = {k[0]:{v:[] for v in ['exact_match','similar']} for k in flag_list }
for i in flag_list:
weighted_ratio = int(i[-2])
token_sort_ratio = int(i[-1])
if main_dict.get(i[0]):
if all(check_score in range(85, 97) for check_score in [weighted_ratio,token_sort_ratio]):
main_dict[i[0]]['similar'].append(i[1])
if all(check_score in range(97, 101) for check_score in [weighted_ratio,token_sort_ratio]):
main_dict[i[0]]['exact_match'].append(i[1])
new_comments = pd.read_csv(args.new_comments_preprocessed)
old_comments = pd.read_csv(args.old_comments_preprocessed)
old_comments.rename(columns = {'ID':'comment_id'}, inplace = True)
# old_comments.rename(columns = {'timestamp':'post_time'}, inplace = True)
merging_final = pd.concat([old_comments, new_comments])
'''
following line of commented code will delete the comments from the final csv
based on the comment_counters in comments_to_delete.txt
'''
# merging_final = merging_final.query('comment_counter not in @list_of_comments_to_delete')
duplicate_flag_df = pd.DataFrame(list(main_dict.items()), columns=['comment_counter', 'duplicate_flag'])
# Merging final csv with flagged dataframe
    final = pd.merge(merging_final, duplicate_flag_df, on='comment_counter', how='outer')
print("Loading...")
import sys
import logging
logging.getLogger().setLevel(logging.DEBUG)
import os
import tkinter as tk
from tkinter import filedialog
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyabf
from numpy import genfromtxt
print("Loaded external libraries")
from pyAPisolation.abf_featureextractor import folder_feature_extract, save_data_frames
from pyAPisolation.patch_utils import load_protocols
print("Load finished")
def main():
logging.basicConfig(level=logging.DEBUG)
root = tk.Tk()
root.withdraw()
files = filedialog.askdirectory(
title='Select Dir'
)
root_fold = files
##Declare our options at default
print('loading protocols...')
protocol_n = load_protocols(files)
print("protocols")
for i, x in enumerate(protocol_n):
print(str(i) + '. '+ str(x))
proto = input("enter Protocol to analyze (enter -1 to not filter to any protocol): ")
try:
proto = int(proto)
except:
proto = -1
filter = input("Allen's Gaussian Filter (recommended to be set to 0): ")
try:
filter = int(filter)
except:
filter = 0
savfilter = input("Savitzky-Golay Filter (recommended to be set in 0): ")
try:
savfilter = int(savfilter)
except:
savfilter = 0
tag = input("tag to apply output to files: ")
try:
tag = str(tag)
except:
tag = ""
plot_sweeps = input("Enter the sweep Numbers to plot [seperated by a comma] (0 to plot all sweeps, -1 to plot no sweeps): ")
try:
plot_sweeps = np.fromstring(plot_sweeps, dtype=int, sep=',')
if plot_sweeps.shape[0] < 1:
plot_sweeps = np.array([-1])
except:
plot_sweeps = -1
if proto == -1:
protocol_name = ''
else:
protocol_name = protocol_n[proto]
dv_cut = input("Enter the threshold cut off for the derivative (defaults to 7mv/s): ")
try:
dv_cut = int(dv_cut)
except:
dv_cut = 7
tp_cut = input("Enter the threshold cut off for max threshold-to-peak time (defaults to 10ms)[in ms]: ")
try:
tp_cut = (np.float64(tp_cut)/1000)
except:
tp_cut = 0.010
min_cut = input("Enter the minimum cut off for threshold-to-peak voltage (defaults to 2mV)[in mV]: ")
try:
min_cut = np.float64(min_cut)
except:
min_cut = 2
min_peak = input("Enter the mininum cut off for peak voltage (defaults to -10)[in mV]: ")
try:
min_peak = np.float64(min_peak)
except:
min_peak = -10
percent = input("Enter the percent of max DvDt used to calculate refined threshold (does not effect spike detection)(Allen defaults 5%)[in %]: ")
try:
        percent = float(percent) / 100
except:
percent = 5/100
stim_find = input("Search for spikes based on applied Stimulus? (y/n): ")
try:
if stim_find == 'y' or stim_find =='Y':
bstim_find = True
else:
bstim_find = False
except:
bstim_find = False
if bstim_find:
upperlim = 0
lowerlim = 0
else:
lowerlim = input("Enter the time to start looking for spikes [in s] (enter 0 to start search at beginning): ")
upperlim = input("Enter the time to stop looking for spikes [in s] (enter 0 to search the full sweep): ")
try:
lowerlim = float(lowerlim)
upperlim = float(upperlim)
except:
upperlim = 0
lowerlim = 0
print(f"Running analysis with, dVdt thresh: {dv_cut}mV/s, thresh to peak max: {tp_cut}s, thresh to peak min height: {min_cut}mV, and min peak voltage: {min_peak}mV")
param_dict = {'filter': filter, 'dv_cutoff':dv_cut, 'start': lowerlim, 'end': upperlim, 'max_interval': tp_cut, 'min_height': min_cut, 'min_peak': min_peak, 'thresh_frac': percent,
'stim_find': bstim_find}
df = folder_feature_extract(files, param_dict, plot_sweeps, protocol_name)
print(f"Ran analysis with, dVdt thresh: {dv_cut}mV/s, thresh to peak max: {tp_cut}s, thresh to peak min height: {min_cut}mV, and min peak voltage: {min_peak}mV")
save_data_frames(df[0], df[1], df[2], root_fold, tag)
settings_col = ['dvdt Threshold', 'threshold to peak max time','threshold to peak min height', 'min peak voltage', 'allen filter', 'sav filter', 'protocol_name']
setdata = [dv_cut, tp_cut, min_cut, min_peak, filter, savfilter, protocol_name]
    settings_df = pd.DataFrame(data=[setdata], columns=settings_col, index=[0])
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
##find parent directory and import model
#parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
#sys.path.append(parentddir)
from ..agdrift_exe import Agdrift
test = {}
class TestAgdrift(unittest.TestCase):
"""
    Agdrift unit tests.
"""
def setUp(self):
"""
setup the test as needed
e.g. pandas to open agdrift qaqc csv
Read qaqc csv and create pandas DataFrames for inputs and expected outputs
:return:
"""
pass
def tearDown(self):
"""
teardown called after each test
e.g. maybe write test results to some text file
:return:
"""
pass
def create_agdrift_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty agdrift object
agdrift_empty = Agdrift(df_empty, df_empty)
return agdrift_empty
def test_validate_sim_scenarios(self):
"""
:description determines if user defined scenarios are valid for processing
:param application_method: type of Tier I application method employed
        :param aquatic_body_type: type of endpoint of concern (e.g., pond, wetland); implies whether
        : endpoint of concern parameters (e.g., pond width) are set (i.e., by user or EPA standard)
:param drop_size_*: qualitative description of spray droplet size for aerial & ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of orchard being sprayed
:NOTE we perform an additional validation check related to distances later in the code just before integration
:return
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.out_sim_scenario_chk = pd.Series([], dtype='object')
expected_result = pd.Series([
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid Tier I Aquatic Aerial Scenario',
'Invalid Tier I Aquatic Ground Scenario',
'Invalid Tier I Aquatic Airblast Scenario',
'Invalid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid scenario ecosystem_type',
'Invalid Tier I Aquatic Assessment application method',
'Invalid Tier I Terrestrial Assessment application method'],dtype='object')
try:
#set test data
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.application_method = pd.Series(
['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'Tier II Aerial',
'Tier III Aerial'], dtype='object')
agdrift_empty.ecosystem_type = pd.Series(
['aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'Field Assessment',
'aquatic_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(
['epa_defined_pond',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'Defined Pond',
'user_defined_pond',
'epa_defined_pond',
'NaN',
'NaN',
'NaN',
'epa_defined_pond',
'user_defined_wetland',
'user_defined_pond'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(
['NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'user_defined_terrestrial',
'user_defined_terrestrial',
'NaN',
'NaN',
'user_defined_terrestrial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(
['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'fine_to_medium',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'medium_to_coarse',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine Indeed',
'NaN',
'very_fine_to_medium',
'medium_to_coarse',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'NaN',
'fine_to_medium-coarse',
'very_fine',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine'], dtype='object')
agdrift_empty.boom_height = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'low',
'high',
'low',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'NaN',
'NaN',
'NaN',
'NaN'],dtype='object')
agdrift_empty.airblast_type = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'orchard',
'vineyard',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'vineyard',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.validate_sim_scenarios()
result = agdrift_empty.out_sim_scenario_chk
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_set_sim_scenario_id(self):
"""
:description provides scenario ids per simulation that match scenario names (i.e., column_names) from SQL database
:param out_sim_scenario_id: scenario name as assigned to individual simulations
:param num_simulations: number of simulations to assign scenario names
:param out_sim_scenario_chk: from previous method where scenarios were checked for validity
:param application_method: application method of scenario
:param drop_size_*: qualitative description of spray droplet size for aerial and ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of airblast application (e.g., vineyard, orchard)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series(['aerial_vf2f',
'aerial_f2m',
'aerial_m2c',
'aerial_c2vc',
'ground_low_vf',
'ground_low_fmc',
'ground_high_vf',
'ground_high_fmc',
'airblast_normal',
'airblast_dense',
'airblast_sparse',
'airblast_vineyard',
'airblast_orchard',
'Invalid'], dtype='object')
try:
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.out_sim_scenario_chk = pd.Series(['Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Invalid Scenario'], dtype='object')
agdrift_empty.application_method = pd.Series(['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.boom_height = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'low',
'low',
'high',
'high',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.airblast_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'vineyard',
'orchard',
'NaN'], dtype='object')
agdrift_empty.set_sim_scenario_id()
result = agdrift_empty.out_sim_scenario_id
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_assign_column_names(self):
"""
        :description assigns column names (except distance column) from sql database to internal scenario names
        :param column_name: short name for pesticide application scenario for which distance vs deposition data is provided
        :param scenario_name: internal variable for holding scenario names
        :param scenario_number: index for scenario_name (this method assumes the distance values could occur in any column)
:param distance_name: internal name for the column holding distance data
:NOTE to test both outputs of this method I simply appended them together
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.scenario_name = pd.Series([], dtype='object')
expected_result = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard'], dtype='object')
try:
agdrift_empty.column_names = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard', 'distance_ft'])
#call method to assign scenario names
agdrift_empty.assign_column_names()
result = agdrift_empty.scenario_name
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_distances(self):
"""
:description retrieves distance values for deposition scenario datasets
: all scenarios use same distances
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result = pd.Series([], dtype='float')
try:
expected_result = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632]
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.num_db_values = len(expected_result)
result = agdrift_empty.get_distances(agdrift_empty.num_db_values)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_scenario_deposition_data(self):
"""
:description retrieves deposition data for all scenarios from sql database
: and checks that for each the first, last, and total number of values
: are correct
:param scenario: name of scenario for which data is to be retrieved
:param num_values: number of values included in scenario datasets
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
#scenario_data = pd.Series([[]], dtype='float')
result = pd.Series([], dtype='float')
#changing expected values to the 161st
expected_result = [0.50013,0.041273,161.0, #aerial_vf2f
0.49997,0.011741,161.0, #aerial_f2m
0.4999,0.0053241,161.0, #aerial_m2c
0.49988,0.0031189,161.0, #aerial_c2vc
1.019339,9.66E-04,161.0, #ground_low_vf
1.007885,6.13E-04,161.0, #ground_low_fmc
1.055205,1.41E-03,161.0, #ground_high_vf
1.012828,7.72E-04,161.0, #ground_high_fmc
8.91E-03,3.87E-05,161.0, #airblast_normal
0.1155276,4.66E-04,161.0, #airblast_dense
0.4762651,5.14E-05,161.0, #airblast_sparse
3.76E-02,3.10E-05,161.0, #airblast_vineyard
0.2223051,3.58E-04,161.0] #airblast_orchard
try:
agdrift_empty.num_db_values = 161 #set number of data values in sql db
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
#agdrift_empty.db_name = 'sqlite_agdrift_distance.db'
#this is the list of scenario names (column names) in sql db (the order here is important because
#the expected values are ordered in this manner
agdrift_empty.scenario_name = ['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
#cycle through reading scenarios and building result list
for i in range(len(agdrift_empty.scenario_name)):
#get scenario data
scenario_data = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name[i],
agdrift_empty.num_db_values)
print(scenario_data)
#extract 1st and last values of scenario data and build result list (including how many values are
#retrieved for each scenario
if i == 0:
#fix this
result = [scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))]
else:
result.extend([scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))])
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_column_names(self):
"""
:description retrieves column names from sql database (sqlite_agdrift_distance.db)
: (each column name refers to a specific deposition scenario;
: the scenario name is used later to retrieve the deposition data)
:parameter output name of sql database table from which to retrieve requested data
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
result = pd.Series([], dtype='object')
expected_result = ['distance_ft','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
try:
result = agdrift_empty.get_column_names()
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_filter_arrays(self):
"""
:description eliminate blank data cells (i.e., distances for which no deposition value is provided)
(and thus reduce the number of x,y values to be used)
:parameter x_in: array of distance values associated with values for a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter y_in: array of deposition values associated with a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter x_out: processed array of x_in values eliminating indices of blank distance/deposition values
:parameter y_out: processed array of y_in values eliminating indices of blank distance/deposition values
:NOTE y_in array is assumed to be populated by values >= 0. except for the blanks as 'nan' entries
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([0.,1.,4.,5.,6.,7.], dtype='float')
expected_result_y = pd.Series([10.,11.,14.,15.,16.,17.], dtype='float')
try:
x_in = pd.Series([0.,1.,2.,3.,4.,5.,6.,7.], dtype='float')
y_in = pd.Series([10.,11.,'nan','nan',14.,15.,16.,17.], dtype='float')
x_out, y_out = agdrift_empty.filter_arrays(x_in, y_in)
result_x = x_out
result_y = y_out
npt.assert_allclose(result_x, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result_y, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
            tab = [result_x, expected_result_x, result_y, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_list_sims_per_scenario(self):
"""
:description scan simulations and count number and indices of simulations that apply to each scenario
:parameter num_scenarios number of deposition scenarios included in SQL database
:parameter num_simulations number of simulations included in this model execution
:parameter scenario_name name of deposition scenario as recorded in SQL database
:parameter out_sim_scenario_id identification of deposition scenario specified per model run simulation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_num_sims = pd.Series([2,2,2,2,2,2,2,2,2,2,2,2,2], dtype='int')
expected_sim_indices = pd.Series([[0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[2,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[3,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[4,17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[5,18,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[6,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[7,20,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[8,21,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[9,22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[10,23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[11,24,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[12,25,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]], dtype='int')
try:
agdrift_empty.scenario_name = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.out_sim_scenario_id = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.num_simulations = len(agdrift_empty.out_sim_scenario_id)
agdrift_empty.num_scenarios = len(agdrift_empty.scenario_name)
result_num_sims, result_sim_indices = agdrift_empty.list_sims_per_scenario()
npt.assert_array_equal(result_num_sims, expected_num_sims, err_msg='', verbose=True)
npt.assert_array_equal(result_sim_indices, expected_sim_indices, err_msg='', verbose=True)
finally:
tab = [result_num_sims, expected_num_sims, result_sim_indices, expected_sim_indices]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_determine_area_dimensions(self):
"""
:description determine relevant area/length/depth of waterbody or terrestrial area
:param i: simulation number
:param ecosystem_type: type of assessment to be conducted
:param aquatic_body_type: source of dimensional data for area (EPA or User defined)
:param terrestrial_field_type: source of dimensional data for area (EPA or User defined)
:param *_width: default or user specified width of waterbody or terrestrial field
:param *_length: default or user specified length of waterbody or terrestrial field
:param *_depth: default or user specified depth of waterbody or terrestrial field
:NOTE all areas, i.e., ponds, wetlands, and terrestrial fields are of 1 hectare size; the user can elect
to specify a width other than the default width but it won't change the area size; thus for
        user specified areas the length is calculated and not specified by the user
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_width = pd.Series([208.7, 208.7, 100., 400., 150., 0.], dtype='float')
expected_length = pd.Series([515.8, 515.8, 1076.39, 269.098, 717.593, 0.], dtype='float')
expected_depth = pd.Series([6.56, 0.4921, 7., 23., 0., 0.], dtype='float')
try:
agdrift_empty.ecosystem_type = pd.Series(['aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(['epa_defined_pond',
'epa_defined_wetland',
'user_defined_pond',
'user_defined_wetland',
'NaN',
'NaN'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'epa_defined_terrestrial'], dtype='object')
num_simulations = len(agdrift_empty.ecosystem_type)
agdrift_empty.default_width = 208.7
agdrift_empty.default_length = 515.8
agdrift_empty.default_pond_depth = 6.56
agdrift_empty.default_wetland_depth = 0.4921
agdrift_empty.user_pond_width = pd.Series(['NaN', 'NaN', 100., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_pond_depth = pd.Series(['NaN', 'NaN', 7., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_width = pd.Series(['NaN', 'NaN', 'NaN', 400., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_depth = pd.Series(['NaN','NaN', 'NaN', 23., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_terrestrial_width = pd.Series(['NaN', 'NaN', 'NaN', 'NaN', 150., 'NaN'], dtype='float')
width_result = pd.Series(num_simulations * ['NaN'], dtype='float')
length_result = pd.Series(num_simulations * ['NaN'], dtype='float')
depth_result = pd.Series(num_simulations * ['NaN'], dtype='float')
agdrift_empty.out_area_width = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_length = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_depth = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.sqft_per_hectare = 107639
for i in range(num_simulations):
width_result[i], length_result[i], depth_result[i] = agdrift_empty.determine_area_dimensions(i)
npt.assert_allclose(width_result, expected_width, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(length_result, expected_length, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(depth_result, expected_depth, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [width_result, expected_width, length_result, expected_length, depth_result, expected_depth]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
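    # Worked check of the 1-hectare constraint (length = sqft_per_hectare / width) using the
    # user-defined rows above: 107639/100. = 1076.39, 107639/400. = 269.098, 107639/150. = 717.593.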
def test_calc_avg_dep_foa(self):
"""
:description calculation of average deposition over width of water body
:param integration_result result of integration of deposition curve across the distance
: beginning at the near distance and extending to the far distance of the water body
:param integration_distance effectively the width of the water body
:param avg_dep_foa average deposition rate across the width of the water body
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.1538462, 0.5, 240.])
try:
integration_result = pd.Series([1.,125.,3e5], dtype='float')
integration_distance = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa(integration_result, integration_distance)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
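    # Worked check of avg_dep_foa = integration_result / integration_distance using the test
    # values above: 1./6.5 = 0.1538462, 125./250. = 0.5, 3e5/1250. = 240.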
def test_calc_avg_dep_lbac(self):
"""
Deposition calculation.
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([6.5, 3.125e4, 3.75e8])
try:
avg_dep_foa = pd.Series([1.,125.,3e5], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
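            # Hedged note: consistent with the expected results above, avg_dep_lbac appears to be
            # avg_dep_foa * application_rate, e.g., 1. * 6.5 = 6.5, 125. * 250. = 3.125e4, 3e5 * 1250. = 3.75e8.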
result = agdrift_empty.calc_avg_dep_lbac(avg_dep_foa, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa_from_lbac(self):
"""
        :description calculation of average deposition as a fraction of applied, back-calculated from lbs/acre and the application rate
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.553846e-01, 8.8e-06, 4.e-08])
try:
avg_dep_lbac = pd.Series([1.01, 0.0022, 0.00005], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
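            # Hedged note: consistent with the expected results above, the fraction of applied appears to be
            # avg_dep_lbac / application_rate, e.g., 1.01 / 6.5 ~= 1.553846e-01, 0.0022 / 250. = 8.8e-06.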
result = agdrift_empty.calc_avg_dep_foa_from_lbac(avg_dep_lbac, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_gha(self):
"""
        :description calculation of average deposition (lbs/acre) from average deposition in grams per hectare
:param avg_dep_gha: average deposition over width of water body in units of grams/hectare
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert hectares to acres
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.01516739, 0.111524, 0.267659])
try:
avg_dep_gha = pd.Series([17., 125., 3e2], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
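            # Hedged note: consistent with the expected results above, the conversion appears to be
            # avg_dep_lbac = avg_dep_gha / (gms_per_lb * acres_per_hectare),
            # e.g., 17. / (453.592 * 2.471) ~= 0.01516739.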
result = agdrift_empty.calc_avg_dep_lbac_from_gha(avg_dep_gha)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_waterconc_ngl(self):
"""
        :description back-calculate the average deposition (lbs/acre) over the water body from the average
                     water concentration (ng/L)
        :param avg_waterconc_ngl: average pesticide concentration in the water body (ng/L)
        :param area_width: average width of water body
        :param area_length: average length of water body
        :param area_depth: average depth of water body
        :param gms_per_lb: conversion factor to convert lbs to grams
        :param ng_per_gram: conversion factor
        :param sqft_per_acre: conversion factor
        :param liters_per_ft3: conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.311455e-05, 2.209479e-03, 2.447423e-03])
try:
avg_waterconc_ngl = pd.Series([17., 125., 3e2], dtype='float')
area_width = pd.Series([50., 200., 500.], dtype='float')
area_length = pd.Series([6331., 538., 215.], dtype='float')
area_depth = pd.Series([0.5, 6.5, 3.], dtype='float')
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
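            # Hedged note: consistent with the expected results above, the back-calculation appears to reduce to
            # avg_dep_lbac = avg_waterconc_ngl * area_depth * liters_per_ft3 * sqft_per_acre / (ng_per_gram * gms_per_lb)
            # (area_width and area_length cancel because the water volume and treated area share the same footprint),
            # e.g., 17. * 0.5 * 28.3168 * 43560. / (1.e9 * 453.592) ~= 2.311455e-05.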
result = agdrift_empty.calc_avg_dep_lbac_from_waterconc_ngl(avg_waterconc_ngl, area_width,
area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_mgcm2(self):
"""
        :description back-calculate the average deposition over the terrestrial field (lbs/acre) from the
                     average field deposition in mg/cm2
        :param avg_fielddep_mgcm2: average deposition over the terrestrial field (mg/cm2)
        :param gms_per_lb: conversion factor to convert lbs to grams
        :param mg_per_gram: conversion factor
        :param sqft_per_acre: conversion factor
        :param cm2_per_ft2: conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.676538e-02, 2.2304486, 44.608973])
try:
avg_fielddep_mgcm2 = pd.Series([3.e-4, 2.5e-2, 5.e-01])
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.cm2_per_ft2 = 929.03
agdrift_empty.mg_per_gram = 1.e3
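            # Hedged note: consistent with the expected results above, the conversion appears to be
            # avg_dep_lbac = avg_fielddep_mgcm2 * cm2_per_ft2 * sqft_per_acre / (mg_per_gram * gms_per_lb),
            # e.g., 3.e-4 * 929.03 * 43560. / (1.e3 * 453.592) ~= 2.676538e-02.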
result = agdrift_empty.calc_avg_dep_lbac_from_mgcm2(avg_fielddep_mgcm2)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_gha(self):
"""
        :description average deposition over width of water body in grams per hectare
        :param avg_dep_lbac: average deposition over width of water body in lbs per acre
        :param gms_per_lb: conversion factor to convert lbs to grams
        :param acres_per_hectare: conversion factor (acres per hectare)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401061, 0.3648362, 0.03362546])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.47105
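            # Hedged note: consistent with the expected results above, the conversion appears to be
            # avg_dep_gha = avg_dep_lbac * gms_per_lb * acres_per_hectare,
            # e.g., 1.25e-3 * 453.592 * 2.47105 ~= 1.401061.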
result = agdrift_empty.calc_avg_dep_gha(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_waterconc_ngl(self):
"""
:description calculate the average concentration of pesticide in the pond/wetland
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
        :param area_length: average length of water body
        :param area_depth: average depth of water body
        :param gms_per_lb: conversion factor to convert lbs to grams
        :param ng_per_gram: conversion factor
        :param sqft_per_acre: conversion factor
        :param liters_per_ft3: conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([70.07119, 18.24654, 22.41823])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
area_width = pd.Series([6.56, 208.7, 997.], dtype='float')
area_length = pd.Series([1.640838e4, 515.7595, 107.9629], dtype='float')
area_depth = pd.Series([6.56, 6.56, 0.4921], dtype='float')
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
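            # Hedged note: consistent with the expected results above, the concentration appears to reduce to
            # avg_waterconc_ngl = avg_dep_lbac * gms_per_lb * ng_per_gram / (sqft_per_acre * area_depth * liters_per_ft3)
            # (width and length cancel), e.g., 1.25e-3 * 453.592 * 1.e9 / (43560. * 6.56 * 28.3168) ~= 70.07119.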
            result = agdrift_empty.calc_avg_waterconc_ngl(avg_dep_lbac, area_width, area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_fielddep_mgcm2(self):
"""
        :description calculate the average deposition of pesticide over the terrestrial field (mg/cm2)
        :param avg_dep_lbac: average deposition in lbs per acre
        :param gms_per_lb: conversion factor to convert lbs to grams
        :param mg_per_gram: conversion factor
        :param sqft_per_acre: conversion factor
        :param cm2_per_ft2: conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401063e-5, 3.648369e-6, 3.362552e-7])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.mg_per_gram = 1.e3
agdrift_empty.cm2_per_ft2 = 929.03
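            # Hedged note: consistent with the expected results above, the conversion appears to be
            # avg_fielddep_mgcm2 = avg_dep_lbac * gms_per_lb * mg_per_gram / (sqft_per_acre * cm2_per_ft2),
            # e.g., 1.25e-3 * 453.592 * 1.e3 / (43560. * 929.03) ~= 1.401063e-5.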
result = agdrift_empty.calc_avg_fielddep_mgcm2(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg(self):
"""
        :description retrieves distance and deposition values for a scenario from the sql database and
                     generates a running weighted average over a specified x-axis width (x_dist)
        :param num_db_values: number of distance values to be retrieved
        :param distance_name: name of column in sql database that contains the distance values
        :NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016]
expected_result_y = [0.364712246,0.351507467,0.339214283,0.316974687,0.279954504,0.225948786,0.159949625,
0.123048839,0.099781801,0.071666234,0.056352938,0.03860139,0.029600805,0.024150524,
0.020550354,0.01795028,0.015967703,0.014467663,0.013200146,0.01215011,0.011300098,
0.010550085,0.009905072,0.009345065,0.008845057,0.008400051,0.008000046,0.007635043,
0.007300039,0.007000034,0.006725033,0.00646503,0.006230027,0.006010027,0.005805023,
0.005615023,0.005435021,0.00527002,0.00511002,0.004960017,0.004820017,0.004685016,
0.004560015,0.004440015,0.004325013,0.004220012,0.004120012,0.004020012,0.003925011,
0.003835011,0.00375001,0.00367001,0.00359001,0.00351001,0.003435009,0.003365009,
0.003300007,0.003235009,0.003170007,0.003110007,0.003055006,0.003000007,0.002945006,
0.002895006,0.002845006,0.002795006,0.002745006,0.002695006,0.002650005,0.002610005,
0.002570005,0.002525006,0.002485004,0.002450005,0.002410005,0.002370005,0.002335004,
0.002300005,0.002265004,0.002235004,0.002205004,0.002175004,0.002145004,0.002115004,
0.002085004,0.002055004,0.002025004,0.002000002,0.001975004,0.001945004,0.001920002,
0.001900002,0.001875004,0.001850002,0.001830002,0.001805004,0.001780002,0.001760002,
0.001740002,0.001720002,0.001700002,0.001680002,0.001660002,0.001640002,0.001620002,
0.001605001,0.001590002,0.001570002,0.001550002,0.001535001,0.001520002,0.001500002,
0.001485001,0.001470002,0.001455001,0.001440002,0.001425001,0.001410002,0.001395001,
0.001385001,0.001370002,0.001355001,0.001340002,0.001325001,0.001315001,0.001305001,
0.001290002,0.001275001,0.001265001,0.001255001,0.001245001,0.001230002,0.001215001,
0.001205001,0.001195001,0.001185001,0.001175001,0.001165001,0.001155001,0.001145001,
0.001135001,0.001125001,0.001115001,0.001105001,0.001095001,0.001085001,0.001075001,
0.001065001,0.00106,0.001055001,0.001045001,0.001035001,0.001025001,0.001015001,
0.001005001,0.0009985,0.000993001,0.000985001,0.000977001,0.000969501]
expected_result_npts = 160
x_dist = 6.56
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.scenario_name = 'ground_low_vf'
agdrift_empty.num_db_values = 161
x_array_in = agdrift_empty.get_distances(agdrift_empty.num_db_values)
y_array_in = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name, agdrift_empty.num_db_values)
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(agdrift_empty.num_db_values,
x_array_in, y_array_in, x_dist)
            # write output arrays to csv file -- just for debugging
agdrift_empty.write_arrays_to_csv(x_array_out, y_array_out, "output_array_generate.csv")
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg1(self):
"""
        :description creates a running average for a specified x axis width (e.g., 7-day average values of an array)
        :param x_array_in: array of x-axis values
        :param y_array_in: array of y-axis values
        :param num_db_values: number of points in the input arrays
        :param x_array_out: array of x-axis values in output array
        :param y_array_out: array of y-axis values in output array
        :param npts_out: number of points in the output array
        :param x_dist: width in x_axis units of running weighted average
        :NOTE This test uses a uniformly spaced x_array and monotonically increasing y_array
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.]
expected_result_y = [2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,11.5,
12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,21.5,
22.5,23.5,24.5,25.5,26.5,27.5,28.5,29.5,30.5,31.5,
32.5,33.5,34.5,35.5,36.5,37.5,38.5,39.5,40.5,41.5,
42.5,43.5,44.5,45.5, 46.5]
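            # Hedged note: for y = x on a uniformly spaced unit grid with x_dist = 5, the area-weighted average of
            # y over [x, x + 5] is the midpoint value x + 2.5, which is why expected_result_y is simply
            # expected_result_x shifted up by 2.5; the output apparently stops at x = 44 (npts_out = 45), i.e.,
            # before the 5-unit window reaches the end of the 0-50 input range.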
expected_result_npts = 45
x_dist = 5.
num_db_values = 51
x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(num_db_values, x_array_in,
y_array_in, x_dist)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg2(self):
"""
        :description creates a running average for a specified x axis width (e.g., 7-day average values of an array)
        :param x_array_in: array of x-axis values
        :param y_array_in: array of y-axis values
        :param num_db_values: number of points in the input arrays
        :param x_array_out: array of x-axis values in output array
        :param y_array_out: array of y-axis values in output array
        :param npts_out: number of points in the output array
        :param x_dist: width in x_axis units of running weighted average
:NOTE This test uses a non-uniformly spaced x_array and monotonically increasing y_array
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.5,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.5,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.5,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.5,42.,43.,44.]
expected_result_y = [2.5,3.5,4.5,5.5,6.5,7.5,8.4666667,9.4,10.4,11.4,
12.4,13.975,14.5,15.5,16.5,17.5,18.466666667,19.4,20.4,21.4,
22.4,23.975,24.5,25.5,26.5,27.5,28.46666667,29.4,30.4,31.4,
32.4,33.975,34.5,35.5,36.5,37.5,38.466666667,39.4,40.4,41.4,
42.4,43.975,44.5,45.5, 46.5]
expected_result_npts = 45
x_dist = 5.
agdrift_empty.num_db_values = 51
x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.5,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.5,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.5,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.5,42.,43.,44.,45.,46.,47.,48.,49.,50.]
y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(agdrift_empty.num_db_values,
x_array_in, y_array_in, x_dist)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg3(self):
"""
:description creates a running average for a specified x axis width (e.g., 7-day average values of an array);
averages reflect weighted average assuming linearity between x points;
average is calculated as the area under the y-curve beginning at each x point and extending out x_dist
divided by x_dist (which yields the weighted average y between the relevant x points)
        :param x_array_in: array of x-axis values
        :param y_array_in: array of y-axis values
        :param num_db_values: number of points in the input arrays
        :param x_array_out: array of x-axis values in output array
        :param y_array_out: array of y-axis values in output array
        :param npts_out: number of points in the output array
        :param x_dist: width in x_axis units of running weighted average
:NOTE This test uses a monotonically increasing y_array and inserts a gap in the x values
that is greater than x_dist
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,51.,52.]
expected_result_y = [2.5,3.5,4.5,5.4111111,6.14444444,6.7,7.07777777,7.277777777,10.5,11.5,
12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,21.5,
22.5,23.5,24.5,25.5,26.5,27.5,28.5,29.5,30.5,31.5,
32.5,33.5,34.5,35.5,36.5,37.5,38.5,39.5,40.5,41.5,
42.5,43.5,44.5,45.5, 46.5]
expected_result_npts = 45
x_dist = 5.
num_db_values = 51
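            # Hedged note: the x gap from 7 to 16 exceeds x_dist, so the running averages bridging the gap rely on
            # linear interpolation of y between those points; e.g., at x = 7 the window [7, 12] averages a y that
            # rises from 7 toward 8 over the 9-unit gap, giving 7 + (2.5 / 9) ~= 7.277778, matching expected_result_y.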
x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,
51.,52.,53.,54.,55.,56.,57.,58.]
y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(num_db_values, x_array_in,
y_array_in, x_dist)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_locate_integrated_avg(self):
"""
:description retrieves values for distance and the first deposition scenario from the sql database
and generates running weighted averages from the first x,y value until it locates the user
specified integrated average of interest
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016]
expected_result_y = [0.364712246,0.351507467,0.339214283,0.316974687,0.279954504,0.225948786,0.159949625,
0.123048839,0.099781801,0.071666234,0.056352938,0.03860139,0.029600805,0.024150524,
0.020550354,0.01795028,0.015967703,0.014467663,0.013200146,0.01215011,0.011300098,
0.010550085,0.009905072,0.009345065,0.008845057,0.008400051,0.008000046,0.007635043,
0.007300039,0.007000034,0.006725033,0.00646503,0.006230027,0.006010027,0.005805023,
0.005615023,0.005435021,0.00527002,0.00511002,0.004960017,0.004820017,0.004685016,
0.004560015,0.004440015,0.004325013,0.004220012,0.004120012,0.004020012,0.003925011,
0.003835011,0.00375001,0.00367001,0.00359001,0.00351001,0.003435009,0.003365009,
0.003300007,0.003235009,0.003170007,0.003110007,0.003055006,0.003000007,0.002945006,
0.002895006,0.002845006,0.002795006,0.002745006,0.002695006,0.002650005,0.002610005,
0.002570005,0.002525006,0.002485004,0.002450005,0.002410005,0.002370005,0.002335004,
0.002300005,0.002265004,0.002235004,0.002205004,0.002175004,0.002145004,0.002115004,
0.002085004,0.002055004,0.002025004,0.002000002,0.001975004,0.001945004,0.001920002,
0.001900002,0.001875004,0.001850002,0.001830002,0.001805004,0.001780002,0.001760002,
0.001740002,0.001720002,0.001700002,0.001680002,0.001660002,0.001640002,0.001620002,
0.001605001,0.001590002,0.001570002,0.001550002,0.001535001,0.001520002,0.001500002,
0.001485001,0.001470002,0.001455001,0.001440002,0.001425001,0.001410002,0.001395001,
0.001385001,0.001370002,0.001355001,0.001340002,0.001325001,0.001315001,0.001305001,
0.001290002,0.001275001,0.001265001,0.001255001,0.001245001,0.001230002,0.001215001,
0.001205001,0.001195001,0.001185001,0.001175001,0.001165001,0.001155001,0.001145001,
0.001135001,0.001125001,0.001115001,0.001105001,0.001095001,0.001085001,0.001075001,
0.001065001,0.00106,0.001055001,0.001045001,0.001035001,0.001025001,0.001015001,
0.001005001,0.0009985,0.000993001,0.000985001,0.000977001,0.000969501]
expected_result_npts = 160
expected_x_dist_of_interest = 990.8016
x_dist = 6.56
weighted_avg = 0.0009697 #this is the running average value we're looking for
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.scenario_name = 'ground_low_vf'
agdrift_empty.num_db_values = 161
agdrift_empty.find_nearest_x = True
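            # Hedged note: the target weighted_avg (~0.0009697) falls between the last two running-average values
            # in expected_result_y, and the expected x_dist_of_interest (990.8016 ft) is the last distance in the
            # running-average output; the exact interpolation/snapping behavior is handled by locate_integrated_avg
            # together with find_nearest_x.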
x_array_in = agdrift_empty.get_distances(agdrift_empty.num_db_values)
y_array_in = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name, agdrift_empty.num_db_values)
x_array_out, y_array_out, npts_out, x_dist_of_interest, range_chk = \
agdrift_empty.locate_integrated_avg(agdrift_empty.num_db_values, x_array_in, y_array_in, x_dist, weighted_avg)
npt.assert_array_equal(expected_x_dist_of_interest, x_dist_of_interest, verbose=True)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} x-units to area and got {1} '.format(expected_x_dist_of_interest, x_dist_of_interest))
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_locate_integrated_avg1(self):
"""
        :description generates running weighted averages for hard-coded x,y arrays and locates the x value
                     at which the running average equals a user-specified value (no sql database is used here)
        :param num_db_values: number of points in the input arrays
:NOTE this test is for a monotonically increasing function with some irregularity in x-axis points
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,7.0,16.0,17.0,18.0,19.0,20.0,28.0,29.0,30.0,31.]
expected_result_y = [0.357143,1.27778,4.4125,5.15,5.7125,6.1,6.3125,9.5,10.5,11.5,12.5]
expected_result_npts = 11
expected_x_dist_of_interest = 30.5
x_dist = 5.
weighted_avg = 12.
num_db_values = 51
x_array_in = [0.,7.,16.,17.,18.,19.,20.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,
51.,52.,53.,54.,55.,56.,57.,58.,59.,60.,
61.,62.,63.,64.,65.,66.,67.,68.,69.,70.,
71.]
y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
agdrift_empty.find_nearest_x = True
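            # Hedged note: the target weighted_avg of 12. is bracketed by running-average values of 11.5 at x = 30
            # and 12.5 at x = 31 (see expected_result_y), and the expected x_dist_of_interest of 30.5 lies between
            # those two points.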
x_array_out, y_array_out, npts_out, x_dist_of_interest, range_chk = \
agdrift_empty.locate_integrated_avg(num_db_values, x_array_in, y_array_in, x_dist, weighted_avg)
npt.assert_array_equal(expected_x_dist_of_interest, x_dist_of_interest, verbose=True)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} x-units to area and got {1} '.format(expected_x_dist_of_interest, x_dist_of_interest))
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_locate_integrated_avg2(self):
"""
        :description generates running weighted averages for hard-coded x,y arrays and locates the x value
                     at which the running average equals a user-specified value (no sql database is used here)
        :param num_db_values: number of points in the input arrays
:NOTE This test is for a monotonically decreasing function with irregular x-axis spacing
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,7.,16.,17.,18.,19.,20.,28.,29.,30.,
34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,
51.,52.,53.,54.,55.,56.,57.,58.,59.,60.]
expected_result_y = [49.6429,48.7222,45.5875,44.85,44.2875,43.9,43.6875,41.175,40.7,40.3,
37.5,36.5,35.5,34.5,33.5,32.5,31.5,30.5,29.5,28.5,
27.5,26.5,25.5,24.5,23.5,22.5,21.5,20.5,19.5,18.5,
17.5,16.5,15.5,14.5,13.5,12.5,11.5]
expected_result_npts = 37
expected_x_dist_of_interest = 60.
x_dist = 5.
weighted_avg = 12.
num_db_values = 51
agdrift_empty.find_nearest_x = True
x_array_in = [0.,7.,16.,17.,18.,19.,20.,28.,29.,30.,
34.,35.,36.,37.,38.,39.,40.,
41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,
51.,52.,53.,54.,55.,56.,57.,58.,59.,60.,
61.,62.,63.,64.,65.,66.,67.,68.,69.,70.,
71.,72.,73.,74. ]
y_array_in = [50.,49.,48.,47.,46.,45.,44.,43.,42.,41.,
40.,39.,38.,37.,36.,35.,34.,33.,32.,31.,
30.,29.,28.,27.,26.,25.,24.,23.,22.,21.,
20.,19.,18.,17.,16.,15.,14.,13.,12.,11.,
10.,9.,8.,7.,6.,5.,4.,3.,2.,1.,0.]
x_array_out, y_array_out, npts_out, x_dist_of_interest, range_chk = \
agdrift_empty.locate_integrated_avg(num_db_values, x_array_in, y_array_in, x_dist, weighted_avg)
npt.assert_array_equal(expected_x_dist_of_interest, x_dist_of_interest, verbose=True)
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} x-units to area and got {1} '.format(expected_x_dist_of_interest, x_dist_of_interest))
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_locate_integrated_avg3(self):
"""
        :description generates running weighted averages for hard-coded x,y arrays and locates the x value
                     at which the running average equals a user-specified value (no sql database is used here)
        :param num_db_values: number of points in the input arrays
:NOTE this test is for a monotonically decreasing function with regular x-axis spacing
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
expected_result_x_dist = pd.Series([], dtype='float')
try:
expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,
10.,11.,12.,13.,14.,15.,16.,17.,18.,19.,
20.,21.,22.,23.,24.,25.,26.,27.,28.,29.,
30.,31.,32.,33.,34.,35.,36.]
expected_result_y = [47.5,46.5,45.5,44.5,43.5,42.5,41.5,40.5,39.5,38.5,
37.5,36.5,35.5,34.5,33.5,32.5,31.5,30.5,29.5,28.5,
27.5,26.5,25.5,24.5,23.5,22.5,21.5,20.5,19.5,18.5,
17.5,16.5,15.5,14.5,13.5,12.5,11.5]
expected_result_npts = 37
expected_x_dist_of_interest = 36.
x_dist = 5.
weighted_avg = 12.
num_db_values = 51
agdrift_empty.find_nearest_x = True
x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,
10.,11.,12.,13.,14.,15.,16.,17.,18.,19.,
20.,21.,22.,23.,24.,25.,26.,27.,28.,29.,
30.,31.,32.,33.,34.,35.,36.,37.,38.,39.,
40.,41.,42.,43.,44.,45.,46.,47.,48.,49.,
50.]
y_array_in = [50.,49.,48.,47.,46.,45.,44.,43.,42.,41.,
40.,39.,38.,37.,36.,35.,34.,33.,32.,31.,
30.,29.,28.,27.,26.,25.,24.,23.,22.,21.,
20.,19.,18.,17.,16.,15.,14.,13.,12.,11.,
10.,9.,8.,7.,6.,5.,4.,3.,2.,1.,0.]
x_array_out, y_array_out, npts_out, x_dist_of_interest, range_chk = \
agdrift_empty.locate_integrated_avg(num_db_values, x_array_in, y_array_in, x_dist, weighted_avg)
npt.assert_array_equal(expected_x_dist_of_interest, x_dist_of_interest, verbose=True )
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True )
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} x-units to area and got {1} '.format(expected_x_dist_of_interest, x_dist_of_interest))
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_round_model_outputs(self):
"""
        :description round output variable values (and place them in the output variable series) so that they can be
            directly compared to expected results; the expected results are limited by the output format of the OPP
            AGDRIFT model (V2.1.1) interface (we don't have the AGDRIFT code, so we cannot change its output format
            to agree with this model)
:param avg_dep_foa:
:param avg_dep_lbac:
:param avg_dep_gha:
:param avg_waterconc_ngl:
:param avg_field_dep_mgcm2:
:param num_sims: number of simulations
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
num_sims = 3
num_args = 5
agdrift_empty.out_avg_dep_foa = pd.Series(num_sims * [np.nan], dtype='float')
agdrift_empty.out_avg_dep_lbac = pd.Series(num_sims * [np.nan], dtype='float')
agdrift_empty.out_avg_dep_gha = pd.Series(num_sims * [np.nan], dtype='float')
agdrift_empty.out_avg_waterconc_ngl = pd.Series(num_sims * [np.nan], dtype='float')
agdrift_empty.out_avg_field_dep_mgcm2 = pd.Series(num_sims * [np.nan], dtype='float')
result = pd.Series(num_sims * [num_args*[np.nan]], dtype='float')
expected_result = pd.Series(num_sims * [num_args*[np.nan]], dtype='float')
expected_result[0] = [1.26,1.26,1.26,1.26,1.26]
expected_result[1] = [0.0004,0.0004,0.0004,0.0004,0.0004]
expected_result[2] = [3.45e-05,3.45e-05,3.45e-05,3.45e-05,3.45e-05]
try:
            # setting each variable to the same values; each value tests a separate pathway through the rounding method
avg_dep_lbac = pd.Series([1.2567,3.55e-4,3.454e-5], dtype='float')
avg_dep_foa = pd.Series([1.2567,3.55e-4,3.454e-5], dtype='float')
avg_dep_gha = pd.Series([1.2567,3.55e-4,3.454e-5], dtype='float')
avg_waterconc_ngl = pd.Series([1.2567,3.55e-4,3.454e-5], dtype='float')
avg_field_dep_mgcm2 = pd.Series([1.2567,3.55e-4,3.454e-5], dtype='float')
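            # Hedged note: based on expected_result, the rounding apparently depends on magnitude --
            # 1.2567 -> 1.26, 3.55e-4 -> 0.0004, and 3.454e-5 -> 3.45e-05 -- i.e., each input value exercises a
            # different branch of round_model_outputs.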
for i in range(num_sims):
lbac = avg_dep_lbac[i]
foa = avg_dep_foa[i]
gha = avg_dep_gha[i]
ngl = avg_waterconc_ngl[i]
mgcm2 = avg_field_dep_mgcm2[i]
agdrift_empty.round_model_outputs(foa, lbac, gha, ngl, mgcm2, i)
result[i] = [agdrift_empty.out_avg_dep_foa[i], agdrift_empty.out_avg_dep_lbac[i],
agdrift_empty.out_avg_dep_gha[i], agdrift_empty.out_avg_waterconc_ngl[i],
agdrift_empty.out_avg_field_dep_mgcm2[i]]
npt.assert_allclose(result[0], expected_result[0], rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result[1], expected_result[1], rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result[2], expected_result[2], rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_find_dep_pt_location(self):
"""
:description this method locates the downwind distance associated with a specific deposition rate
:param x_array: array of distance values
:param y_array: array of deposition values
:param npts: number of values in x/y arrays
:param foa: value of deposition (y value) of interest
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
result = [[],[],[],[]]
expected_result = [(0.0, 'in range'), (259.1832, 'in range'), (997.3632, 'in range'), (np.nan, 'out of range')]
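        # Hedged note: each expected tuple pairs a downwind distance with an 'in range'/'out of range' flag;
        # a target deposition above the start of the curve maps to distance 0.0, targets within the curve map to
        # distances along it, and a target below the smallest deposition value yields (nan, 'out of range').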
try:
x_array = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016, 997.3632]
y_array = [0.364706389,0.351133211,0.338484161,0.315606383,0.277604029,0.222810736,0.159943507,
0.121479708,0.099778741,0.068653,0.05635,0.0386,0.0296,0.02415,0.02055,0.01795,
0.0159675,0.0144675,0.0132,0.01215,0.0113,0.01055,0.009905,0.009345,0.008845,0.0084,
0.008,0.007635,0.0073,0.007,0.006725,0.006465,0.00623,0.00601,0.005805,0.005615,
0.005435,0.00527,0.00511,0.00496,0.00482,0.004685,0.00456,0.00444,0.004325,0.00422,
0.00412,0.00402,0.003925,0.003835,0.00375,0.00367,0.00359,0.00351,0.003435,0.003365,
0.0033,0.003235,0.00317,0.00311,0.003055,0.003,0.002945,0.002895,0.002845,0.002795,
0.002745,0.002695,0.00265,0.00261,0.00257,0.002525,0.002485,0.00245,0.00241,0.00237,
0.002335,0.0023,0.002265,0.002235,0.002205,0.002175,0.002145,0.002115,0.002085,
0.002055,0.002025,0.002,0.001975,0.001945,0.00192,0.0019,0.001875,0.00185,0.00183,
0.001805,0.00178,0.00176,0.00174,0.00172,0.0017,0.00168,0.00166,0.00164,0.00162,
0.001605,0.00159,0.00157,0.00155,0.001535,0.00152,0.0015,0.001485,0.00147,0.001455,
0.00144,0.001425,0.00141,0.001395,0.001385,0.00137,0.001355,0.00134,0.001325,0.001315,
0.001305,0.00129,0.001275,0.001265,0.001255,0.001245,0.00123,0.001215,0.001205,
0.001195,0.001185,0.001175,0.001165,0.001155,0.001145,0.001135,0.001125,0.001115,
0.001105,0.001095,0.001085,0.001075,0.001065,0.00106,0.001055,0.001045,0.001035,
0.001025,0.001015,0.001005,0.0009985,0.000993,0.000985,0.000977,0.0009695,0.0009612]
npts = len(x_array)
num_sims = 4
foa = [0.37, 0.004, 0.0009613, 0.0008]
for i in range(num_sims):
result[i] = agdrift_empty.find_dep_pt_location(x_array, y_array, npts, foa[i])
npt.assert_equal(expected_result, result, verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_extend_curve_opp(self):
"""
        :description extends/extrapolates an x,y array of data points that reflect a ln ln relationship by selecting
                     a number of points near the end of the x,y arrays and fitting a line to their
                     ln ln transforms (two ln ln transforms can be applied: one using the straight natural log of
                     each selected x,y point and one using a 'relative' value of each of the selected points --
                     the relative values are calculated by establishing a zero point closest to the selected
                     points).
                     For AGDRIFT: extends the distance vs deposition (fraction of applied) curve to enable model
                     calculations when the area of interest (pond, wetland, terrestrial field) lies partially outside
                     the original curve (whose extent is 997 feet). The extension is achieved by fitting a line of
                     best fit to the last 16 points of the original curve. The x,y values representing the last 16
                     points are natural log transforms of the distance and deposition values at those points. Two
                     ln ln transforms are coded here, reflecting the fact that the AGDRIFT model (v2.1.1) uses each
                     of them under different circumstances (which I believe is not the intention but is the way the
                     model functions -- my guess is that one of the transforms was used and then a second one was
                     coded to increase the degree of conservativeness, but the code was changed in only one of the
                     two places where the transformation occurs).
                     Finally, the AGDRIFT model extends the curve only when necessary (i.e., when it determines that
                     the area of interest lies partially beyond the last point of the original curve at 997 ft). In
                     this code all the curves are extended out to 1994 ft, which represents the furthest distance at
                     which the downwind edge of an area of concern can be specified. All scenario curves are extended
                     here because we are running multiple simulations (e.g., monte carlo); instead of extending the
                     curves each time a simulation requires it (which may be multiple times for the same scenario
                     curve) we just do it for all curves up front. There is a case to be made that the
                     curves should be extended external to this code, simply providing the full curve in the SQLite
                     database containing the original curve.
:param x_array: array of x values to be extended (must be at least 17 data points in original array)
:param y_array: array of y values to be extended
:param max_dist: maximum distance (ft) associated with unextended x values
:param dist_inc: increment (ft) for each extended data point
:param num_pts_ext: number of points at end of original x,y arrays to be used for extending the curve
:param ln_ln_trans: form of transformation to perform (True: straight ln ln, False: relative ln ln)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
# x_array_in = pd.Series([], dtype='float')
# y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,6.5616,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632,
1003.9232,1010.4832,1017.0432,1023.6032,1030.1632,1036.7232,1043.2832,1049.8432,1056.4032,
1062.9632,1069.5232,1076.0832,1082.6432,1089.2032,1095.7632,1102.3232,1108.8832,1115.4432,
1122.0032,1128.5632,1135.1232,1141.6832,1148.2432,1154.8032,1161.3632,1167.9232,1174.4832,
1181.0432,1187.6032,1194.1632,1200.7232,1207.2832,1213.8432,1220.4032,1226.9632,1233.5232,
1240.0832,1246.6432,1253.2032,1259.7632,1266.3232,1272.8832,1279.4432,1286.0032,1292.5632,
1299.1232,1305.6832,1312.2432,1318.8032,1325.3632,1331.9232,1338.4832,1345.0432,1351.6032,
1358.1632,1364.7232,1371.2832,1377.8432,1384.4032,1390.9632,1397.5232,1404.0832,1410.6432,
1417.2032,1423.7632,1430.3232,1436.8832,1443.4432,1450.0032,1456.5632,1463.1232,1469.6832,
1476.2432,1482.8032,1489.3632,1495.9232,1502.4832,1509.0432,1515.6032,1522.1632,1528.7232,
1535.2832,1541.8432,1548.4032,1554.9632,1561.5232,1568.0832,1574.6432,1581.2032,1587.7632,
1594.3232,1600.8832,1607.4432,1614.0032,1620.5632,1627.1232,1633.6832,1640.2432,1646.8032,
1653.3632,1659.9232,1666.4832,1673.0432,1679.6032,1686.1632,1692.7232,1699.2832,1705.8432,
1712.4032,1718.9632,1725.5232,1732.0832,1738.6432,1745.2032,1751.7632,1758.3232,1764.8832,
1771.4432,1778.0032,1784.5632,1791.1232,1797.6832,1804.2432,1810.8032,1817.3632,1823.9232,
1830.4832,1837.0432,1843.6032,1850.1632,1856.7232,1863.2832,1869.8432,1876.4032,1882.9632,
1889.5232,1896.0832,1902.6432,1909.2032,1915.7632,1922.3232,1928.8832,1935.4432,1942.0032,
1948.5632,1955.1232,1961.6832,1968.2432,1974.8032,1981.3632,1987.9232,1994.4832]
expected_result_y = [0.49997,0.37451,0.29849,0.25004,0.2138,0.19455,0.18448,0.17591,0.1678,0.15421,0.1401,
0.12693,0.11785,0.11144,0.10675,0.099496,0.092323,0.085695,0.079234,0.074253,0.070316,
0.067191,0.064594,0.062337,0.060348,0.058192,0.055224,0.051972,0.049283,0.04757,
0.046226,0.044969,0.043922,0.043027,0.041934,0.040528,0.039018,0.037744,0.036762,
0.035923,0.035071,0.034267,0.033456,0.032629,0.03184,0.031078,0.030363,0.02968,0.029028,
0.028399,0.027788,0.027199,0.026642,0.026124,0.025635,0.02517,0.024719,0.024287,0.023867,
0.023457,0.023061,0.022685,0.022334,0.021998,0.021675,0.02136,0.021055,0.020758,0.020467,
0.020186,0.019919,0.019665,0.019421,0.019184,0.018951,0.018727,0.018514,0.018311,
0.018118,0.017929,0.017745,0.017564,0.017387,0.017214,0.017046,0.016886,0.016732,
0.016587,0.016446,0.016309,0.016174,0.016039,0.015906,0.015777,0.015653,0.015532,
0.015418,0.015308,0.015202,0.015097,0.014991,0.014885,0.014782,0.014683,0.014588,0.0145,
0.014415,0.014334,0.014254,0.014172,0.01409,0.014007,0.013926,0.013846,0.01377,0.013697,
0.013628,0.013559,0.013491,0.013423,0.013354,0.013288,0.013223,0.01316,0.013099,0.01304,
0.012983,0.012926,0.01287,0.012814,0.012758,0.012703,0.012649,0.012597,0.012547,0.012499,
0.01245,0.012402,0.012352,0.012302,0.012254,0.012205,0.012158,0.012113,0.012068,0.012025,
0.011982,0.01194,0.011899,0.011859,0.011819,0.01178,0.011741,1.1826345E-02,1.1812256E-02,
1.1798945E-02,1.1786331E-02,1.1774344E-02,1.1762927E-02,1.1752028E-02,1.1741602E-02,
1.1731610E-02,1.1722019E-02,1.1712796E-02,1.1703917E-02,1.1695355E-02,1.1687089E-02,
1.1679100E-02,1.1671370E-02,1.1663883E-02,1.1656623E-02,1.1649579E-02,1.1642737E-02,
1.1636087E-02,1.1629617E-02,1.1623319E-02,1.1617184E-02,1.1611203E-02,1.1605369E-02,
1.1599676E-02,1.1594116E-02,1.1588684E-02,1.1583373E-02,1.1578179E-02,1.1573097E-02,
1.1568122E-02,1.1563249E-02,1.1558475E-02,1.1553795E-02,1.1549206E-02,1.1544705E-02,
1.1540288E-02,1.1535953E-02,1.1531695E-02,1.1527514E-02,1.1523405E-02,1.1519367E-02,
1.1515397E-02,1.1511493E-02,1.1507652E-02,1.1503873E-02,1.1500154E-02,1.1496493E-02,
1.1492889E-02,1.1489338E-02,1.1485841E-02,1.1482395E-02,1.1478999E-02,1.1475651E-02,
1.1472351E-02,1.1469096E-02,1.1465886E-02,1.1462720E-02,1.1459595E-02,1.1456512E-02,
1.1453469E-02,1.1450465E-02,1.1447499E-02,1.1444570E-02,1.1441677E-02,1.1438820E-02,
1.1435997E-02,1.1433208E-02,1.1430452E-02,1.1427728E-02,1.1425036E-02,1.1422374E-02,
1.1419742E-02,1.1417139E-02,1.1414566E-02,1.1412020E-02,1.1409502E-02,1.1407011E-02,
1.1404546E-02,1.1402107E-02,1.1399693E-02,1.1397304E-02,1.1394939E-02,1.1392598E-02,
1.1390281E-02,1.1387986E-02,1.1385713E-02,1.1383463E-02,1.1381234E-02,1.1379026E-02,
1.1376840E-02,1.1374673E-02,1.1372527E-02,1.1370400E-02,1.1368292E-02,1.1366204E-02,
1.1364134E-02,1.1362082E-02,1.1360048E-02,1.1358032E-02,1.1356033E-02,1.1354052E-02,
1.1352087E-02,1.1350139E-02,1.1348207E-02,1.1346291E-02,1.1344390E-02,1.1342505E-02,
1.1340635E-02,1.1338781E-02,1.1336941E-02,1.1335115E-02,1.1333304E-02,1.1331507E-02,
1.1329723E-02,1.1327954E-02,1.1326197E-02,1.1324454E-02,1.1322724E-02,1.1321007E-02,
1.1319303E-02,1.1317611E-02,1.1315931E-02,1.1314263E-02,1.1312608E-02,1.1310964E-02,
1.1309332E-02,1.1307711E-02,1.1306101E-02,1.1304503E-02,1.1302915E-02,1.1301339E-02,
1.1299773E-02,1.1298218E-02,1.1296673E-02,1.1295138E-02,1.1293614E-02,1.1292099E-02,
1.1290594E-02,1.1289100E-02,1.1287614E-02,1.1286139E-02,1.1284672E-02,1.1283215E-02,
1.1281767E-02,1.1280328E-02,1.1278898E-02,1.1277477E-02,1.1276065E-02,1.1274661E-02]
expected_result_npts = [305]
max_dist = 997.3632
dist_inc = 6.56
num_pts_ext = 16
ln_ln_trans = False #using the relative ln ln transformation in this test
agdrift_empty.meters_per_ft = 0.3048
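            # Hedged note: consistent with the expected arrays above, the extension appends points every
            # dist_inc = 6.56 ft beyond max_dist = 997.3632 ft out to ~1994 ft (305 points total = 153 original
            # + 152 extrapolated), with the extrapolated y values taken from the fitted ln-ln line
            # (here the 'relative' transform, since ln_ln_trans is False).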
x_array_in = pd.Series([0.,6.5616,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632])
y_array_in = pd.Series([0.49997,0.37451,0.29849,0.25004,0.2138,0.19455,0.18448,0.17591,0.1678,0.15421,0.1401,
0.12693,0.11785,0.11144,0.10675,0.099496,0.092323,0.085695,0.079234,0.074253,0.070316,
0.067191,0.064594,0.062337,0.060348,0.058192,0.055224,0.051972,0.049283,0.04757,
0.046226,0.044969,0.043922,0.043027,0.041934,0.040528,0.039018,0.037744,0.036762,
0.035923,0.035071,0.034267,0.033456,0.032629,0.03184,0.031078,0.030363,0.02968,0.029028,
0.028399,0.027788,0.027199,0.026642,0.026124,0.025635,0.02517,0.024719,0.024287,0.023867,
0.023457 ,0.023061,0.022685,0.022334,0.021998,0.021675,0.02136,0.021055,0.020758,0.020467,
0.020186,0.019919,0.019665,0.019421,0.019184,0.018951,0.018727,0.018514,0.018311,
0.018118,0.017929,0.017745,0.017564,0.017387,0.017214,0.017046,0.016886,0.016732,
0.016587,0.016446,0.016309,0.016174,0.016039,0.015906,0.015777,0.015653,0.015532,
0.015418,0.015308,0.015202,0.015097,0.014991,0.014885,0.014782,0.014683,0.014588,0.0145,
0.014415,0.014334,0.014254,0.014172,0.01409,0.014007,0.013926,0.013846,0.01377,0.013697,
0.013628,0.013559,0.013491,0.013423,0.013354,0.013288,0.013223,0.01316,0.013099,0.01304,
0.012983,0.012926,0.01287,0.012814,0.012758,0.012703,0.012649,0.012597,0.012547,0.012499,
0.01245,0.012402,0.012352,0.012302,0.012254,0.012205,0.012158,0.012113,0.012068,0.012025,
0.011982,0.01194,0.011899,0.011859,0.011819,0.01178,0.011741])
x_array_out, y_array_out = agdrift_empty.extend_curve_opp(x_array_in, y_array_in, max_dist, dist_inc, num_pts_ext,
ln_ln_trans)
npts_out = [len(y_array_out)]
#
#agdrift_empty.write_arrays_to_csv(x_array_out, y_array_out, "extend_data.csv")
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts[0], npts_out[0]))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_extend_curve_opp1(self):
"""
:description extends/extrapolates an x,y array of data points that reflect a ln ln relationship by selecting
a number of points near the end of the x,y arrays and fitting a line to the points
        ln ln transforms (two ln ln transforms can be applied; one using the straight natural log of
        each selected x,y point and one using a 'relative' value of each of the selected points --
the relative values are calculated by establishing a zero point closest to the selected
points
For AGDRIFT: extends distance vs deposition (fraction of applied) curve to enable model calculations
when area of interest (pond, wetland, terrestrial field) lie partially outside the original
curve (whose extent is 997 feet). The extension is achieved by fitting a line of best fit
to the last 16 points of the original curve. The x,y values representing the last 16 points
        are natural log transforms of the distance and deposition values at the 16 points. Two ln ln
        transforms are coded here, reflecting the fact that the AGDRIFT model (v2.1.1) uses each of them
        under different circumstances (which I believe is not the intention but is the way the model
        functions -- my guess is that one of the transforms was used and then a second one was coded
        to increase the degree of conservativeness -- but the code was changed in only one of the two
        places where the transformation occurs).
        Finally, the AGDRIFT model extends the curve only when necessary (i.e., when it determines that
        the area of interest lies partially beyond the last point of the original curve (997 ft)). In
        this code all the curves are extended out to 1994 ft, which represents the furthest distance at which
        the downwind edge of an area of concern can be specified. All scenario curves are extended here
        because we are running multiple simulations (e.g., monte carlo) and instead of extending the
        curves each time a simulation requires it (which may be multiple times for the same scenario
curve) we just do it for all curves up front. There is a case to be made that the
curves should be extended external to this code and simply provide the full curve in the SQLite
database containing the original curve.
:param x_array: array of x values to be extended (must be at least 17 data points in original array)
:param y_array: array of y values to be extended
:param max_dist: maximum distance (ft) associated with unextended x values
:param dist_inc: increment (ft) for each extended data point
:param num_pts_ext: number of points at end of original x,y arrays to be used for extending the curve
:param ln_ln_trans: form of transformation to perform (True: straight ln ln, False: relative ln ln)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
# x_array_in = pd.Series([], dtype='float')
# y_array_in = pd.Series([], dtype='float')
        x_array_out = pd.Series([], dtype='float')
__all__ = [
"read_clock_paramaters",
"read_weather_inputs",
"read_model_parameters",
"read_irrigation_management",
"read_field_management",
"read_groundwater_table",
"compute_variables",
"compute_crop_calander",
"calculate_HIGC",
"calculate_HI_linear",
"read_model_initial_conditions",
"create_soil_profile",
]
# Cell
import numpy as np
import os
import pandas as pd
from .classes import *
import pathlib
from copy import deepcopy
import aquacrop
# Cell
def read_clock_paramaters(SimStartTime, SimEndTime, OffSeason=False):
"""
    function to read in start and end simulation times and return a `ClockStructClass` object
    *Arguments:*\n
    `SimStartTime` : `str` : simulation start date
    `SimEndTime` : `str` : simulation end date
    `OffSeason` : `bool` : simulate off-season (True/False)
    *Returns:*
    `ClockStruct` : `ClockStructClass` : time parameters
"""
# extract data and put into numpy datetime format
SimStartTime = pd.to_datetime(SimStartTime)
SimEndTime = pd.to_datetime(SimEndTime)
# create object
ClockStruct = ClockStructClass()
# add variables
ClockStruct.SimulationStartDate = SimStartTime
ClockStruct.SimulationEndDate = SimEndTime
ClockStruct.nSteps = (SimEndTime - SimStartTime).days + 1
ClockStruct.TimeSpan = pd.date_range(freq="D", start=SimStartTime, end=SimEndTime)
ClockStruct.StepStartTime = ClockStruct.TimeSpan[0]
ClockStruct.StepEndTime = ClockStruct.TimeSpan[1]
ClockStruct.SimOffSeason = OffSeason
return ClockStruct
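# Example usage (dates are illustrative):
#
#     ClockStruct = read_clock_paramaters("1982/05/01", "1983/10/30")
#     ClockStruct.nSteps        # number of daily time steps in the simulation
#     ClockStruct.TimeSpan[:3]  # first three simulated days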
# Cell
def read_weather_inputs(ClockStruct, weather_df):
"""
clip weather to start and end simulation dates
*Arguments:*\n
    `ClockStruct` : `ClockStructClass` : time parameters
`weather_df` : `pd.DataFrame` : weather data
*Returns:*
`weather_df` : `pd.DataFrame`: clipped weather data
"""
# get the start and end dates of simulation
start_date = ClockStruct.SimulationStartDate
end_date = ClockStruct.SimulationEndDate
assert weather_df.Date.iloc[0] <= start_date
assert weather_df.Date.iloc[-1] >= end_date
# remove weather data outside of simulation dates
weather_df = weather_df[weather_df.Date >= start_date]
weather_df = weather_df[weather_df.Date <= end_date]
return weather_df
# Cell
def read_model_parameters(ClockStruct, Soil, Crop, weather_df):
"""
    Finalise soil and crop parameters, including planting and harvest dates,
    and save them to a new ParamStruct object
    *Arguments:*\n
    `ClockStruct` : `ClockStructClass` : time params
    `Soil` : `SoilClass` : soil object
    `Crop` : `CropClass` : crop object
    `weather_df` : `pd.DataFrame` : weather data
    *Returns:*
    `ClockStruct` : `ClockStructClass` : updated time parameters
    `ParamStruct` : `ParamStructClass` : contains model crop and soil parameters
"""
# create ParamStruct object
ParamStruct = ParamStructClass()
Soil.fill_nan()
# Assign Soil object to ParamStruct
ParamStruct.Soil = Soil
while Soil.zSoil < Crop.Zmax + 0.1:
for i in Soil.profile.index[::-1]:
if Soil.profile.loc[i, "dz"] < 0.25:
Soil.profile.loc[i, "dz"] += 0.1
Soil.fill_nan()
break
###########
# crop
###########
# if isinstance(Crop, Iterable):
# CropList=list(Crop)
# else:
# CropList = [Crop]
# # assign variables to paramstruct
# ParamStruct.NCrops = len(CropList)
# if ParamStruct.NCrops > 1:
# ParamStruct.SpecifiedPlantCalander = 'Y'
# else:
# ParamStruct.SpecifiedPlantCalander = 'N'
# # add crop list to ParamStruct
# ParamStruct.CropList = CropList
############################
# plant and harvest times
############################
# # find planting and harvest dates
# # check if there is more than 1 crop or multiple plant dates in sim year
# if ParamStruct.SpecifiedPlantCalander == "Y":
# # if here than crop rotation occours during same period
# # create variables from dataframe
# PlantingDates = pd.to_datetime(planting_dates)
# HarvestDates = pd.to_datetime(harvest_dates)
# if (ParamStruct.NCrops > 1):
# CropChoices = [crop.Name for crop in ParamStruct.CropList]
# assert len(CropChoices) == len(PlantingDates) == len(HarvestDates)
# elif ParamStruct.NCrops == 1:
    # Only one crop type considered during simulation - i.e. no rotations
    # either within or between years
CropList = [Crop]
ParamStruct.CropList = CropList
ParamStruct.NCrops = 1
# Get start and end years for full simulation
SimStartDate = ClockStruct.SimulationStartDate
SimEndDate = ClockStruct.SimulationEndDate
# extract the years and months of these dates
start_end_years = pd.DatetimeIndex([SimStartDate, SimEndDate]).year
start_end_months = pd.DatetimeIndex([SimStartDate, SimEndDate]).month
    if Crop.HarvestDate is None:
Crop = compute_crop_calander(Crop, ClockStruct, weather_df)
mature = int(Crop.MaturityCD + 30)
plant = pd.to_datetime("1990/" + Crop.PlantingDate)
harv = plant + np.timedelta64(mature, "D")
new_harvest_date = str(harv.month) + "/" + str(harv.day)
Crop.HarvestDate = new_harvest_date
    # check if crop growing season runs over a calendar year
    # Planting and harvest dates are in day/month format so just add an arbitrary year
singleYear = pd.to_datetime("1990/" + Crop.PlantingDate) < pd.to_datetime(
"1990/" + Crop.HarvestDate
)
if singleYear:
# if normal year
# specify the planting and harvest years as normal
plant_years = list(range(start_end_years[0], start_end_years[1] + 1))
harvest_years = plant_years
else:
# if it takes over a year then the plant year finishes 1 year before end of sim
# and harvest year starts 1 year after sim start
if pd.to_datetime(str(start_end_years[1] + 2) + "/" + Crop.HarvestDate) < SimEndDate:
# specify shifted planting and harvest years
plant_years = list(range(start_end_years[0], start_end_years[1] + 1))
harvest_years = list(range(start_end_years[0] + 1, start_end_years[1] + 2))
else:
plant_years = list(range(start_end_years[0], start_end_years[1]))
harvest_years = list(range(start_end_years[0] + 1, start_end_years[1] + 1))
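    # Worked example (dates illustrative): with PlantingDate = "05/01" and
    # HarvestDate = "10/30" the season fits inside a single calendar year, so
    # plant_years == harvest_years.  With PlantingDate = "11/01" and
    # HarvestDate = "03/31" the season crosses the year boundary, so each harvest
    # year is one year after the corresponding planting year.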
# Correct for partial first growing season (may occur when simulating
# off-season soil water balance)
if (
pd.to_datetime(str(plant_years[0]) + "/" + Crop.PlantingDate)
< ClockStruct.SimulationStartDate
):
# shift everything by 1 year
plant_years = plant_years[1:]
harvest_years = harvest_years[1:]
# ensure number of planting and harvest years are the same
assert len(plant_years) == len(harvest_years)
# create lists to hold variables
PlantingDates = []
HarvestDates = []
CropChoices = []
# save full harvest/planting dates and crop choices to lists
for i in range(len(plant_years)):
PlantingDates.append(str(plant_years[i]) + "/" + ParamStruct.CropList[0].PlantingDate)
HarvestDates.append(str(harvest_years[i]) + "/" + ParamStruct.CropList[0].HarvestDate)
CropChoices.append(ParamStruct.CropList[0].Name)
# save crop choices
ParamStruct.CropChoices = list(CropChoices)
# save clock paramaters
    ClockStruct.PlantingDates = pd.to_datetime(PlantingDates)
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""Read input files and compute dynamic and thermodynamic quantities."""
import datetime
import logging
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Tuple
import attr
import gsd.hoomd
import numpy as np
import pandas
import tqdm
from ..dynamics import Dynamics, Relaxations
from ..frame import Frame, HoomdFrame
from ..molecules import Trimer
from ..util import get_filename_vars
from ._gsd import FileIterator, read_gsd_trajectory
from ._lammps import parse_lammpstrj, read_lammps_trajectory
logger = logging.getLogger(__name__)
@attr.s(auto_attribs=True)
class WriteCache:
_filename: Optional[Path] = None
group: str = "dynamics"
cache_multiplier: int = 1
to_append: bool = False
_cache: List[Any] = attr.ib(default=attr.Factory(list), init=False)
_cache_default: int = attr.ib(default=8192, init=False)
_emptied_count: int = attr.ib(default=0, init=False)
    def __attrs_post_init__(self):
if self.group is None:
raise ValueError("Group can not be None.")
@property
def _cache_size(self) -> int:
return self.cache_multiplier * self._cache_default
def append(self, item: Any) -> None:
# Cache of size 0 or with val None will act as list
if self._cache and len(self._cache) == self._cache_size:
self.flush()
self._emptied_count += 1
self._cache.append(item)
def _flush_file(self, df) -> None:
assert self.filename is not None
assert self.group is not None
df.to_hdf(self.filename, self.group, format="table", append=self.to_append)
self.to_append = True
def flush(self) -> None:
df = self.to_dataframe()
self._flush_file(df)
self._cache.clear()
@property
def filename(self) -> Optional[Path]:
if self._filename is not None:
return Path(self._filename)
return None
def __len__(self) -> int:
# Total number of elements added
return self._cache_size * self._emptied_count + len(self._cache)
def to_dataframe(self):
        return pandas.DataFrame.from_records(self._cache)
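# A minimal usage sketch for WriteCache (file name, group and records are illustrative):
#
#     cache = WriteCache(filename=Path("dynamics.h5"), group="dynamics", cache_multiplier=1)
#     for record in computed_quantities:   # any records accepted by DataFrame.from_records
#         cache.append(record)             # flushes to the HDF5 file every _cache_size items
#     cache.flush()                        # write whatever is left at the end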
__author__ = "<NAME>"
__license__ = "Apache 2"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__website__ = "https://llp.berkeley.edu/asgari/"
__git__ = "https://github.com/ehsanasgari/"
__email__ = "<EMAIL>"
__project__ = "1000Langs -- Super parallel project at CIS LMU"
import sys
import pandas as pd
sys.path.append('../')
from utility.file_utility import FileUtility
import requests
import codecs
import json
from multiprocessing import Pool
import tqdm
import collections
from pandas import Series
class BDPAPl(object):
'''
PBC retrieving from the bible digital platform
'''
def __init__(self, key, output_path):
'''
Constructor
'''
# set the parameters
self.key = key
self.output_path = output_path
FileUtility.ensure_dir(self.output_path + '/api_intermediate/')
FileUtility.ensure_dir(self.output_path + '/reports/')
self.to_double_check=list()
# check the API connection
response = requests.get('https://dbt.io/api/apiversion?key=' + self.key + '&v=2')
if response.status_code != 200:
print('Enter a correct API code')
            return  # returning a non-None value from __init__ would raise a TypeError
else:
response = json.loads(response.content)
print('Connected successfully to the bible digital platform v ' + response['Version'])
self.load_book_map()
    def create_BPC(self, nump=20, update_meta_data=False, override=False, repeat=4):
'''
Creating PBC
'''
# update metadata file through api call
if update_meta_data:
self.update_meta_data()
# read the metadata file and create the dataframe
for line in codecs.open('../meta/api_volumes.txt','r','utf-8'):
books=json.loads(line)
books_filtered=([x for x in books if x['media']=='text'])
df=pd.DataFrame(books_filtered)
df['version'] = df[['version_code','volume_name']].apply(lambda x: ' # '.join(x), axis=1)
df['trans_ID']=df['fcbh_id'].str[0:6]
self.df=df[['language_iso','trans_ID','fcbh_id','language_english','language_name','version']]
# bible retrieval
        self.id2iso_dict = Series(self.df['language_iso'].values, index=self.df['trans_ID'])
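        # The Series above acts as a translation-ID -> ISO 639-3 lookup, e.g.
        # (key and value illustrative): self.id2iso_dict['ENGESV'] -> 'eng'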
import warnings
from typing import Union
import numpy as np
import pandas as pd
from ai4water.utils.utils import dateandtime_now, jsonize, deepcopy_dict_without_clone
from ._transformations import MinMaxScaler, PowerTransformer, QuantileTransformer, StandardScaler
from ._transformations import LogScaler, Log10Scaler, Log2Scaler, TanScaler, SqrtScaler, CumsumScaler
from ._transformations import FunctionTransformer, RobustScaler, MaxAbsScaler
from ._transformations import Center
from .utils import InvalidTransformation
# TODO add logistic, tanh and more scalers.
# which transformation to use? Some related articles/posts
# https://scikit-learn.org/stable/modules/preprocessing.html
# http://www.faqs.org/faqs/ai-faq/neural-nets/part2/section-16.html
# https://data.library.virginia.edu/interpreting-log-transformations-in-a-linear-model/
class TransformationsContainer(object):
def __init__(self):
self.scalers = {}
self.transforming_straight = True
# self.nan_indices = None
self.index = None
class Transformation(TransformationsContainer):
"""
    Applies transformation to tabular data.
    Any new transforming method should define two methods, one starting with
    `transform_with_` and one with `inverse_transform_with_`
https://developers.google.com/machine-learning/data-prep/transform/normalization
Currently following methods are available for transformation and inverse transformation
Methods
-------
- `minmax`
- `maxabs`
- `robust`
- `power` same as yeo-johnson
- `yeo-johnson`
- `box-cox`
    - `zscore` also known as standard scaler
    - `scale` division by standard deviation
    - `center` by subtracting mean
    - `quantile`
    - `log` natural logarithm
    - `log10` log with base 10
    - `log2` log with base 2
    - `sqrt` square root
    - `tan` tangent
    - `cumsum` cumulative sum
    To transform a dataframe using any of the above methods use
Examples:
>>> transformer = Transformation(method='zscore')
>>> transformer.fit_transform(data=[1,2,3,5])
or
>>> transformer = Transformation(method='minmax')
>>> normalized_df = transformer.fit_transform_with_minmax(data=pd.DataFrame([1,2,3]))
>>> transformer = Transformation(method='minmax')
>>> normalized_df, scaler_dict = transformer(data=pd.DataFrame([1,2,3]))
or using one liner
>>> normalized_df = Transformation(method='minmax',
... features=['a'])(data=pd.DataFrame([[1,2],[3,4], [5,6]],
... columns=['a', 'b']))
where `method` can be any of the above mentioned methods.
Note:
------
`tan` and `cumsum` do not return original data upon inverse transformation.
"""
available_transformers = {
"minmax": MinMaxScaler,
"zscore": StandardScaler,
"center": Center,
"scale": StandardScaler,
"robust": RobustScaler,
"maxabs": MaxAbsScaler,
"power": PowerTransformer,
"yeo-johnson": PowerTransformer,
"box-cox": PowerTransformer,
"quantile": QuantileTransformer,
"log": LogScaler,
"log10": Log10Scaler,
"log2": Log2Scaler,
"sqrt": SqrtScaler,
"tan": TanScaler,
"cumsum": CumsumScaler
}
def __init__(self,
method: str = 'minmax',
features: list = None,
replace_zeros: bool = False,
replace_zeros_with: Union[str, int, float] = 1,
treat_negatives: bool = False,
**kwargs
):
"""
Arguments:
method : method by which to transform and consequencly inversely
transform the data. default is 'minmax'. see `Transformations.available_transformers`
for full list.
features : string or list of strings. Only applicable if `data` is
dataframe. It defines the columns on which we want to apply transformation.
The remaining columns will remain same/unchanged.
replace_zeros : If true, then setting this argument to True will replace
the zero values in data with some fixed value `replace_zeros_with`
before transformation. The zero values will be put back at their
places after transformation so this replacement/implacement is
done only to avoid error during transformation for example during Box-Cox.
            replace_zeros_with : if replace_zeros is True, then this value will be used
                to replace zeros in the dataframe before doing transformation. You can
                also define the method with which to replace zeros; for example, setting
                this argument to 'mean' will replace zeros with the 'mean' of the
                array/column which contains the zeros. Allowed string values are
                'mean', 'max', 'min'. see https://stats.stackexchange.com/a/222237/338323
treat_negatives:
If true, and if data contains negative values, then the absolute
values of these negative values will be considered for transformation.
For inverse transformation, the -ve sign is removed, to return the
original data. This option is necessary for log, sqrt and box-cox
transformations with -ve values in data.
            kwargs : any arguments which are to be provided to the transformer on
                INITIALIZATION and not during transform or inverse transform
Example:
>>> from ai4water.preprocessing.transformations import Transformation
>>> from ai4water.datasets import busan_beach
>>> df = busan_beach()
>>> inputs = ['tide_cm', 'wat_temp_c', 'sal_psu', 'air_temp_c', 'pcp_mm', 'pcp3_mm']
>>> transformer = Transformation(method='minmax', features=['sal_psu', 'air_temp_c'])
>>> new_data = transformer.fit_transform(df[inputs])
Following shows how to apply log transformation on an array containing zeros
by making use of the argument `replace_zeros`. The zeros in the input array
will be replaced internally but will be inserted back afterwards.
>>> from ai4water.preprocessing.transformations import Transformation
>>> transformer = Transformation(method='log', replace_zeros=True)
>>> transformed_data = transformer.fit_transform([1,2,3,0.0, 5, np.nan, 7])
... [0.0, 0.6931, 1.0986, 0.0, 1.609, None, 1.9459]
>>> original_data = transformer.inverse_transform(data=transformed_data)
"""
super().__init__()
if method not in self.available_transformers.keys():
raise InvalidTransformation(method, list(self.available_transformers.keys()))
self.method = method
self.replace_zeros = replace_zeros
self.replace_zeros_with = replace_zeros_with
self.treat_negatives = treat_negatives
self.features = features
self.kwargs = kwargs
self.transformed_features = None
def __call__(self, data, what="fit_transform", return_key=False, **kwargs):
"""
Calls the `fit_transform` and `inverse_transform` methods.
"""
if what.startswith("fit"):
self.transforming_straight = True
return self.fit_transform(data, return_key=return_key, **kwargs)
elif what.startswith("inv"):
self.transforming_straight = False
return self.inverse_transform(data, **kwargs)
else:
raise ValueError(f"The class Transformation can not be called with keyword argument 'what'={what}")
def __getattr__(self, item):
"""
Gets the attributes from underlying transformation modules.
"""
if item.startswith('_'):
return self.__getattribute__(item)
elif item.startswith("fit_transform_with"):
transformer = item.split('_')[-1]
if transformer.lower() in list(self.available_transformers.keys()):
self.method = transformer
return self.fit_transform_with_sklearn
elif item.startswith("inverse_transform_with"):
transformer = item.split('_')[-1]
if transformer.lower() in list(self.available_transformers.keys()):
self.method = transformer
return self.inverse_transform_with_sklearn
else:
raise AttributeError(f'Transformation has no attribute {item}')
@property
def data(self):
return self._data
@data.setter
def data(self, x):
if isinstance(x, pd.DataFrame):
self._data = x
else:
assert isinstance(x, np.ndarray)
xdf = pd.DataFrame(x, columns=['data'+str(i) for i in range(x.shape[1])])
self._data = xdf
@property
def features(self):
return self._features
@features.setter
def features(self, x):
if x is not None:
assert len(x) == len(set(x)), f"duplicated features are not allowed. Features are: {x}"
self._features = x
@property
def transformed_features(self):
return self._transformed_features
@transformed_features.setter
def transformed_features(self, x):
self._transformed_features = x
@property
def num_features(self):
return len(self.features)
def get_scaler(self):
return self.available_transformers[self.method.lower()]
def pre_process_data(self, data):
"""Makes sure that data is dataframe and optionally replaces nans"""
data = to_dataframe(data)
# save the index if not already saved so that can be used later
if self.index is None:
self.index = data.index
indices = {}
if self.replace_zeros and self.transforming_straight:
# instead of saving indices with column names, using column indices
# because df.iloc[row_idx, col_idx] is better than df[col_name].iloc[row_idx]
for col_idx, col in enumerate(data.columns):
                # find index containing 0s in current column of dataframe
i = data.index[data[col] == 0.0]
if len(i) > 0:
indices[col_idx] = i.values
if self.replace_zeros_with in ['mean', 'max', 'min']:
replace_with = float(getattr(np, 'nan' + self.replace_zeros_with)(data[col]))
else:
replace_with = self.replace_zeros_with
data.iloc[indices[col_idx], col_idx] = get_val(data[col], replace_with)
#if self.zero_indices is None:
self.zero_indices_ = indices
indices = {}
if self.treat_negatives:
for col_idx, col in enumerate(data.columns):
                # find index containing negatives in current column of dataframe
i = data.index[data[col] < 0.0]
if len(i) > 0:
indices[col_idx] = i.values
# turn -ve values into positives
data[col] = data[col].abs()
self.negative_indices_ = indices
return data
def post_process_data(self, data):
"""If nans/zeros were replaced with some value, put nans/zeros back."""
data = data.copy()
if self.replace_zeros:
if hasattr(self, 'zero_indices_'):
for col, idx in self.zero_indices_.items():
data.iloc[idx, col] = 0.0
if self.treat_negatives:
if hasattr(self, 'negative_indices_'):
for col, idx in self.negative_indices_.items():
# invert the sign of those values which were originally -ve
data.iloc[idx, col] = -data.iloc[idx, col]
return data
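    # Round-trip sketch of the zero handling above (values illustrative):
    #
    #     tr = Transformation(method='log', replace_zeros=True)
    #     x = tr.fit_transform([1.0, 0.0, 10.0])   # zeros swapped out, logged, then restored as 0.0
    #     tr.inverse_transform(data=x)             # -> approximately [1.0, 0.0, 10.0]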
def fit_transform_with_sklearn(
self,
data:Union[pd.DataFrame, np.ndarray],
return_key=False,
**kwargs):
original_data = data.copy()
to_transform = self.get_features(data) # TODO, shouldn't kwargs go here as input?
if self.method.lower() in ["log", "log10", "log2"]:
if (to_transform.values < 0).any():
raise InvalidValueError(self.method, "negative")
_kwargs = {}
if self.method == "scale":
_kwargs['with_mean'] = False
elif self.method in ['power', 'yeo-johnson', 'box-cox']:
# a = np.array([87.52, 89.41, 89.4, 89.23, 89.92], dtype=np.float32).reshape(-1,1)
# power transformers sometimes overflow with small data which causes inf error
to_transform = to_transform.astype("float64")
if self.method == "box-cox":
_kwargs['method'] = "box-cox"
for k,v in self.kwargs.items():
if k in _kwargs:
_kwargs.pop(k)
scaler = self.get_scaler()(**_kwargs, **self.kwargs)
data = scaler.fit_transform(to_transform, **kwargs)
data = pd.DataFrame(data, columns=to_transform.columns)
scaler = self.serialize_scaler(scaler, to_transform)
data = self.maybe_insert_features(original_data, data)
data = self.post_process_data(data)
self.tr_data = data
if return_key:
return data, scaler
return data
def inverse_transform_with_sklearn(self, data, **kwargs):
self.transforming_straight = False
scaler = self.get_scaler_from_dict(**kwargs)
original_data = data.copy()
to_transform = self.get_features(data)
data = scaler.inverse_transform(to_transform)
        data = pd.DataFrame(data, columns=to_transform.columns)
"""This module defines functions to read/load experimental data used for training and analysis.
"""
import os
import numpy as np
import pandas as pd
PATH = os.path.dirname(os.path.abspath(__file__))
def training_data():
"""Loads the FRET signals at different 2AT concentrations used for model training.
Returns:
pandas.DataFrame
"""
return kang2019_fig_s3c()
def kang2019_fig_s3c():
"""Loads the FRET signals at different 2AT concentrations from Fig. S3C of Kang et al. 2019.
Note that the data has been clipped and the times shifted so that the point
where 2AT was added (180 s) is now the zero timepoint.
Returns:
pandas.DataFrame
"""
fpath = os.path.abspath(os.path.join(PATH, '../exp_data/FRET_data_Kang2019_FigS3C.csv'))
df = pd.read_csv(fpath)
return df
def kang2019_fig_2d():
"""Loads the FRET signals from Fig. 2D of Kang et al. 2019.
Returns:
pandas.DataFrame
"""
fpath = os.path.abspath(os.path.join(PATH, '../exp_data/FRET_data_Kang2019_Fig2D.csv'))
df = pd.read_csv(fpath)
return df
def kang2019_fig_2f_raw():
"""Loads the raw FRET signals corresponding to Fig. 2F of Kang et al. 2019.
Returns:
pandas.DataFrame
"""
fpath = os.path.abspath(os.path.join(PATH, '../exp_data/raw_FRETratio_data_Kang2019_Fig2F.csv'))
    df = pd.read_csv(fpath, dtype={'LaserEnergy': np.int, 'PulseNumber': np.int})
import os
import sys
import torch
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from loguru import logger
from torch import Tensor, nn
from torch.optim import Adam
from datetime import datetime as dt
sys.path.insert(0, f'{os.path.join(os.path.dirname(__file__), "../")}')
from model.model import AttentionOCR
from model.cnn import CNN, ResNetCNN
from torch.utils.data import DataLoader
from utils.dataset import get_dataloader
from typing import Dict, Union, List, Optional
from nltk.translate.bleu_score import sentence_bleu
loss = nn.NLLLoss()
def train_epoch(dl: DataLoader, model: nn.Module, optim, device: str) -> float:
model.train()
batches = tqdm(dl)
losses = []
for b in batches:
for k in b:
b[k] = b[k].to(device)
optim.zero_grad()
pred = model(b)
curr_loss = loss(pred, b['tokens'].squeeze())
curr_loss.backward()
optim.step()
losses.append(curr_loss.cpu().item())
batches.set_description(
f'Train epoch. Current CCE Loss: {losses[-1]}. ')
return np.mean(losses)
@torch.no_grad()
def validate_epoch(dl: DataLoader, model: nn.Module, device: str) -> Dict[str, float]:
model.eval()
batches = tqdm(dl)
losses = []
bleu_scores = []
for b in batches:
for k in b:
b[k] = b[k].to(device)
pred = model(b)
curr_loss = loss(pred, b['tokens'].squeeze()).cpu().item()
pred_tokens = torch.argmax(pred, 1).detach().cpu().numpy()
true_tokens = b['tokens'].squeeze().cpu().numpy()
bleu = sentence_bleu([true_tokens], pred_tokens, weights=(1,))
losses.append(curr_loss)
bleu_scores.append(bleu)
batches.set_description(
f'Validation epoch. Current CCE Loss: {losses[-1]}. Current BLEU: {bleu_scores[-1]}. ')
metrics = {
'bleu': np.mean(bleu_scores),
'loss': np.mean(losses)
}
return metrics
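# Note: sentence_bleu(..., weights=(1,)) reduces BLEU to unigram precision. A quick
# standalone check (tokens illustrative):
#
#     from nltk.translate.bleu_score import sentence_bleu
#     sentence_bleu([[1, 2, 3, 4]], [1, 2, 3, 9], weights=(1,))   # -> 0.75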
def fit_model(
train_path: str,
eval_path: str,
image_dir: str,
formulas_path: str,
vocab_path: str,
device: Union['cpu', 'cuda'] = 'cpu',
n_epochs: int = 12,
lr: float = 1e-4,
save_dir: Optional[str] = None,
cnn_type: Union[ResNetCNN, CNN] = ResNetCNN
) -> pd.DataFrame:
log_file = ''.join(
['train_', dt.now().strftime('%Y-%m-%dT%H:%M:%S'), '.log'])
log_path = os.path.join('./', 'logs', log_file)
if save_dir is None:
save_dir = os.path.join('./', 'params/')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
logger.add(log_path)
logger.info('Loading train dataset')
train_dl, vocab = get_dataloader(data_path=train_path,
image_dir=image_dir,
formulas_path=formulas_path,
vocab_path=vocab_path)
logger.info('Loading validation dataset')
eval_dl, _ = get_dataloader(data_path=eval_path,
image_dir=image_dir,
formulas_path=formulas_path,
vocab_path=vocab_path)
logger.info('Loading model')
model = AttentionOCR(len(vocab), device, cnn_type=cnn_type)
optim = torch.optim.Adam(model.parameters(), lr=lr)
metrics = []
logger.info(f'Start fitting {n_epochs} epochs on {len(train_dl)} objects')
for epoch in range(1, n_epochs):
logger.info(f'Start {epoch} epoch of {n_epochs}')
train_loss = train_epoch(train_dl, model, optim, device)
logger.info(f'Train epoch {epoch}. Mean loss is {train_loss}')
eval_metrics = validate_epoch(eval_dl, model, device)
logger.info(
f'Validation epoch {epoch}. Mean loss is {eval_metrics["loss"]}')
logger.info(
f'Validation epoch {epoch}. Mean bleu is {eval_metrics["bleu"]}')
metrics.append(eval_metrics)
model_name = f'{round(eval_metrics["bleu"], 3)}_{dt.now().strftime("%m-%d")}'
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
logger.info(f'Model saved at {model_path}')
logger.info(f'End fitting on {n_epochs} epochs')
    return pd.DataFrame(metrics)
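# Example call (paths and settings are illustrative):
#
#     metrics_df = fit_model(
#         train_path="data/train.csv", eval_path="data/val.csv",
#         image_dir="data/images", formulas_path="data/formulas.txt",
#         vocab_path="data/vocab.json", device="cuda", n_epochs=12, lr=1e-4,
#     )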
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 14:26:07 2019
@author: mh2210
"""
# python dnabot\dnabot_app.py nogui --construct_path \\icnas1.cc.ic.ac.uk\ljh119\GitHub\DNA-BOT\examples\construct_csvs\storch_et_al_cons\storch_et_al_cons.csv --source_paths \\icnas1.cc.ic.ac.uk\ljh119\GitHub\DNA-BOT\examples\part_linker_csvs\BIOLEGIO_BASIC_STD_SET.csv \\icnas1.cc.ic.ac.uk\ljh119\GitHub\DNA-BOT\examples\part_linker_csvs\part_plate_2_230419.csv
# python3 dnabot/dnabot_app.py nogui --construct_path /Users/liamhallett/Documents/GitHub/DNA-BOT/examples/construct_csvs/storch_et_al_cons/storch_et_al_cons.csv --source_paths /Users/liamhallett/Documents/GitHub/DNA-BOT/examples/part_linker_csvs/BIOLEGIO_BASIC_STD_SET.csv /Users/liamhallett/Documents/GitHub/DNA-BOT/examples/part_linker_csvs/part_plate_2_230419.csv
print("\nINITIALISING>>>")
import os
import csv
import argparse
import pandas as pd
import numpy as np
import json
import sys
import dnabot_gui as gui
import tkinter as tk
import mplates
# Constant str
TEMPLATE_DIR_NAME = 'template_ot2_scripts'
CLIP_TEMP_FNAME = 'clip_template.py'
MAGBEAD_TEMP_FNAME = 'purification_template.py'
F_ASSEMBLY_TEMP_FNAME = 'assembly_template.py'
TRANS_SPOT_TEMP_FNAME = 'transformation_template.py'
CLIP_FNAME = '1_clip.ot2.py'
MAGBEAD_FNAME = '2_purification.ot2.py'
F_ASSEMBLY_FNAME = '3_assembly.ot2.py'
TRANS_SPOT_FNAME = '4_transformation.ot2.py'
CLIPS_INFO_FNAME = 'clip_run_info.csv'
FINAL_ASSEMBLIES_INFO_FNAME = 'final_assembly_run_info.csv'
WELL_OUTPUT_FNAME = 'wells.txt'
# Constant floats/ints
CLIP_DEAD_VOL = 60
CLIP_VOL = 30
T4_BUFF_VOL = 3
BSAI_VOL = 1
T4_LIG_VOL = 0.5
CLIP_MAST_WATER = 15.5
PART_PER_CLIP = 200
MIN_VOL = 1
MAX_CONSTRUCTS = 96
MAX_CLIPS = 48
FINAL_ASSEMBLIES_PER_CLIP = 15
DEFAULT_PART_VOL = 1
MAX_SOURCE_PLATES = 6
MAX_FINAL_ASSEMBLY_TIPRACKS = 7
# Constant dicts
SPOTTING_VOLS_DICT = {2: 5, 3: 5, 4: 5, 5: 5, 6: 5, 7: 5}
# Constant lists
SOURCE_DECK_POS = ['2', '5', '8', '7', '10', '11'] # deck positions available for source plates (only pos 2 and 5 are normally used)
def __cli():
"""Command line interface.
:returns: CLI arguments
:rtype: <argparse.Namespace>
"""
desc = "DNA assembly using BASIC on OpenTrons."
parser = argparse.ArgumentParser(description=desc) # creates an argparse object that allows for command line interfacing - https://docs.python.org/3/howto/argparse.html#id1
# Specific options for collecting settings from command line
subparsers = parser.add_subparsers(help='Optional, to define settings from the terminal instead of the graphical '
'interface. Type "python dnabot_app.py nogui -h" for more info.')
parser_nogui = subparsers.add_parser('nogui') # adds a new sub-parser called nogui for parsing variables directly from the command line
parser_nogui.add_argument('--construct_path', help='Construct CSV file.', required=True)
parser_nogui.add_argument('--source_paths', help='Source CSV files.', nargs='+', required=True)
parser_nogui.add_argument('--etoh_well', help='Well coordinate for Ethanol. Default: A11', default='A11', type=str)
parser_nogui.add_argument('--soc_column', help='Column coordinate for SOC. Default: 1', default=1, type=int)
parser_nogui.add_argument('--output_dir',
help='Output directory. Default: same directory than the one containing the '
'"construct_path" file',
default=None, type=str or None)
parser_nogui.add_argument('--template_dir',
help='Template directory. Default: "template_ot2_scripts" located next to the present '
'script.',
default=None, type=str or None)
# Makes life easier to decide if we should switch to GUI or not
parser.set_defaults(nogui=False)
parser_nogui.set_defaults(nogui=True)
return parser.parse_args()
def __info_from_gui():
"""Pop GUI to collect user inputs.
:returns user_inputs: info collected
:rtype: dict
"""
user_inputs = {
'construct_path': None,
'sources_paths': None,
'etoh_well': None,
'soc_column': None
}
# Obtain user input
print("Requesting user input, if not visible checked minimized windows.")
root = tk.Tk()
dnabotinst = gui.DnabotApp(root)
root.mainloop()
root.destroy()
if dnabotinst.quit_status:
sys.exit("User specified 'QUIT' during app.")
# etoh_well and soc_column are silently collected by the gui
user_inputs['etoh_well'] = dnabotinst.etoh_well
user_inputs['soc_column'] = dnabotinst.soc_column
# construct file path
root = tk.Tk()
user_inputs['construct_path'] = gui.UserDefinedPaths(root, 'Construct csv file').output
root.destroy()
# part & linker file paths
root = tk.Tk()
user_inputs['sources_paths'] = gui.UserDefinedPaths(root, 'Sources csv files', multiple_files=True).output
root.destroy()
return user_inputs
def main():
# Settings
args = __cli()
if args.nogui: # input args using argparse
etoh_well = args.etoh_well
soc_column = args.soc_column
construct_path = args.construct_path
sources_paths = args.source_paths
if args.output_dir == None:
output_dir = os.path.dirname(construct_path)
else:
output_dir = args.output_dir
template_dir = args.template_dir
print("\netoh_well ", etoh_well)
print("\nsoc_column ", soc_column)
print("\nconstruct_path ", construct_path)
print("\nsources_paths ", sources_paths)
print("\noutput_dir ", output_dir)
print("\ntemplate_dir", template_dir)
else: # input args from gui
user_inputs = __info_from_gui()
etoh_well = user_inputs['etoh_well']
soc_column = user_inputs['soc_column']
construct_path = user_inputs['construct_path']
sources_paths = user_inputs['sources_paths']
output_dir = os.path.dirname(construct_path)
template_dir = None
# Args checking
if len(sources_paths) > len(SOURCE_DECK_POS):
raise ValueError('Number of source plates exceeds deck positions.')
# Path to template directory
if template_dir is not None:
# Just to comment this case: only way to fall here is that the variable has been set throught the command
# line arguments, nothing to do.
template_dir_path = template_dir
pass
elif __name__ == '__main__':
# Alternatively, try to automatically deduce the path relatively to the main script path
script_path = os.path.abspath(__file__)
template_dir_path = os.path.abspath(os.path.join(script_path, '..', TEMPLATE_DIR_NAME))
else:
# Fallback
generator_dir = os.getcwd()
template_dir_path = os.path.abspath(os.path.join(generator_dir, TEMPLATE_DIR_NAME))
# Dealing with output dir
if not os.path.exists(output_dir):
os.makedirs(output_dir) # make a directory called output_dir
os.chdir(output_dir) # change to that directory
# Prefix name
construct_base = os.path.basename(construct_path)
construct_base = os.path.splitext(construct_base)[0] # returns the constructs csv path without the file name
print('User input successfully collected.')
# Process input csv files
print('Processing input csv files...')
constructs_list = generate_constructs_list(construct_path) # returns a list of pandas dfs, each containing the clip reactions required for a given construct
clips_df = generate_clips_df(constructs_list) # generates df of unique clips, numbers required of each, and mag well location
sources_dict = generate_sources_dict(sources_paths) # generates dict for sources with well location, conc, and pos location
# calculate OT2 script variables
print('Calculating OT-2 variables...')
clips_dict = generate_clips_dict(clips_df, sources_dict) # returns a dictionary with prefix wells, prefix plates, suffix wells, suffix plates, parts wells, parts plates, parts vols, water vols
magbead_sample_number = clips_df['number'].sum() # total number of mag bead purifications required
final_assembly_dict = generate_final_assembly_dict(constructs_list, # returns a dictionary with assembly wells as keys and clip wells as values
clips_df)
final_assembly_tipracks = calculate_final_assembly_tipracks( # returns total number of tipracks required
final_assembly_dict)
spotting_tuples = generate_spotting_tuples(constructs_list, # returns a list of spotting tuples with the assembly well twice and the vol required - this means that the assemblies are spotted twice
SPOTTING_VOLS_DICT)
print('Writing files...')
# Write OT2 scripts
generate_ot2_script(CLIP_FNAME, os.path.join( # write clips script using clips_dict
template_dir_path, CLIP_TEMP_FNAME), clips_dict=clips_dict)
generate_ot2_script(MAGBEAD_FNAME, os.path.join( # write magbead purification script using magbead_sample_number and etoh_well
template_dir_path, MAGBEAD_TEMP_FNAME),
sample_number=magbead_sample_number,
ethanol_well=etoh_well)
generate_ot2_script(F_ASSEMBLY_FNAME, os.path.join( # write assembly script using final_assembly_dict and final_assembly_tipracks
template_dir_path, F_ASSEMBLY_TEMP_FNAME),
final_assembly_dict=final_assembly_dict,
tiprack_num=final_assembly_tipracks)
generate_ot2_script(TRANS_SPOT_FNAME, os.path.join( # write spotting script using spotting_tuples and SOC column
template_dir_path, TRANS_SPOT_TEMP_FNAME),
spotting_tuples=spotting_tuples,
soc_well=f"A{soc_column}")
# Write non-OT2 scripts
if 'metainformation' in os.listdir():
pass
else:
os.makedirs('metainformation') # make a new folder in the directory called 'metainformation'
os.chdir('metainformation')
master_mix_df = generate_master_mix_df(clips_df['number'].sum()) # return master mix for total number of clips needed
sources_paths_df = generate_sources_paths_df(sources_paths, SOURCE_DECK_POS)
dfs_to_csv(construct_base + '_' + CLIPS_INFO_FNAME, index=False,
MASTER_MIX=master_mix_df, SOURCE_PLATES=sources_paths_df,
CLIP_REACTIONS=clips_df)
with open(construct_base + '_' + FINAL_ASSEMBLIES_INFO_FNAME, # write a new csv for FINAL_ASSEMBLIES_INFO_FNAME
'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
for final_assembly_well, construct_clips in final_assembly_dict.items(): # add in assembly wells and construct clips
csvwriter.writerow([final_assembly_well, construct_clips])
with open(construct_base + '_' + WELL_OUTPUT_FNAME, 'w') as f: # return text document with magbead ethanol well and SOC well
f.write('Magbead ethanol well: {}'.format(etoh_well))
f.write('\n')
f.write('SOC column: {}'.format(soc_column))
print('BOT-2 generator successfully completed!')
def generate_constructs_list(path):
"""Generates a list of dataframes corresponding to each construct. Each
dataframe lists components of the CLIP reactions required.
"""
def process_construct(construct):
"""Processes an individual construct into a dataframe of CLIP reactions
outlining prefix linkers, parts and suffix linkers.
"""
def interogate_linker(linker):
"""Interrogates linker to determine if the suffix linker is a UTR
linker.
"""
if linker.startswith('U'):
return linker.split('-')[0] + '-S'
else:
return linker + "-S"
clips_info = {'prefixes': [], 'parts': [],
'suffixes': []}
for i, sequence in enumerate(construct):
if i % 2 != 0: # for odd values (ie non-linkers)
clips_info['parts'].append(sequence) # add name to clip_info dict parts array
clips_info['prefixes'].append(
construct[i - 1] + '-P') # add name of previous name to clip_info dict linker prefixes array
if i == len(construct) - 1: # if the part is the final part in the construct...
suffix_linker = interogate_linker(construct[0]) # ...find the suffix linker name from the first linker in the construct
clips_info['suffixes'].append(suffix_linker) # add the suffix linker to the clips_info dict
else:
suffix_linker = interogate_linker(construct[i + 1])
clips_info['suffixes'].append(suffix_linker) # add the suffix linker to the clips_info dict
return pd.DataFrame.from_dict(clips_info) # converts the dictionary of parts to a pd.DataFrame
constructs_list = []
with open(path, 'r') as csvfile: # opens path as csvfile
csv_reader = csv.reader(csvfile) # reads csv file
for index, construct in enumerate(csv_reader): # for every construct (ie the row) in the csv file...
if index != 0: # Checks if row is header.
construct = list(filter(None, construct)) # removes empty values from csv
if not construct[1:]:
break
else:
constructs_list.append(process_construct(construct[1:])) # assembles a dictionary of parts and linkers for the constructs csv
# Errors
if len(constructs_list) > MAX_CONSTRUCTS:
raise ValueError(
'Number of constructs exceeds maximum. Reduce construct number in construct.csv.')
else:
return constructs_list
def generate_clips_df(constructs_list):
"""Generates a dataframe containing information about all the unique CLIP
reactions required to synthesise the constructs in constructs_list.
"""
merged_construct_dfs = pd.concat(constructs_list, ignore_index=True) # converts list of dfs into one large df
unique_clips_df = merged_construct_dfs.drop_duplicates() # drop duplicates
unique_clips_df = unique_clips_df.reset_index(drop=True) # reset index (this creates a df of all unique clips)
clips_df = unique_clips_df.copy() # makes a copy which is disconnected to the original
# Error
if len(unique_clips_df.index) > MAX_CLIPS:
raise ValueError(
'Number of CLIP reactions exceeds 48. Reduce number of constructs in construct.csv.')
# Count number of each CLIP reaction
clip_count = np.zeros(len(clips_df.index)) # creates an empty array the for each unique clip
for i, unique_clip in unique_clips_df.iterrows(): # for every unique clip...
for _, clip in merged_construct_dfs.iterrows(): # ...for every clip...
if unique_clip.equals(clip): # ...if they are the same
clip_count[i] = clip_count[i] + 1 # ...tally that unique clip
clip_count = clip_count // FINAL_ASSEMBLIES_PER_CLIP + 1 # find the number of clip wells needed (ie there is a max number of clip uses per reaction/well, if required clips exceeds this for a unique clip then start a new well)
clips_df['number'] = [int(i) for i in clip_count.tolist()] # add a count column to unique clips df
# Associate well/s for each CLIP reaction
clips_df['mag_well'] = pd.Series(['0'] * len(clips_df.index),
index=clips_df.index) # adds new column 'mag_well'
for index, number in clips_df['number'].iteritems(): # for every unique clip...
if index == 0: # ...if its the first clip...
mag_wells = []
for x in range(number): # ...for every count of that clip...
mag_wells.append(mplates.final_well(x + 1 + 48)) # parse into mag wells to return list
clips_df.at[index, 'mag_well'] = tuple(mag_wells) # return tuple to mag_well col
else:
mag_wells = []
for x in range(number):
well_count = clips_df.loc[
:index - 1, 'number'].sum() + x + 1 + 48 # adds all previous clips to clip count
mag_wells.append(mplates.final_well(well_count))
clips_df.at[index, 'mag_well'] = tuple(mag_wells)
return clips_df
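# Worked example (numbers illustrative): a CLIP needed by 20 final assemblies gets
# 20 // FINAL_ASSEMBLIES_PER_CLIP + 1 = 2 reaction wells, so its 'number' entry is 2
# and 'mag_well' is a tuple of two purification-plate wells (offset by 48).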
def generate_sources_dict(paths):
"""Imports csvs files containing a series of parts/linkers with
corresponding information into a dictionary where the key corresponds with
part/linker and the value contains a tuple of corresponding information.
Args:
paths (list): list of strings each corresponding to a path for a
sources csv file.
"""
sources_dict = {}
for deck_index, path in enumerate(paths): # deck_index allocated to each source
with open(path, 'r') as csvfile:
csv_reader = csv.reader(csvfile)
for index, source in enumerate(csv_reader):
if index != 0:
csv_values = source[1:] # adds the part well location and concentration
csv_values.append(SOURCE_DECK_POS[deck_index]) # references the available source deck locations, allocates to the deck index, adds this value to the tuple
sources_dict[str(source[0])] = tuple(csv_values) # adds value to sources dict
return sources_dict
def generate_clips_dict(clips_df, sources_dict):
"""Using clips_df and sources_dict, returns a clips_dict which acts as the
sole variable for the opentrons script "clip.ot2.py".
"""
max_part_vol = CLIP_VOL - (T4_BUFF_VOL + BSAI_VOL + T4_LIG_VOL
+ CLIP_MAST_WATER + 2)
clips_dict = {'prefixes_wells': [], 'prefixes_plates': [],
'suffixes_wells': [], 'suffixes_plates': [],
'parts_wells': [], 'parts_plates': [], 'parts_vols': [],
'water_vols': []}
# Generate clips_dict from args
try:
for _, clip_info in clips_df.iterrows(): # for every unique clip...
prefix_linker = clip_info['prefixes'] # ...subset the prefix linker...
clips_dict['prefixes_wells'].append([sources_dict[prefix_linker][0]] # ...add the well ID to the clips_dict prefix wells as many times as unique clip used
* clip_info['number'])
clips_dict['prefixes_plates'].append( # ...pass the prefix linker info (well pos, conc, deck pos) to handle_2_columns(), returning tuple of 3 values in the same format
[handle_2_columns(sources_dict[prefix_linker])[2]] * clip_info['number'])
suffix_linker = clip_info['suffixes'] # ...subset the prefix linker...
clips_dict['suffixes_wells'].append([sources_dict[suffix_linker][0]] # ...add the well ID to the clips_dict suffix wells as many times as unique clip used
* clip_info['number'])
clips_dict['suffixes_plates'].append( # ...return suffix linker info in correct format as before
[handle_2_columns(sources_dict[suffix_linker])[2]] * clip_info['number'])
part = clip_info['parts'] # ...subset the part...
clips_dict['parts_wells'].append( # ...pass the part info (well pos, conc, deck pos) to handle_2_columns(), returning tuple of 3 values in the same format
[sources_dict[part][0]] * clip_info['number'])
clips_dict['parts_plates'].append( # ...return part info in correct format as before
[handle_2_columns(sources_dict[part])[2]] * clip_info['number'])
if not sources_dict[part][1]: # if concentration not defined...
clips_dict['parts_vols'].append([DEFAULT_PART_VOL] * # ...add the default vol required * number of clips required
clip_info['number'])
clips_dict['water_vols'].append([max_part_vol - DEFAULT_PART_VOL] # ...add the water vol required * number of clips required
* clip_info['number'])
else:
part_vol = round(
PART_PER_CLIP / float(sources_dict[part][1]), 1) # c1*v1/c2 = v2 where parts normally require 1ul at 100ng/ul
if part_vol < MIN_VOL: # correct for min vol requirement (could add min conc here instead)
part_vol = MIN_VOL
elif part_vol > max_part_vol: # correct for max vol requirement
part_vol = max_part_vol
water_vol = max_part_vol - part_vol # calculate water vol
clips_dict['parts_vols'].append(
[part_vol] * clip_info['number'])
clips_dict['water_vols'].append(
[water_vol] * clip_info['number'])
except KeyError:
sys.exit('likely part/linker not listed in sources.csv')
for key, value in clips_dict.items(): # removes sublists from dict values
clips_dict[key] = [item for sublist in value for item in sublist]
return clips_dict
def generate_final_assembly_dict(constructs_list, clips_df):
"""Using constructs_list and clips_df, returns a dictionary of final
assemblies with keys defining destination plate well positions and values
indicating which clip reaction wells are used.
"""
final_assembly_dict = {}
clips_count = np.zeros(len(clips_df.index)) # empty array of length unique clips
for construct_index, construct_df in enumerate(constructs_list): # for every construct df in constructs list...
construct_well_list = []
for _, clip in construct_df.iterrows(): # ...for every clip in that constructs df...
clip_info = clips_df[(clips_df['prefixes'] == clip['prefixes']) & # ...selects unique clip (in clips df) by matching its linkers and part
(clips_df['parts'] == clip['parts']) &
(clips_df['suffixes'] == clip['suffixes'])]
clip_wells = clip_info.at[clip_info.index[0], 'mag_well'] # subset mag bead wells
clip_num = int(clip_info.index[0]) # find number of clips required
clip_well = clip_wells[int(clips_count[clip_num] // # finds the mag bead well for that clip
FINAL_ASSEMBLIES_PER_CLIP)]
clips_count[clip_num] = clips_count[clip_num] + 1 # counts clips so that mag well changes when clips required exceeds clips per well
construct_well_list.append(clip_well) # adds clip well to list
final_assembly_dict[mplates.final_well( # creates dict with assembly well as the key for each construct, and lists of required clip wells as values
construct_index + 1)] = construct_well_list
return final_assembly_dict
def calculate_final_assembly_tipracks(final_assembly_dict):
"""Calculates the number of final assembly tipracks required ensuring
no more than MAX_FINAL_ASSEMBLY_TIPRACKS are used.
"""
final_assembly_lens = []
for values in final_assembly_dict.values(): # for every assembly...
final_assembly_lens.append(len(values)) # ...count tips required
master_mix_tips = len(list(set(final_assembly_lens))) # how many MM tips required = number of different reactions requiring different numbers of parts (different master mixes required for each)
total_tips = master_mix_tips + sum(final_assembly_lens)
final_assembly_tipracks = (total_tips-1) // 96 + 1 # tipracks needed
if final_assembly_tipracks > MAX_FINAL_ASSEMBLY_TIPRACKS:
raise ValueError(
'Final assembly tiprack number exceeds number of slots. Reduce number of constructs in constructs.csv')
else:
return final_assembly_tipracks
def generate_spotting_tuples(constructs_list, spotting_vols_dict):
"""Using constructs_list, generates a spotting tuple
(Refer to 'transformation_spotting_template.py') for every column of
constructs, assuming the 1st construct is located in well A1 and wells
increase linearly. Target wells locations are equivalent to construct well
locations and spotting volumes are defined by spotting_vols_dict.
Args:
spotting_vols_dict (dict): Part number defined by keys, spotting
volumes defined by corresponding value.
"""
# Calculate wells and volumes
wells = [mplates.final_well(x + 1) for x in range(len(constructs_list))] # assigns a final well for every assembly using a list comprehension
vols = [SPOTTING_VOLS_DICT[len(construct_df.index)] # assigns spotting vol based on how many clips there are in each reaction
for construct_df in constructs_list]
# Package spotting tuples
spotting_tuple_num = len(constructs_list)//8 + (1 # number of spotting steps with the 8 channel pipette
if len(constructs_list) % 8 > 0 else 0)
spotting_tuples = []
for x in range(spotting_tuple_num): # for every spotting step...
if x == spotting_tuple_num - 1: #
tuple_wells = tuple(wells[8*x:])
tuple_vols = tuple(vols[8*x:])
else:
tuple_wells = tuple(wells[8*x:8*x + 8])
tuple_vols = tuple(vols[8*x:8*x + 8])
spotting_tuples.append((tuple_wells, tuple_wells, tuple_vols))
return spotting_tuples
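# Example output (wells and volumes illustrative): for 10 constructs this returns two
# tuples, one per 8-channel spotting step, e.g.
#
#     [(('A1', ..., 'H1'), ('A1', ..., 'H1'), (5, 5, 5, 5, 5, 5, 5, 5)),
#      (('A2', 'B2'), ('A2', 'B2'), (5, 5))]
#
# i.e. (source wells, target wells, spotting volumes) for each step.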
def generate_ot2_script(ot2_script_path, template_path, **kwargs):
"""Generates an ot2 script named 'ot2_script_path', where kwargs are
written as global variables at the top of the script. For each kwarg, the
    keyword defines the variable name while the value defines the value of the
    variable. The remainder of the template file is subsequently written below.
"""
with open(ot2_script_path, 'w') as wf:
with open(template_path, 'r') as rf: # opens template in read format
for index, line in enumerate(rf):
if line[:3] == 'def': # find location of def in the file and save index as function start
function_start = index
break
else:
wf.write(line) # otherwise write the line (ie write all lines up to start)
for key, value in kwargs.items(): # read in kwargs (user defined input)
wf.write('{}='.format(key)) # write 'key = '
if type(value) == dict: # if the kwarg value is a dictionary then return as a str
wf.write(json.dumps(value))
elif type(value) == str: # if the kwarf value is a string then return with''
wf.write("'{}'".format(value))
else: # else return string
wf.write(str(value))
wf.write('\n')
wf.write('\n')
with open(template_path, 'r') as rf: # reopen rf and write lines succeeding the user defined input
for index, line in enumerate(rf):
if index >= function_start - 1:
wf.write(line)
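# For example (values illustrative), generating the purification script writes the
# kwargs as module-level globals near the top of the output, e.g.
#
#     sample_number=48
#     ethanol_well='A11'
#
# and then copies the template from just before its first 'def' onwards.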
def generate_master_mix_df(clip_number):
"""Generates a dataframe detailing the components required in the clip
reaction master mix.
"""
COMPONENTS = {'Component': ['Promega T4 DNA Ligase buffer, 10X',
'Water', 'NEB BsaI-HFv2',
'Promega T4 DNA Ligase']}
VOL_COLUMN = 'Volume (uL)'
    master_mix_df = pd.DataFrame.from_dict(COMPONENTS)
import sqlite3
import pandas as pd
from soynlp.noun import LRNounExtractor_v2
from collections import defaultdict
import re
import math
from string import punctuation
from soynlp.word import WordExtractor
# Define stopword patterns
pattern1 = re.compile(r'[{}]'.format(re.escape(punctuation))) # remove punctuation
pattern2 = re.compile(r'[^가-힣 ]') # remove special characters, stray consonants/vowels, digits and English
pattern3 = re.compile(r'\s{2,}') # collapse runs of whitespace to a single space
class Ext:
    # Data is passed in when an instance is created.
def __init__(self, df):
self.df = df
self.noun_extractor = LRNounExtractor_v2(verbose=True)
self.word_extractor = WordExtractor(min_frequency=math.floor(len(self.df) * 0.00001))
    # Apply the stopword patterns defined above to the data
def cleaning(self):
self.df['head'] = self.df['head'].map(lambda x: pattern3.sub(' ',
pattern2.sub('',
pattern1.sub('', x))))
        return self.df # return the stopword-cleaned data
    # First-pass neologism candidates via soynlp's noun extraction
def extract_nouns(self):
nouns = self.noun_extractor.train_extract(self.df['head'], min_noun_frequency=math.floor(len(self.df) * 0.00001))
        # Extract nouns from the input data frame; a noun must appear in at least 0.001% of all posts.
        # News data requires at least 0.001%, community data at least 0.01%.
        words = {k: v for k, v in nouns.items() if len(k) > 1}
        # Store in the dict 'words': key = noun (neologism candidate), value = NounScore
        return words # return the first-pass neologism candidates
    # Look the extracted words up in the dictionary; those not found become second-pass candidates
def search_dict(self, nouns):
conn = sqlite3.connect('kr_db.db')
        data = pd.read_sql('SELECT word FROM kr_db', conn) # load the National Institute of Korean Language dictionary DB into a data frame named 'data'
data['word'] = data['word'].map(lambda x: pattern3.sub(' ',
pattern2.sub('',
pattern1.sub('', x))))
data.drop_duplicates(keep='first', inplace=True)
data = ' '.join(data['word'])
        # After stopword removal and de-duplication, join every dictionary word into one string
        return pd.DataFrame([_ for _ in nouns if _[0] not in data]) # return second-pass candidates missing from the dictionary
    # Extract the sentences that contain each neologism candidate
def extract_sent(self, words):
sent = defaultdict(lambda: 0)
for w in words[0]:
temp = [s for s in self.df['head'] if w in s]
sent[w] = ' '.join(temp)
            # Collect the sentences containing the candidate word in temp, then store them in sent as {candidate word: example sentences}
            # The example sentences are joined (space-separated) into a single string before being stored in sent
        return sent # return the sentences that contain each candidate
    # Compute the eight word statistics provided by soynlp from each candidate's sentences
def extract_statistic_value(self, sent):
statistic = defaultdict(lambda: 0)
        for k, v in sent.items(): # k = candidate word, v = its example sentences
            self.word_extractor.train([v]) # train on the example sentences
            try:
                statistic[k] = self.word_extractor.extract()[k] # store in statistic: key = candidate word, value = the eight soynlp statistics
except Exception as e:
print(e)
        return statistic # return the eight soynlp statistics
    # Compute three additional features from the sentences:
    # (ratio of the plural suffix '들' to the right of the candidate / ratio of other postpositions to the right / ratio of white space to the right)
def extract_r_rat(self, sent, statistic):
conn = sqlite3.connect('kr_db.db')
post_pos = pd.read_sql('SELECT word FROM kr_db WHERE ID="조사_기초" OR ID="조사_상세"', conn)
# apply the same cleanup so the entries are searchable (e.g. to strip the '~' from dictionary entries such as '~는')
post_pos['word'] = post_pos['word'].map(lambda x: pattern3.sub(' ',
pattern2.sub('',
pattern1.sub('', x))))
post_pos.drop_duplicates(keep='first', inplace=True)
post_pos = ''.join(post_pos['word']) # after cleanup and de-duplication, join the postpositions into one string
r_rat = defaultdict(lambda: 0)
for k in statistic.keys(): # k = candidate word
try:
self.noun_extractor.train_extract([sent[k]]) # train on the example sentences for this candidate
count = pprat = wsrat = pnrat = 0
for _ in self.noun_extractor.lrgraph.get_r(k, topk=-1): # right-side strings attached to the candidate
if _[0] == '들': # the plural suffix '들' follows the candidate: count towards pnrat
pnrat += _[1]
elif _[0] in post_pos: # another postposition follows the candidate
if _[0] != '':
pprat += _[1]
elif _[0] == '': # nothing / white space follows the candidate
wsrat = _[1]
for _ in self.noun_extractor.lrgraph.get_r(k, topk=-1): # total count of right-side attachments for this candidate
count += _[1]
r_rat[k] = {'rpprat': pprat / count, 'rwsrat': wsrat / count, 'rpnrat': pnrat / count}
except Exception as e:
print(e)
return r_rat # the three custom features
# Combine the extracted features into a single DataFrame.
def combine_var(self, statistic, r_rat):
statistic = pd.DataFrame().from_dict(statistic).T
r_rat = pd.DataFrame()
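# Illustrative usage sketch of the pipeline above; assumes a DataFrame with a 'head' text
# column and a local 'kr_db.db' SQLite file, both of which the class expects. The input
# file name is hypothetical.
# ext = Ext(pd.read_csv('posts.csv'))
# ext.cleaning()                                   # strip punctuation / non-Hangul / extra spaces
# candidates = ext.extract_nouns()                 # first-pass candidates from soynlp
# unseen = ext.search_dict(candidates)             # keep candidates missing from the dictionary
# sentences = ext.extract_sent(unseen)             # example sentences per candidate
# stats = ext.extract_statistic_value(sentences)   # soynlp's eight statistics
# r_rat = ext.extract_r_rat(sentences, stats)      # the three right-side ratios
# features = ext.combine_var(stats, r_rat)         # merge everything into one DataFrame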
from pathlib import Path
import pandas as pd
import numpy as np
import re
import csv
from collections import Counter
import logging
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel( logging.INFO )
__all__ = ['read_geo', 'detect_header_pattern']
''' circular imports problems --- https://stackabuse.com/python-circular-imports/
try:
# first: try to map the canonical version here
import methylprep
read_geo = methylprep.read_geo
except ImportError as error:
# if user doesn't have methylprep installed for the canonical version of this function, import this copy below
'''
def read_geo_v1(filepath, verbose=False, debug=False):
"""Use to load preprocessed GEO data into methylcheck. Attempts to find the sample beta/M_values
in the CSV/TXT/XLSX file and turn it into a clean dataframe, with probe ids in the index/rows.
VERSION 1.0 (deprecated June 2020 for v3, called "read_geo")
- reads a downloaded file, either in csv, xlsx, pickle, txt
- looks for /d_RxxCxx patterned headings and an probe index
- sets index in df to probes
- sets columns to sample names
- forces probe values to be floats, if strings/mixed
- if filename has 'intensit' or 'signal' in it, this converts to betas and saves
even if filename doesn't match, if columns have Methylated in them, it will convert and save
- detect multi-line headers and adjusts dataframe columns accordingly
- returns the usable dataframe
TODO:
- handle files with .Signal_A and .Signal_B instead of Meth/Unmeth
- handle processed files with sample_XX
notes:
this makes inferences based on strings in the filename, and based on the column names.
"""
this = Path(filepath)
if '.csv' in this.suffixes:
raw = pd.read_csv(this)
elif '.xlsx' in this.suffixes:
raw = pd.read_excel(this)
elif '.pkl' in this.suffixes:
raw = pd.read_pickle(this)
return raw
elif '.txt' in this.suffixes:
raw = pd.read_csv(this, sep='\t')
if raw.shape[1] == 1: # pandas doesn't handle \r\n two char line terminators, but seems to handle windows default if unspecified.
raw = pd.read_csv(this, sep='\t', lineterminator='\r') # leaves \n in values of first column, but loads
# lineterminator='\r\n')
# or use codecs first to load and parse text file before dataframing...
else:
LOGGER.error(f'ERROR: this file type ({this.suffix}) is not supported')
return
# next, see if betas are present or do we need to calculate them?
test = raw.iloc[0:100]
unmeth = False
if 'intensit' in str(this.name).lower() or 'signal' in str(this.name).lower(): # signal intensities
unmeth = True # need to calculate beta from unmeth/meth columns
LOGGER.info('Expecting raw meth/unmeth probe data')
else:
#meth_pattern_v1 = re.compile(r'.*[_ \.]Methylated[_ \.]', re.I)
meth_pattern = re.compile(r'.*[_ \.]?(Un)?methylated[_ \.]?', re.I)
meth_cols = len([col for col in test.columns if re.match(meth_pattern, col)])
if meth_cols > 0:
unmeth = True
# this should work below, so that even if betas are present, it will use betas first, then fall back to meth/unmeth
def calculate_beta_value(methylated_series, unmethylated_series, offset=100):
""" borrowed from methylprep.processing.postprocess.py """
methylated = np.clip(methylated_series, 0, None)
unmethylated = np.clip(unmethylated_series, 0, None)
total_intensity = methylated + unmethylated + offset
intensity_ratio = methylated / total_intensity
return intensity_ratio
# look for probe names in values (of first 100 responses)
index_name = None
multiline_header = False
sample_pattern = re.compile(r'\w?\d+_R\d{2}C\d{2}$') # $ ensures column ends with the regex part
sample_pattern_loose = re.compile(r'\w?\d+_R\d{2}C\d{2}.*beta', re.I)
probe_pattern = re.compile(r'(cg|rs|ch\.\d+\.|ch\.X\.|ch\.Y\.)\d+')
samples = []
for col in test.columns:
probes = [i for i in test[col] if type(i) == str and re.match(probe_pattern,i)] #re.match('cg\d+',i)]
if len(probes) == len(test):
index_name = col
if verbose:
LOGGER.info(f"Found probe names in `{col}` column and setting as index.")
elif len(probes)/len(test) > 0.8:
index_name = col
multiline_header = True
break
if re.match(sample_pattern, col):
samples.append(col)
if multiline_header: # start over with new column names
try:
start_index = len(test) - len(probes) - 1
# recast without header, starting at row before first probe
new_column_names = pd.Series(list(raw.iloc[start_index])).replace(np.nan, 'No label')
probe_list = raw[index_name].iloc[start_index + 1:]
probe_list = probe_list.rename(raw[index_name].iloc[start_index + 1])
bad_probe_list = [probe for probe in probe_list if not re.match(probe_pattern, probe)] # not probe.startswith('cg')]
if bad_probe_list != []:
LOGGER.error(f'ERROR reading probes with multiline header: {bad_probe_list[:200]}')
return
raw = raw.iloc[start_index + 1:]
raw.columns = new_column_names
test = raw.iloc[0:100]
samples = []
for col in test.columns:
if re.match(sample_pattern, col):
samples.append(col)
# raw has changed.
out_df = pd.DataFrame(index=probe_list)
except Exception as e:
LOGGER.error("ERROR: Unable to parse the multi-line header in this file. If you manually edit the file headers to ensure the sample intensities unclude 'Methylated' and 'Unmethylated' in column names, it might work on retry.")
return
else:
out_df = pd.DataFrame(index=raw[index_name]) # only used with unmethylated data sets
if samples == []:
# in some cases there are multiple columns matching sample_ids, and we want the 'beta' one
for col in test.columns:
if re.match(sample_pattern_loose, col):
samples.append(col)
# or we need TWO columns per sample and we calculate 'beta'.
if samples == [] and unmeth:
unmeth_samples = []
meth_samples = []
#unmeth_pattern_v1 = re.compile(r'.*[_ \.]Unmethylated[_ \.].*', re.I)
#meth_pattern_v1 = re.compile(r'.*[_ \.]Methylated[_ \.].*', re.I)
unmeth_pattern = re.compile(r'.*[_ \.]?Unmethylated[_ \.]?', re.I)
meth_pattern = re.compile(r'.*[_ \.]?(?<!Un)Methylated[_ \.]?', re.I)
for col in test.columns:
if re.match(unmeth_pattern, col):
unmeth_samples.append(col)
if debug:
LOGGER.info(col)
if re.match(meth_pattern, col):
meth_samples.append(col)
if debug:
LOGGER.info(col)
if unmeth_samples != [] and meth_samples != [] and len(unmeth_samples) == len(meth_samples):
# next: just need to match these up. they should be same if we drop the meth/unmeth part
if verbose:
LOGGER.info(f"{len(unmeth_samples)} Samples with Methylated/Unmethylated probes intensities found. Calculating Beta Values.")
linked = []
for col in unmeth_samples:
test_name = col.replace('Unmethylated','Methylated')
if test_name in meth_samples:
linked.append([col, test_name])
# Here, we calculate betas for full raw data frame
for col_u, col_m in linked:
col_name = col_u.replace('Unmethylated','').replace('Signal','').strip()
unmeth_series = raw[col_u]
meth_series = raw[col_m]
betas = calculate_beta_value(meth_series, unmeth_series)
try:
out_df[col_name] = betas
samples.append(col_name)
except Exception as e:
LOGGER.error('ERROR', col_name, len(betas), out_df.shape, e)
elif unmeth:
LOGGER.info(f"File appears to contain probe intensities, but the column names don't match up for samples, so can't calculate beta values.")
if samples != [] and verbose and not unmeth:
LOGGER.info(f"Found {len(samples)} samples on second pass, apparently beta values with a non-standard sample_naming convention.")
elif samples != [] and verbose and unmeth:
pass
elif samples == []:
# no samples matched, so show the columns instead
LOGGER.info(f"No samples found. Here are some column names:")
LOGGER.info(list(test.columns)[:20])
return
if index_name == None:
LOGGER.error("Error: probe names not found in any columns")
return
if unmeth and samples != [] and out_df.shape[1] > 1:
# column names are being merged and remapped here as betas
df = out_df # index is already set
elif multiline_header:
df = raw.loc[:, samples]
df.index = probe_list
else:
df = raw[[index_name] + samples]
df = df.set_index(index_name)
# finally, force probe values to be floats
num_converted = 0
for col in df.columns:
if df[col].dtype.kind != 'f' and df[col].dtype.kind == 'O':
# convert string to float
try:
#df[col] = df[col].astype('float16')
# use THIS when mix of numbers and strings
df[col] = pd.to_numeric(df[col], errors='coerce')
num_converted += 1
except:
LOGGER.error('error')
df = df.drop(columns=[col])
if verbose:
if num_converted > 0:
LOGGER.info(f"Converted {num_converted} samples from string to float16.")
LOGGER.info(f"Found {len(samples)} samples and dropped {len(raw.columns) - len(samples)} meta data columns.")
return df
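# Illustrative note on the beta conversion used above:
# beta = methylated / (methylated + unmethylated + offset), with offset defaulting to 100,
# and both intensities floored at 0 before the ratio is taken.
_example_beta = 600 / (600 + 200 + 100)  # ~0.667, matching calculate_beta_value(600, 200)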
def pd_read_big_csv(filepath, max_cols=1000, **kwargs):
header=pd.read_csv(filepath, nrows=10, **kwargs)
ncols=len(header.columns)
if ncols <= max_cols:
return pd.read_csv(filepath, **kwargs)
r=[]
for i in range((ncols + max_cols - 1) // max_cols):
maxcol=(i+1)*max_cols
if maxcol >= ncols:
maxcol = ncols
r.append(pd.read_csv(filepath, usecols=range(i*max_cols, maxcol), **kwargs))
return pd.concat(r, axis=1)
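# Illustrative usage: reading a very wide GEO matrix in column chunks. The file name is
# hypothetical; with max_cols=1000 a 2,500-column matrix is read in three passes of up to
# 1,000 columns each and the pieces are concatenated column-wise.
# wide_df = pd_read_big_csv('GSE12345_matrix_signal.csv', max_cols=1000, sep='\t')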
def read_geo(filepath, verbose=False, debug=False, as_beta=True, column_pattern=None, test_only=False, rename_probe_column=True, decimals=3):
"""Use to load preprocessed GEO data into methylcheck. Attempts to find the sample beta/M_values
in the CSV/TXT/XLSX file and turn it into a clean dataframe, with probe ids in the index/rows.
Version 3 (introduced June 2020)
- reads a downloaded file, either in csv, xlsx, pickle, txt
- looks for /d_RxxCxx patterned headings and an probe index
- sets index in df to probes
- sets columns to sample names
- forces probe values to be floats, if strings/mixed
- if filename has 'intensit' or 'signal' in it, this converts to betas and saves
even if filename doesn't match, if columns have Methylated in them, it will convert and save
- detect multi-line headers and adjusts dataframe columns accordingly
- returns the usable dataframe
as_beta == True -- converts meth/unmeth into a df of sample betas.
column_pattern=None (Sample21 | Sample_21 | Sample 21) -- some string of characters that precedes the number part of each sample in the columns of the file to be ingested.
FIXED:
[x] handle files with .Signal_A and .Signal_B instead of Meth/Unmeth
[x] BUG: can't parse matrix_... files if they use underscores instead of spaces around sample numbers, or where sampleXXX has no separator.
[x] handle processed files with sample_XX
[x] returns IlmnID as index/probe column, unless 'rename_probe_column' == False
[x] pass in sample_column names from header parser so that logic is in one place
(makes the output much larger, so add kwarg to exclude this)
[x] decimals (default 3) -- round all probe beta/intensity/p values returned to this number of decimal places.
[x] bug: can only recognize beta samples if 'sample' in column name, or sentrix_id pattern matches columns.
need to expand this to handle arbitrary sample naming styles (limited to one column per sample patterns)
TODO:
[-] BUG: meth_unmeth_pval works `as_beta` but not returning full data yet
[-] multiline header not working with all files yet.
notes:
this makes inferences based on strings in the filename, and based on the column names.
"""
#1. load and read the file, regardless of type and CSV delimiter choice.
this = Path(filepath)
kwargs = {'nrows':200} if test_only else {}
raw = pd_load(filepath, **kwargs)
if not isinstance(raw, pd.DataFrame):
LOGGER.error(f"Did not detect a file: {type(raw)} aborting")
return
if debug: LOGGER.info(f"{filepath} loaded.")
def calculate_beta_value(methylated_series, unmethylated_series, offset=100):
""" borrowed from methylprep.processing.postprocess.py """
methylated = np.clip(methylated_series, 0, None)
unmethylated = np.clip(unmethylated_series, 0, None)
total_intensity = methylated + unmethylated + offset
intensity_ratio = methylated / total_intensity
return intensity_ratio
#if as_beta:
def convert_meth_to_beta(raw, meta, rename_probe_column=True, decimals=3, verbose=False):
#if (meta['column_pattern'] == 'meth_unmeth_pval' and
# meta['sample_columns_meth_unmeth_count'] > 0 and
# 'sample_numbers_range' != []):
# beta = methylated / total intensity (meth + unmeth + 100)
# use sample_column_names and sample_numbers_list to find each set of columns
out_df = pd.DataFrame(index=raw[meta['probe_column_name']])
probe_name_msg = meta['probe_column_name']
if rename_probe_column:
out_df.index.name = 'IlmnID' # standard for methyl-suite, though ID_REF is common in GEO.
probe_name_msg = f"{meta['probe_column_name']} --> IlmnID"
if not meta.get('sample_names') and not meta['sample_names'].get('meth'):
# when "sample" not in columns.
LOGGER.error("ERROR: missing sample_names")
pass
if meta['sample_numbers_range'] and meta['sample_numbers_range'][1] <= meta['total_samples']:
# if non-sequential sample numbers, cannot rely on this range.
sample_min, sample_max = meta['sample_numbers_range']
else:
sample_min = 1
sample_max = meta['total_samples']
# JSON returns 1 to N (as human names) but index is 0 to N.
for sample_number in range(sample_min -1, sample_max):
# here need to get the corresponding parts of each sample triplet of columns.
try:
col_m = meta['sample_names']['meth'][sample_number]
col_u = meta['sample_names']['unmeth'][sample_number]
#col_pval = meta['sample_columns']['pval']
except Exception as e:
LOGGER.error(f"ERROR {e}: could not find matching meth/unmeth columns for sample index {sample_number}")
continue
unmeth_series = raw[col_u]
meth_series = raw[col_m]
betas = calculate_beta_value(meth_series, unmeth_series)
# try to lookup and retain any unique part of column names here
col_name = f"Sample_{sample_number + 1}"
if isinstance(meta['sample_names_stems'], list):
# assume order of sample_names_stems matches order of sample names
try:
this_stem = meta['sample_names_stems'][sample_number]
if this_stem not in out_df.columns:
col_name = this_stem
except IndexError:
if debug: LOGGER.error(f"ERROR: unable to assign Sample {sample_number} using original column stem.")
""" This code was trying to match samples up using regex/difflib but it was unreliable.
this_stem = [stem for stem in meta['sample_names_stems'] if stem in col_m]
if len(this_stem) > 0:
if len(this_stem) > 1:
# difflib to separatet Sample 1 vs Sample 11
best_match = difflib.get_close_matches(col_m, this_stem, 1)
if best_match != [] and best_match[0] not in out_df.columns:
col_name = best_match[0]
elif best_match != []: # ensure unique in out_df, even if not a perfect transfer of labels.
col_name = f"{best_match[0]}_{sample_number}"
else:
if debug: LOGGER.info(f"WARNING: multiple similar sample names detected but none were a close match: {col_m} : {this_stem}")
col_name = f"Sample_{sample_number}"
elif len(this_stem) == 1 and this_stem[0] not in out_df.columns:
# only one match, and is unique.
col_name = this_stem[0]
else: # only one match, but already in out_df, so can't reuse.
col_name = f"{this_stem[0]}_Sample_{sample_number}"
else:
col_name = f"Sample_{sample_number}"
else:
col_name = f"Sample_{sample_number}"
"""
try:
out_df[col_name] = betas
sample_number += 1
except Exception as e:
LOGGER.error(f"ERROR {col_name} {len(betas)} {out_df.shape} {e}")
out_df = out_df.round(decimals)
beta_value_range = True if all([all(out_df[col_name].between(0,1)) == True for col_name in out_df.columns]) else False
intensity_value_range = True if all([all(out_df[col_name].between(0,1000000)) == True for col_name in out_df.columns]) else False
try:
value_mean = round(sum(out_df.mean(axis=0))/len(out_df.columns),2)
except:
value_mean = 0
if verbose:
LOGGER.info(f"Returning {len(out_df.columns)} samples (mean: {value_mean}). Structure appears to be {meta['columns_per_sample']} columns per sample; raw data column pattern was '{meta['column_pattern']}'; probes in rows; and {probe_name_msg} as the probe names.")
return out_df
#2. test file structure
# next, see if betas are present or do we need to calculate them?
test = raw.iloc[0:100]
meta = detect_header_pattern(test, filepath, return_sample_column_names=True)
if meta['multiline_header'] and meta['multiline_header_rows'] > 0:
if verbose: print("Reloading raw data, excluding header.")
old_non_blank_col_count = len(raw.loc[:, ~raw.columns.str.contains('^Unnamed:')].columns) #~ before test counts non-blank columns
kwargs['skiprows'] = meta['multiline_header_rows']
raw = pd_load(filepath, **kwargs)
new_non_blank_col_count = len(raw.loc[:, ~raw.columns.str.contains('^Unnamed:')].columns)
if verbose: print(f"After ignoring multiline header: {old_non_blank_col_count} old columns are now {new_non_blank_col_count} new named columns.")
if debug:
LOGGER.info(f"file shape: {raw.shape}")
concise = meta.copy()
concise.pop('sample_names')
from pprint import pprint
pprint(concise)
del concise
#3. use header_meta to parse and return data as dataframe
"""
{'all_sample_columns': True,
'column_pattern': 'meth_unmeth_pval',
'columns_per_sample': 3,
'fraction_sample pval meth unmeth signal intensity': (1.0, 0.33, 0.33, 0.33, 0.66, 0.0),
'has_beta_values': False,
'has_meth_unmeth_values': True,
'has_p_values': True,
'multiline_header': False,
'multiline_rows': 0,
'one_sample_beta': False,
'probe_column_name': 'ID_REF',
'sample_columns_meth_unmeth_count': 222,
'sample_numbers_range': [1, 111], # or None
'sequential_numbers': True,
'total_samples': 111}"""
if (meta['column_pattern'] == 'sample_beta_sentrix_id'):
sample_columns = meta['sample_names']
out_df = pd.DataFrame(data=raw[sample_columns])
out_df.index=raw[meta['probe_column_name']]
out_df = out_df.round(decimals) #{sample:demicals for sample in sample_columns})
probe_name_msg = meta['probe_column_name']
if rename_probe_column:
out_df.index.name = 'IlmnID' # standard for methyl-suite, though ID_REF is common in GEO.
probe_name_msg = f"{meta['probe_column_name']} --> IlmnID"
beta_value_range = True if all([all(out_df[col_name].between(0,1)) == True for col_name in out_df.columns]) else False
intensity_value_range = True if all([all(out_df[col_name].between(0,1000000)) == True for col_name in out_df.columns]) else False
try:
value_mean = round(sum(out_df.mean(axis=0))/len(out_df.columns),2)
except:
value_mean = 0
if verbose and meta['columns_per_sample'] != 1 and beta_value_range:
LOGGER.info(f"Returning raw data. Structure appears to be {meta['columns_per_sample']} columns per sample; {len(out_df.columns)} samples; column_pattern: {meta['column_pattern']}; probes in rows; and {probe_name_msg} as the probe names.")
elif verbose and beta_value_range:
LOGGER.info(f"Returning raw data. Appears to be sample beta values in columns (mean: {value_mean}), probes in rows, and {probe_name_msg} as the probe names.")
elif verbose and not beta_value_range and intensity_value_range and value_mean > 10:
LOGGER.info(f"Returning raw data. Appears to be sample fluorescence intensity values in columns (mean: {int(value_mean)}), probes in rows, and {probe_name_msg} as the probe names.")
elif verbose and not beta_value_range and not intensity_value_range:
LOGGER.info(f"Returning raw data of UNKNOWN type. Not beta values or fluorescence intensity values in columns. Mean probe value was {value_mean}. Probes are in rows, and {probe_name_msg} as the probe names.")
return out_df
elif ((meta['column_pattern'] == 'sample_beta_numbered') or
(meta['all_sample_columns'] and meta['has_beta_values']) or
(meta['sequential_numbers'] and meta['sample_columns_meth_unmeth_count'] == 0) or
(meta['sample_numbers_range'] and meta['has_p_values'] is False) or
(meta['all_sample_columns'] and meta['sample_numbers_range'] and meta['columns_per_sample'] == 1)
):
sample_columns = [column for column in test.columns if 'sample' in column.lower()]
out_df = pd.DataFrame(data=raw[sample_columns])
out_df.index=raw[meta['probe_column_name']]
out_df = out_df.round(decimals) #{sample:demicals for sample in sample_columns})
probe_name_msg = meta['probe_column_name']
if rename_probe_column:
out_df.index.name = 'IlmnID' # standard for methyl-suite, though ID_REF is common in GEO.
probe_name_msg = f"{meta['probe_column_name']} --> IlmnID"
#if debug: LOGGER.info(f"DEBUG: out_df.columns: {out_df.columns} --- out_df.index {out_df.index.name} out_df.shape {out_df.shape}")
beta_value_range = True if all([all(out_df[col_name].between(0,1)) == True for col_name in out_df.columns]) else False
intensity_value_range = True if all([all(out_df[col_name].between(0,1000000)) == True for col_name in out_df.columns]) else False
try:
value_mean = round(sum(out_df.mean(axis=0))/len(out_df.columns),2)
except:
value_mean = 0
if verbose and meta['columns_per_sample'] != 1 and beta_value_range:
LOGGER.info(f"Returning raw data. Structure appears to be {meta['columns_per_sample']} columns per sample; {len(out_df.columns)} numbered samples; column_pattern: {meta['column_pattern']}; probes in rows; and {probe_name_msg} as the probe names.")
elif verbose and beta_value_range:
LOGGER.info(f"Returning raw data. Appears to contain {len(out_df.columns)} numbered samples; beta values in columns (mean: {value_mean}), probes in rows, and {probe_name_msg} as the probe names.")
elif verbose and not beta_value_range and intensity_value_range and value_mean > 10:
LOGGER.info(f"Returning raw data. Appears to be sample fluorescence intensity values in columns ({len(out_df.columns)} samples with mean: {int(value_mean)}), probes in rows, and {probe_name_msg} as the probe names.")
elif verbose and not beta_value_range and not intensity_value_range:
LOGGER.info(f"Returning raw data of UNKNOWN type. Not beta values or fluorescence intensity values in columns. {len(out_df.columns)} samples with a mean probe value of {value_mean}. Probes are in rows, and {probe_name_msg} as the probe names.")
return out_df
elif meta['column_pattern'] == 'beta_intensity_pval':
if verbose: LOGGER.info("returning raw data without processing. Column pattern was 'beta_intensity_pval'.")
return raw
elif meta['column_pattern'] == 'beta_pval_a_b':
if as_beta and meta['sample_names'] != None:
# only returning the beta part. Signal_A and Signal_B are the meth/unmeth parts.
out_df = pd.DataFrame(data=raw[meta['sample_names']])
out_df.index=raw[meta['probe_column_name']]
out_df = out_df.round(decimals) #{sample:demicals for sample in sample_columns})
probe_name_msg = meta['probe_column_name']
if rename_probe_column:
out_df.index.name = 'IlmnID' # standard for methyl-suite, though ID_REF is common in GEO.
probe_name_msg = f"{meta['probe_column_name']} --> IlmnID"
try:
value_mean = round(sum(out_df.mean(axis=0))/len(out_df.columns),2)
except:
value_mean = 0
if verbose:
LOGGER.info(f"Returning beta values for {len(out_df.columns)} samples in columns (mean: {value_mean}), probes in rows, and {probe_name_msg} as the probe names.")
return out_df
else:
if verbose: LOGGER.info("returning raw data without processing. Column pattern was 'beta_pval_a_b'.")
return raw
if meta['column_pattern'] == 'meth_unmeth':
LOGGER.info("*** column_pattern was 'meth_unmeth' - this has literally never happened before. ***")
if meta['column_pattern'] in ('meth_unmeth','meth_unmeth_pval') and as_beta:
if debug: LOGGER.info("Converting meth and unmeth intensities to beta values.")
return convert_meth_to_beta(raw, meta, rename_probe_column=rename_probe_column, verbose=verbose)
if meta['column_pattern'] in ('meth_unmeth','meth_unmeth_pval') and not as_beta:
if verbose: LOGGER.info(f"Returning raw data without processing. Column pattern was {meta['column_pattern']}.")
return raw
if meta['column_pattern'] is None:
if debug: LOGGER.info("Returning raw data without processing. No file header column pattern was detected.")
return raw
raise Exception("Unable to identify file structure")
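# Illustrative usage of read_geo(); the file name is hypothetical. verbose=True logs which
# header pattern was detected, and as_beta=True converts meth/unmeth intensity columns into
# beta values where needed.
# betas = read_geo('GSE98765_matrix_processed.csv', verbose=True, as_beta=True)
# betas.head()   # probes (IlmnID) in rows, one column per sample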
def detect_header_pattern(test, filename, return_sample_column_names=False):
"""test is a dataframe with first 100 rows of the data set, and all columns.
makes all the assumptions easier to read in one place.
betas
non-normalized
matrix_processed
matrix_signal
series_matrix
methylated_signal_intensities and unmethylated_signal_intensities
_family
TODO: GSM12345-tbl-1.txt type files (in _family.tar.gz packages) are possible, but needs more work.
TODO: combining two files with meth/unmeth values
- numbered samples handled differently from sample_ids in columns
- won't detect columns with no separators in strings
"""
if test.shape[0] != 100:
raise ValueError("test dataset must be exactly 100 rows")
if test.shape[1] == 1:
raise ValueError("this dataset has only one sample. it is likely that the columns were not parsed correctly.")
seps = [' ', '_', '.', '-'] # for parsing columns. also try without any separators
index_names = ['IlmnID', 'ID_REF', 'illumina_id']
# sample patterns
sample_pattern = re.compile(r'\w?\d+_R\d{2}C\d{2}$') # $ ensures column ends with the regex part
sample_pattern_loose = re.compile(r'\w?\d+_R\d{2}C\d{2}.*beta', re.I)
samplelike_pattern = re.compile(r'.*(?:\w?\d+_R\d{2}C\d{2}|sample).*', re.I)
probe_pattern = re.compile(r'(cg|rs|ch\.\d+\.|ch\.X\.|ch\.Y\.)\d+')
#pval_pattern = re.compile(r'(.*)(?:\bPval\b|\.Pval|_Pval_|-Pval|Pval|\bDetection\bPval|_Detection_Pval|\._Detection\.Pval||\._Detection\bPval\b).*', re.I)
pval_pattern = re.compile(r'(.*)(?:\bPval\b|\.Pval|_Pval_|-Pval|Pval).*', re.I)
meth_pattern = re.compile(r'(.*)(?:\bmeth|\.meth|_meth|-meth|(?<!un)meth\w+).*', re.I)
unmeth_pattern = re.compile(r'(.*)(?:\bunmeth|\.unmeth|_unmeth|-unmeth|unmeth\w+).*', re.I)
intensity_pattern = re.compile(r'(.*)(?:\bintensity\b|\.intensity|_intensity|-intensity|intensity).*', re.I)
signal_pattern = re.compile(r'(.*)(?:\bsignal\b|\.signal|_signal|-signal|signal).*', re.I)
betalike_pattern = re.compile(r'(.*)(?:\Wbeta\W|\Wavg\Wbeta\W|\Wavg_beta|_avg_beta|\Wavgbeta).*', re.I)
# use the last part to find any sample identifiers; then re.sub() replace them. then pass rest into the beta_converter function.
residual_pattern = re.compile(r'(.*)(?:[\b\._-]Pval[\b\._-]|[\b\._-]Detection[\b\._-]Pval|[\b\._-]meth|(?<!un)meth\w+|\bunmeth|\.unmeth|_unmeth|-unmeth|unmeth\w+|\bintensity\b|\.intensity|_intensity|-intensity|intensity).*', re.I)
file_gsm_txt_in_family_tar = re.compile(r'gsm\d+-tbl-1.txt') # all matches are lower() -- not used yet
#meth_unmeth_pattern = re.compile(r'.*[_ \.]?(Un)?methylated[_ \.]?', re.I)
# filename patterns
this = str(Path(filename).name).lower()
has_beta_values = True if ('matrix' in this and 'signal' not in this) else False #exceptions to this rule found.
# for meth_unmeth, need to calculate beta from columns
has_meth_unmeth_values = True if 'intensit' in this or 'signal' in this else False
# no rule for this yet
has_p_values = None
# GSM1280914-tbl-1.txt contains probe names, one sample beta val column, and an optional p-val column with no header.
one_sample_beta = True if this.startswith('gsm') and re.search(file_gsm_txt_in_family_tar, this) else False
# column parts
sample_columns = [column for column in test.columns if 'sample' in column.lower()]
# -- next: test first 100 rows and calculate the faction of each column that matches the probe_pattern. list of floats.
fraction_probelike = [len([row for row in test[column] if isinstance(row, str) and re.match(probe_pattern,row)])/len(test) for column in test.columns]
fraction_pvallike = round(sum([(True if re.search(pval_pattern,column) else False) for column in test.columns])/len(list(test.columns)),2)
fraction_meth = round(sum([(True if re.search(meth_pattern,column) else False) for column in test.columns])/len(list(test.columns)),2)
fraction_unmeth = round(sum([(True if re.search(unmeth_pattern,column) else False) for column in test.columns])/len(list(test.columns)),2)
fraction_intensity = round(sum([(True if re.search(intensity_pattern,column) else False) for column in test.columns])/len(list(test.columns)),2)
fraction_signal = round(sum([(True if re.search(signal_pattern,column) else False) for column in test.columns])/len(list(test.columns)),2)
fraction_beta = round(sum([(True if re.search(betalike_pattern,column) else False) for column in test.columns])/len(list(test.columns)),2)
fraction_samplelike = round(sum([(True if re.search(samplelike_pattern,column) else False) for column in test.columns])/len(list(test.columns)),2)
probe_column_position = fraction_probelike.index( max(fraction_probelike) )
probe_column_name = list(test.columns)[probe_column_position] # used for index_name
# grab residual column name parts to add to output column names (tricky)
sample_column_residuals = [re.search(residual_pattern, column).groups()[0] for column in test.columns if re.search(residual_pattern, column)]
# a trick to deal with 3-column samples that have similar names; retain any repeated names, if repeats exist.
if sample_column_residuals != [] and Counter(sample_column_residuals).most_common(1)[0][1] > 1:
sample_column_residuals = [column.strip() for column,freq in Counter(sample_column_residuals).most_common() if freq > 1]
## DETECT column repeating structure.
# how many columns PER SAMPLE in structure?
# meth or (un)meth appear anywhere in column name along with 'sample' anywhere
sample_columns_meth_unmeth_count = sum([(True if (re.search(meth_pattern,column) or re.search(unmeth_pattern,column)) else False) for column in test.columns])
#sum([('meth' in column.lower().replace('sample', '')) for column in sample_columns])
# extract any "first" numbers found in column parts, where parts are strings separated any any logical separators.
sample_id_columns = []
sample_numbers_list = []
sample_numbers_range = None
sample_count = 0
sequential_numbers = None # starting from 1 to xx
avg_sample_repeats = 1
for sep in seps:
try:
sample_numbers_list = [[part for part in column.split(sep) if re.match(r'\d+', part)][0] for column in sample_columns]
if len(sample_numbers_list) > 0:
sorted_sample_numbers_list = sorted([int(j) for j in list(set(sample_numbers_list))])
sample_repeats = list(Counter([int(j) for j in list(sample_numbers_list)]).values())
avg_sample_repeats = int(round(sum(sample_repeats)/len(sample_repeats)))
#sequential_numbers = True if all([(i+1 == j) for i,j in list(enumerate(sorted_sample_numbers_list))]) else False
sequential_numbers = True if all([(i+1 == j) for i,j in list(enumerate(sorted_sample_numbers_list))]) else False
sample_count = len(sample_numbers_list)
sample_numbers_range = [min(sorted_sample_numbers_list), max(sorted_sample_numbers_list)]
break
except Exception as e:
continue
# detect if some part of columns are named like sample_ids
if sample_numbers_list == []:
for sep in seps:
sample_id_columns = [column for column in test.columns if any([re.search(sample_pattern, part) for part in column.split(sep)])]
sample_count = len(sample_id_columns)
# tests / data attributes: pval, multiline, probes, sample/nonsample columns
if max(fraction_probelike) < 0.75:
LOGGER.warning(f"WARNING: Unable to identify the column with probe names ({max(fraction_probelike)})")
multiline_rows = 0
header_rows = 0
if 0.75 <= max(fraction_probelike) < 1.00:
multiline_rows = int(round(100*max(fraction_probelike)))
header_rows = 100 - multiline_rows
LOGGER.info(f"Multiline header detected with {header_rows} rows.")
multiline_header = True if multiline_rows > 0 else False
all_sample_columns = all([('sample' in column.lower() or 'ID_REF' in column) for column in list(set(test.columns))])
has_p_values = True if fraction_pvallike > 0 else False
# overall pattern logic
column_pattern = None
if (sample_numbers_list != [] or sample_id_columns != []):
columns_per_sample = int(round(sample_count/len(set(sample_numbers_list or sample_id_columns))))
elif (0.31 <= fraction_meth <= 0.34 and 0.31 <= fraction_unmeth <= 0.34 and 0.31 <= fraction_pvallike <= 0.34):
columns_per_sample = 3
else:
columns_per_sample = 1
if sample_numbers_list != [] and columns_per_sample != avg_sample_repeats:
LOGGER.warning('WARNING: inconsistent columns per sample')
if (sample_count > 0 and
sample_numbers_list != [] and
sample_columns_meth_unmeth_count == 0 and
0.9 < columns_per_sample < 1.1):
column_pattern = 'sample_beta_numbered'
total_samples = len(sample_columns)
elif (sample_count > 0 and
sample_id_columns != [] and
sample_columns_meth_unmeth_count == 0 and
0.9 < columns_per_sample < 1.1):
column_pattern = 'sample_beta_sentrix_id'
total_samples = len(sample_columns)
elif (sample_columns_meth_unmeth_count > 0 and
sample_count > 0 and
sample_columns_meth_unmeth_count > 0 and
1.8 < len(sample_numbers_list) / sample_columns_meth_unmeth_count < 2.2):
column_pattern = 'meth_unmeth'
total_samples = int(round(sample_columns_meth_unmeth_count/2))
elif (sample_columns_meth_unmeth_count > 0 and
sample_count > 0 and
sample_columns_meth_unmeth_count > 0 and
1.4 < len(sample_numbers_list)/sample_columns_meth_unmeth_count < 1.6):
column_pattern = 'meth_unmeth_pval'
total_samples = int(round(sample_columns_meth_unmeth_count/2))
elif (0.03 <= fraction_pvallike <= 0.36 and
0.03 <= fraction_meth <= 0.36 and
0.03 <= fraction_unmeth <= 0.36):
column_pattern = 'meth_unmeth_pval'
total_samples = int(round(len(test.columns)*fraction_pvallike))
elif (0.30 <= fraction_pvallike <= 0.36 and sample_columns_meth_unmeth_count == 0):
column_pattern = 'beta_intensity_pval'
total_samples = int(round(len(test.columns)*fraction_pvallike))
elif (0.45 <= fraction_signal <= 0.55 and
0.22 <= fraction_pvallike <= 0.26):
column_pattern = 'beta_pval_a_b' #SignalA ... SignalB
total_samples = int(round(len(test.columns)*fraction_pvallike))
else:
column_pattern = None
total_samples = 0
# return_sample_column_names
# -- depends on structure.
# -- sample_columns will be a flat list for "sample"-like ones.
# -- Or a dict of lists if meth_unmeth_pval.
if return_sample_column_names:
if fraction_meth > 0 and fraction_unmeth > 0 and column_pattern in ('meth_unmeth_pval', 'meth_unmeth'):
sample_columns = {
'meth': [column for column in test.columns if re.search(meth_pattern,column)],
'unmeth': [column for column in test.columns if re.search(unmeth_pattern,column)],
'pval': [column for column in test.columns if re.search(pval_pattern,column)],
}
elif sample_columns == [] and sample_id_columns != []:
sample_columns = sample_id_columns
elif sample_columns == [] and column_pattern == 'beta_intensity_pval':
sample_columns = test.columns
elif column_pattern == 'beta_pval_a_b': # and sample_columns == []
if 0.2 <= fraction_beta <= 0.28:
sample_columns = [column for column in test.columns if re.search(betalike_pattern,column)]
else:
LOGGER.warning("WARNING: test columns for beta_pval_a_b don't look right.")
sample_columns = test.columns
else:
sample_columns = None
sample_column_residuals = None
return {'has_beta_values': has_beta_values, # or (beta_assumed and columns_per_sample == 1),
'has_meth_unmeth_values': has_meth_unmeth_values,
'has_p_values': has_p_values,
'one_sample_beta': one_sample_beta,
'sample_columns_meth_unmeth_count': sample_columns_meth_unmeth_count,
'all_sample_columns': all_sample_columns,
'multiline_header': multiline_header,
'multiline_header_rows': header_rows,
'column_pattern': column_pattern,
'columns_per_sample': columns_per_sample,
'sequential_numbers': sequential_numbers,
'sample_numbers_range': sample_numbers_range,
'total_samples': total_samples,
'fraction_sample pval meth unmeth signal intensity beta': (fraction_samplelike, fraction_pvallike, fraction_meth, fraction_unmeth, fraction_signal, fraction_intensity, fraction_beta),
'probe_column_name': probe_column_name,
'sample_names': sample_columns,
'sample_names_stems': sample_column_residuals, # unique parts of sample names
}
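# Illustrative usage of the header detector on its own, mirroring how read_geo() calls it
# (exactly the first 100 rows are passed). The file name is hypothetical.
# raw = pd_load('GSE98765_matrix_processed.csv')
# meta = detect_header_pattern(raw.iloc[0:100], 'GSE98765_matrix_processed.csv',
#                              return_sample_column_names=True)
# meta['column_pattern'], meta['total_samples']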
def pd_load(filepath, **kwargs):
""" helper function that reliably loads any GEO file, by testing for the separator that gives the most columns """
this = Path(filepath)
if this.suffix not in ('.xlsx', '.pkl'):
# first, check that we're getting the max cols
test_csv = pd.read_csv(this, nrows=100, skiprows=kwargs.get('skiprows',0))
test_t = pd.read_csv(this, sep='\t', nrows=100, skiprows=kwargs.get('skiprows',0))
test_space = pd.read_csv(this, sep=r',\s+', nrows=100, quoting=csv.QUOTE_ALL, engine='python', skiprows=kwargs.get('skiprows',0))
params = [
{'method':'auto', 'cols': test_csv.shape[1], 'kwargs': {}},
{'method':'tab', 'cols': test_t.shape[1], 'kwargs': {'sep':'\t'}},
{'method':'quoted', 'cols': test_space.shape[1], 'kwargs': {'sep':r',\s+', 'quoting':csv.QUOTE_ALL, 'engine': 'python'}},
]
best_params = sorted([(parts['method'], parts['cols'], parts['kwargs']) for parts in params], key= lambda param_tuple: param_tuple[1], reverse=True)
kwargs.update(best_params[0][2])
if '.csv' in this.suffixes:
raw = pd_read_big_csv(this, **kwargs)
elif '.xlsx' in this.suffixes:
raw = pd.read_excel(this, **kwargs)
elif '.pkl' in this.suffixes:
raw = pd.read_pickle(this, **kwargs)
"Functions to cluster or otherwise reduce the number of hours in generation and load profiles"
from sklearn.cluster import KMeans
from sklearn.preprocessing import minmax_scale
import numpy as np
import datetime
import pandas as pd
def kmeans_time_clustering(
resource_profiles,
load_profiles,
days_in_group,
num_clusters,
include_peak_day=True,
load_weight=1,
variable_resources_only=True,
):
"""Reduce the number of hours in load and resource variability timeseries using
kmeans clustering.
This script is adapted from work originally created by <NAME>.
Parameters
----------
resource_profiles : DataFrame
Hourly generation profiles for all resources. Each column is a resource with
a unique name, each row is a consecutive hour.
load_profiles : DataFrame
Hourly demand profiles of load. Each column is a region with a unique name. each
row is a consecutive hour.
days_in_group : int
The number of 24 hour periods included in each group/cluster
num_clusters : int
The number of clusters to include in the output
include_peak_day : bool, optional
If the days with system peak demand should be included in outputs, by default
True
load_weight : int, optional
A weighting factor for load profiles during clustering, by default 1
variable_resources_only : bool, optional
If clustering should only consider resources with variable (non-zero standard
deviation) profiles, by default True
Returns
-------
(dict, list, list)
This function returns multiple items. The dict has keys ['load_profiles',
'resource_profiles', 'ClusterWeights', 'AnnualGenScaleFactor', 'RMSE', and
'AnnualProfile']
The first list has strings with the order of periods selected e.g. ['p42','p26',
'p3', 'p13', 'p32', 'p8'].
The second list has integer weights of each cluster.
"""
resource_col_names = resource_profiles.columns
if variable_resources_only:
input_std = resource_profiles.describe().loc["std", :]
var_col_names = [col for col in input_std.index if input_std[col] > 0]
resource_profiles = resource_profiles.loc[:, var_col_names]
# Initialize dataframes to store final and intermediate data in
input_data = pd.concat(
[
load_profiles.reset_index(drop=True),
resource_profiles.reset_index(drop=True),
],
axis=1,
)
input_data = input_data.reset_index(drop=True)
original_col_names = input_data.columns.tolist()
# CAUTION: Load column labels should be named with the phrase "Load_"
load_col_names = load_profiles.columns
# Columns to be reported in output files
new_col_names = input_data.columns.tolist() + ["GrpWeight"]
# Dataframe storing final outputs
final_output_data = pd.DataFrame(columns=new_col_names)
# Dataframe storing normalized inputs
norm_tseries = pd.DataFrame(columns=original_col_names)
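# Illustrative sketch of the grouping-and-clustering idea described in the docstring above;
# this is not the original implementation (which continues beyond this excerpt) and the
# dimensions are hypothetical: hourly data is reshaped into blocks of `days_in_group` days,
# each block is normalized, and the blocks are clustered with KMeans.
def _cluster_day_groups_sketch(hourly_profile, days_in_group=7, num_clusters=4):
    hours_per_group = days_in_group * 24
    num_groups = len(hourly_profile) // hours_per_group
    # one row per group of days, one column per hour within the group
    groups = np.asarray(hourly_profile[: num_groups * hours_per_group]).reshape(
        num_groups, hours_per_group
    )
    groups = minmax_scale(groups, axis=1)  # normalize each group before clustering
    km = KMeans(n_clusters=num_clusters, random_state=0).fit(groups)
    return km.labels_  # cluster assignment for each group of days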
import pandas as pd
import os
from data.importer.base_loader import BaseLoader
from sklearn.model_selection import train_test_split
'''
SMM4H Class object to load and prepare all the SMM4H related datasets
'''
class SMM4HLoader(BaseLoader):
def __init__(self):
super().__init__()
self.extraction_data = pd.read_csv(self.dataset_path + 'SMM4H_Task2/SMM4H19_Task2.csv', index_col="tweet_id")
self.save_path = os.getcwd() + "/data/combiner_data/"
'''
Function to get the split ids of SMM4H train, eval or test from the splits folder
Parameters
------------
filename (str): the input filename to fetch the splits can be train, eval or test
Returns
------------
l1 (list): return the ids from that split
'''
def get_split_ids(self, filename):
with open(self.split_path + "SMM4H_Task2/" + filename, 'r') as f:
l1 = f.read()
return l1.split('\n')
'''
Function to get input and target list based on the ids
Parameters
------------
split_id (list): list containing the id from which samples are extracted
extraction_term (str): either extraction or drug keyword depedning upon the dataset wihch is prepared
Returns
------------
tweet (list): extracted input tweets according to ids
extraction (list): extracted target strings according to ids
'''
def get_input_target(self, split_id, extraction_term):
tweet = []
extraction = []
for ids in split_id:
search_id = ids
row_data = self.extraction_data.loc[search_id]
try:
for idx in range(self.extraction_data.loc[search_id].shape[0]):
tweet.append(self.twitter_preprocessor(row_data.iloc[idx]['tweet']))
ex_term = row_data.iloc[idx][extraction_term]
if type(ex_term) != type("hi"):
extraction.append('none')
else:
extraction.append(ex_term.lower())
except:
tweet.append(row_data['tweet'])
ex_term = row_data[extraction_term]
if type(ex_term) != type("hi"):
extraction.append('none')
else:
extraction.append(ex_term.lower())
return tweet, extraction
'''
Function to prepare the AE Extraction dataset for SMM4H Task 2
'''
def prep_data_ner_ade_smm4h_task2(self):
split_list = ["train", "eval", "test"]
for split in split_list:
ids = self.get_split_ids(split + '.txt')
tweet, extraction = self.get_input_target(ids, "extraction")
res_data = pd.DataFrame()
res_data['prefix'] = ["ner ade"]*len(tweet)
res_data['input_text'] = tweet
res_data['target_text'] = extraction
res_data = res_data.sample(frac=1)
res_data.to_csv(self.save_path + 'ner_ade/' + split + '_ner_ade_smm4h_task2.csv', index = None)
print("SMM4H Task2 AE Extraction dataset Saved Successfully!")
'''
Function to prepare the Drug Extraction Dataset for SMM4H Task 2
'''
def prep_data_ner_drug_smm4h_task2(self):
split_list = ["train", "eval", "test"]
for split in split_list:
ids = self.get_split_ids(split + '.txt')
tweet, extraction = self.get_input_target(ids, "drug")
res_data = pd.DataFrame()
res_data['prefix'] = ["ner drug"]*len(tweet)
res_data['input_text'] = tweet
res_data['target_text'] = extraction
res_data = res_data.sample(frac=1)
res_data.to_csv(self.save_path + 'ner_drug/' + split + '_ner_drug_smm4h_task2.csv', index = None)
print("SMM4H Task2 Drug Extraction dataset Saved Successfully!")
'''
Function to partition the given data into train, eval and test
Parameters
-----------
data (pandas dataframe): input data which needs to partitioned
Returns
----------
data_train (pandas dataframe): train split from the input data (80%)
data_eval (pandas dataframe): eval split from the input data (10%)
data_test (pandas dataframe): test split from the input data (10%)
'''
def split_parts(self, data):
data_train, data_eval = train_test_split(data, test_size = 0.2, shuffle = True)
data_eval, data_test = train_test_split(data_eval, test_size = 0.5, shuffle = True)
return data_train, data_eval, data_test
'''
Function to restructure the data and save
Parameters
-------------
df (pandas dataframe): the input dataframe which needs to be restructured
save_name (str): the name of the csv file to be saved
'''
def restructure_data(self, df, save_name):
res_df = pd.DataFrame()
label = df["label"].tolist()
tgt_txt = []
for val in label:
if val == 0:
tgt_txt.append("healthy okay")
else:
tgt_txt.append("adverse event problem")
res_df["prefix"] = ["assert ade"]*len(df)
res_df["input_text"] = [self.twitter_preprocess(in_txt) for in_txt in df['tweet'].tolist()]
res_df["target_text"] = tgt_txt
res_df.to_csv(self.save_path + save_name, index = None)
'''
Function to prepare the AE Detection Dataset for SMM4H Task 1
'''
def prep_data_assert_ade_task1(self):
raw_data = pd.read_csv(self.dataset_path + "SMM4H_Task1/SMM4H19_Task1.csv")
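# Illustrative usage of the loader above; assumes the SMM4H dataset and split files
# referenced in the class are in place under dataset_path and split_path.
# loader = SMM4HLoader()
# loader.prep_data_ner_ade_smm4h_task2()    # writes train/eval/test AE-extraction CSVs
# loader.prep_data_ner_drug_smm4h_task2()   # writes train/eval/test drug-extraction CSVs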
import os
import logging
import copy
import numpy as np
import pandas as pd
from oemof.solph import EnergySystem, Bus, Sink, Source
import oemof.tabular.tools.postprocessing as pp
from oemof.tools.economics import annuity
from oemof_flexmex.helpers import delete_empty_subdirs, load_elements, load_scalar_input_data,\
load_yaml
from oemof_flexmex.parametrization_scalars import get_parameter_values
from oemof_flexmex.facades import TYPEMAP
basic_columns = ['region', 'name', 'type', 'carrier', 'tech']
# Path definitions
module_path = os.path.abspath(os.path.dirname(__file__))
MODEL_CONFIG = 'model_config'
PATH_MAPPINGS_REL = '../flexmex_config'
path_mappings = os.path.abspath(os.path.join(module_path, PATH_MAPPINGS_REL))
path_map_output_timeseries = os.path.join(path_mappings, 'mapping-output-timeseries.yml')
path_map_input_scalars = os.path.join(path_mappings, 'mapping-input-scalars.yml')
# Load mappings
map_output_timeseries = load_yaml(path_map_output_timeseries)
FlexMex_Parameter_Map = load_yaml(path_map_input_scalars)
def create_postprocessed_results_subdirs(postprocessed_results_dir):
for parameters in map_output_timeseries.values():
for subdir in parameters.values():
path = os.path.join(postprocessed_results_dir, subdir)
if not os.path.exists(path):
os.makedirs(path)
def get_capacities(es):
r"""
Calculates the capacities of all components.
Adapted from oemof.tabular.tools.postprocessing.write_results()
Parameters
----------
es : oemof.solph.EnergySystem
EnergySystem containing the results.
Returns
-------
capacities : pd.DataFrame
DataFrame containing the capacities.
"""
def get_facade_attr(attr):
# Function constructor for getting a specific property from
# the Facade object in bus_results() DataFrame columns "from" or "to"
def fnc(flow):
# Get property from the Storage object in "from" for the discharge device
if isinstance(flow['from'], (TYPEMAP["storage"],
TYPEMAP["asymmetric storage"])):
return getattr(flow['from'], attr, np.nan)
# Get property from the Storage object in "to" for the charge device
if isinstance(flow['to'], (TYPEMAP["storage"],
TYPEMAP["asymmetric storage"])):
return getattr(flow['to'], attr, np.nan)
# Get property from other object in "from"
return getattr(flow['from'], attr, np.nan)
return fnc
def get_parameter_name(flow):
if isinstance(flow['from'], (TYPEMAP["storage"],
TYPEMAP["asymmetric storage"])):
return "capacity_discharge_invest"
if isinstance(flow['to'], (TYPEMAP["storage"],
TYPEMAP["asymmetric storage"])):
return "capacity_charge_invest"
return np.nan
try:
flows = pp.bus_results(es, es.results, select="scalars", concat=True)
flows.name = "var_value"
endogenous = flows.reset_index()
# Results already contain a column named "type". Call this "var_name" to
# preserve its content ("invest" for now)
endogenous.rename(columns={"type": "var_name"}, inplace=True)
# Update "var_name" with Storage specific parameter names for charge and discharge devices
df = pd.DataFrame({'var_name': endogenous.apply(get_parameter_name, axis=1)})
endogenous.update(df)
endogenous["region"] = endogenous.apply(get_facade_attr('region'), axis=1)
endogenous["name"] = endogenous.apply(get_facade_attr('label'), axis=1)
endogenous["type"] = endogenous.apply(get_facade_attr('type'), axis=1)
endogenous["carrier"] = endogenous.apply(get_facade_attr('carrier'), axis=1)
endogenous["tech"] = endogenous.apply(get_facade_attr('tech'), axis=1)
endogenous.drop(['from', 'to'], axis=1, inplace=True)
endogenous.set_index(
["region", "name", "type", "carrier", "tech", "var_name"], inplace=True
)
except ValueError:
endogenous = pd.DataFrame()
d = dict()
for node in es.nodes:
if not isinstance(node, (Bus, Sink, TYPEMAP["shortage"], TYPEMAP["link"])):
# Specify which parameters to read depending on the technology
parameters_to_read = []
if isinstance(node, TYPEMAP["storage"]):
# TODO for brownfield optimization
# parameters_to_read = ['capacity', 'storage_capacity']
# WORKAROUND Skip 'capacity' to safe some effort in aggregation and elsewhere
# possible because storages are greenfield optimized only: 'capacity' = 0
parameters_to_read = ['storage_capacity']
elif isinstance(node, TYPEMAP["asymmetric storage"]):
parameters_to_read = ['capacity_charge', 'capacity_discharge', 'storage_capacity']
elif getattr(node, "capacity", None) is not None:
parameters_to_read = ['capacity']
# Update dict with values in oemof's parameter->value structure
for p in parameters_to_read:
key = (
node.region,
node.label,
# [n for n in node.outputs.keys()][0],
node.type,
node.carrier,
node.tech, # tech & carrier are oemof-tabular specific
p
) # for oemof logic
d[key] = {'var_value': getattr(node, p)}
exogenous = pd.DataFrame.from_dict(d).T # .dropna()
if not exogenous.empty:
exogenous.index = exogenous.index.set_names(
['region', 'name', 'type', 'carrier', 'tech', 'var_name']
)
# Read storage capacities (from oemof.heat)
# only component_results() knows about 'storage_capacity'
try:
components = pd.concat(pp.component_results(es, es.results, select='scalars'))
components.name = 'var_value'
storage = components.reset_index()
storage.drop('level_0', 1, inplace=True)
storage.columns = ['name', 'to', 'var_name', 'var_value']
storage['region'] = [
getattr(t, "region", np.nan) for t in components.index.get_level_values('from')
]
storage['type'] = [
getattr(t, "type", np.nan) for t in components.index.get_level_values('from')
]
storage['carrier'] = [
getattr(t, "carrier", np.nan) for t in components.index.get_level_values('from')
]
storage['tech'] = [
getattr(t, "tech", np.nan) for t in components.index.get_level_values('from')
]
storage = storage.loc[storage['to'].isna()]
storage.drop('to', 1, inplace=True)
storage = storage[['region', 'name', 'type', 'carrier', 'tech', 'var_name', 'var_value']]
# Delete unused 'init_cap' rows - parameter name misleading! (oemof issue)
storage.drop(storage.loc[storage['var_name'] == 'init_cap'].index, axis=0, inplace=True)
storage.replace(
['invest'],
['storage_capacity_invest'],
inplace=True
)
storage.set_index(
['region', "name", "type", "carrier", "tech", "var_name"], inplace=True
)
except ValueError:
storage = pd.DataFrame()
capacities = pd.concat([endogenous, exogenous, storage])
return capacities
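# Illustrative usage sketch: restoring a solved EnergySystem and reading its capacities.
# Paths and file names are hypothetical, and this assumes the results were dumped with
# EnergySystem.dump() beforehand.
# es = EnergySystem()
# es.restore(dpath='results/optimized', filename='es.dump')
# capacities = get_capacities(es)
# capacities.head()   # MultiIndex: region, name, type, carrier, tech, var_name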
def format_capacities(oemoflex_scalars, capacities):
df = pd.DataFrame(columns=oemoflex_scalars.columns)
df.loc[:, 'name'] = capacities.reset_index().loc[:, 'name']
df.loc[:, 'tech'] = capacities.reset_index().loc[:, 'tech']
df.loc[:, 'carrier'] = capacities.reset_index().loc[:, 'carrier']
df.loc[:, 'var_name'] = capacities.reset_index().loc[:, 'var_name']
df.loc[:, 'var_value'] = capacities.reset_index().loc[:, 'var_value']
df.loc[:, 'type'] = capacities.reset_index().loc[:, 'type']
df.loc[:, 'region'] = capacities.reset_index().loc[:, 'region']
df['var_unit'] = 'MW'
return df
def get_sequences_by_tech(results):
r"""
Creates a dictionary with carrier-tech as keys with the sequences of the components
from optimization results.
Parameters
----------
results : dict
Dictionary containing oemof.solph.Model results.
Returns
-------
sequences_by_tech : dict
Dictionary containing sequences with carrier-tech as keys.
"""
# copy to avoid manipulating the data in es.results
sequences = copy.deepcopy({key: value['sequences'] for key, value in results.items()})
sequences_by_tech = []
# Get internal busses for all 'ReservoirWithPump' and 'Bev' nodes to be ignored later
internal_busses = get_subnodes_by_type(sequences, Bus)
# Get inflows for all 'ReservoirWithPump' nodes
reservoir_inflows = get_subnodes_by_type(sequences, Source)
for key, df in sequences.items():
if isinstance(key[0], Bus):
component = key[1]
bus = key[0]
if isinstance(component, TYPEMAP["link"]):
if bus == component.from_bus:
var_name = 'flow_gross_forward'
elif bus == component.to_bus:
var_name = 'flow_gross_backward'
elif isinstance(component, (TYPEMAP["extraction"], TYPEMAP["backpressure"])):
var_name = 'flow_fuel'
else:
var_name = 'flow_in'
if isinstance(key[1], Bus):
bus = key[1]
component = key[0]
if isinstance(component, TYPEMAP["link"]):
if bus == component.to_bus:
var_name = 'flow_net_forward'
elif bus == component.from_bus:
var_name = 'flow_net_backward'
elif isinstance(component, (TYPEMAP["extraction"], TYPEMAP["backpressure"])):
if bus == component.electricity_bus:
var_name = 'flow_electricity'
elif bus == component.heat_bus:
var_name = 'flow_heat'
elif component in reservoir_inflows:
var_name = 'flow_inflow'
else:
var_name = 'flow_out'
if key[1] is None:
component = key[0]
var_name = 'storage_content'
# Ignore sequences FROM internal busses (concerns ReservoirWithPump, Bev)
if bus in internal_busses and component not in reservoir_inflows:
continue
carrier_tech = component.carrier + '-' + component.tech
if isinstance(component, TYPEMAP["link"]):
# Replace AT-DE by AT_DE to be ready to be merged with DataFrames from preprocessing
region = component.label.replace('-', '_')
else:
# Take AT from AT-ch4-gt, string op since sub-nodes lack of a 'region' attribute
region = component.label.split('-')[0]
df.columns = pd.MultiIndex.from_tuples([(region, carrier_tech, var_name)])
df.columns.names = ['region', 'carrier_tech', 'var_name']
sequences_by_tech.append(df)
sequences_by_tech = pd.concat(sequences_by_tech, axis=1)
return sequences_by_tech
def get_subnodes_by_type(sequences, cls):
r"""
Get all the subnodes of type 'cls' in the <to> nodes of 'sequences'
Parameters
----------
sequences : dict (special format, see get_sequences_by_tech() and before)
key: tuple of 'to' node and 'from' node: (from, to)
value: timeseries DataFrame
cls : Class
Class to check against
Returns
-------
A list of all subnodes of type 'cls'
"""
# Get a list of all the components
to_nodes = []
for k in sequences.keys():
# It's sufficient to look into one side of the flows ('to' node, k[1])
to_nodes.append(k[1])
subnodes_list = []
for component in to_nodes:
if hasattr(component, 'subnodes'):
# Only get subnodes of type 'cls'
subnodes_per_component = [n for n in component.subnodes if isinstance(n, cls)]
subnodes_list.extend(subnodes_per_component)
return subnodes_list
def get_summed_sequences(sequences_by_tech, prep_elements):
# Put component definitions into one DataFrame - drops 'carrier_tech' information in the keys
base = pd.concat(prep_elements.values())
df = base.loc[:, basic_columns]
sum = sequences_by_tech.sum()
sum.name = 'var_value'
sum_df = sum.reset_index()
# Form helper column for proper merging with component definition
df['carrier_tech'] = df['carrier'] + '-' + df['tech']
summed_sequences = pd.merge(df, sum_df, on=['region', 'carrier_tech'])
# Drop helper column
summed_sequences.drop('carrier_tech', axis=1, inplace=True)
summed_sequences = summed_sequences.loc[summed_sequences['var_name'] != 'storage_content']
summed_sequences['var_unit'] = 'MWh'
return summed_sequences
def get_re_generation(oemoflex_scalars):
renewable_carriers = ['solar', 'wind']
re_generation = pd.DataFrame(columns=oemoflex_scalars.columns)
re_flow = oemoflex_scalars.loc[(oemoflex_scalars['carrier'].isin(renewable_carriers)) &
(oemoflex_scalars['var_name'] == 'flow_out')]
curtailment = oemoflex_scalars.loc[(oemoflex_scalars['carrier'] == 'electricity') &
(oemoflex_scalars['tech'] == 'curtailment') &
(oemoflex_scalars['var_name'] == 'flow_in')]
sum = re_flow.groupby('region').sum() - curtailment.groupby('region').sum()
re_generation['region'] = sum.index
re_generation['carrier'] = 're'
re_generation['type'] = 'none'
re_generation['tech'] = 'none'
re_generation['var_name'] = 're_generation'
re_generation = re_generation.drop('var_value', 1)
re_generation = pd.merge(re_generation, sum['var_value'], on='region')
re_generation['var_unit'] = 'MWh'
return re_generation
def get_transmission_losses(oemoflex_scalars):
r"""Calculates losses_forward losses_backward for each link."""
def gross_minus_net_flow(direction):
flow_gross = oemoflex_scalars.loc[
oemoflex_scalars['var_name'] == f'flow_gross_{direction}'].set_index('name')
flow_net = oemoflex_scalars.loc[
oemoflex_scalars['var_name'] == f'flow_net_{direction}'].set_index('name')
loss = flow_gross.copy()
loss['var_name'] = f'loss_{direction}'
loss['var_value'] = flow_gross['var_value'] - flow_net['var_value']
return loss
losses = []
for direction in ['forward', 'backward']:
loss = gross_minus_net_flow(direction)
losses.append(loss)
losses = pd.concat(losses)
losses = losses.reset_index()
return losses
def get_storage_losses(oemoflex_scalars):
storage_data = oemoflex_scalars.loc[
oemoflex_scalars['type'].isin(['storage', 'asymmetric storage'])
]
flow_in = storage_data.loc[storage_data['var_name'] == 'flow_in'].set_index('name')
flow_out = storage_data.loc[storage_data['var_name'] == 'flow_out'].set_index('name')
losses = flow_in.copy()
losses['var_name'] = 'loss'
losses['var_value'] = flow_in['var_value'] - flow_out['var_value']
losses = losses.reset_index()
return losses
def get_reservoir_losses(oemoflex_scalars):
reservoir_data = oemoflex_scalars.loc[
oemoflex_scalars['type'].isin(['reservoir'])
]
flow_in = reservoir_data.loc[reservoir_data['var_name'] == 'flow_in'].set_index('name')
flow_out = reservoir_data.loc[reservoir_data['var_name'] == 'flow_out'].set_index('name')
flow_inflow = reservoir_data.loc[reservoir_data['var_name'] == 'flow_inflow'].set_index('name')
losses = flow_in.copy()
losses['var_name'] = 'losses'
losses['var_value'] = flow_inflow['var_value'] - (flow_out['var_value'] - flow_in['var_value'])
losses = losses.reset_index()
return losses
def aggregate_storage_capacities(oemoflex_scalars):
storage = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['storage_capacity', 'storage_capacity_invest'])].copy()
# Make sure that values in columns used to group on are strings and thus equatable
storage[basic_columns] = storage[basic_columns].astype(str)
storage = storage.groupby(by=basic_columns, as_index=False).sum()
storage['var_name'] = 'storage_capacity_sum'
storage['var_value'] = storage['var_value'] * 1e-3 # MWh -> GWh
storage['var_unit'] = 'GWh'
charge = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['capacity_charge', 'capacity_charge_invest'])]
charge = charge.groupby(by=basic_columns, as_index=False).sum()
charge['var_name'] = 'capacity_charge_sum'
charge['var_unit'] = 'MW'
discharge = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['capacity_discharge', 'capacity_discharge_invest'])]
discharge = discharge.groupby(by=basic_columns, as_index=False).sum()
discharge['var_name'] = 'capacity_discharge_sum'
discharge['var_unit'] = 'MW'
return pd.concat([storage, charge, discharge])
def aggregate_other_capacities(oemoflex_scalars):
capacities = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['capacity', 'invest'])
].copy()
# Make sure that values in columns used to group on are strings and thus equatable
capacities[basic_columns] = capacities[basic_columns].astype(str)
capacities = capacities.groupby(by=basic_columns, as_index=False).sum()
capacities['var_name'] = 'capacity_sum'
capacities['var_unit'] = 'MW'
return capacities
def get_emissions(oemoflex_scalars, scalars_raw):
try:
emissions = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'cost_emission'].copy()
except KeyError:
logging.info("No key 'cost_emissions' found to calculate 'emissions'.")
return None
price_emission = get_parameter_values(scalars_raw, 'Energy_Price_CO2')
emissions['var_value'] *= 1/price_emission
emissions['var_name'] = 'emissions'
emissions['var_unit'] = 'tCO2'
return emissions
def map_link_direction(oemoflex_scalars):
r"""Swaps name and region for backward flows of links."""
backward = (
(oemoflex_scalars['type'] == 'link') &
(oemoflex_scalars['var_name'].str.contains('backward'))
)
def swap(series, delimiter):
return series.str.split(delimiter).apply(lambda x: delimiter.join(x[::-1]))
def drop_regex(series, regex):
return series.str.replace(regex, '', regex=True)
oemoflex_scalars.loc[backward, 'name'] = swap(oemoflex_scalars.loc[backward, 'name'], '-')
oemoflex_scalars.loc[backward, 'region'] = swap(oemoflex_scalars.loc[backward, 'region'], '_')
oemoflex_scalars.loc[:, 'var_name'] = drop_regex(
oemoflex_scalars.loc[:, 'var_name'], '.backward|.forward'
)
return oemoflex_scalars
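# Illustrative example of the renaming above (labels assumed): a backward link flow with
# name 'AT-DE', region 'AT_DE' and var_name 'flow_net_backward' becomes
# name 'DE-AT', region 'DE_AT', var_name 'flow_net'.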
def map_to_flexmex_results(oemoflex_scalars, flexmex_scalars_template, mapping, scenario):
mapping = mapping.set_index('Parameter')
flexmex_scalars = flexmex_scalars_template.copy()
oemoflex_scalars = oemoflex_scalars.set_index(['region', 'carrier', 'tech', 'var_name'])
oemoflex_scalars.loc[oemoflex_scalars['var_unit'] == 'MWh', 'var_value'] *= 1e-3 # MWh to GWh
for i, row in flexmex_scalars.loc[flexmex_scalars['UseCase'] == scenario].iterrows():
try:
select = mapping.loc[row['Parameter'], :]
except KeyError:
continue
try:
value = oemoflex_scalars.loc[
(row['Region'],
select['carrier'],
select['tech'],
select['var_name']), 'var_value']
except KeyError:
logging.info(
f"No key "
f"{(row['Region'], select['carrier'], select['tech'], select['var_name'])}"
f"found to be mapped to FlexMex."
)
continue
if isinstance(value, float):
flexmex_scalars.loc[i, 'Value'] = np.around(value)
flexmex_scalars.loc[:, 'Modell'] = 'oemof'
return flexmex_scalars
def get_varom_cost(oemoflex_scalars, prep_elements):
r"""
Calculates the VarOM cost by multiplying consumption by marginal cost.
Which value is taken as consumption depends on the actual technology type.
Parameters
----------
oemoflex_scalars
prep_elements
Returns
-------
"""
varom_cost = []
for prep_el in prep_elements.values():
if 'marginal_cost' in prep_el.columns:
df = prep_el[basic_columns]
if prep_el['type'][0] == 'excess':
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_in']
elif prep_el['type'][0] in ['backpressure', 'extraction']:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_electricity']
elif prep_el['type'][0] in ['link', 'electrical line']:
net_flows = ['flow_net_forward', 'flow_net_backward']
flow = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(net_flows)]
flow = flow.groupby(basic_columns, as_index=False).sum()
else:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_out']
df = pd.merge(
df, flow,
on=basic_columns
)
df['var_value'] = df['var_value'] * prep_el['marginal_cost']
df['var_name'] = 'cost_varom'
varom_cost.append(df)
varom_cost = pd.concat(varom_cost)
varom_cost['var_unit'] = 'Eur'
return varom_cost
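# Worked example (illustrative numbers): a component with flow_out = 1000 MWh and
# marginal_cost = 2 Eur/MWh is assigned cost_varom = 1000 * 2 = 2000 Eur.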
def get_carrier_cost(oemoflex_scalars, prep_elements):
carrier_cost = []
for prep_el in prep_elements.values():
if 'carrier_cost' in prep_el.columns:
df = prep_el[basic_columns]
if prep_el['type'][0] in ['backpressure', 'extraction']:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_fuel']
else:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_in']
df = pd.merge(
df, flow,
on=basic_columns
)
df['var_value'] = df['var_value'] * prep_el['carrier_cost']
df['var_name'] = 'cost_carrier'
carrier_cost.append(df)
if carrier_cost:
carrier_cost = pd.concat(carrier_cost)
else:
carrier_cost = pd.DataFrame(carrier_cost)
carrier_cost['var_unit'] = 'Eur'
return carrier_cost
def get_fuel_cost(oemoflex_scalars, prep_elements, scalars_raw):
r"""
Re-calculates the fuel costs from the carrier costs if there are CO2 emissions.
Bypass for non-emission carriers (cost_carrier = cost_fuel).
Having emissions or not is determined by the parameter mapping dict (emission_factor).
TODO Let's think about using the 'flow' values as input because this way we could
generalize the structure with get_varom_cost() and get_emission_cost() into one function
for all 'flow'-derived values.
Parameters
----------
oemoflex_scalars
prep_elements
scalars_raw
Returns
-------
"""
fuel_cost = pd.DataFrame()
# Iterate over oemof.tabular components (technologies)
for prep_el in prep_elements.values():
if 'carrier_cost' in prep_el.columns:
# Set up a list of the current technology's elements
df = prep_el.loc[:, basic_columns]
# Select carriers from the parameter map
carrier_name = prep_el['carrier'][0]
parameters = FlexMex_Parameter_Map['carrier'][carrier_name]
# Only re-calculate if there is a CO2 emission
if 'emission_factor' in parameters.keys():
price_carrier = get_parameter_values(scalars_raw, parameters['carrier_price'])
price_emission = get_parameter_values(scalars_raw, parameters['co2_price'])\
* get_parameter_values(scalars_raw, parameters['emission_factor'])
factor = price_carrier / (price_carrier + price_emission)
# Otherwise take the carrier cost value for the fuel cost
else:
factor = 1.0
df = get_calculated_parameters(df, oemoflex_scalars, 'cost_carrier', factor)
# Update other columns
df['var_name'] = 'cost_fuel'
df['var_unit'] = 'Eur'
# Append current technology elements to the return DataFrame
fuel_cost = pd.concat([fuel_cost, df])
return fuel_cost
def get_emission_cost(oemoflex_scalars, prep_elements, scalars_raw):
r"""
Re-calculates the emission costs from the carrier costs if there are CO2 emissions.
The structure differs only slightly from get_fuel_cost() (an additional else branch):
if there are no emissions, the cost is set to zero instead of falling back to the carrier cost.
Parameters
----------
oemoflex_scalars
prep_elements
scalars_raw
Returns
-------
"""
emission_cost = pd.DataFrame()
# Iterate over oemof.tabular components (technologies)
for prep_el in prep_elements.values():
if 'carrier_cost' in prep_el.columns:
# Set up a list of the current technology's elements
df = prep_el.loc[:, basic_columns]
# Select carriers from the parameter map
carrier_name = prep_el['carrier'][0]
parameters = FlexMex_Parameter_Map['carrier'][carrier_name]
# Only re-calculate if there is a CO2 emission
if 'emission_factor' in parameters.keys():
price_carrier = get_parameter_values(scalars_raw, parameters['carrier_price'])
price_emission = get_parameter_values(scalars_raw, parameters['co2_price']) \
* get_parameter_values(scalars_raw, parameters['emission_factor'])
factor = price_emission / (price_carrier + price_emission)
df = get_calculated_parameters(df, oemoflex_scalars, 'cost_carrier', factor)
else:
df['var_value'] = 0.0
# Update other columns
df['var_name'] = 'cost_emission'
df['var_unit'] = 'Eur'
# Append current technology elements to the return DataFrame
emission_cost = pd.concat([emission_cost, df])
return emission_cost
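# Worked example (illustrative numbers): with a carrier price of 30 Eur/MWh, a CO2 price of
# 25 Eur/t and an emission factor of 0.2 t/MWh, price_emission = 25 * 0.2 = 5 Eur/MWh, so
# cost_fuel = cost_carrier * 30 / (30 + 5) and cost_emission = cost_carrier * 5 / (30 + 5);
# the factors used in get_fuel_cost() and get_emission_cost() sum to one.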
def get_calculated_parameters(df, oemoflex_scalars, parameter_name, factor):
r"""
Takes the pre-calculated parameter 'parameter_name' from
'oemoflex_scalars' DataFrame and returns it multiplied by 'factor' (element-wise)
with 'df' as a template
Parameters
----------
df
output template DataFrame
oemoflex_scalars
DataFrame with pre-calculated parameters
parameter_name
parameter to manipulate
factor
factor to multiply parameter with
Returns
-------
"""
calculated_parameters = oemoflex_scalars.loc[
oemoflex_scalars['var_name'] == parameter_name].copy()
if calculated_parameters.empty:
logging.info("No key '{}' found as input"
"for postprocessing calculation.".format(parameter_name))
# Make sure that values in columns to merge on are strings
# See here:
# https://stackoverflow.com/questions/39582984/pandas-merging-on-string-columns-not-working-bug
calculated_parameters[basic_columns] = calculated_parameters[basic_columns].astype(str)
df = pd.merge(
df, calculated_parameters,
on=basic_columns
)
df['var_value'] = df['var_value'] * factor
return df
def get_invest_cost(oemoflex_scalars, prep_elements, scalars_raw):
invest_cost = pd.DataFrame()
for prep_el in prep_elements.values():
# In the following line: Not 'is'! pandas overloads operators!
if 'expandable' in prep_el.columns and prep_el['expandable'][0] == True: # noqa: E712, E501 # pylint: disable=C0121
# element is expandable --> 'invest' values exist
df = prep_el[basic_columns]
tech_name = prep_el['tech'][0]
parameters = FlexMex_Parameter_Map['tech'][tech_name]
interest = get_parameter_values(
scalars_raw,
'EnergyConversion_InterestRate_ALL') * 1e-2 # percent -> 0...1
# Special treatment for storages
if tech_name in ['h2_cavern', 'liion_battery']:
# Charge device
capex = get_parameter_values(scalars_raw, parameters['charge_capex'])
lifetime = get_parameter_values(scalars_raw, parameters['charge_lifetime'])
annualized_cost = annuity(capex=capex, n=lifetime, wacc=interest)
df_charge = get_calculated_parameters(df, oemoflex_scalars,
'capacity_charge_invest',
annualized_cost)
# Discharge device
capex = get_parameter_values(scalars_raw, parameters['discharge_capex'])
lifetime = get_parameter_values(scalars_raw, parameters['discharge_lifetime'])
annualized_cost = annuity(capex=capex, n=lifetime, wacc=interest)
df_discharge = get_calculated_parameters(df, oemoflex_scalars,
'capacity_discharge_invest',
annualized_cost)
# Storage cavern
capex = get_parameter_values(scalars_raw,
parameters['storage_capex']) * 1e-3 # €/MWh -> €/GWh
lifetime = get_parameter_values(scalars_raw, parameters['storage_lifetime'])
annualized_cost = annuity(capex=capex, n=lifetime, wacc=interest)
df_storage = get_calculated_parameters(df, oemoflex_scalars,
'storage_capacity_invest',
annualized_cost)
df = pd.concat([df_charge, df_discharge, df_storage])
# Sum the 3 amounts per storage, keep indexes as columns
df = df.groupby(by=basic_columns, as_index=False).sum()
else:
capex = get_parameter_values(scalars_raw, parameters['capex'])
lifetime = get_parameter_values(scalars_raw, parameters['lifetime'])
annualized_cost = annuity(capex=capex, n=lifetime, wacc=interest)
df = get_calculated_parameters(df, oemoflex_scalars, 'invest', annualized_cost)
df['var_name'] = 'cost_invest'
df['var_unit'] = 'Eur'
invest_cost = pd.concat([invest_cost, df])
return invest_cost
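# Hedged sketch of the annuity factor used above: 'annuity' is assumed to implement the
# standard annualisation formula below; the helper actually imported by this module may
# differ in detail. This reference version is not called by the code above.
def _annuity_reference_sketch(capex, n, wacc):
    # Annual payment that repays 'capex' over 'n' years at interest rate 'wacc'.
    return capex * (wacc * (1 + wacc) ** n) / ((1 + wacc) ** n - 1)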
def get_fixom_cost(oemoflex_scalars, prep_elements, scalars_raw):
fixom_cost = pd.DataFrame()
for prep_el in prep_elements.values():
# not 'is'! pandas overloads operators!
if 'expandable' in prep_el.columns and prep_el['expandable'][0] == True: # noqa: E712, E501 # pylint: disable=C0121
# element is expandable --> 'invest' values exist
df = prep_el[basic_columns]
tech_name = prep_el['tech'][0]
parameters = FlexMex_Parameter_Map['tech'][tech_name]
# Special treatment for storages
if tech_name in ['h2_cavern', 'liion_battery']:
# One fix cost factor for all sub-components
fix_cost_factor = get_parameter_values(
scalars_raw, parameters['fixom']) * 1e-2 # percent -> 0...1
# Charge device
capex = get_parameter_values(scalars_raw, parameters['charge_capex'])
df_charge = get_calculated_parameters(df, oemoflex_scalars,
'capacity_charge_invest',
fix_cost_factor * capex)
# Discharge device
capex = get_parameter_values(scalars_raw, parameters['discharge_capex'])
df_discharge = get_calculated_parameters(df, oemoflex_scalars,
'capacity_discharge_invest',
fix_cost_factor * capex)
# Storage cavern
capex = get_parameter_values(scalars_raw,
parameters['storage_capex']) * 1e-3 # €/MWh -> €/GWh
df_storage = get_calculated_parameters(df, oemoflex_scalars,
'storage_capacity_invest',
fix_cost_factor * capex)
df = pd.concat([df_charge, df_discharge, df_storage])
# Sum the 3 amounts per storage, keep indexes as columns
df = df.groupby(by=basic_columns, as_index=False).sum()
else:
capex = get_parameter_values(scalars_raw, parameters['capex'])
fix_cost_factor = get_parameter_values(
scalars_raw, parameters['fixom']) * 1e-2 # percent -> 0...1
df = get_calculated_parameters(df, oemoflex_scalars,
'invest',
fix_cost_factor * capex)
df['var_name'] = 'cost_fixom'
df['var_unit'] = 'Eur'
fixom_cost = pd.concat([fixom_cost, df])
return fixom_cost
def aggregate_by_country(df):
if not df.empty:
aggregated = df.groupby(['region', 'var_name', 'var_unit']).sum()
aggregated['name'] = 'energysystem'
aggregated['carrier'] = 'ALL'
aggregated['tech'] = 'ALL'
aggregated['type'] = 'ALL'
aggregated = aggregated.reset_index()
return aggregated
return None
def get_total_system_cost(oemoflex_scalars):
cost_list = ['cost_varom', 'cost_fuel', 'cost_invest', 'cost_emission']
df = oemoflex_scalars.loc[oemoflex_scalars['var_name'].isin(cost_list)]
total_system_cost = pd.DataFrame(columns=oemoflex_scalars.columns)
total_system_cost.loc[0, 'var_name'] = 'total_system_cost'
total_system_cost.loc[0, 'var_value'] = df['var_value'].sum()
total_system_cost['carrier'] = 'ALL'
total_system_cost['tech'] = 'ALL'
total_system_cost['region'] = 'ALL'
total_system_cost['var_unit'] = 'Eur'
return total_system_cost
def save_flexmex_timeseries(sequences_by_tech, scenario, model, year, dir):
for carrier_tech in sequences_by_tech.columns.unique(level='carrier_tech'):
try:
components_paths = map_output_timeseries[carrier_tech]
except KeyError:
logging.info(f"No entry found in {path_map_output_timeseries} for '{carrier_tech}'.")
continue
idx = pd.IndexSlice
for var_name, subdir in components_paths.items():
df_var_value = sequences_by_tech.loc[:, idx[:, carrier_tech, var_name]]
for region in df_var_value.columns.get_level_values('region'):
filename = os.path.join(
dir,
subdir,
'_'.join([scenario, model, region, year]) + '.csv'
)
single_column = df_var_value.loc[:, region]
single_column = single_column.reset_index(drop=True)
single_column.columns = single_column.columns.droplevel('carrier_tech')
remaining_column_name = list(single_column)[0]
single_column.rename(columns={remaining_column_name: 'value'}, inplace=True)
single_column.index.name = 'timeindex'
single_column.to_csv(filename, header=True)
delete_empty_subdirs(dir)
def sum_transmission_flows(sequences_by_tech):
idx = pd.IndexSlice
try:
flow_net_fw = sequences_by_tech. \
loc[:, idx[:, 'electricity-transmission', 'flow_net_forward']]
flow_net_bw = sequences_by_tech. \
loc[:, idx[:, 'electricity-transmission', 'flow_net_backward']]
except KeyError:
return None
flow_net_fw = flow_net_fw.rename(columns={'flow_net_forward': 'flow_net_sum'})
flow_net_bw = flow_net_bw.rename(columns={'flow_net_backward': 'flow_net_sum'})
flow_net_sum = flow_net_fw - flow_net_bw
return flow_net_sum
def aggregate_re_generation_timeseries(sequences_by_tech):
idx = pd.IndexSlice
# Sum flow_out sequences from renewable energies
renewable_techs = ['wind-offshore', 'wind-onshore', 'solar-pv']
df_renewable = sequences_by_tech.loc[:, idx[:, renewable_techs, 'flow_out']]
df_renewable_sum = df_renewable.groupby(['region'], axis=1).sum()
df_renewable_sum.columns = pd.MultiIndex.from_product(
[list(df_renewable_sum.columns), ['energysystem'], ['re_generation']],
names=['region', 'carrier_tech', 'var_name']
)
# Subtract curtailment
df_curtailment = sequences_by_tech.loc[:, (slice(None), 'electricity-curtailment')]
df_curtailment.columns = df_renewable_sum.columns
df_re_generation = df_renewable_sum.sub(df_curtailment, axis=0)
return df_re_generation
def export_bus_sequences(es, destination):
if not os.path.exists(destination):
os.mkdir(destination)
bus_results = pp.bus_results(es, es.results)
for key, value in bus_results.items():
if value.empty:
continue
file_path = os.path.join(destination, key + '.csv')
value.to_csv(file_path)
def log_solver_time_to_file(meta_results, path):
r"""Log solver time from oemof.outputlib.processing.meta_results() to a log file in 'path'"""
sys_time = meta_results['solver']['System time'] # equals 'Total time (CPU seconds)' in stdout
wc_time = meta_results['solver']['Wallclock time']
user_time = meta_results['solver']['User time'] # Always -1 so far
time = meta_results['solver']['Time'] # Not clear what this means
output_path = os.path.join(path, 'solver_time.csv')
df = pd.DataFrame(
{'system_time': [sys_time],
'wallclock_time': [wc_time],
'user_time': [user_time],
'time': [time],
})
df.to_csv(output_path, index=False)
def log_problem_metrics_to_file(meta_results, path):
r"""Log a number of solver metrics from oemof.outputlib.processing.meta_results()
to a log file in 'path'"""
no_of_constraints = meta_results['problem']['Number of constraints']
no_of_vars = meta_results['problem']['Number of variables']
no_of_nonzeros = meta_results['problem']['Number of nonzeros']
output_path = os.path.join(path, 'problem_metrics.csv')
df = pd.DataFrame(
{'constraints': [no_of_constraints],
'vars': [no_of_vars],
'nonzeros': [no_of_nonzeros],
})
df.to_csv(output_path, index=False)
def run_postprocessing(scenario_specs, exp_paths):
create_postprocessed_results_subdirs(exp_paths.results_postprocessed)
# load raw data
scalars_raw = load_scalar_input_data(scenario_specs, exp_paths.data_raw)
# load scalars templates
flexmex_scalars_template = pd.read_csv(os.path.join(exp_paths.results_template, 'Scalars.csv'))
flexmex_scalars_template = flexmex_scalars_template.loc[
flexmex_scalars_template['UseCase'] == scenario_specs['scenario']
]
# load mapping
mapping = pd.read_csv(os.path.join(path_mappings, 'mapping-output-scalars.csv'))
# Load preprocessed elements
prep_elements = load_elements(os.path.join(exp_paths.data_preprocessed, 'data', 'elements'))
# restore EnergySystem with results
es = EnergySystem()
es.restore(exp_paths.results_optimization)
log_solver_time_to_file(es.meta_results, exp_paths.logging_path)
log_problem_metrics_to_file(es.meta_results, exp_paths.logging_path)
# format results sequences
sequences_by_tech = get_sequences_by_tech(es.results)
flow_net_sum = sum_transmission_flows(sequences_by_tech)
sequences_by_tech = pd.concat([sequences_by_tech, flow_net_sum], axis=1)
df_re_generation = aggregate_re_generation_timeseries(sequences_by_tech)
sequences_by_tech = pd.concat([sequences_by_tech, df_re_generation], axis=1)
import asyncio
import datetime
import logging
from typing import List, Tuple, Union
import pandas as pd
import pytest
import core.signal_processing as csigproc
import helpers.hasyncio as hasynci
import helpers.hdbg as hdbg
import helpers.hunit_test as hunitest
import market_data as mdata
import oms.oms_db as oomsdb
import oms.order_processor as oordproc
import oms.portfolio as omportfo
import oms.portfolio_example as oporexam
import oms.process_forecasts as oprofore
import oms.test.oms_db_helper as omtodh
_LOG = logging.getLogger(__name__)
class TestSimulatedProcessForecasts1(hunitest.TestCase):
def test_initialization1(self) -> None:
with hasynci.solipsism_context() as event_loop:
hasynci.run(
self._test_simulated_system1(event_loop), event_loop=event_loop
)
async def _test_simulated_system1(
self, event_loop: asyncio.AbstractEventLoop
) -> None:
"""
Run `process_forecasts()` logic with a given prediction df to update a
Portfolio.
"""
config = {}
(
market_data,
get_wall_clock_time,
) = mdata.get_ReplayedTimeMarketData_example3(event_loop)
# Build predictions.
index = [
pd.Timestamp("2000-01-01 09:35:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:40:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:45:00-05:00", tz="America/New_York"),
]
columns = [101, 202]
prediction_data = [
[0.1, 0.2],
[-0.1, 0.3],
[-0.3, 0.0],
]
predictions = pd.DataFrame(prediction_data, index, columns)
volatility_data = [
[1, 1],
[1, 1],
[1, 1],
]
volatility = pd.DataFrame(volatility_data, index, columns)
# Build a Portfolio.
portfolio = oporexam.get_simulated_portfolio_example1(
event_loop,
market_data=market_data,
asset_ids=[101, 202],
)
config["order_type"] = "price@twap"
config["order_duration"] = 5
config["ath_start_time"] = datetime.time(9, 30)
config["trading_start_time"] = datetime.time(9, 35)
config["ath_end_time"] = datetime.time(16, 00)
config["trading_end_time"] = datetime.time(15, 55)
config["execution_mode"] = "batch"
# Run.
await oprofore.process_forecasts(
predictions,
volatility,
portfolio,
config,
)
actual = str(portfolio)
expected = r"""# historical holdings=
asset_id 101 202 -1
2000-01-01 09:35:00-05:00 0.0 0.0 1000000.00
2000-01-01 09:35:01-05:00 0.0 0.0 1000000.00
2000-01-01 09:40:01-05:00 33.32 66.65 900039.56
2000-01-01 09:45:01-05:00 -24.99 74.98 950024.38
# historical holdings marked to market=
asset_id 101 202 -1
2000-01-01 09:35:00-05:00 0.0 0.0 1000000.00
2000-01-01 09:35:01-05:00 0.0 0.0 1000000.00
2000-01-01 09:40:01-05:00 33329.65 66659.3 900039.56
2000-01-01 09:45:01-05:00 -24992.93 74978.79 950024.38
# historical statistics=
net_asset_holdings cash net_wealth gross_exposure leverage pnl realized_pnl unrealized_pnl
2000-01-01 09:35:00-05:00 0.00 1000000.00 1.00e+06 0.00 0.0 NaN NaN NaN
2000-01-01 09:35:01-05:00 0.00 1000000.00 1.00e+06 0.00 0.0 0.00 0.00 0.00
2000-01-01 09:40:01-05:00 99988.95 900039.56 1.00e+06 99988.95 0.1 28.51 -99960.44 99988.95
2000-01-01 09:45:01-05:00 49985.86 950024.38 1.00e+06 99971.72 0.1 -18.28 49984.81 -50003.09"""
self.assert_equal(actual, expected, fuzzy_match=True)
class TestMockedProcessForecasts1(omtodh.TestOmsDbHelper):
def test_mocked_system1(self) -> None:
with hasynci.solipsism_context() as event_loop:
# Build a Portfolio.
db_connection = self.connection
table_name = oomsdb.CURRENT_POSITIONS_TABLE_NAME
#
oomsdb.create_oms_tables(self.connection, incremental=False)
#
portfolio = oporexam.get_mocked_portfolio_example1(
event_loop,
db_connection,
table_name,
asset_ids=[101, 202],
)
# Build OrderProcessor.
get_wall_clock_time = portfolio._get_wall_clock_time
poll_kwargs = hasynci.get_poll_kwargs(get_wall_clock_time)
# poll_kwargs["sleep_in_secs"] = 1
poll_kwargs["timeout_in_secs"] = 60 * 10
delay_to_accept_in_secs = 3
delay_to_fill_in_secs = 10
broker = portfolio.broker
termination_condition = 3
order_processor = oordproc.OrderProcessor(
db_connection,
delay_to_accept_in_secs,
delay_to_fill_in_secs,
broker,
poll_kwargs=poll_kwargs,
)
order_processor_coroutine = order_processor.run_loop(
termination_condition
)
coroutines = [
self._test_mocked_system1(portfolio),
order_processor_coroutine,
]
hasynci.run(asyncio.gather(*coroutines), event_loop=event_loop)
async def _test_mocked_system1(
self,
portfolio,
) -> None:
"""
Run process_forecasts() logic with a given prediction df to update a
Portfolio.
"""
config = {}
# Build predictions.
index = [
pd.Timestamp("2000-01-01 09:35:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:40:00-05:00", tz="America/New_York"),
pd.Timestamp("2000-01-01 09:45:00-05:00", tz="America/New_York"),
]
columns = [101, 202]
prediction_data = [
[0.1, 0.2],
[-0.1, 0.3],
[-0.3, 0.0],
]
predictions = pd.DataFrame(prediction_data, index, columns)
volatility_data = [
[1, 1],
[1, 1],
[1, 1],
]
volatility = pd.DataFrame(volatility_data, index, columns)
config["order_type"] = "price@twap"
config["order_duration"] = 5
config["ath_start_time"] = datetime.time(9, 30)
config["trading_start_time"] = datetime.time(9, 35)
config["ath_end_time"] = datetime.time(16, 00)
config["trading_end_time"] = datetime.time(15, 55)
config["execution_mode"] = "batch"
# Run.
await oprofore.process_forecasts(
predictions,
volatility,
portfolio,
config,
)
# TODO(Paul): Re-check the correctness after fixing the issue with
# pricing assets not currently in the portfolio.
actual = str(portfolio)
# TODO(Paul): Get this and the simulated test output to agree perfectly.
expected = r"""# historical holdings=
asset_id 101 202 -1
2000-01-01 09:35:00-05:00 0.0 0.0 1000000.00
2000-01-01 09:35:01-05:00 NaN NaN 1000000.00
2000-01-01 09:40:01-05:00 33.32 66.65 900039.56
2000-01-01 09:45:01-05:00 -24.99 74.98 950024.38
# historical holdings marked to market=
101 202 -1
2000-01-01 09:35:00-05:00 0.0 0.0 1000000.00
2000-01-01 09:35:01-05:00 NaN NaN 1000000.00
2000-01-01 09:40:01-05:00 33329.65 66659.3 900039.56
2000-01-01 09:45:01-05:00 -24992.93 74978.79 950024.38
# historical statistics=
net_asset_holdings cash net_wealth gross_exposure leverage pnl realized_pnl unrealized_pnl
2000-01-01 09:35:00-05:00 0.00 1000000.00 1.00e+06 0.00 0.0 NaN NaN NaN
2000-01-01 09:35:01-05:00 0.00 1000000.00 1.00e+06 0.00 0.0 0.00 0.00 0.00
2000-01-01 09:40:01-05:00 99988.95 900039.56 1.00e+06 99988.95 0.1 28.51 -99960.44 99988.95
2000-01-01 09:45:01-05:00 49985.86 950024.38 1.00e+06 99971.72 0.1 -18.28 49984.81 -50003.09"""
self.assert_equal(actual, expected, fuzzy_match=True)
class TestMockedProcessForecasts2(omtodh.TestOmsDbHelper):
def test_mocked_system1(self) -> None:
data = self._get_market_data_df1()
predictions, volatility = self._get_predictions_and_volatility1(data)
self._run_coroutines(data, predictions, volatility)
def test_mocked_system2(self) -> None:
data = self._get_market_data_df2()
predictions, volatility = self._get_predictions_and_volatility1(data)
self._run_coroutines(data, predictions, volatility)
def test_mocked_system3(self) -> None:
data = self._get_market_data_df1()
predictions, volatility = self._get_predictions_and_volatility2(data)
self._run_coroutines(data, predictions, volatility)
@pytest.mark.skip(
"This test times out because nothing interesting happens after the first set of orders."
)
def test_mocked_system4(self) -> None:
data = self._get_market_data_df2()
predictions, volatility = self._get_predictions_and_volatility2(data)
self._run_coroutines(data, predictions, volatility)
def _run_coroutines(self, data, predictions, volatility):
with hasynci.solipsism_context() as event_loop:
# Build MarketData.
initial_replayed_delay = 5
asset_id = [data["asset_id"][0]]
market_data, _ = mdata.get_ReplayedTimeMarketData_from_df(
event_loop,
initial_replayed_delay,
data,
)
# Create a portfolio with one asset (and cash).
db_connection = self.connection
table_name = oomsdb.CURRENT_POSITIONS_TABLE_NAME
oomsdb.create_oms_tables(self.connection, incremental=False)
portfolio = oporexam.get_mocked_portfolio_example1(
event_loop,
db_connection,
table_name,
market_data=market_data,
asset_ids=asset_id,
)
# Build OrderProcessor.
delay_to_accept_in_secs = 3
delay_to_fill_in_secs = 10
broker = portfolio.broker
poll_kwargs = hasynci.get_poll_kwargs(portfolio._get_wall_clock_time)
poll_kwargs["timeout_in_secs"] = 60 * 10
order_processor = oordproc.OrderProcessor(
db_connection,
delay_to_accept_in_secs,
delay_to_fill_in_secs,
broker,
poll_kwargs=poll_kwargs,
)
# Build order process coroutine.
termination_condition = 4
order_processor_coroutine = order_processor.run_loop(
termination_condition
)
coroutines = [
self._test_mocked_system1(predictions, volatility, portfolio),
order_processor_coroutine,
]
hasynci.run(asyncio.gather(*coroutines), event_loop=event_loop)
@staticmethod
def _get_market_data_df1() -> pd.DataFrame:
"""
Generate price series that alternates every 5 minutes.
"""
idx = pd.date_range(
start=pd.Timestamp(
"2000-01-01 09:31:00-05:00", tz="America/New_York"
),
end=pd.Timestamp("2000-01-01 09:55:00-05:00", tz="America/New_York"),
freq="T",
)
bar_duration = "1T"
bar_delay = "0T"
data = mdata.build_timestamp_df(idx, bar_duration, bar_delay)
price_pattern = [101.0] * 5 + [100.0] * 5
price = price_pattern * 2 + [101.0] * 5
data["price"] = price
data["asset_id"] = 101
return data
@staticmethod
def _get_market_data_df2() -> pd.DataFrame:
idx = pd.date_range(
start=pd.Timestamp(
"2000-01-01 09:31:00-05:00", tz="America/New_York"
),
end=pd.Timestamp("2000-01-01 09:55:00-05:00", tz="America/New_York"),
freq="T",
)
bar_duration = "1T"
bar_delay = "0T"
data = mdata.build_timestamp_df(idx, bar_duration, bar_delay)
data["price"] = 100
data["asset_id"] = 101
return data
@staticmethod
def _get_predictions_and_volatility1(
market_data_df,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Generate a signal that alternates every 5 minutes.
"""
# Build predictions.
asset_id = market_data_df["asset_id"][0]
index = [
pd.Timestamp("2000-01-01 09:35:00-05:00", tz="America/New_York"),
| pd.Timestamp("2000-01-01 09:40:00-05:00", tz="America/New_York") | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Utils
==========================
Utility functions for the Distance Closure package
"""
# Copyright (C) 2015 by
# <NAME> <<EMAIL>>
# <NAME> <@.>
# <NAME> <<EMAIL>>
# All rights reserved.
# MIT license.
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
__author__ = """\n""".join([
'<NAME> <<EMAIL>>',
'<NAME> <@.>',
'<NAME> <<EMAIL>>',
])
__all__ = [
'dist2prox',
'prox2dist',
'dict2matrix',
'matrix2dict',
'dict2sparse'
]
#
# Proximity and Distance Conversions
#
def prox2dist(P):
"""
Transforms a matrix of non-negative ``[0,1]`` proximities P to distance weights in the ``[0,inf]`` interval:
.. math::
d = \\frac{1}{p} - 1
Args:
P (matrix): Proximity matrix
Returns:
D (matrix): Distance matrix
See Also:
:attr:`dist2prox`
"""
if (type(P).__module__.split('.')[0] == 'numpy'):
return _prox2dist_numpy(P)
elif (type(P).__module__.split('.')[1] == 'sparse'):
return _prox2dist_sparse(P)
else:
raise ("Format not accepted: try numpy or scipy.sparse formats")
def _prox2dist_sparse(A):
A.data = _prox2dist_numpy(A.data)
return A
def _prox2dist_numpy(A):
f = np.vectorize(_prox2dist)
return f(A)
def _prox2dist(x):
if x == 0:
return np.inf
else:
return (1/float(x)) - 1
def dist2prox(D):
"""
Transforms a matrix of non-negative integer distances ``D`` to proximity/similarity weights in the ``[0,1]`` interval:
.. math::
p = \\frac{1}{(d+1)}
It accepts both dense and sparse matrices.
Args:
D (matrix): Distance matrix
Returns:
P (matrix): Proximity matrix
See Also:
:attr:`prox2dist`
"""
if (type(D).__module__.split('.')[0] == 'numpy'):
return _dist2prox_numpy(D)
elif (type(D).__module__.split('.')[1] == 'sparse'):
return _dist2prox_sparse(D)
else:
raise ValueError("Format not accepted: try numpy or scipy.sparse formats")
def _dist2prox_sparse(A):
A.data = _dist2prox_numpy(A.data)
return A
def _dist2prox_numpy(A):
f = np.vectorize(_dist2prox)
return f(A)
def _dist2prox(x):
if x == np.inf:
return 0
else:
return (x + 1) ** -1
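# Minimal round-trip sketch for the two conversions above (dense numpy input assumed):
# >>> P = np.array([[1.0, 0.5], [0.25, 0.0]])
# >>> D = prox2dist(P)    # [[0., 1.], [3., inf]]
# >>> dist2prox(D)        # recovers [[1., 0.5], [0.25, 0.]]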
#
# Data format Conversiosn
#
def dict2matrix(d):
"""
Transforms a 2D dictionary into a numpy matrix. Useful when converting Dijkstra results.
Args:
d (dict): 2D dictionary
Returns:
m (matrix): numpy matrix
Warning:
If your nodes have names instead of numbers assigned to them, make sure to keep a mapping.
Usage:
>>> d = {0: {0: 0, 1: 1, 2:3}, 1: {0: 1, 1: 0, 2:2}, 2: {0: 3, 1:2, 2:0}}
>>> dict2matrix(d)
[[ 0 1 3]
[ 1 0 2]
[ 3 2 0]]
See Also:
:attr:`matrix2dict`
Note:
Uses pandas to accomplish this in a one liner.
"""
return pd.DataFrame.from_dict(d).values
def matrix2dict(m):
"""
Transforms a Numpy matrix into a 2D dictionary. Useful when comparing dense metric and Dijkstra results.
Args:
m (matrix): numpy matrix
Returns:
d (dict): 2D dictionary
Usage:
>>> m = [[0, 1, 3], [1, 0, 2], [3, 2, 0]]
>>> matrix2dict(m)
{0: {0: 0, 1: 1, 2:3}, 1: {0: 1, 1: 0, 2:2}, 2: {0: 3, 1:2, 2:0}}
See Also:
:attr:`dict2matrix`
Note:
Uses pandas to accomplish this in a one liner.
"""
return pd.DataFrame(m).to_dict()
def dict2sparse(d):
"""
Transforms a 2D dictionary into a Scipy sparse matrix.
Args:
d (dict): 2D dictionary
Returns:
m (csr matrix): CRS Sparse Matrix
Usage:
>>> d = {0: {0: 0, 1: 1, 2:3}, 1: {0: 1, 1: 0, 2:2}, 2: {0: 3, 1:2, 2:0}}
>>> dict2sparse(d)
(0, 1) 1
(0, 2) 3
(1, 0) 1
(1, 2) 2
(2, 0) 3
(2, 1) 2
See Also:
:attr:`dict2matrix`, :attr:`matrix2dict`
Note:
Uses pandas to convert dict into dataframe and then feeds it to the `csr_matrix`.
"""
return csr_matrix(pd.DataFrame.from_dict(d, orient='index'))
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 5 00:21:25 2019
@author: carlosalcantara
"""
'''
Trains and evaluates supervised machine learning classifiers (KNN, decision tree and
random forest) on the csv data sets in the specified data directory, using the feature
subset chosen via the options below, where 1-7 correspond to the addition of the
specified features to the base feature set 0.
0: duration, dPkts, dOctets
1: + dstaddrcount
2: + srcportcount
3: + dstportunique
4: + dstaddrcount, srcportcount
5: + dstaddrcount, dstportunique
6: + srcportcount, dstportunique
7: + dstaddrcount, srcportcount, dstportunique
Once classification has been run on the test sets, the results are saved as txt files
to the specified directory, along with the trained model for each machine
learning algorithm.
Usage: ML.py feature_subset_# #_of_test_files path/to/data/dir/ path/to/results/dir/
'''
import pandas as pd
from sklearn import tree
from sklearn import ensemble
from sklearn import neighbors
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from scipy.stats import randint as sp_randint
import sys
import json
from joblib import dump
# Check for command line argument
if len(sys.argv) < 5:
print('Usage: python3 ML.py feature_subset_# #_of_test_files path/to/data/dir/ path/to/results/dir/')
exit()
no_test_files = sys.argv[2]
data_path = sys.argv[3]
results_path = sys.argv[4]
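# Hedged sketch: one possible mapping from the feature-subset number (sys.argv[1]) to
# candidate feature columns. Column names are taken from the module docstring; the actual
# column names in the csv files may differ.
base_features = ['duration', 'dPkts', 'dOctets']
extra_features = {
    0: [],
    1: ['dstaddrcount'],
    2: ['srcportcount'],
    3: ['dstportunique'],
    4: ['dstaddrcount', 'srcportcount'],
    5: ['dstaddrcount', 'dstportunique'],
    6: ['srcportcount', 'dstportunique'],
    7: ['dstaddrcount', 'srcportcount', 'dstportunique'],
}
feature_columns = base_features + extra_features[int(sys.argv[1])]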
# load train data set
trainfile = data_path+'train_scale.csv'
traindf = pd.read_csv(trainfile)
import unittest
import pandas as pd
import numpy as np
from autopandas_v2.ml.featurization.featurizer import RelationGraph
from autopandas_v2.ml.featurization.graph import GraphEdge, GraphEdgeType, GraphNodeType, GraphNode
from autopandas_v2.ml.featurization.options import GraphOptions
get_node_type = GraphNodeType.get_node_type
class TestRelationGraphFeaturizer(unittest.TestCase):
def test_basic_max(self):
input_df = pd.DataFrame([[1, 2], [2, 3], [2, 0]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
input_20 = GraphNode("I0", '[2,0]', get_node_type(input_df.iat[2, 0]))
input_21 = GraphNode("I0", '[2,1]', get_node_type(input_df.iat[2, 1]))
output_df = pd.DataFrame([[2, 3]])
output_00 = GraphNode("O0", '[0,0]', get_node_type(output_df.iat[0, 0]))
output_01 = GraphNode("O0", '[0,1]', get_node_type(output_df.iat[0, 1]))
options = GraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output_df)
rel_graph_edges = rel_graph.edges
# positional edges
positional_edges = [
GraphEdge(input_00, input_01, GraphEdgeType.ADJACENCY),
GraphEdge(input_00, input_10, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_20, GraphEdgeType.ADJACENCY),
GraphEdge(input_20, input_21, GraphEdgeType.ADJACENCY),
GraphEdge(input_01, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_11, input_21, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_01, GraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
# equality edges
equality_edges = [
GraphEdge(input_10, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_20, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_01, output_00, GraphEdgeType.EQUALITY), # redundant
GraphEdge(input_11, output_01, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_max_series(self):
input_df = pd.DataFrame([[1, 2], [2, 3], [2, 0]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
input_20 = GraphNode("I0", '[2,0]', get_node_type(input_df.iat[2, 0]))
input_21 = GraphNode("I0", '[2,1]', get_node_type(input_df.iat[2, 1]))
output = pd.DataFrame.max(input_df)
output_00 = GraphNode("O0", '[0,0]', get_node_type(output.iat[0]))
output_10 = GraphNode("O0", '[1,0]', get_node_type(output.iat[1]))
options = GraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output)
rel_graph_edges = rel_graph.edges
# positional edges
positional_edges = [
GraphEdge(input_00, input_01, GraphEdgeType.ADJACENCY),
GraphEdge(input_00, input_10, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_20, GraphEdgeType.ADJACENCY),
GraphEdge(input_20, input_21, GraphEdgeType.ADJACENCY),
GraphEdge(input_01, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_11, input_21, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_10, GraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
# equality edges
equality_edges = [
GraphEdge(input_10, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_20, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_01, output_00, GraphEdgeType.EQUALITY), # redundant
GraphEdge(input_11, output_10, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_values(self):
input_df = pd.DataFrame([[1, 2], [3, 4]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
output = input_df.values
output_00 = GraphNode("O0", '[0,0]', get_node_type(output[0, 0]))
output_01 = GraphNode("O0", '[0,1]', get_node_type(output[0, 1]))
output_10 = GraphNode("O0", '[1,0]', get_node_type(output[1, 0]))
output_11 = GraphNode("O0", '[1,1]', get_node_type(output[1, 1]))
options = GraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output)
rel_graph_edges = rel_graph.edges
# positional edges
positional_edges = [
GraphEdge(input_00, input_01, GraphEdgeType.ADJACENCY),
GraphEdge(input_00, input_10, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_01, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_01, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_10, GraphEdgeType.ADJACENCY),
GraphEdge(output_10, output_11, GraphEdgeType.ADJACENCY),
GraphEdge(output_01, output_11, GraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
equality_edges = [
GraphEdge(input_00, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_10, output_10, GraphEdgeType.EQUALITY),
GraphEdge(input_01, output_01, GraphEdgeType.EQUALITY),
GraphEdge(input_11, output_11, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_dict(self):
input_df = pd.DataFrame([[1, 2], [3, 4]])
input_00 = GraphNode("I0", '[0,0]', get_node_type(input_df.iat[0, 0]))
input_01 = GraphNode("I0", '[0,1]', get_node_type(input_df.iat[0, 1]))
input_10 = GraphNode("I0", '[1,0]', get_node_type(input_df.iat[1, 0]))
input_11 = GraphNode("I0", '[1,1]', get_node_type(input_df.iat[1, 1]))
output = {"A": [1, 3], "B": [2, 4]}
output_00 = GraphNode("O0", '[0,0]', get_node_type(output['A'][0]))
output_01 = GraphNode("O0", '[0,1]', get_node_type(output['B'][0]))
output_10 = GraphNode("O0", '[1,0]', get_node_type(output['A'][1]))
output_11 = GraphNode("O0", '[1,1]', get_node_type(output['B'][1]))
options = GraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output)
rel_graph_edges = rel_graph.edges
positional_edges = [
GraphEdge(input_00, input_01, GraphEdgeType.ADJACENCY),
GraphEdge(input_00, input_10, GraphEdgeType.ADJACENCY),
GraphEdge(input_10, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(input_01, input_11, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_01, GraphEdgeType.ADJACENCY),
GraphEdge(output_00, output_10, GraphEdgeType.ADJACENCY),
GraphEdge(output_10, output_11, GraphEdgeType.ADJACENCY),
GraphEdge(output_01, output_11, GraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
equality_edges = [
GraphEdge(input_00, output_00, GraphEdgeType.EQUALITY),
GraphEdge(input_10, output_10, GraphEdgeType.EQUALITY),
GraphEdge(input_01, output_01, GraphEdgeType.EQUALITY),
GraphEdge(input_11, output_11, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_groupby_output(self):
input_df = pd.DataFrame({
"Name": ["Alice", "Bob", "Mallory", "Mallory", "Bob", "Mallory"],
"City": ["Seattle", "Seattle", "Portland", "Seattle", "Seattle", "Portland"]})
output = input_df.groupby("Name")
options = GraphOptions()
options.NODE_TYPES = True
options.ADJACENCY_EDGES = False
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output)
rel_graph_edges = rel_graph.edges
alice_nodes_in = [
GraphNode("I0", '[0,0]', GraphNodeType.STR)
]
alice_nodes_out = [
GraphNode("O0_0", '[0,0]', GraphNodeType.STR)
]
bob_nodes_in = [
GraphNode("I0", '[1,0]', GraphNodeType.STR),
GraphNode("I0", '[4,0]', GraphNodeType.STR)
]
bob_nodes_out = [
GraphNode("O0_1", '[0,0]', GraphNodeType.STR),
GraphNode("O0_1", '[1,0]', GraphNodeType.STR)
]
mallory_nodes_in = [
GraphNode("I0", '[2,0]', GraphNodeType.STR),
GraphNode("I0", '[3,0]', GraphNodeType.STR),
GraphNode("I0", '[5,0]', GraphNodeType.STR)
]
mallory_nodes_out = [
GraphNode("O0_2", '[0,0]', GraphNodeType.STR),
GraphNode("O0_2", '[1,0]', GraphNodeType.STR),
GraphNode("O0_2", '[2,0]', GraphNodeType.STR)
]
seattle_nodes_in = [
GraphNode("I0", '[0,1]', GraphNodeType.STR),
GraphNode("I0", '[1,1]', GraphNodeType.STR),
GraphNode("I0", '[3,1]', GraphNodeType.STR),
GraphNode("I0", '[4,1]', GraphNodeType.STR),
]
seattle_nodes_out = [
GraphNode("O0_0", '[0,1]', GraphNodeType.STR),
GraphNode("O0_1", '[0,1]', GraphNodeType.STR),
GraphNode("O0_2", '[1,1]', GraphNodeType.STR)
]
portland_nodes_in = [
GraphNode("I0", '[2,1]', GraphNodeType.STR),
GraphNode("I0", '[5,1]', GraphNodeType.STR)
]
portland_nodes_out = [
GraphNode("O0_2", '[0,1]', GraphNodeType.STR),
GraphNode("O0_2", '[2,1]', GraphNodeType.STR)
]
def check_edges(in_nodes, out_nodes):
for in_node in in_nodes:
for out_node in out_nodes:
edge = GraphEdge(in_node, out_node, GraphEdgeType.EQUALITY)
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
check_edges(alice_nodes_in, alice_nodes_out)
check_edges(bob_nodes_in, bob_nodes_out)
check_edges(mallory_nodes_in, mallory_nodes_out)
check_edges(portland_nodes_in, portland_nodes_out)
check_edges(seattle_nodes_in, seattle_nodes_out)
def test_groupby_input(self):
df = pd.DataFrame({
"Name": ["Alice", "Bob", "Mallory", "Mallory", "Bob", "Mallory"],
"City": ["Seattle", "Seattle", "Portland", "Seattle", "Seattle", "Portland"]})
input_ = df.groupby("Name")
output = input_.count().reset_index()
options = GraphOptions()
options.NODE_TYPES = True
options.ADJACENCY_EDGES = False
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_], output)
rel_graph_edges = rel_graph.edges
alice_nodes_in = [
GraphNode("I0_0", '[0,0]', GraphNodeType.STR)
]
alice_nodes_out = [
GraphNode("O0", '[0,0]', GraphNodeType.STR)
]
bob_nodes_in = [
GraphNode("I0_1", '[0,0]', GraphNodeType.STR),
GraphNode("I0_1", '[1,0]', GraphNodeType.STR)
]
bob_nodes_out = [
GraphNode("O0", '[1,0]', GraphNodeType.STR)
]
mallory_nodes_in = [
GraphNode("I0_2", '[0,0]', GraphNodeType.STR),
GraphNode("I0_2", '[1,0]', GraphNodeType.STR),
GraphNode("I0_2", '[2,0]', GraphNodeType.STR)
]
mallory_nodes_out = [
GraphNode("O0", '[2,0]', GraphNodeType.STR)
]
def check_edges(in_nodes, out_nodes):
for in_node in in_nodes:
for out_node in out_nodes:
edge = GraphEdge(in_node, out_node, GraphEdgeType.EQUALITY)
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
check_edges(alice_nodes_in, alice_nodes_out)
check_edges(bob_nodes_in, bob_nodes_out)
check_edges(mallory_nodes_in, mallory_nodes_out)
def test_idx_multi(self):
tuples = [("bar", "one"), ("bar", "two")]
index = pd.MultiIndex.from_tuples(tuples)
data = [[0], [1]]
input_df = pd.DataFrame(data, index=index)
# 0
# bar one 0
# two 1
output_df = input_df.unstack()
# 0
# one two
# bar 0 1
options = GraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output_df)
rel_graph_edges = rel_graph.edges
bar_in_0 = GraphNode("I0", '[0,-2]', GraphNodeType.INDEX)
bar_in_1 = GraphNode("I0", '[1,-2]', GraphNodeType.INDEX)
bar_out = GraphNode("O0", '[0,-1]', GraphNodeType.INDEX)
one_in = GraphNode("I0", '[0,-1]', GraphNodeType.INDEX)
two_in = GraphNode("I0", '[1,-1]', GraphNodeType.INDEX)
one_out = GraphNode("O0", '[-1,0]', GraphNodeType.COLUMN)
two_out = GraphNode("O0", '[-1,1]', GraphNodeType.COLUMN)
in_0 = GraphNode("I0", '[0,0]', GraphNodeType.INT)
in_1 = GraphNode("I0", '[1,0]', GraphNodeType.INT)
out_0 = GraphNode("O0", '[0,0]', GraphNodeType.INT)
out_1 = GraphNode("O0", '[0,1]', GraphNodeType.INT)
adjacency_edges = [
GraphEdge(bar_in_0, bar_in_1, GraphEdgeType.ADJACENCY),
GraphEdge(bar_in_0, one_in, GraphEdgeType.ADJACENCY),
GraphEdge(bar_in_1, two_in, GraphEdgeType.ADJACENCY),
GraphEdge(one_in, two_in, GraphEdgeType.ADJACENCY)
]
for edge in adjacency_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
indexing_edges = [
GraphEdge(bar_in_0, in_0, GraphEdgeType.INDEX),
GraphEdge(one_in, in_0, GraphEdgeType.INDEX),
GraphEdge(bar_in_1, in_1, GraphEdgeType.INDEX),
GraphEdge(two_in, in_1, GraphEdgeType.INDEX),
GraphEdge(bar_out, out_0, GraphEdgeType.INDEX),
GraphEdge(bar_out, out_1, GraphEdgeType.INDEX)
]
for edge in indexing_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
equality_edges = [
GraphEdge(bar_in_0, bar_out, GraphEdgeType.EQUALITY),
GraphEdge(bar_in_1, bar_out, GraphEdgeType.EQUALITY),
GraphEdge(one_in, one_out, GraphEdgeType.EQUALITY),
GraphEdge(two_in, two_out, GraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_column_multi(self):
column_labels = [['bar', 'bar', 'baz', 'baz'], ['one', 'two', 'one', 'two']]
tuples = list(zip(*column_labels))
col_index = pd.MultiIndex.from_tuples(tuples)
data = [[0, 1, 2, 3], [4, 5, 6, 7]]
input_df = pd.DataFrame(data, columns=col_index)
# bar baz
# one two one two
# 0 0 1 2 3
# 1 4 5 6 7
output_df = input_df.stack().reset_index()
# level_0 level_1 bar baz
# 0 0 one 0 2
# 1 0 two 1 3
# 2 1 one 4 6
# 3 1 two 5 7
options = GraphOptions()
options.COLUMN_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([input_df], output_df)
rel_graph_edges = rel_graph.edges
col_nodes = [[GraphNode("I0", '[-2,0]', GraphNodeType.COLUMN),
GraphNode("I0", '[-2,1]', GraphNodeType.COLUMN),
GraphNode("I0", '[-2,2]', GraphNodeType.COLUMN),
GraphNode("I0", '[-2,3]', GraphNodeType.COLUMN)],
[GraphNode("I0", '[-1,0]', GraphNodeType.COLUMN),
GraphNode("I0", '[-1,1]', GraphNodeType.COLUMN),
GraphNode("I0", '[-1,2]', GraphNodeType.COLUMN),
GraphNode("I0", '[-1,3]', GraphNodeType.COLUMN)],
]
adjacency_edges = [
GraphEdge(col_nodes[0][0], col_nodes[1][0], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[0][0], col_nodes[0][1], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[1][0], col_nodes[1][1], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[1][1], col_nodes[1][2], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[0][1], col_nodes[1][1], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[0][1], col_nodes[0][2], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[0][2], col_nodes[1][2], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[0][2], col_nodes[0][3], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[1][2], col_nodes[1][3], GraphEdgeType.ADJACENCY),
GraphEdge(col_nodes[0][3], col_nodes[1][3], GraphEdgeType.ADJACENCY)
]
for edge in adjacency_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
# indexing edges
input_coli_elems = [
[GraphNode("I0", '[0,0]', GraphNodeType.INT),
GraphNode("I0", '[1,0]', GraphNodeType.INT)],
[GraphNode("I0", '[0,1]', GraphNodeType.INT),
GraphNode("I0", '[1,1]', GraphNodeType.INT)],
[GraphNode("I0", '[0,2]', GraphNodeType.INT),
GraphNode("I0", '[1,2]', GraphNodeType.INT)],
[GraphNode("I0", '[0,3]', GraphNodeType.INT),
GraphNode("I0", '[1,3]', GraphNodeType.INT)]
]
def check_edges(in_nodes, out_nodes, edge_type):
for in_node in in_nodes:
for out_node in out_nodes:
edge = GraphEdge(in_node, out_node, edge_type)
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
for i in range(4):
in_nodes = [col_nodes[0][i], col_nodes[1][i]]
out_nodes = input_coli_elems[i]
check_edges(in_nodes, out_nodes, GraphEdgeType.INDEX)
# equality_edges
bars = [col_nodes[0][0], col_nodes[0][1]]
bazs = [col_nodes[0][2], col_nodes[0][3]]
ones = [col_nodes[1][0], col_nodes[1][2]]
twos = [col_nodes[1][1], col_nodes[1][3]]
out_01 = GraphNode("O0", '[0,1]', GraphNodeType.STR)
out_11 = GraphNode("O0", '[1,1]', GraphNodeType.STR)
out_21 = GraphNode("O0", '[2,1]', GraphNodeType.STR)
out_31 = GraphNode("O0", '[3,1]', GraphNodeType.STR)
out_col_2 = GraphNode("O0", '[-1,2]', GraphNodeType.COLUMN)
out_col_3 = GraphNode("O0", '[-1,3]', GraphNodeType.COLUMN)
check_edges(bars, [out_col_2], GraphEdgeType.EQUALITY)
check_edges(bazs, [out_col_3], GraphEdgeType.EQUALITY)
check_edges(ones, [out_01, out_21], GraphEdgeType.EQUALITY)
check_edges(twos, [out_11, out_31], GraphEdgeType.EQUALITY)
def test_no_spurious_for_idx_arg(self):
df = pd.DataFrame([[5, 2], [2, 3], [2, 0]], columns=["A", "B"])
options = GraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
options.INFLUENCE_EDGES = False
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([df, df.columns], df)
index_type_nodes = [node for node in rel_graph.nodes if node.ntype == GraphNodeType.INDEX]
column_type_nodes = [node for node in rel_graph.nodes if node.ntype == GraphNodeType.COLUMN]
self.assertEqual(len(index_type_nodes), 6)
self.assertEqual(len(column_type_nodes), 4)
def test_no_spurious_for_list_arg(self):
df = pd.DataFrame([[5, 2], [2, 3], [2, 0]], columns=["A", "B"])
options = GraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([df, [1, 3, 4]], df)
index_type_nodes = [node for node in rel_graph.nodes if node.ntype == GraphNodeType.INDEX]
column_type_nodes = [node for node in rel_graph.nodes if node.ntype == GraphNodeType.COLUMN]
self.assertEqual(len(index_type_nodes), 6)
self.assertEqual(len(column_type_nodes), 4)
def test_series_has_idx_and_cols(self):
df = pd.DataFrame([[5, 2], [2, 3], [2, 0]], columns=["A", "B"])
options = GraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([df], df["A"])
index_type_nodes = [node for node in rel_graph.nodes if node.ntype == GraphNodeType.INDEX]
column_type_nodes = [node for node in rel_graph.nodes if node.ntype == GraphNodeType.COLUMN]
self.assertEqual(len(index_type_nodes), 6)
self.assertEqual(len(column_type_nodes), 3)
def test_groupby_has_artifacts(self):
df = pd.DataFrame([[5, 2], [2, 3], [2, 0]], columns=["A", "B"])
output = df.groupby(by="A")
options = GraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([df], output)
index_type_nodes = [node for node in rel_graph.nodes if node.ntype == GraphNodeType.INDEX]
column_type_nodes = [node for node in rel_graph.nodes if node.ntype == GraphNodeType.COLUMN]
self.assertEqual(len(index_type_nodes), 6)
self.assertEqual(len(column_type_nodes), 6)
def test_index_name_nodes(self):
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6]})
output = df.pivot(index='foo', columns='bar', values='baz')
options = GraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.INDEX_NAME_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = False
rel_graph: RelationGraph = RelationGraph(options)
rel_graph.from_input_output([df], output)
index_name_nodes = [node for node in rel_graph.nodes if node.ntype == GraphNodeType.INDEX_NAME]
column_name_nodes = [node for node in rel_graph.nodes if node.ntype == GraphNodeType.COL_INDEX_NAME]
self.assertEqual(len(index_name_nodes), 1)
self.assertEqual(len(column_name_nodes), 1)
def test_index_name_nodes_multiindex(self):
df = pd.DataFrame([(389.0, 'fly'), (24.0, 'fly'), (80.5, 'run'), (np.nan, 'jump')],
index=pd.MultiIndex.from_tuples(
[('bird', 'falcon'), ('bird', 'parrot'), ('mammal', 'lion'),
('mammal', 'monkey')], names=['class', 'name']),
                          columns=pd.MultiIndex.from_tuples([('speed', 'max'), ('species', 'type')]))
import logging
from typing import Callable, List, Optional
import pandas as pd
from oogeso import dto
from oogeso.core import devices
logger = logging.getLogger(__name__)
def get_device_from_model_name(model_name: str) -> Callable:
map_device_name_to_class = {
"powersource": devices.Powersource,
"powersink": devices.PowerSink,
"storageel": devices.StorageEl,
"compressorel": devices.CompressorEl,
"compressorgas": devices.CompressorGas,
"electrolyser": devices.Electrolyser,
"fuelcell": devices.FuelCell,
"gasheater": devices.GasHeater,
"gasturbine": devices.GasTurbine,
"heatpump": devices.HeatPump,
"pumpoil": devices.PumpOil,
"pumpwater": devices.PumpWater,
"separastor": devices.Separator,
"separator2": devices.Separator2,
"sinkel": devices.SinkEl,
"sinkheat": devices.SinkHeat,
"sinkgas": devices.SinkGas,
"sinkoil": devices.SinkOil,
"sinkwater": devices.SinkWater,
"sourceel": devices.SourceEl,
"sourcegas": devices.SourceGas,
"sourceoil": devices.SourceOil,
"sourcewater": devices.SourceWater,
"storagehydrogen": devices.StorageHydrogen,
"wellgaslift": devices.WellGasLift,
"wellproduction": devices.WellProduction,
}
if model_name in map_device_name_to_class:
return map_device_name_to_class[model_name]
else:
raise NotImplementedError(f"Device {model_name} has not been implemented.")
def get_class_from_dto(class_str: str) -> Callable:
"""
Search dto module for a callable that matches the signature given as class str
Fixme: Replace this (de-)serializer with a proper solution.
"""
if class_str in dto.__dict__.keys():
return dto.__dict__[class_str]
elif class_str.lower() in [x.lower() for x in dto.__dict__.keys()]:
return [v for k, v in dto.__dict__.items() if k.lower() == class_str.lower()][0]
elif class_str.lower().replace("_", "") in [x.lower() for x in dto.__dict__.keys()]:
return [v for k, v in dto.__dict__.items() if k.lower() == class_str.lower().replace("_", "")][0]
else:
raise NotImplementedError(f"Model {class_str} has not been implemented.")
def create_time_series_data(
df_forecast: pd.DataFrame,
df_nowcast: pd.DataFrame,
time_start: Optional[str],
time_end: Optional[str],
timestep_minutes: int,
resample_method: str = "linear",
) -> List[dto.TimeSeriesData]:
"""Rearrange and resample pandas timeseries to Oogeso data transfer object
The input dataframes should have a datetime index
"""
    # Combine forecast and nowcast timeseries into a single dataframe
    df_orig = pd.concat({"forecast": df_forecast, "nowcast": df_nowcast}, axis=1)
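# Hypothetical input sketch (added for illustration, not in the original file):
# both frames are expected to share a datetime index with one column per time
# series id, which is what the concat above combines before any resampling.
def _demo_time_series_inputs() -> None:
    index = pd.date_range("2022-01-01 00:00", periods=4, freq="15min")
    df_forecast = pd.DataFrame({"wind": [4.0, 4.5, 5.0, 5.5]}, index=index)
    df_nowcast = pd.DataFrame({"wind": [4.2, 4.4, 5.1, 5.3]}, index=index)
    combined = pd.concat({"forecast": df_forecast, "nowcast": df_nowcast}, axis=1)
    logger.info("combined columns: %s", list(combined.columns))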
"""Transformer for boolean data."""
import numpy as np
import pandas as pd
from rdt.transformers.base import BaseTransformer
from rdt.transformers.null import NullTransformer
class BinaryEncoder(BaseTransformer):
"""Transformer for boolean data.
This transformer replaces boolean values with their integer representation
transformed to float.
Null values are replaced using a ``NullTransformer``.
Args:
missing_value_replacement (object or None):
Indicate what to do with the null values. If an object is given, replace them
with the given value. If the string ``'mode'`` is given, replace them with the
most common value. If ``None`` is given, do not replace them.
Defaults to ``None``.
model_missing_values (bool):
Whether to create a new column to indicate which values were null or not. The column
will be created only if there are null values. If ``True``, create the new column if
there are null values. If ``False``, do not create the new column even if there
are null values. Defaults to ``False``.
"""
INPUT_SDTYPE = 'boolean'
DETERMINISTIC_TRANSFORM = True
DETERMINISTIC_REVERSE = True
null_transformer = None
def __init__(self, missing_value_replacement=None, model_missing_values=False):
self.missing_value_replacement = missing_value_replacement
self.model_missing_values = model_missing_values
def get_output_sdtypes(self):
"""Return the output sdtypes returned by this transformer.
Returns:
dict:
Mapping from the transformed column names to the produced sdtypes.
"""
output_sdtypes = {
'value': 'float',
}
if self.null_transformer and self.null_transformer.models_missing_values():
output_sdtypes['is_null'] = 'float'
return self._add_prefix(output_sdtypes)
def _fit(self, data):
"""Fit the transformer to the data.
Args:
data (pandas.Series):
Data to fit to.
"""
self.null_transformer = NullTransformer(
self.missing_value_replacement,
self.model_missing_values
)
self.null_transformer.fit(data)
def _transform(self, data):
"""Transform boolean to float.
The boolean values will be replaced by the corresponding integer
representations as float values.
Args:
data (pandas.Series):
Data to transform.
Returns
pandas.DataFrame or pandas.Series
"""
data = pd.to_numeric(data, errors='coerce')
return self.null_transformer.transform(data).astype(float)
def _reverse_transform(self, data):
"""Transform float values back to the original boolean values.
Args:
data (pandas.DataFrame or pandas.Series):
Data to revert.
Returns:
pandas.Series:
Reverted data.
"""
if not isinstance(data, np.ndarray):
data = data.to_numpy()
if self.missing_value_replacement is not None:
data = self.null_transformer.reverse_transform(data)
if isinstance(data, np.ndarray):
if data.ndim == 2:
data = data[:, 0]
        data = pd.Series(data)
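# Rough usage sketch (added for illustration; it exercises the private hooks
# defined above rather than the public RDT fit/transform wrappers, whose exact
# signatures are not reproduced in this file).
def _demo_binary_encoder():
    encoder = BinaryEncoder(missing_value_replacement=False)
    data = pd.Series([True, False, True, None])
    encoder._fit(data)
    transformed = encoder._transform(data)
    # Expected: 1.0/0.0 per value, with the null replaced by the fill value.
    print(list(transformed))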
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from scipy.spatial import cKDTree
def msd(traj, mpp, fps, max_lagtime=100, detail=False, pos_columns=['x', 'y']):
"""Compute the mean displacement and mean squared displacement of one
trajectory over a range of time intervals.
Parameters
----------
traj : DataFrame with one trajectory, including columns frame, x, and y
mpp : microns per pixel
fps : frames per second
max_lagtime : intervals of frames out to which MSD is computed
Default: 100
detail : See below. Default False.
Returns
-------
DataFrame([<x>, <y>, <x^2>, <y^2>, msd], index=t)
If detail is True, the DataFrame also contains a column N,
the estimated number of statistically independent measurements
that comprise the result at each lagtime.
Notes
-----
Input units are pixels and frames. Output units are microns and seconds.
See also
--------
imsd() and emsd()
"""
pos = traj.set_index('frame')[pos_columns]
t = traj['frame']
# Reindex with consecutive frames, placing NaNs in the gaps.
pos = pos.reindex(np.arange(pos.index[0], 1 + pos.index[-1]))
max_lagtime = min(max_lagtime, len(t)) # checking to be safe
lagtimes = 1 + np.arange(max_lagtime)
disp = pd.concat([pos.sub(pos.shift(lt)) for lt in lagtimes],
keys=lagtimes, names=['lagt', 'frames'])
results = mpp*disp.mean(level=0)
results.columns = ['<{}>'.format(p) for p in pos_columns]
results[['<{}^2>'.format(p) for p in pos_columns]] = mpp**2*(disp**2).mean(level=0)
results['msd'] = mpp**2*(disp**2).mean(level=0).sum(1) # <r^2>
# Estimated statistically independent measurements = 2N/t
if detail:
results['N'] = 2*disp.icol(0).count(level=0).div(Series(lagtimes))
results['lagt'] = results.index.values/fps
return results[:-1]
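# Minimal sanity check (hypothetical data, added for illustration): a single
# particle stepping one pixel per frame along x should give msd = (mpp*lagt)**2.
# Note the module targets a legacy pandas API (e.g. ``mean(level=...)``), so
# this assumes a pandas version from that era.
def _demo_msd():
    traj = DataFrame({'frame': np.arange(10),
                      'x': np.arange(10, dtype=float),
                      'y': np.zeros(10)})
    print(msd(traj, mpp=1.0, fps=1.0, max_lagtime=4))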
def imsd(traj, mpp, fps, max_lagtime=100, statistic='msd', pos_columns=['x', 'y']):
"""Compute the mean squared displacement of each particle.
Parameters
----------
traj : DataFrame of trajectories of multiple particles, including
columns particle, frame, x, and y
mpp : microns per pixel
fps : frames per second
max_lagtime : intervals of frames out to which MSD is computed
Default: 100
statistic : {'msd', '<x>', '<y>', '<x^2>', '<y^2>'}, default is 'msd'
The functions msd() and emsd() return all these as columns. For
imsd() you have to pick one.
Returns
-------
DataFrame([Probe 1 msd, Probe 2 msd, ...], index=t)
Notes
-----
Input units are pixels and frames. Output units are microns and seconds.
"""
ids = []
msds = []
# Note: Index is set by msd, so we don't need to worry
# about conformity here.
for pid, ptraj in traj.groupby('particle'):
msds.append(msd(ptraj, mpp, fps, max_lagtime, False, pos_columns))
ids.append(pid)
results = pd.concat(msds, keys=ids)
# Swap MultiIndex levels so that unstack() makes particles into columns.
results = results.swaplevel(0, 1)[statistic].unstack()
lagt = results.index.values.astype('float64')/float(fps)
results.set_index(lagt, inplace=True)
results.index.name = 'lag time [s]'
return results
def emsd(traj, mpp, fps, max_lagtime=100, detail=False, pos_columns=['x', 'y']):
"""Compute the ensemble mean squared displacements of many particles.
Parameters
----------
traj : DataFrame of trajectories of multiple particles, including
columns particle, frame, x, and y
mpp : microns per pixel
fps : frames per second
max_lagtime : intervals of frames out to which MSD is computed
Default: 100
detail : Set to True to include <x>, <y>, <x^2>, <y^2>. Returns
only <r^2> by default.
Returns
-------
Series[msd, index=t] or, if detail=True,
DataFrame([<x>, <y>, <x^2>, <y^2>, msd], index=t)
Notes
-----
Input units are pixels and frames. Output units are microns and seconds.
"""
ids = []
msds = []
for pid, ptraj in traj.reset_index(drop=True).groupby('particle'):
msds.append(msd(ptraj, mpp, fps, max_lagtime, True, pos_columns))
ids.append(pid)
msds = pd.concat(msds, keys=ids, names=['particle', 'frame'])
results = msds.mul(msds['N'], axis=0).mean(level=1) # weighted average
results = results.div(msds['N'].mean(level=1), axis=0) # weights normalized
# Above, lagt is lumped in with the rest for simplicity and speed.
# Here, rebuild it from the frame index.
if not detail:
return results.set_index('lagt')['msd']
return results
def compute_drift(traj, smoothing=0, pos_columns=['x', 'y']):
"""Return the ensemble drift, x(t).
Parameters
----------
traj : DataFrame of trajectories, including columns x, y, frame, and particle
smoothing : integer
Smooth the drift using a forward-looking rolling mean over
this many frames.
Returns
-------
drift : DataFrame([x, y], index=frame)
Examples
--------
compute_drift(traj).plot() # Default smoothing usually smooths too much.
compute_drift(traj, 0).plot() # not smoothed
compute_drift(traj, 15).plot() # Try various smoothing values.
drift = compute_drift(traj, 15) # Save good drift curves.
corrected_traj = subtract_drift(traj, drift) # Apply them.
"""
# Probe by particle, take the difference between frames.
delta = pd.concat([t.set_index('frame', drop=False).diff()
for p, t in traj.groupby('particle')])
# Keep only deltas between frames that are consecutive.
delta = delta[delta['frame'] == 1]
# Restore the original frame column (replacing delta frame).
del delta['frame']
delta.reset_index(inplace=True)
dx = delta.groupby('frame').mean()
if smoothing > 0:
        dx = pd.rolling_mean(dx, smoothing, min_periods=0)
def alpha_diversity_scatter_plot(TaXon_table_xlsx, meta_data_to_test, width, heigth, scatter_size, taxonomic_level, path_to_outdirs, template, theme, font_size, color_discrete_sequence):
import PySimpleGUI as sg
import pandas as pd
import numpy as np
from pathlib import Path
import webbrowser
import plotly.graph_objects as go
TaXon_table_xlsx = Path(TaXon_table_xlsx)
Meta_data_table_xlsx = Path(str(path_to_outdirs) + "/" + "Meta_data_table" + "/" + TaXon_table_xlsx.stem + "_metadata.xlsx")
TaXon_table_df = pd.read_excel(TaXon_table_xlsx, header=0).fillna("unidentified")
TaXon_table_samples = TaXon_table_df.columns.tolist()[10:]
Meta_data_table_df = | pd.read_excel(Meta_data_table_xlsx, header=0) | pandas.read_excel |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 00:20:45 2020
@author: mhrahman
"""
#%%
import json,os , glob, shutil
import re
import pandas as pd
import matplotlib.pyplot as plt
import itertools
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import scipy.stats as st
from sklearn import preprocessing
import pickle
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.xmeans import xmeans
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
#%%
## convert to time gap in second------------------------
#p_path = r'D:\Molla\Stoughton_data\Data_stoughtn\Data_value\Final_Data_value'
p_path = r'D:\Molla\Uark_Data\Extracted_data\Valid_action'
os.chdir(p_path)
all_file = os.listdir(p_path)
#%%
csv = pd.read_csv(all_file[25])
csv = csv[csv.Timegap != 0]
ax = plt.gca()
for j, txt in enumerate(list(csv.Action)):
    ax.annotate(txt, (j, csv.Timegap.iloc[j]))
plt.plot(pd.to_datetime(pd.read_csv(all_file[0]).Timestamp))
plt.plot(csv.Timegap)
plt.ylabel("Time gaps in second")
def Greaterthannumber(val,actions,number):
if len(val) != len(actions):
return
for i in range(0,len(actions)):
        if val.iloc[i] > number:
            plt.annotate(actions.iloc[i], (i,val.iloc[i]),rotation = -90, fontsize = 8)
Greaterthannumber(csv.Timegap,csv.Action,20)
plt.show()
bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100,110,120,130,140,150,160,170,180,190,200]
fu = csv['Timegap'].value_counts(bins=bins, sort=False)
bins = list(range(1, int(max(csv.Timegap)) ,1))
#%%
# Frequency
def pdf (file_list):
for i in range(len(file_list)):
os.chdir(p_path)
file_nm = os.path.splitext(file_list[i])[0]
csv = pd.read_csv(file_list[i])
csv = csv[csv.Timegap != 0]
bins = list(range(1, int(max(csv.Timegap)) ,1))
sns.histplot(csv.Timegap,bins = bins)
#out = r'D:\Molla\Stoughton_data\Distribution\PDF_timegap'
out = r'D:\Molla\Uark_Data\Result\Timegap\PDF'
os.chdir(out)
plt.savefig('{}.png'.format(file_nm),bbox_inches='tight',dpi = 600)
plt.close()
pdf(all_file)
def cdf (file_list):
for i in range(len(file_list)):
os.chdir(p_path)
file_nm = os.path.splitext(file_list[i])[0]
csv = pd.read_csv(file_list[i])
csv = csv[csv.Timegap != 0]
sns.kdeplot(csv.Timegap,cumulative = True)
#out = r'D:\Molla\Stoughton_data\Distribution\CDF_timegap'
out = r'D:\Molla\Uark_Data\Result\Timegap\CDF'
os.chdir(out)
plt.savefig('{}.png'.format(file_nm),bbox_inches='tight',dpi = 600)
plt.close()
cdf(all_file)
#%%
def get_best_distribution(data):
# dist_names = ["norm", "exponweib", "weibull_max", "weibull_min","expon","pareto", "genextreme","gamma","beta",'halfcauchy','lognorm']
dist_names = ["genextreme"]
dist_results = []
params = {}
for dist_name in dist_names:
dist = getattr(st, dist_name)
param = dist.fit(data)
params[dist_name] = param
# Applying the Kolmogorov-Smirnov test
D, p = st.kstest(data, dist_name, args=param)
print("p value for "+dist_name+" = "+str(p))
dist_results.append((dist_name, p))
# select the best fitted distribution
best_dist, best_p = (max(dist_results, key=lambda item: item[1]))
# store the name of the best fit and its p value
print("Best fitting distribution: "+str(best_dist))
print("Best p value: "+ str(best_p))
print("Parameters for the best fit: "+ str(params[best_dist]))
return best_dist, best_p, params[best_dist]
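#%%
# Quick self-check of the fitting helper above on synthetic data (illustrative
# addition; the study's own time gaps are fitted in the cells below).
synthetic = st.genextreme.rvs(0.1, loc=10, scale=3, size=500, random_state=0)
best_name, best_p, best_params = get_best_distribution(synthetic)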
#%%
def pdf (file_list):
for i in range(len(file_list)):
os.chdir(p_path)
file_nm = os.path.splitext(file_list[i])[0]
csv = pd.read_csv(file_list[i])
csv = csv[csv.Timegap != 0]
bins = list(range(1, int(max(csv.Timegap)) ,1))
sns.distplot(csv.Timegap,bins = bins)
y = np.asarray(list(csv.Timegap))
x = np.arange(len(y))
number_of_bins = len(y)
bin_cutoffs = np.linspace(np.percentile(y,0), np.percentile(y,99),number_of_bins)
h = plt.hist(y, bins = bin_cutoffs, color='red')
        k = get_best_distribution(csv.Timegap)
        dist = getattr(st, k[0])
        param = k[2]
        pdf_fitted = dist.pdf(np.arange(len(y)), *param[:-2], loc = param[-2], scale = param[-1])
scale_pdf = np.trapz(h[0],h[1][:-1])/np.trapz(pdf_fitted,x)
pdf_fitted *= scale_pdf
plt.plot(pdf_fitted)
plt.show()
#%%
def pdf_fitted(csv):
y = np.asarray(list(csv.Timegap))
x = np.arange(len(y))
number_of_bins = len(y)
# bin_cutoff = np.linspace(np.percentile(y,0),np.percentile(y,99),number_of_bins)
h = plt.hist(y,bins= 300)
k = get_best_distribution(y)
dist = getattr(st,k[0])
param = k[2]
# pdf_fit = dist.pdf(x,param[:-2],loc = param[-2],scale = param[-1])
    pdf_fit = dist.pdf(x, *param)
scaled_pdf = np.trapz(h[0],h[1][:-1])/np.trapz(pdf_fit,x)
# plt.xlim(0,300)
pdf_fit *= scaled_pdf
plt.plot(pdf_fit,'--g',linewidth = 0.6,label = 'GEV distribution')
plt.legend(loc = 'upper right')
# plt.xlabel("Time gap (second)")
# plt.ylabel("Frequecy of time gap")
plt.show()
#%%
#p_path = r'D:\Molla\Stoughton_data\Data_stoughtn\Data_value\Final_Data_value'
p_path = r'D:\Molla\Uark_Data\Extracted_data\Valid_action'
os.chdir(p_path)
all_file = os.listdir(p_path)
for i in range(len(all_file)):
os.chdir(p_path)
file_nm = os.path.splitext(all_file[i])[0]
csv = pd.read_csv(all_file[i])
csv = csv[csv.Timegap != 0]
pdf_fitted(csv)
#out = r'D:\Molla\Stoughton_data\Distribution\New_dist'
out = r'D:\Molla\Uark_Data\Result\Timegap\Fitter_dist'
os.chdir(out)
plt.savefig('{}.png'.format(file_nm),bbox_inches='tight',dpi = 600)
plt.close()
# Distribution for all
#%%
#p_path = r'D:\Molla\Stoughton_data\Data_stoughtn\Data_value\Final_Data_value'
p_path = r'D:\Molla\Uark_Data\Extracted_data\Valid_action'
os.chdir(p_path)
all_file = os.listdir(p_path)
file = []
dist_name = []
parameters = []
param_1 = []
param_2 = []
param_3 = []
for i in range(len(all_file)):
os.chdir(p_path)
file_nm = os.path.splitext(all_file[i])[0]
csv = pd.read_csv(all_file[i])
csv = csv[csv.Timegap != 0]
k = get_best_distribution(csv.Timegap)
dist_name.append(k[0])
file.append(file_nm)
a = k[2][0]
b = k[2][1]
c = k[2][2]
param_1.append(a)
param_2.append(b)
param_3.append(c)
Df = pd.DataFrame({
'Param 1': param_1,
'param 2':param_2,
'param 3': param_3})
Only_values = Df.values
#%%# Saving the embedding
#loc = r'D:\Molla\Stoughton_data\Models\New folder\Saved_embedding'
loc = r'D:\Molla\Uark_Data\Result\Saved_emd'
#loc = r'D:\Molla\Stoughton_data\For_Journal\Saved_embedding'
os.chdir(loc)
with open('Timegap.pkl','wb') as f:
pickle.dump(Df.values,f)
#%%
def elbow_plot(matrix):
wcss = []
for i in range(1,10):
Kmeans = KMeans(n_clusters= i, init= 'k-means++', random_state= 42)
Kmeans.fit(matrix)
wcss.append(Kmeans.inertia_)
plt.plot(range(1,10), wcss)
plt.title('The elbow method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
elbow_plot(Only_values)
def plot_kmean(num_cluster,vector,n_component):
reduced_data_PCA = PCA(n_components= n_component).fit_transform(vector)
kmeans = KMeans(init='k-means++', n_clusters= num_cluster, n_init=10)
kmeans.fit(vector)
labels = kmeans.fit_predict(vector)
print(labels)
fig = plt.figure(figsize=(5.5, 3))
ax = Axes3D(fig, rect=[0, 0, .7, 1], elev=48, azim=134)
ax.scatter(reduced_data_PCA[:, 1], reduced_data_PCA[:, 0], reduced_data_PCA[:, 2],
               c=labels.astype(float), edgecolor="k", s=50)
plt.show()
return kmeans
kmeans = plot_kmean(3,Only_values,3)
action_clust = []
for j in range(kmeans.n_clusters):
at = []
for i in np.where(kmeans.labels_ == j)[0]:
at.append(file[i])
action_clust.append(at)
df = pd.DataFrame(action_clust).T
columns = ["0", "1","2"]
df.columns = columns
## LOAD design_output
path = r'D:\Molla\Stoughton_data\Distribution'
os.chdir(path)
design_output = pd.read_csv('Design_output.csv')
design_output.set_index('Computer ID')
mean = []
std = []
for i in range(len(df.columns)):
cluster_wise = []
for j in range(len(df['{}'.format(i)])):
design = df['{}'.format(i)][j]
if design in list(design_output['Computer ID']):
a = design_output.loc[design_output['Computer ID'] == design, 'Co-efficient'].iloc[0]
cluster_wise.append(a)
m = np.mean(cluster_wise)
s = np.std(cluster_wise)
mean.append(m)
std.append(s)
df.loc[len(df)] = mean
df.loc[len(df)] = std
df = df.rename(index = {df.index[-2]:'mean',df.index[-1]:'std'})
out_path = r'D:\Molla\Stoughton_data\Distribution'
os.chdir(out_path)
df.to_csv('Timegap_cluster.csv', index = True)
# Additional for distribution-----
distribution = "expon"
data = np.asarray(list(csv.Timegap))
dist = getattr(st, distribution)
param = dist.fit(data)
# Get random numbers from distribution
norm = dist.rvs(loc=param[-2], scale=param[-1],size = len(data))
norm.sort()
# Create figure
fig = plt.figure(figsize=(8,5))
# qq plot
ax1 = fig.add_subplot(121) # Grid of 2x2, this is suplot 1
ax1.plot(norm,data,"o")
min_value = np.floor(min(min(norm),min(data)))
max_value = np.ceil(max(max(norm),max(data)))
ax1.plot([min_value,max_value],[min_value,max_value],'r--')
ax1.set_xlim(min_value,max_value)
ax1.set_xlabel('Theoretical quantiles')
ax1.set_ylabel('Observed quantiles')
title = 'qq plot for ' + distribution +' distribution'
ax1.set_title(title)
# pp plot
ax2 = fig.add_subplot(122)
# Calculate cumulative distributions
bins = np.percentile(norm,range(0,101))
data_counts, bins = np.histogram(data,bins)
norm_counts, bins = np.histogram(norm,bins)
cum_data = np.cumsum(data_counts)
cum_norm = np.cumsum(norm_counts)
cum_data = cum_data / max(cum_data)
cum_norm = cum_norm / max(cum_norm)
# plot
ax2.plot(cum_norm,cum_data,"o")
min_value = np.floor(min(min(cum_norm),min(cum_data)))
max_value = np.ceil(max(max(cum_norm),max(cum_data)))
ax2.plot([min_value,max_value],[min_value,max_value],'r--')
ax2.set_xlim(min_value,max_value)
ax2.set_xlabel('Theoretical cumulative distribution')
ax2.set_ylabel('Observed cumulative distribution')
title = 'pp plot for ' + distribution +' distribution'
ax2.set_title(title)
# Display plot
plt.tight_layout(pad=4)
plt.show()
#%%
#X_means clustering -------------------------------------------------------------
reduced_data = PCA(n_components=3).fit_transform(Only_values)
amount_initial_centers = 2
initial_centers = kmeans_plusplus_initializer(reduced_data,amount_initial_centers).initialize()
xmeans_instance = xmeans(reduced_data, initial_centers, 20)
xmeans_instance.process()
# Extract clustering results: clusters and their centers
clusters = xmeans_instance.get_clusters()
centers = xmeans_instance.get_centers()
# Visualize clustering results
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, reduced_data,marker = 'o',markersize = 20)
visualizer.append_cluster(centers, None, marker='*', markersize=100)
visualizer.show()
#%%
#For converting clusters assignment
clusts = []
order = np.concatenate(clusters).argsort()
clusts = list(np.concatenate([ [i]*len(e) for i,e in enumerate(clusters) ])[order])
print(clusts)
#plot cluster
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
scatter = ax.scatter(np.array(centers)[:, 1],
np.array(centers)[:, 0],
np.array(centers)[:, 2],
s = 250,
marker='o',
c='red',
label='centroids')
scatter = ax.scatter(reduced_data[:, 1], reduced_data[:, 0], reduced_data[:, 2],
c=clusts,s=20, cmap='winter')
#ax.set_title('X-Means Clustering')
ax.set_xlabel('Principal component 1')
ax.set_ylabel('Principal component 2')
ax.set_zlabel('Principal component 3')
ax.legend()
plt.show()
out_path = r'D:\Molla\Uark_Data\Result\Timegap\Result'
#out_path = r'D:\Molla\Stoughton_data\For_Journal\Result\Time_gap'
os.chdir(out_path)
fig.savefig('Timegap.tif', format='tif', dpi=300)
#%%
# For getting the student ID
action_clust = []
for j in range(len(clusters)):
at = []
for i in np.where(np.array(clusts) == j)[0]:
at.append(file[i])
action_clust.append(at)
df = pd.DataFrame(action_clust)
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
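# Illustrative addition (not part of the original suite): a quick check of the
# upcasting priority implemented above.
def test_get_upcast_box_example():
    assert get_upcast_box(pd.Index, Series([1.0])) is Series
    assert get_upcast_box(DataFrame, pd.Index([1.0])) is DataFrame
    assert get_upcast_box(np.ndarray, pd.Index([1.0])) is pd.Index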
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handles as the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x
for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError,
match="'?true_divide'? cannot use operands"):
rng / pd.NaT
with pytest.raises(TypeError, match='Cannot divide NaTType by'):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
rng = timedelta_range('1 days', '10 days',)
rng = tm.box_expected(rng, box_with_array)
other = np.timedelta64('NaT')
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, box_with_array)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match='Cannot divide'):
# GH#23829
1 / idx
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours,
box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
expected = pd.Float64Index([12, np.nan, 24])
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
other = np.array([2, 4, 2], dtype='m8[h]')
result = rng / other
tm.assert_equal(result, expected)
result = rng / tm.box_expected(other, box_with_array)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
tm.assert_equal(result, expected)
result = rng / list(other)
tm.assert_equal(result, expected)
# reversed op
expected = 1 / expected
result = other / rng
tm.assert_equal(result, expected)
result = tm.box_expected(other, box_with_array) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
tm.assert_equal(result, expected)
result = list(other) / rng
tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError):
rng / other
with pytest.raises(ValueError):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array,
scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = ('floor_divide cannot use operands|'
'Cannot divide int by Timedelta*')
with pytest.raises(TypeError, match=pattern):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi // two_hours
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 Day', '2 Days', '0 Days'] * 3)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % three_days
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
def test_td64arr_mod_int(self, box_with_array):
tdi = timedelta_range('1 ns', '10 ns', periods=10)
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 ns', '0 ns'] * 5)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % 2
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
2 % tdarr
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = ['0 Days', '1 Day', '0 Days'] + ['3 Days'] * 6
expected = TimedeltaIndex(expected)
expected = tm.box_expected(expected, box_with_array)
result = three_days % tdarr
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(three_days, tdarr)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others
def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with pytest.raises(TypeError, match=pattern):
td1 * scalar_td
with pytest.raises(TypeError, match=pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box_with_array)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box_with_array, two):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser / two
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match='Cannot divide'):
two / tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_rmul_numeric_array(self, box_with_array, vector, dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser * vector
tm.assert_equal(result, expected)
result = vector * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_div_numeric_array(self, box_with_array, vector, dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
vector = vector.astype(dtype)
expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = | tm.box_expected(expected, xbox) | pandas.util.testing.box_expected |
# -*- coding: utf-8 -*-
""" test function application """
import pytest
from string import ascii_lowercase
from pandas import (date_range, Timestamp,
Index, MultiIndex, DataFrame, Series)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.compat import product as cart_product
import numpy as np
import pandas.util.testing as tm
import pandas as pd
from .common import MixIn
# describe
# --------------------------------
class TestDescribe(MixIn):
def test_apply_describe_bug(self):
grouped = self.mframe.groupby(level='first')
grouped.describe() # it works!
def test_series_describe_multikey(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
assert_series_equal(result['mean'], grouped.mean(), check_names=False)
assert_series_equal(result['std'], grouped.std(), check_names=False)
assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
assert_series_equal(result, expected)
def test_series_index_name(self):
grouped = self.df.loc[:, ['C']].groupby(self.df['A'])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == 'A'
def test_frame_describe_multikey(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in self.tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
labels=[[0] * len(group.columns), range(len(group.columns))])
group = pd.DataFrame(group.values,
columns=group_col,
index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = self.tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
expected = self.tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
labels=[[0, 0, 1, 1], range(len(expected.index))])
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex(self):
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
'y': [10, 20, 30, 40, 50] * 3,
'z': [100, 200, 300, 400, 500] * 3})
df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={'k': 'key'})
pytest.raises(ValueError, lambda: df1.groupby('k').describe())
pytest.raises(ValueError, lambda: df2.groupby('key').describe())
def test_frame_describe_unstacked_format(self):
# GH 4792
prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
df = pd.DataFrame({'PRICE': prices,
'VOLUME': volumes})
result = df.groupby('PRICE').VOLUME.describe()
data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
expected = pd.DataFrame(data,
index=pd.Index([24990, 25499], name='PRICE'),
columns=['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# nunique
# --------------------------------
class TestNUnique(MixIn):
def test_series_groupby_nunique(self):
def check_nunique(df, keys, as_index=True):
for sort, dropna in cart_product((False, True), repeat=2):
gr = df.groupby(keys, as_index=as_index, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, as_index=as_index, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
if not as_index:
right = right.reset_index(drop=True)
assert_series_equal(left, right, check_names=False)
days = date_range('2015-08-23', periods=10)
for n, m in cart_product(10 ** np.arange(2, 6), (10, 100, 1000)):
frame = DataFrame({
'jim': np.random.choice(
list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)
})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
check_nunique(frame, ['jim'], as_index=False)
check_nunique(frame, ['jim', 'joe'], as_index=False)
def test_nunique(self):
df = DataFrame({
'A': list('abbacc'),
'B': list('abxacc'),
'C': list('abbacx'),
})
expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
result = df.groupby('A', as_index=False).nunique()
tm.assert_frame_equal(result, expected)
# as_index
expected.index = list('abc')
expected.index.name = 'A'
result = df.groupby('A').nunique()
tm.assert_frame_equal(result, expected)
# with na
result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
tm.assert_frame_equal(result, expected)
# dropna
expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
index=list('abc'))
expected.index.name = 'A'
result = df.replace({'x': None}).groupby('A').nunique()
tm.assert_frame_equal(result, expected)
def test_nunique_with_object(self):
# GH 11077
data = pd.DataFrame(
[[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[500, 5, 'Edith']],
columns=['amount', 'id', 'name']
)
result = data.groupby(['id', 'amount'])['name'].nunique()
index = MultiIndex.from_arrays([data.id, data.amount])
expected = pd.Series([1] * 5, name='name', index=index)
tm.assert_series_equal(result, expected)
def test_nunique_with_empty_series(self):
# GH 12553
data = pd.Series(name='name')
result = data.groupby(level=0).nunique()
expected = pd.Series(name='name', dtype='int64')
tm.assert_series_equal(result, expected)
def test_nunique_with_timegrouper(self):
# GH 13453
test = pd.DataFrame({
'time': [Timestamp('2016-06-28 09:35:35'),
Timestamp('2016-06-28 16:09:30'),
Timestamp('2016-06-28 16:46:28')],
'data': ['1', '2', '3']}).set_index('time')
result = test.groupby(pd.Grouper(freq='h'))['data'].nunique()
expected = test.groupby(
pd.Grouper(freq='h')
)['data'].apply(pd.Series.nunique)
tm.assert_series_equal(result, expected)
# count
# --------------------------------
class TestCount(MixIn):
def test_groupby_timedelta_cython_count(self):
df = DataFrame({'g': list('ab' * 2),
'delt': np.arange(4).astype('timedelta64[ns]')})
expected = Series([
2, 2
], index=pd.Index(['a', 'b'], name='g'), name='delt')
result = df.groupby('g').delt.count()
tm.assert_series_equal(expected, result)
def test_count(self):
n = 1 << 15
dr = date_range('2015-08-30', periods=n // 10, freq='T')
df = DataFrame({
'1st': np.random.choice(
list(ascii_lowercase), n),
'2nd': np.random.randint(0, 5, n),
'3rd': np.random.randn(n).round(3),
'4th': np.random.randint(-10, 10, n),
'5th': np.random.choice(dr, n),
'6th': np.random.randn(n).round(3),
'7th': np.random.randn(n).round(3),
'8th': np.random.choice(dr, n) - np.random.choice(dr, 1),
'9th': np.random.choice(
list(ascii_lowercase), n)
})
for col in df.columns.drop(['1st', '2nd', '4th']):
df.loc[np.random.choice(n, n // 10), col] = np.nan
df['9th'] = df['9th'].astype('category')
for key in '1st', '2nd', ['1st', '2nd']:
left = df.groupby(key).count()
right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
assert_frame_equal(left, right)
# GH5610
# count counts non-nulls
df = pd.DataFrame([[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, np.nan]],
columns=['A', 'B', 'C'])
count_as = df.groupby('A').count()
count_not_as = df.groupby('A', as_index=False).count()
expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
assert_frame_equal(count_not_as, expected.reset_index())
assert_frame_equal(count_as, expected)
count_B = df.groupby('A')['B'].count()
assert_series_equal(count_B, expected['B'])
def test_count_object(self):
df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, 'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
3, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3,
'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
1, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
def test_count_cross_type(self): # GH8169
vals = np.hstack((np.random.randint(0, 5, (100, 2)), np.random.randint(
0, 2, (100, 2))))
df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd'])
df[df == 2] = np.nan
expected = df.groupby(['c', 'd']).count()
for t in ['float32', 'object']:
df['a'] = df['a'].astype(t)
df['b'] = df['b'].astype(t)
result = df.groupby(['c', 'd']).count()
tm.assert_frame_equal(result, expected)
def test_lower_int_prec_count(self):
df = DataFrame({'a': np.array(
[0, 1, 2, 100], np.int8),
'b': np.array(
[1, 2, 3, 6], np.uint32),
'c': np.array(
[4, 5, 6, 8], np.int16),
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2],
'b': [2, 2],
'c': [2, 2]}, index=pd.Index(list('ab'),
name='grp'))
tm.assert_frame_equal(result, expected)
def test_count_uses_size_on_exception(self):
class RaisingObjectException(Exception):
pass
class RaisingObject(object):
def __init__(self, msg='I will raise inside Cython'):
super(RaisingObject, self).__init__()
self.msg = msg
def __eq__(self, other):
# gets called in Cython to check that raising calls the method
raise RaisingObjectException(self.msg)
df = DataFrame({'a': [RaisingObject() for _ in range(4)],
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2]}, index=pd.Index(
list('ab'), name='grp'))
tm.assert_frame_equal(result, expected)
# size
# --------------------------------
class TestSize(MixIn):
def test_size(self):
grouped = self.df.groupby(['A', 'B'])
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
grouped = self.df.groupby('A')
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
grouped = self.df.groupby('B')
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
df = DataFrame(np.random.choice(20, (1000, 3)), columns=list('abc'))
for sort, key in cart_product((False, True), ('a', 'b', ['a', 'b'])):
left = df.groupby(key, sort=sort).size()
right = df.groupby(key, sort=sort)['c'].apply(lambda a: a.shape[0])
assert_series_equal(left, right, check_names=False)
# GH11699
df = | DataFrame([], columns=['A', 'B']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import itertools
import matplotlib.cm as cm
import networkx.algorithms.community as nxcom
from community import community_louvain
import os
def main(randseed, resolution):
def get_corr(data):
data = np.array(data)
data_mean = np.mean(data, axis=0)
data_std = np.std(data, axis=0)
data = (data-data_mean) / data_std
data_c = np.corrcoef(data)
return data_c
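# Note (descriptive, not from the original source): get_corr z-scores each column of the
# input and then returns np.corrcoef of the rows of the standardised array; it is defined
# here but does not appear to be called elsewhere in the portion of main() shown.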
def assemble_det_relation_df(relation_df, seg, raw0, side):
# side should be 0 or 1
if side==1:
side_ind = 6
elif side==2:
side_ind = 7
det_relation_df = pd.DataFrame(columns=['from_det', 'to_det', 'flow', 'linkID'], index=range(len(relation_df)))
for i in range(len(relation_df)):
from_node = relation_df.iloc[i, 0]
to_node = relation_df.iloc[i, 1]
from_det = seg.loc[seg[0]==from_node].iloc[0, side_ind] # 0 indicates node
to_det = seg.loc[seg[0]==to_node].iloc[0, side_ind]
det_relation_df.loc[i, 'from_det'] = from_det
det_relation_df.loc[i, 'to_det'] = to_det
try:
# the edge flow is the average of the two detectors' flows
det_relation_df.iloc[i, 2] = (q_det[from_det] + q_det[to_det]) / 2
except:
# if the detector id is missing, use the node information to look up the flow instead
try:
from_node = seg[seg[6]==from_det].iloc[0, 0]
to_node = seg[seg[6]==to_det].iloc[0, 0]
except:
from_node = seg[seg[7]==from_det].iloc[0, 0]
to_node = seg[seg[7]==to_det].iloc[0, 0]
from_det = raw0[raw0['id2']==from_node]['id'].iloc[0]
to_det = raw0[raw0['id2']==to_node]['id'].iloc[0]
det_relation_df.loc[i, 'flow'] = (q_det[from_det] + q_det[to_det]) / 2
det_relation_df.loc[i, 'linkID'] = i
return det_relation_df
def get_det_partition_results(seg, partition_results, side):
if side==1:
side_ind = 6
elif side==2:
side_ind = 7
det_partition_results = pd.DataFrame([], columns=[0, 'det'], index=range(len(partition_results)))
for i in range(len(partition_results)):
det_partition_results.loc[i, 'det'] = seg[seg[0]==partition_results.loc[i, 'node']].iloc[0, side_ind]
det_partition_results.loc[:, 0] = partition_results.loc[:, 0]
return det_partition_results
def get_bound_x_df(relation_df, partition_results):
relation_df['if boundary'] = ''
for i in range(len(relation_df)):
class1 = partition_results[partition_results.iloc[:, 1]==relation_df.iloc[i, 0]].iloc[0, 0]
class2 = partition_results[partition_results.iloc[:, 1]==relation_df.iloc[i, 1]].iloc[0, 0]
if class1 != class2:
relation_df.loc[i, 'if boundary'] = 1
else:
relation_df.loc[i, 'if boundary'] = 0
bound_x_df = relation_df[relation_df['if boundary']==1].iloc[:, 0:2]
# bound_nodes = np.array(bound_x_df).flatten()
return bound_x_df
def compare(det_id, b1, b2, det_partition_results):
if_adjust = 0
ini_var1 = np.var(np.array(b1[0])) # initial variance
ini_var2 = np.var(np.array(b2[0]))
ini_mean1 = np.mean(np.array(b1[0])) # initial mean
ini_mean2 = np.mean(np.array(b2[0]))
# take det_id out of class1
b1_ = b1[b1['det']!=det_id][0]
# compute the variance of class1 without det_id
var1 = np.var(np.array(b1_))
# add det_id to class2
b2_ = np.append(np.array(b2[0]), np.array(b1[b1['det']==det_id][0]))
# compute the variance of class2 with det_id added
var2 = np.var(b2_)
# compare the variances: if both decrease, keep the change, otherwise leave the assignment as it was
if var1<ini_var1 and var2<ini_var2:
#ipdb.set_trace()
class2 = b2.iloc[0, 3] # 3 stands for class
b_det.loc[b_det['det']==det_id, 'class'] = class2
det_partition_results.loc[det_partition_results['det']==det_id, 0] = class2
if_adjust = 1
return b_det, det_partition_results, if_adjust
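# Boundary-adjustment rule implemented by compare() (summary of the logic above): a boundary
# detector is moved from its current community (b1) to the neighbouring one (b2) only when
# removing it lowers the variance of b1's mean occupancy values AND adding it lowers the
# variance of b2's, i.e. the move has to make both communities more homogeneous in density.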
seg = pd.read_csv('./data/segement.csv', header=None)
raw0 = pd.read_csv(open('./data/id2000.csv'), header=0, index_col=0)
q = pd.read_csv('./data/q_20_aggragated.csv', index_col = 0)
b = pd.read_csv('./data/b_20_aggragated.csv', index_col = 0) # time occupancy, (density)
q_det = q.T.mean()
b_det = b.T.mean()
nodes = np.array(raw0['id2'])
relation_df = pd.read_csv('./data/edges_all.csv', header = None)
# 1 and 2 are different directions
det_relation_df1 = assemble_det_relation_df(relation_df, seg, raw0, side=1)
det_relation_df2 = assemble_det_relation_df(relation_df, seg, raw0, side=2)
relation_df['flow'] = ''
relation_df['linkID'] = ''
for i in range(len(relation_df)):
#ipdb.set_trace()
det1 = raw0[raw0['id2']==relation_df.iloc[i, 0]]['id'].iloc[0]
det2 = raw0[raw0['id2']==relation_df.iloc[i, 1]]['id'].iloc[0]
relation_df.loc[i, 'flow'] = (q_det[det1] + q_det[det2]) / 2
relation_df.loc[i, 'linkID'] = i
relation = np.array(relation_df.iloc[:, :3]) # relation and flow (weight)
G = nx.Graph()
G.add_nodes_from(nodes)
G.add_weighted_edges_from(relation) # add weight from flow
pos0 = raw0.iloc[:, 1:3]
pos0 = np.array(pos0)
vnode = pos0
npos = dict(zip(nodes, vnode)) # build the mapping between node ids and coordinates as a dict
partition = community_louvain.best_partition(G, resolution=resolution, weight='weight', random_state=randseed)
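# best_partition returns a dict mapping every node id to an integer community label;
# random_state makes the stochastic Louvain optimisation reproducible, and resolution
# controls how coarse or fine the resulting communities are.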
# draw the graph
pos = nx.spring_layout(G)
# color the nodes according to their partition
cmap = cm.get_cmap('viridis', max(partition.values()) + 1)
fig_net = plt.figure(figsize = (10,10))
nx.draw_networkx(G, pos = npos, node_size=20, node_color=list(partition.values()), with_labels=False)
partition_results = pd.DataFrame(data = list(partition.values()))
partition_results['node'] = nodes
det_partition_results1 = get_det_partition_results(seg, partition_results, side=1)
det_partition_results2 = get_det_partition_results(seg, partition_results, side=2)
partition_results.to_csv('./res/%i_res%i_partition_results.csv'%(randseed, resolution), index=False)
det_partition_results1.to_csv('./res/%i_res%i_det_partition_results1.csv'%(randseed, resolution), index=False)
det_partition_results2.to_csv('./res/%i_res%i_det_partition_results2.csv'%(randseed, resolution), index=False)
b_det = pd.DataFrame(b_det)
b_det['det'] = b_det.index
b_det['node']=''
b_det.index=range(402)
for i in range(len(b_det)):
try:
b_det.iloc[i, 2] = seg[seg[6]==b_det.iloc[i, 1]].iloc[0, 0]
except:
b_det.iloc[i, 2] = seg[seg[7]==b_det.iloc[i, 1]].iloc[0, 0]
b_det['class'] = ''
for i in range(len(b_det)):
b_det.iloc[i, 3] = partition_results[partition_results['node']==b_det.iloc[i, 2]].iloc[0, 0]
org_bound_dets_df1 = get_bound_x_df(det_relation_df1, det_partition_results1)
org_bound_dets_df2 = get_bound_x_df(det_relation_df2, det_partition_results2)
org_det_partition_results1 = det_partition_results1.copy()
org_det_partition_results2 = det_partition_results2.copy()
# Boundary adjustment
for i in range(len(org_bound_dets_df1)):
adj_time = 0
while 1:
bound_nodes_df = get_bound_x_df(relation_df, partition_results)
bound_dets_df1 = get_bound_x_df(det_relation_df1, det_partition_results1)
bound_dets_df2 = get_bound_x_df(det_relation_df2, det_partition_results2)
n1d1 = bound_dets_df1.iloc[i, 0]
n1d2 = bound_dets_df2.iloc[i, 0]
n2d1 = bound_dets_df1.iloc[i, 1]
n2d2 = bound_dets_df2.iloc[i, 1]
try:
node1 = b_det[b_det['det']==n1d1].iloc[0, 2] # 2 means node
except:
node1 = b_det[b_det['det']==n1d2].iloc[0, 2] # 2 means node
try:
node2 = b_det[b_det['det']==n2d1].iloc[0, 2]
except:
node2 = b_det[b_det['det']==n2d2].iloc[0, 2]
class1 = b_det[b_det['node']==node1].iloc[0, 3] # 3 means class
class2 = b_det[b_det['node']==node2].iloc[0, 3]
b1 = b_det[b_det['class']==class1]
b2 = b_det[b_det['class']==class2]
#ipdb.set_trace()
b_det, det_partition_results1, if_adjust11 = compare(n1d1, b1, b2, det_partition_results1)
b_det, det_partition_results2, if_adjust12 = compare(n1d2, b1, b2, det_partition_results2)
b_det, det_partition_results1, if_adjust21 = compare(n2d1, b2, b1, det_partition_results1)
b_det, det_partition_results2, if_adjust22 = compare(n2d2, b2, b1, det_partition_results2)
if_adjust = if_adjust11 + if_adjust12 + if_adjust21 + if_adjust22
adj_time += if_adjust
#ipdb.set_trace()
if if_adjust==0:
break
# print('%i times done for boundary adjustment'%adj_time)
# else:
# print('%i times done for boundary adjustment'%adj_time)
# print(b_det[b_det['class']==class1].shape[0])
# print(det_partition_results1[det_partition_results1[0]==class1].shape[0])
b_det.to_csv('./res/%i_res%i_b_det.csv'%(randseed, resolution))
id_2000 = | pd.read_csv('./data/id2000.csv', index_col=0) | pandas.read_csv |
import os
from os.path import join as pjoin
import re
import multiprocessing as mp
from multiprocessing import Pool
from Bio.Seq import Seq
from Bio import SeqIO, SeqFeature
from Bio.SeqRecord import SeqRecord
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
import time
import sqlite3 as sql
from collections import defaultdict
import gc
import sys
from gtr_utils import change_ToWorkingDirectory, make_OutputDirectory, merge_ManyFiles, multiprocesssing_Submission, generate_AssemblyId, chunks
def blast_Seed(assembly_id, query_file, blast_db_path, blast_hits_path):
    '''
    Run blastp for query_file against the per-assembly BLAST database and write
    comma-separated hits (outfmt 10) to <blast_hits_path>/<assembly_id>.csv
    '''
full_db_path = pjoin(blast_db_path, assembly_id)
full_hits_path = pjoin(blast_hits_path, assembly_id+'.csv')
os.system('blastp -query {} -db {} -max_target_seqs 100 -evalue 1e-6 -outfmt "10 qseqid sseqid mismatch positive gaps ppos pident qcovs evalue bitscore qframe sframe sstart send slen qstart qend qlen" -num_threads 1 -out {}'.format(query_file, full_db_path, full_hits_path))
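# Illustrative usage sketch (hypothetical arguments): blast_Seed shells out to blastp, so it
# assumes the NCBI BLAST+ binaries are on the PATH and that the per-assembly database at
# <blast_db_path>/<assembly_id> was already built with makeblastdb, e.g.
#   blast_Seed('GCF_000583895', 'pdua.txt', 'blast_database', 'temp_blast_results')
# The CSV it writes holds the 18 outfmt-10 columns in the order listed in the command above,
# which is the column order assumed later when the hits file is read back in extract_SeedRegions.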
## write seed region gene content to file ##
def test_RegionOverlap(x1,x2,y1,y2):
'''
    Asks: is the larger of the two range start coordinates less than or equal to the smaller of the two range end coordinates?
    Returns True if the ranges [x1, x2] and [y1, y2] overlap, otherwise False
'''
return( max(x1,y1) <= min(x2,y2) )
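# Hedged mini-example with made-up coordinates: [1, 5] and [4, 9] share positions, so
# max(1, 4) <= min(5, 9) holds; [1, 5] and [6, 9] do not overlap.
#   test_RegionOverlap(1, 5, 4, 9)   # True
#   test_RegionOverlap(1, 5, 6, 9)   # False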
def filter_BlastHits(df, identity_cutoff, coverage_cutoff):
'''
Remove hits that do not meet identity and coverage cutoffs
'''
df = df[ (df['pident'] >= identity_cutoff ) & (df['qcovs'] >= coverage_cutoff) ]
return(df)
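# Hedged mini-example (hypothetical numbers): with identity_cutoff=90 and coverage_cutoff=80
# only rows meeting both thresholds survive.
#   demo = pd.DataFrame({'pident': [95.0, 50.0], 'qcovs': [85, 99]})
#   filter_BlastHits(demo, identity_cutoff=90, coverage_cutoff=80)   # keeps only the first row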
def extract_SeedRegions(assembly_id, upstream_search_length, downstream_search_length, identity_cutoff, coverage_cutoff, hits_cutoff):
'''
Check for overlaps in seed hits. When this occurs, pick the best hit for overlap.
Extract x basepairs upstream and downstream of a seed blast hit.
Write to sql database under table 'seed_regions'
'''
#--------- Troubleshooting --------#
# pd.set_option('display.max_columns', None)
# upstream_search_length, downstream_search_length, identity_cutoff, coverage_cutoff, hits_cutoff = 10000,10000,20,80, 1
# # UserInput_main_dir = '/projects/b1042/HartmannLab/alex/GeneGrouper_test/testbed/dataset1/test1'#'/Users/owlex/Dropbox/Documents/Northwestern/Hartmann_Lab/syntenease_project/gtr/testbed/dataset1/test1'
# UserInput_main_dir = '/Users/owlex/Dropbox/Documents/Northwestern/Hartmann_Lab/syntenease_project/gtr/testbed/dataset3/test1'
# UserInput_output_dir_name = pjoin(UserInput_main_dir,'pdua')
# os.chdir(UserInput_main_dir)
# conn = sql.connect(pjoin(UserInput_main_dir,'genomes.db')) # genome database holds all base info
# conn2 = sql.connect(pjoin(UserInput_output_dir_name,'seed_results.db')) # seed_results database holds all seed search specific info.
# assembly_id = 'GCF_009800085' #GCF_009800085_02009 dbscan_label 3 ,global_strand -1 has more
# assembly_id = 'GCF_000583895' #GCF_000583895_02788 dbscan_label 13 ,global_strand 1 has fewer
# assembly_id = 'GCF_001077835'
# assembly_id = 'GCF_000251025'
#--------- Troubleshooting --------#
try:
df_hits = pd.read_csv(pjoin('temp_blast_results',assembly_id+'.csv'),names=['qseqid','sseqid','mismatch', 'positive','gaps', 'ppos','pident','qcovs','evalue','bitscore','qframe','sframe','sstart', 'send', 'slen', 'qstart', 'qend', 'qlen'])
except:
return(pd.DataFrame())
pass
    ## filter hit results, exit function if no hits pass the cutoffs
df_hits = filter_BlastHits(df=df_hits, identity_cutoff=identity_cutoff, coverage_cutoff=coverage_cutoff)
if len(df_hits) == 0:
return(pd.DataFrame())
pass
## read in whole genome info
df_g = pd.read_sql_query("SELECT contig_id, locus_tag, cds_start, cds_end, strand, pseudo_check from gb_features WHERE assembly_id = '{}' ".format(assembly_id), sql.connect('genomes.db'))
## merge genome level data with blast hit results. sort by identity and evalue so that best hits are at the top of the table
df_hits = df_g.merge(df_hits[['sseqid','pident','qcovs','evalue']],left_on='locus_tag',right_on='sseqid',how='inner')
# if there are multiple hits that are identical, keep the one with the highest evalue
df_hits = df_hits.groupby('locus_tag').first().reset_index()
df_hits = df_hits.sort_values(['pident','evalue'],ascending=[False,False])
# keep only n total hits (default is all hits)
if type(hits_cutoff) == int:
df_hits = df_hits.iloc[:hits_cutoff, :]
## define upstream and downstream search lengths. The distance inputs for upstream/start and downstream/end are switched when the seed gene is in the -1 frame
## The usl and dsl are not strand specific now. They are relative to the genomes' upstream and downstream positions
df_hits['usl'] = np.where(df_hits['strand']==1, df_hits['cds_start'] - upstream_search_length, df_hits['cds_start'] - downstream_search_length)
df_hits['dsl'] = np.where(df_hits['strand']==1, df_hits['cds_end'] + downstream_search_length, df_hits['cds_end'] + upstream_search_length)
## for each contig, test for overlapping region ranges. Keep the region with the best hit.
## append subsets to new df called df_hits_parsed
df_hits_parsed = pd.DataFrame()
for cid in df_hits['contig_id'].unique().tolist():
        # Loop over each contig separately so that hits whose ranges overlap numerically but lie on different contigs are not treated as overlapping.
df_hits_contig = df_hits[df_hits['contig_id']==cid]
for hid in df_hits_contig['locus_tag']:
try:
# create range to compare all other ranges to
hid_x1, hid_x2 = df_hits_contig[df_hits_contig['contig_id']==cid][df_hits_contig['locus_tag']==hid]['usl'].item(), df_hits_contig[df_hits_contig['contig_id']==cid][df_hits_contig['locus_tag']==hid]['dsl'].item()
# test for overlap with all other ranges on the same contig
df_hits_contig['overlap'] = df_hits_contig[df_hits_contig['contig_id']==cid].apply(lambda x: test_RegionOverlap(x1=hid_x1, x2=hid_x2, y1=x['usl'], y2=x['dsl']), axis=1)
keep_index = (df_hits_contig['overlap']==True).idxmax()
df_hits_contig['overlap_representative'] = np.where(df_hits_contig.index==keep_index,True,False)
                # remove rows that are not representatives but do overlap. keep everything else.
df_hits_contig = df_hits_contig[ (df_hits_contig['overlap']==False) | (df_hits_contig['overlap_representative'] == True) | (df_hits_contig['overlap']).isnull() == True ]
except:
continue
df_hits_parsed = df_hits_parsed.append(df_hits_contig)
    ## build a dataframe that contains genes upstream and downstream of boundaries for each seed blast hit ##
df_rkeep = pd.DataFrame()
for h in df_hits_parsed.iterrows():
# get coordinates for upstream and downstream gene boundaries
# ----- attempt start ----- #
# cds_start for strand == 1
# cds_end for strand == -1
if h[1][4] == 1:
cds_search_list = df_g[df_g['contig_id'] == h[1][1]]['cds_start'].tolist()
usl_locus_tag_bp = min(cds_search_list, key = lambda x: abs(x-h[1][10]))
cds_search_list = df_g[df_g['contig_id'] == h[1][1]]['cds_end'].tolist()
dsl_locus_tag_bp = min(cds_search_list, key = lambda x: abs(x-h[1][11]))
if h[1][4] == -1:
cds_search_list = df_g[df_g['contig_id'] == h[1][1]]['cds_start'].tolist()
usl_locus_tag_bp = min(cds_search_list, key = lambda x: abs(x-h[1][10]))
cds_search_list = df_g[df_g['contig_id'] == h[1][1]]['cds_end'].tolist()
dsl_locus_tag_bp = min(cds_search_list, key = lambda x: abs(x-h[1][11]))
# subset the main genome dataframe to contain only ORFs that are within the designated bounds
if h[1][4] == 1:
df_r = df_g[ df_g['contig_id'] == h[1][1] ][ ( df_g['cds_end'] >= usl_locus_tag_bp) & (df_g['cds_start'] <= dsl_locus_tag_bp ) ]
if h[1][4] == -1:
df_r = df_g[ df_g['contig_id'] == h[1][1] ][ ( df_g['cds_end'] >= usl_locus_tag_bp) & (df_g['cds_start'] <= dsl_locus_tag_bp ) ]
# ----- attempt end ----- #
# append the subsetted dataframe to a new dataframe that will contain all region extractions
df_r['region_id'] = h[1][0]
df_rkeep = df_rkeep.append(df_r,ignore_index=True)
## add blast data to the extracted regions, remove unneeded columns, and return the dataframe
df_rkeep['assembly_id'] = assembly_id
df_rkeep = df_rkeep[['region_id','assembly_id','contig_id','locus_tag','strand','pseudo_check']]
df_rkeep = df_rkeep.merge(df_hits_parsed[['sseqid','pident','qcovs','evalue']], left_on='region_id',right_on='sseqid')
df_rkeep = df_rkeep.drop(columns='sseqid')
return(df_rkeep)
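# Illustrative call (hypothetical assembly id and thresholds). In BuildQueryFiles below this
# function is presumably fanned out over every genome with multiprocessing, and the returned
# frames end up in the 'seed_regions' table of seed_results.db:
#   df_region = extract_SeedRegions('GCF_000583895', 10000, 10000, 20, 80, 1)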
def write_RegionSeqsToFile(assembly_id, output_dir_name):
'''
Write a .faa containing all sequences that were found to be within a defined seed region for a given assembly_id
'''
df_s = pd.read_sql_query("SELECT locus_tag from seed_regions WHERE assembly_id = '{}'".format(assembly_id), conn2)
if len(df_s) == 0:
return()
seqs_to_write = df_s['locus_tag'].tolist()
with open(pjoin(output_dir_name,'regions',assembly_id+'.faa'),'w') as outfile:
for record in SeqIO.parse(pjoin('assemblies',assembly_id+'.faa'),'fasta'):
if record.id in seqs_to_write:
outfile.write('>{}\n'.format(record.id))
outfile.write('{}\n'.format(str(record.seq)))
def BuildQueryFiles(
UserInput_main_dir,
UserInput_genome_inputs_dir,
UserInput_output_dir_name,
UserInput_query_filename_path,
UserInput_upstream_search_length,
UserInput_downstream_search_length,
UserInput_identity_threshold,
UserInput_coverage_threshold,
UserInput_hitcount_threshold,
UserInput_processes,
):
    '''
    Set up the run directories, BLAST the seed query against every .gbff genome in
    parallel, then identify and extract the seed regions for each genome.
    '''
#--------- Troubleshooting --------#
# pd.set_option('display.max_columns', None)
# mp.set_start_method("fork")
# # UserInput_main_dir = '/projects/b1042/HartmannLab/alex/GeneGrouper_test/testbed/dataset1/test1'#'/Users/owlex/Dropbox/Documents/Northwestern/Hartmann_Lab/syntenease_project/gtr/testbed/dataset1/test1'
# UserInput_main_dir = '/Users/owlex/Dropbox/Documents/Northwestern/Hartmann_Lab/syntenease_project/gtr/testbed/dataset3/test1'
# UserInput_output_dir_name = pjoin(UserInput_main_dir,'pdua')
# UserInput_genome_inputs_dir = '/Users/owlex/Dropbox/Documents/Northwestern/Hartmann_Lab/syntenease_project/gtr/testbed/dataset3/core'
# UserInput_query_filename_path = pjoin(UserInput_genome_inputs_dir,'pdua.txt')
# UserInput_upstream_search_length = 2000
# UserInput_downstream_search_length = 18000
# UserInput_identity_threshold = 15
# UserInput_coverage_threshold = 70
# UserInput_hitcount_threshold = 5
# UserInput_processes = 8
# os.chdir(UserInput_main_dir)
# # conn = sql.connect(pjoin(UserInput_main_dir,'genomes.db')) # genome database holds all base info
# # conn2 = sql.connect(pjoin(UserInput_output_dir_name,'seed_results.db')) # seed_results database holds all seed search specific info.
#--------- Troubleshooting --------#
make_OutputDirectory(new_directory=UserInput_main_dir)
change_ToWorkingDirectory(directory_name=UserInput_main_dir)
make_OutputDirectory(new_directory=UserInput_output_dir_name)
make_OutputDirectory(new_directory=pjoin(UserInput_output_dir_name,'regions'))
make_OutputDirectory(new_directory=pjoin(UserInput_output_dir_name,'ortholog_clusters'))
make_OutputDirectory(new_directory=pjoin(UserInput_output_dir_name,'internal_data'))
make_OutputDirectory(new_directory=pjoin(UserInput_output_dir_name,'subgroups'))
make_OutputDirectory(new_directory=pjoin(UserInput_output_dir_name,'visualizations'))
make_OutputDirectory(new_directory='temp_blast_results')
global conn, conn2
conn = sql.connect(pjoin(UserInput_main_dir,'genomes.db')) # genome database holds all base info
conn2 = sql.connect(pjoin(UserInput_output_dir_name,'seed_results.db')) # seed_results database holds all seed search specific info.
##---- Blast seed against database ----- ##
r_params = [[generate_AssemblyId(input_gbff_file=f), UserInput_query_filename_path, 'blast_database', 'temp_blast_results'] for f in os.listdir(UserInput_genome_inputs_dir) if f.endswith('.gbff')]
print('Blasting seed against {} genomes'.format(len(r_params)))
start_t = time.time()
with Pool(processes=UserInput_processes) as p:
p.starmap(blast_Seed, r_params[:])
print((time.time()-start_t)/60)
##---- identify and extract seed regions ----- ##
r_params = [[generate_AssemblyId(input_gbff_file=f), UserInput_upstream_search_length, UserInput_downstream_search_length,UserInput_identity_threshold,UserInput_coverage_threshold, UserInput_hitcount_threshold] for f in os.listdir(UserInput_genome_inputs_dir) if f.endswith('.gbff')]
print('Identifying and extracting seed regions for {} genomes'.format(len(r_params)))
start_t = time.time()
df_regions = | pd.DataFrame() | pandas.DataFrame |
import wbgapi as wb
import xlsxwriter
import pandas as pd
# Function to mine data from the API
def get_values_1970_2019(indicator, country="KEN"):
data = wb.data.DataFrame(indicator, country)
data_T = data.T
clean_data = data_T.dropna()
    data_1970_2019 = clean_data.loc["YR1970":"YR2019"]
    return data_1970_2019
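# Hedged usage sketch (requires network access to the World Bank API): pulling a single
# indicator returns a frame indexed by year labels ("YR1970" ... "YR2019") with one column
# per economy code, e.g. "KEN".
#   inflation_ken = get_values_1970_2019("FP.CPI.TOTL.ZG")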
# Indicator variables
inflation_rate_indicator = ["FP.CPI.TOTL.ZG"]
real_interest_indicator = ["FR.INR.RINR"]
official_exchange_rate_indicator = ["PA.NUS.FCRF"]
pop_growth_rate_indicator = ["SP.POP.GROW"]
real_gdp_indicator = ["NY.GDP.MKTP.CD"]
broad_money_pc_gdp_indicator = ["FM.LBL.BMNY.GD.ZS"]
population_indicator = ["SP.POP.TOTL"]
per_capita_USD_indicator = ["NY.GDP.PCAP.CD"]
gdp_growth_indicator = ["NY.GDP.MKTP.KD.ZG"]
lending_interest_indicator = ["FR.INR.LEND"]
deposit_interest_rate_indicator = ["FR.INR.DPST"]
current_exports_indicator = ["NE.EXP.GNFS.CD"]
unemp_modeled_indicator = ["SL.UEM.TOTL.ZS"]
imports_USD_indicator = ["NE.IMP.GNFS.CD"]
cpi_indicator = ["FP.CPI.TOTL"]
millitary_expenditure_indicator = ["MS.MIL.XPND.CD"]
gvt_exp_on_education_indicator = ["SE.XPD.TOTL.GD.ZS"]
life_expc_years_indicator = ["SP.DYN.LE00.IN"]
co2_emissions_per_capita_indicator = ["EN.ATM.CO2E.PC"]
health_exp_per_capita_indicator = ["SH.XPD.CHEX.PC.CD"]
health_expe_pc_GDP_indicator = ["SH.XPD.CHEX.GD.ZS"]
risk_premium_indicator = ["FR.INR.RISK"]
# Output from the api
real_interest = get_values_1970_2019(real_interest_indicator)
inflation = get_values_1970_2019(inflation_rate_indicator)
ex_rate = get_values_1970_2019(official_exchange_rate_indicator)
pop_growth_rate = get_values_1970_2019(pop_growth_rate_indicator)
real_gdp = get_values_1970_2019(real_gdp_indicator)
broad_money = get_values_1970_2019(broad_money_pc_gdp_indicator)
pop = get_values_1970_2019(population_indicator)
per_capita = get_values_1970_2019(per_capita_USD_indicator)
gdp_growth = get_values_1970_2019(gdp_growth_indicator)
lending_interest = get_values_1970_2019(lending_interest_indicator)
deposit_rate = get_values_1970_2019(deposit_interest_rate_indicator)
current_exports = get_values_1970_2019(current_exports_indicator)
modeled_unemp = get_values_1970_2019(unemp_modeled_indicator)
imports = get_values_1970_2019(imports_USD_indicator)
consumer_price_index = get_values_1970_2019(cpi_indicator)
millitary_expenditure = get_values_1970_2019(millitary_expenditure_indicator)
education_exp_pec_of_gdp = get_values_1970_2019(gvt_exp_on_education_indicator)
life_expc_years = get_values_1970_2019(life_expc_years_indicator)
co2_emmisions = get_values_1970_2019(co2_emissions_per_capita_indicator)
health_exp_per_capita_USD = get_values_1970_2019(health_exp_per_capita_indicator)
health_expe_pc_GDP = get_values_1970_2019(health_expe_pc_GDP_indicator)
risk_premium = get_values_1970_2019(risk_premium_indicator)
# Create a dataframe
df = pd.DataFrame(pop)
df = df.rename(columns={"KEN": "population"})
df["broad_money"] = broad_money
df["real_gdp"] = real_gdp
df["pop_growth"] = pop_growth_rate
df["exc_rate"] = ex_rate
df["inflation"] = inflation
df["real_interest"] = real_interest
df["per_capita_USD"] = per_capita
df["gdp_growth_rate"] = gdp_growth
df["lending_interest_rate"] = lending_interest
df["deposit_interest_rate"] = deposit_rate
df["current_exports"] = current_exports
df["modeled_unemp"] = modeled_unemp
df["imports_in_USD"] = imports
df["cpi"] = consumer_price_index
df["millitary_expenditure_USD"] = millitary_expenditure
df["edu_exp_pc_of_GDP"] = education_exp_pec_of_gdp
df["life_exp_years"] = life_expc_years
df["co2_emmisions_per_capita"] = co2_emmisions
df["health_exp_percapita_USD"] = health_exp_per_capita_USD
df["health_exp_pc_of_GDP"] = health_expe_pc_GDP
df["risk_premium"] = risk_premium
df["treasury_rate"] = df["lending_interest_rate"] - df["risk_premium"]
print(df)
# Create a pandas excel writer
writer = | pd.ExcelWriter("project_data.xlsx", engine="xlsxwriter") | pandas.ExcelWriter |
from scipy.interpolate import InterpolatedUnivariateSpline, interp1d
from .date_time import datetime64_to_seconds, seconds_to_datetime64
import numpy as np
import pandas as pd
from .utils import _sampling_rate
def interpolate(df, verbose=True, prev_df=None, next_df=None, sr=None, start_time=None, stop_time=None, fill_big_gap_with_na=True, gap_threshold = 1, method="spline"):
"""Make timestamps with consistent intervals with interpolation.
    Delete duplicate timestamps and interpolate to a consistent sampling rate using the provided interpolation method (default: spline). Big gaps (more than 1 s) are not interpolated.
Keyword arguments:
start_time, stop_time -- specified start and end time to be used in the interpolated dataframe. If there are multiple sensor data object from different sensor that may not have exactly the same start time, user can provide one to be used by every one of them. So that it will be easier for feature calculation and data merging later.
sr -- desired sampling rate
fill_big_gap_with_na -- whether big gaps should be filled with NaN or just simply not included in the interpolated data frame
gap_threshold -- time in second to be counted as big gap
    method -- interpolation method; currently only 'spline' and 'linear' are supported
"""
if verbose:
print("Original sampling rate: " + str(_sampling_rate(df)))
if sr is None:
sr = _sampling_rate(df)
else:
sr = np.float64(sr)
if verbose:
print("New sampling rate: " + str(sr))
# save current file's start and stop time
chunk_st = datetime64_to_seconds(df.iloc[0, 0].to_datetime64().astype('datetime64[h]'))
chunk_et = datetime64_to_seconds(df.iloc[df.shape[0]-1, 0].to_datetime64().astype('datetime64[h]') + np.timedelta64(1, 'h'))
combined_df = pd.concat([prev_df, df, next_df], axis=0)
# Drop duplication
cols = combined_df.columns.values
combined_df.drop_duplicates(
subset=cols[0], keep="first", inplace=True)
ts = combined_df.iloc[:,0].values
# Convert timestamp column to unix numeric timestamps
ts = datetime64_to_seconds(ts)
combined_st = ts[0]
combined_et = ts[-1]
if start_time is None:
start_time = ts[0]
if stop_time is None:
stop_time = ts[-1]
# make sure st and et are also in unix timestamps
if type(start_time) != np.float64:
start_time = datetime64_to_seconds(start_time)
stop_time = datetime64_to_seconds(stop_time)
if chunk_st < start_time:
chunk_st = start_time
if chunk_et > stop_time:
chunk_et = stop_time
chunk_st = seconds_to_datetime64([chunk_st])[0]
chunk_et = seconds_to_datetime64([chunk_et])[0]
# make the reference timestamp for interpolation
    ref_ts = np.linspace(start_time, stop_time, int(np.ceil((stop_time - start_time) * sr)))
# only get the combined_df part
combined_ref_ts = ref_ts[(ref_ts >= combined_st) & (ref_ts < combined_et)]
# check whether there are big gaps in the data, we don't interpolate
# for big gaps!
big_gap_positions = check_large_gaps(combined_df, ts, gap_threshold = gap_threshold)
values = combined_df[cols[1:cols.size]].values
if big_gap_positions.size == 1:
if verbose:
print("Use regular interpolation")
# no big gap then just interpolate regularly
print(ts.shape)
print(values.shape)
print(combined_ref_ts.shape)
new_ts, new_values = interpolate_regularly(ts, values, combined_ref_ts, sr, method)
else:
if verbose:
print("Use interpolation with big gaps: " + str(big_gap_positions.size))
# big gaps found, interpolate by chunks
new_ts, new_values = interpolate_for_big_gaps(big_gap_positions, ts, values, combined_ref_ts, sr, method)
# Convert the interpolated timestamp column and the reference timestamp
# column back to datetime
new_ts = seconds_to_datetime64(new_ts)
combined_ref_ts = seconds_to_datetime64(combined_ref_ts)
# make new dataframe
new_df = pd.DataFrame(
new_values, columns=cols[1:cols.size], copy=False)
new_df.insert(0, cols[0], new_ts)
# Fill big gap with NaN if set
if fill_big_gap_with_na:
new_df = new_df.set_index(cols[0]).reindex(
| pd.Index(combined_ref_ts, name=cols[0]) | pandas.Index |
import hashlib
import math
import numpy as np
import pprint
import pytest
import random
import re
import subprocess
import sys
import tempfile
import json
from catboost import (
CatBoost,
CatBoostClassifier,
CatBoostRegressor,
CatBoostError,
EFstrType,
FeaturesData,
Pool,
cv,
sum_models,
train,)
from catboost.eval.catboost_evaluation import CatboostEvaluation, EvalType
from catboost.utils import eval_metric, create_cd, get_roc_curve, select_threshold
from catboost.utils import DataMetaInfo, TargetStats, compute_training_options
import os.path
from pandas import read_table, DataFrame, Series, Categorical
from six import PY3
from six.moves import xrange
from catboost_pytest_lib import (
DelayedTee,
binary_path,
data_file,
local_canonical_file,
permute_dataset_columns,
remove_time_from_json,
test_output_path,
generate_random_labeled_set
)
if sys.version_info.major == 2:
import cPickle as pickle
else:
import _pickle as pickle
pytest_plugins = "list_plugin",
fails_on_gpu = pytest.mark.fails_on_gpu
EPS = 1e-5
BOOSTING_TYPE = ['Ordered', 'Plain']
OVERFITTING_DETECTOR_TYPE = ['IncToDec', 'Iter']
NONSYMMETRIC = ['Lossguide', 'Depthwise']
TRAIN_FILE = data_file('adult', 'train_small')
TEST_FILE = data_file('adult', 'test_small')
CD_FILE = data_file('adult', 'train.cd')
NAN_TRAIN_FILE = data_file('adult_nan', 'train_small')
NAN_TEST_FILE = data_file('adult_nan', 'test_small')
NAN_CD_FILE = data_file('adult_nan', 'train.cd')
CLOUDNESS_TRAIN_FILE = data_file('cloudness_small', 'train_small')
CLOUDNESS_TEST_FILE = data_file('cloudness_small', 'test_small')
CLOUDNESS_CD_FILE = data_file('cloudness_small', 'train.cd')
QUERYWISE_TRAIN_FILE = data_file('querywise', 'train')
QUERYWISE_TEST_FILE = data_file('querywise', 'test')
QUERYWISE_CD_FILE = data_file('querywise', 'train.cd')
QUERYWISE_CD_FILE_WITH_GROUP_WEIGHT = data_file('querywise', 'train.cd.group_weight')
QUERYWISE_CD_FILE_WITH_GROUP_ID = data_file('querywise', 'train.cd.query_id')
QUERYWISE_CD_FILE_WITH_SUBGROUP_ID = data_file('querywise', 'train.cd.subgroup_id')
QUERYWISE_TRAIN_PAIRS_FILE = data_file('querywise', 'train.pairs')
QUERYWISE_TRAIN_PAIRS_FILE_WITH_PAIR_WEIGHT = data_file('querywise', 'train.pairs.weighted')
QUERYWISE_TEST_PAIRS_FILE = data_file('querywise', 'test.pairs')
AIRLINES_5K_TRAIN_FILE = data_file('airlines_5K', 'train')
AIRLINES_5K_TEST_FILE = data_file('airlines_5K', 'test')
AIRLINES_5K_CD_FILE = data_file('airlines_5K', 'cd')
SMALL_CATEGORIAL_FILE = data_file('small_categorial', 'train')
SMALL_CATEGORIAL_CD_FILE = data_file('small_categorial', 'train.cd')
BLACK_FRIDAY_TRAIN_FILE = data_file('black_friday', 'train')
BLACK_FRIDAY_TEST_FILE = data_file('black_friday', 'test')
BLACK_FRIDAY_CD_FILE = data_file('black_friday', 'cd')
OUTPUT_MODEL_PATH = 'model.bin'
OUTPUT_COREML_MODEL_PATH = 'model.mlmodel'
OUTPUT_CPP_MODEL_PATH = 'model.cpp'
OUTPUT_PYTHON_MODEL_PATH = 'model.py'
OUTPUT_JSON_MODEL_PATH = 'model.json'
OUTPUT_ONNX_MODEL_PATH = 'model.onnx'
PREDS_PATH = 'predictions.npy'
PREDS_TXT_PATH = 'predictions.txt'
FIMP_NPY_PATH = 'feature_importance.npy'
FIMP_TXT_PATH = 'feature_importance.txt'
OIMP_PATH = 'object_importances.txt'
JSON_LOG_PATH = 'catboost_info/catboost_training.json'
TARGET_IDX = 1
CAT_FEATURES = [0, 1, 2, 4, 6, 8, 9, 10, 11, 12, 16]
model_diff_tool = binary_path("catboost/tools/model_comparator/model_comparator")
np.set_printoptions(legacy='1.13')
class LogStdout:
def __init__(self, file):
self.log_file = file
def __enter__(self):
self.saved_stdout = sys.stdout
sys.stdout = self.log_file
return self.saved_stdout
def __exit__(self, exc_type, exc_value, exc_traceback):
sys.stdout = self.saved_stdout
self.log_file.close()
def compare_canonical_models(model, diff_limit=0):
return local_canonical_file(model, diff_tool=[model_diff_tool, '--diff-limit', str(diff_limit)])
def map_cat_features(data, cat_features):
result = []
for i in range(data.shape[0]):
result.append([])
for j in range(data.shape[1]):
result[i].append(str(data[i, j]) if j in cat_features else data[i, j])
return result
def _check_shape(pool, object_count, features_count):
return np.shape(pool.get_features()) == (object_count, features_count)
def _check_data(data1, data2):
return np.all(np.isclose(data1, data2, rtol=0.001, equal_nan=True))
def _count_lines(afile):
with open(afile, 'r') as f:
num_lines = sum(1 for line in f)
return num_lines
def _generate_nontrivial_binary_target(num, seed=20181219, prng=None):
'''
Generate binary vector with non zero variance
:param num:
:return:
'''
if prng is None:
prng = np.random.RandomState(seed=seed)
def gen():
return prng.randint(0, 2, size=num)
if num <= 1:
return gen()
y = gen() # 0/1 labels
while y.min() == y.max():
y = gen()
return y
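# Hedged example: with a fixed RandomState the helper is reproducible, and for num > 1 the
# while-loop above guarantees the returned 0/1 vector contains both classes.
#   _generate_nontrivial_binary_target(5, prng=np.random.RandomState(0))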
def _generate_random_target(num, seed=20181219, prng=None):
if prng is None:
prng = np.random.RandomState(seed=seed)
return prng.random_sample((num,))
def set_random_weight(pool, seed=20181219, prng=None):
if prng is None:
prng = np.random.RandomState(seed=seed)
pool.set_weight(prng.random_sample(pool.num_row()))
if pool.num_pairs() > 0:
pool.set_pairs_weight(prng.random_sample(pool.num_pairs()))
def verify_finite(result):
inf = float('inf')
for r in result:
assert(r == r)
assert(abs(r) < inf)
def append_param(metric_name, param):
return metric_name + (':' if ':' not in metric_name else ';') + param
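# Hedged examples of the separator logic above: ':' introduces the first parameter and ';'
# separates any further ones.
#   append_param('AUC', 'hints=skip_train~false')            # -> 'AUC:hints=skip_train~false'
#   append_param('Quantile:alpha=0.1', 'use_weights=false')  # -> 'Quantile:alpha=0.1;use_weights=false'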
# returns (features DataFrame, cat_feature_indices)
def load_pool_features_as_df(pool_file, cd_file, target_idx):
data = read_table(pool_file, header=None, dtype=str)
data.drop([target_idx], axis=1, inplace=True)
return (data, Pool(pool_file, column_description=cd_file).get_cat_feature_indices())
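# Hedged usage sketch: load the adult training pool as a string-typed DataFrame (target column
# dropped) together with the categorical feature indices reported by the Pool object.
#   features_df, cat_idx = load_pool_features_as_df(TRAIN_FILE, CD_FILE, TARGET_IDX)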
# Test cases begin here ########################################################
def test_load_file():
assert _check_shape(Pool(TRAIN_FILE, column_description=CD_FILE), 101, 17)
def test_load_list():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = map_cat_features(pool.get_features(), cat_features)
label = pool.get_label()
assert _check_shape(Pool(data, label, cat_features), 101, 17)
def test_load_ndarray():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = np.array(map_cat_features(pool.get_features(), cat_features))
label = np.array(pool.get_label())
assert _check_shape(Pool(data, label, cat_features), 101, 17)
@pytest.mark.parametrize('dataset', ['adult', 'adult_nan', 'querywise'])
def test_load_df_vs_load_from_file(dataset):
train_file, cd_file, target_idx, other_non_feature_columns = {
'adult': (TRAIN_FILE, CD_FILE, TARGET_IDX, []),
'adult_nan': (NAN_TRAIN_FILE, NAN_CD_FILE, TARGET_IDX, []),
'querywise': (QUERYWISE_TRAIN_FILE, QUERYWISE_CD_FILE, 2, [0, 1, 3, 4])
}[dataset]
pool1 = Pool(train_file, column_description=cd_file)
data = read_table(train_file, header=None)
labels = | DataFrame(data.iloc[:, target_idx], dtype=np.float32) | pandas.DataFrame |
import numpy as np
import pandas as pd
from perceptron import MLP
import seaborn as sns
import matplotlib.pyplot as plt
if __name__ == "__main__":
# create a dataset to train and test a network
x_train = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
y_train = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
# create a Multilayer Perceptron with one hidden layer
mlp = MLP(4, [2], 4, bias=False)
# train network
x = mlp.train(x_train, y_train, 1, 0.2, verbose=True)
# plot a MSE for each outputs and avg output
mlp.plot_mse('example_plot')
# plot activations on hidden layer each training session
list_epochs = [i+1 for i in range(1000)]
df_hidden = | pd.DataFrame(mlp.hidden_activates, columns=['1 neuron', '2 neuron']) | pandas.DataFrame |
import pandas as pd
import numpy as np
from typing import Union, List, Optional, Iterable
import h5py
#######################
## Loading metadata ##
#######################
def _load_samples_metadata(model):
samples_metadata = pd.DataFrame(
[
[cell, group]
for group, cell_list in model.samples.items()
for cell in cell_list
],
columns=["sample", "group"],
)
if "samples_metadata" in model.model:
if len(list(model.model["samples_metadata"][model.groups[0]].keys())) > 0:
_samples_metadata = pd.concat(
[
pd.concat(
[
pd.Series(model.model["samples_metadata"][g][k])
for k in model.model["samples_metadata"][g].keys()
],
axis=1,
)
for g in model.groups
],
axis=0,
)
_samples_metadata.columns = list(
model.model["samples_metadata"][model.groups[0]].keys()
)
if "group" in _samples_metadata.columns:
del _samples_metadata["group"]
if "sample" in _samples_metadata.columns:
del _samples_metadata["sample"]
samples_metadata = pd.concat(
[
samples_metadata.reset_index(drop=True),
_samples_metadata.reset_index(drop=True),
],
axis=1,
)
# Decode objects as UTF-8 strings
for column in samples_metadata.columns:
if samples_metadata[column].dtype == "object":
try:
samples_metadata[column] = [
i.decode() for i in samples_metadata[column].values
]
except (UnicodeDecodeError, AttributeError):
pass
samples_metadata = samples_metadata.set_index("sample")
return samples_metadata
def _load_features_metadata(model):
features_metadata = pd.DataFrame(
[
[feature, view]
for view, feature_list in model.features.items()
for feature in feature_list
],
columns=["feature", "view"],
)
if "features_metadata" in model.model:
if len(list(model.model["features_metadata"][model.views[0]].keys())) > 0:
features_metadata_dict = {
m: pd.concat(
[
| pd.Series(model.model["features_metadata"][m][k]) | pandas.Series |
'''
/*******************************************************************************
* Copyright 2016-2019 Exactpro (Exactpro Systems Limited)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
'''
import numpy
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas
import datetime
import calendar
class RelativeFrequencyChart:
# returns coordinates for each chart column
def get_coordinates(self, data, bins): # bins - chart columns count
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, weights=numpy.zeros_like(self.btt) + 1. / self.btt.size, bins=bins)
return self.x, self.y
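# Hedged usage sketch (synthetic data): every observation is weighted by 1/N, so the returned
# y values are relative frequencies that sum to roughly 1 across the bins.
#   chart = RelativeFrequencyChart()
#   x_edges, y_freqs = chart.get_coordinates([1, 2, 2, 3, 3, 3], bins=3)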
class FrequencyDensityChart:
def get_coordinates_histogram(self, data, bins):
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, bins=bins, density=True)
return self.x, self.y
def get_coordinates_line(self, data):
try:
self.btt = numpy.array(list(data))
self.density = stats.kde.gaussian_kde(list(data))
self.x_den = numpy.linspace(0, data.max(), data.count())
self.density = self.density(self.x_den)
return self.x_den, self.density
except numpy.linalg.linalg.LinAlgError:
return [-1], [-1]
class DynamicChart:
def get_coordinates(self, frame, step_size):
self.plot = {} # chart coordinates
self.dynamic_bugs = []
self.x = []
self.y = []
self.plot['period'] = step_size
if step_size == 'W-SUN':
self.periods = DynamicChart.get_periods(self, frame, step_size) # separates DataFrame to the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0 # cumulative total of defect submission for specific period
for self.period in self.periods:
# checks whether the first day of period is Monday (if not then we change first day to Monday)
if pandas.to_datetime(self.period[0]) < pandas.to_datetime(frame['Created_tr']).min():
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min()) &
(pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min())))
self.y.append(self.cumulative)
else:
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(self.period[0]))
& (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str((self.period[0])))
self.y.append(self.cumulative)
# check whether the date from new DataFrame is greater than date which is specified in settings
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(self.periods[-1][1]):
# processing of days which are out of full period set
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) > pandas.to_datetime(self.periods[-1][1]))
& (pandas.to_datetime(frame['Created_tr']) <=
pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(self.periods[-1][1], format='%Y-%m-%d')) + datetime.timedelta(days=1)))
self.y.append(self.cumulative)
self.dynamic_bugs.append(self.x)
self.dynamic_bugs.append(self.y)
self.plot['dynamic bugs'] = self.dynamic_bugs
self.cumulative = 0
return self.plot
if step_size in ['7D', '10D', '3M', '6M', 'A-DEC']:
self.count0 = 0
self.count1 = 1
self.periods = DynamicChart.get_periods(self, frame, step_size) # DataFrame separation by the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0
self.countPeriodsList = len(self.periods) # count of calculated periods
self.count = 1
if self.countPeriodsList == 1:
if step_size == '7D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr'])
< pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())
+datetime.timedelta(days=7)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+
datetime.timedelta(days=7))) & (pandas.to_datetime(frame['Created_tr'])
<= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '10D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min()) & (pandas.to_datetime(frame['Created_tr']) < pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10))) & (pandas.to_datetime(frame['Created_tr']) <= | pandas.to_datetime(frame['Created_tr']) | pandas.to_datetime |
"""Various PEST(++) control file peripheral operations"""
from __future__ import print_function, division
import os
import warnings
import multiprocessing as mp
import re
import numpy as np
import pandas as pd
pd.options.display.max_colwidth = 100
import pyemu
from ..pyemu_warnings import PyemuWarning
# formatters
# SFMT = lambda x: "{0:>20s}".format(str(x.decode()))
def SFMT(item):
try:
s = "{0:<20s} ".format(item.decode())
except:
s = "{0:<20s} ".format(str(item))
return s
SFMT_LONG = lambda x: "{0:<50s} ".format(str(x))
IFMT = lambda x: "{0:<10d} ".format(int(x))
FFMT = lambda x: "{0:<20.10E} ".format(float(x))
def str_con(item):
if len(item) == 0:
return np.NaN
return item.lower().strip()
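# Hedged illustration of the formatters above (hypothetical values): each one pads its field to a
# fixed width for writing PEST control-file sections.
#   SFMT("parnme1")     # 20-character left-justified string field plus a trailing space
#   FFMT(1.0)           # '1.0000000000E+00' padded to a 20-character field
#   IFMT(3)             # integer left-justified in a 10-character field
#   str_con(" UPPER ")  # -> 'upper'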
pst_config = {}
# parameter stuff
pst_config["tied_dtype"] = np.dtype([("parnme", "U20"), ("partied", "U20")])
pst_config["tied_fieldnames"] = ["parnme", "partied"]
pst_config["tied_format"] = {"parnme": SFMT, "partied": SFMT}
pst_config["tied_converters"] = {"parnme": str_con, "partied": str_con}
pst_config["tied_defaults"] = {"parnme": "dum", "partied": "dum"}
pst_config["par_dtype"] = np.dtype(
[
("parnme", "U20"),
("partrans", "U20"),
("parchglim", "U20"),
("parval1", np.float64),
("parlbnd", np.float64),
("parubnd", np.float64),
("pargp", "U20"),
("scale", np.float64),
("offset", np.float64),
("dercom", np.int),
]
)
pst_config["par_fieldnames"] = (
"PARNME PARTRANS PARCHGLIM PARVAL1 PARLBND PARUBND " + "PARGP SCALE OFFSET DERCOM"
)
pst_config["par_fieldnames"] = pst_config["par_fieldnames"].lower().strip().split()
pst_config["par_format"] = {
"parnme": SFMT,
"partrans": SFMT,
"parchglim": SFMT,
"parval1": FFMT,
"parlbnd": FFMT,
"parubnd": FFMT,
"pargp": SFMT,
"scale": FFMT,
"offset": FFMT,
"dercom": IFMT,
}
pst_config["par_alias_map"] = {
"name": "parnme",
"transform": "partrans",
"value": "parval1",
"upper_bound": "parubnd",
"lower_bound": "parlbnd",
"group": "pargp",
}
pst_config["par_converters"] = {
"parnme": str_con,
"pargp": str_con,
"parval1": np.float,
"parubnd": np.float,
"parlbnd": np.float,
"scale": np.float,
"offset": np.float,
}
pst_config["par_defaults"] = {
"parnme": "dum",
"partrans": "log",
"parchglim": "factor",
"parval1": 1.0,
"parlbnd": 1.1e-10,
"parubnd": 1.1e10,
"pargp": "pargp",
"scale": 1.0,
"offset": 0.0,
"dercom": 1,
}
# parameter group stuff
pst_config["pargp_dtype"] = np.dtype(
[
("pargpnme", "U20"),
("inctyp", "U20"),
("derinc", np.float64),
("derinclb", np.float64),
("forcen", "U20"),
("derincmul", np.float64),
("dermthd", "U20"),
("splitthresh", np.float64),
("splitreldiff", np.float64),
("splitaction", "U20"),
]
)
pst_config["pargp_fieldnames"] = (
"PARGPNME INCTYP DERINC DERINCLB FORCEN DERINCMUL "
+ "DERMTHD SPLITTHRESH SPLITRELDIFF SPLITACTION"
)
pst_config["pargp_fieldnames"] = pst_config["pargp_fieldnames"].lower().strip().split()
pst_config["pargp_format"] = {
"pargpnme": SFMT,
"inctyp": SFMT,
"derinc": FFMT,
"forcen": SFMT,
"derincmul": FFMT,
"dermthd": SFMT,
"splitthresh": FFMT,
"splitreldiff": FFMT,
"splitaction": SFMT,
}
pst_config["pargp_converters"] = {
"pargpnme": str_con,
"inctyp": str_con,
"dermethd": str_con,
"derinc": np.float,
"derinclb": np.float,
"splitaction": str_con,
"forcen": str_con,
"derincmul": np.float,
}
pst_config["pargp_defaults"] = {
"pargpnme": "pargp",
"inctyp": "relative",
"derinc": 0.01,
"derinclb": 0.0,
"forcen": "switch",
"derincmul": 2.0,
"dermthd": "parabolic",
"splitthresh": 1.0e-5,
"splitreldiff": 0.5,
"splitaction": "smaller",
}
# observation stuff
pst_config["obs_fieldnames"] = "OBSNME OBSVAL WEIGHT OBGNME".lower().split()
pst_config["obs_dtype"] = np.dtype(
[
("obsnme", "U20"),
("obsval", np.float64),
("weight", np.float64),
("obgnme", "U20"),
]
)
pst_config["obs_format"] = {
"obsnme": SFMT,
"obsval": FFMT,
"weight": FFMT,
"obgnme": SFMT,
}
pst_config["obs_converters"] = {
"obsnme": str_con,
"obgnme": str_con,
"weight": np.float,
"obsval": np.float,
}
pst_config["obs_defaults"] = {
"obsnme": "dum",
"obsval": 1.0e10,
"weight": 1.0,
"obgnme": "obgnme",
}
pst_config["obs_alias_map"] = {"name": "obsnme", "value": "obsval", "group": "obgnme"}
# prior info stuff
pst_config["null_prior"] = | pd.DataFrame({"pilbl": None, "obgnme": None}, index=[]) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 27 21:48:06 2021
@author: <NAME>
"""
import pandas as pd
import re
import os
import requests
from config import MY_API_KEYS
from datetime import datetime
import string
import time
'''
1. Full-text collection from ScienceDirect
If you have any questions regarding the full-text collection script below, please contact <EMAIL>
Note: in order to use Elsevier APIs (ScienceDirect, Scopus, ...), you need a registered API account at the Elsevier Developer Portal and your
institution should subscribe to the relevant full-text resources (e.g., journals).
Option 1: If you download the citation information from the Science Direct website, then go with the following option 1.
'''
meta_folder = 'name a folder to save meta data here'
# set directory
print('Getting directory...')
cwd = os.getcwd()
dir_meta = os.path.join(cwd, meta_folder)
dir_corpus = os.path.join(cwd, 'corpus')
# load the api key from config file
api_idx = 0
my_api_key = MY_API_KEYS[api_idx]
# if you downloaded the metadata files manually from the ScienceDirect website, use the following
def meta_data_processing(meta_directory, if_save=True):
# meta file processing
print("Processing meta data...")
target_dois = []
corresponding_titles = []
# we check each folder under the meta-file directory
for folder_file in os.listdir(meta_directory):
if '.txt' in folder_file:
with open(os.path.join(meta_directory, folder_file), 'r') as meta_ref:
# read the text content of each meta file
meta_data = meta_ref.read()
# split the text into individual records
meta_records = meta_data.split('\n\n')
for meta_record in meta_records:
# split each individual record to detailed info
meta_record = meta_record.split('\n')
# we record the title and doi number for download
for sub_record in meta_record:
if 'https://doi.org' in sub_record:
# add the doi number to the download list
target_dois += [sub_record]
# since title is the second line of each record
corresponding_titles += [meta_record[1]]
df_integrated_meta = pd.DataFrame(columns=['doi', 'title'])
df_integrated_meta['doi'] = target_dois
df_integrated_meta['title'] = corresponding_titles
if if_save:
df_integrated_meta.to_csv('{}.csv'.format(meta_folder), index=False)
return df_integrated_meta
df_meta = meta_data_processing(dir_meta, True)
# check previously downloaded literature
downloaded_articles = []
for file in os.listdir(dir_corpus):
if '.xml' in file:
downloaded_articles += [file.replace('.xml', '')]
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print('Start downloading at {}'.format(dt_string))
count = 0
target_doi = list(df_meta['doi'])
# remove previously downloaded articles from download queue
target_doi = list(set(target_doi)-set(downloaded_articles))
# collecting full articles
for idx in range(len(target_doi)):
if count % 200 == 0 and count != 0:
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print('{} Full articles have been scanned at {}'.format(count, dt_string))
article_name = target_doi[idx].translate(str.maketrans('', '', string.punctuation))
try:
now = datetime.now()
article_request = requests.get('https://api.elsevier.com/content/article/doi/' + target_doi[idx],
params={'Accept': 'text/xml', 'apiKey': my_api_key})
        time.sleep(0.1)  # API quota: 10 requests/second
except:
count += 1
continue
with open('./corpus/{}.xml'.format(article_name), 'wb') as f:
f.write(article_request.content)
count += 1
'''
Option 2: if you want to use keyword to automatically retrieve articles from Science Direct, then go with the following option 2.
Note: Be aware of your API quota.
'''
operation_count = 0
# query all results including un-subscribed contents
# API quota for ScienceDirect Search V2 : 20000/week + 2 requests/sec
# More details at dev.elsevier.com/api_key_settings.html
start_year = 2022
# we collect every subscribed article from 2000 to now
while start_year >= 2000:
query_dois = []
query_piis = []
query_titles = []
query_resp = requests.get('https://api.elsevier.com/content/search/sciencedirect',
params={'Accept': 'application/json', 'apiKey': my_api_key, 'query': '{type your keyword here}',
'date': str(start_year), 'count': 100, 'subs':'true'})
query_json = query_resp.json()
total_num_articles = int(query_json['search-results']['opensearch:totalResults'])
try:
batch_records = query_json['search-results']['entry'] # which contains a batch of #count articles metadata
except:
print('{} have been processed - Year {}.'.format(operation_count * 100, start_year))
operation_count = 0
start_year -= 1
with open('{}_meta.txt'.format(str(start_year)), 'w', encoding='utf-8') as f:
f.write('\n'.join(
[query_dois[i] + '\t' + query_titles[i] + '\t' + query_piis[i] for i in range(len(query_dois))]))
continue
for entry in batch_records:
try:
query_dois += [entry['dc:identifier'].replace('DOI:', '')]
except:
query_dois += ['None']
try:
query_titles += [entry['dc:title']]
except:
query_titles += ['None']
try:
query_piis += [entry['pii']]
except:
query_piis += ['None']
operation_count += 1
total_num_articles -= 100
    time.sleep(0.7)  # to avoid exceeding the API quota
while total_num_articles > 0:
# if the start value is greater than 6000, we stop collecting this year's articles
        # because 6000 is the ScienceDirect API's global maximum; articles beyond the first 6000 results cannot be retrieved
if operation_count * 100 >= 6000:
print('{} have been processed - Year {}.'.format(operation_count * 100, start_year))
start_year -= 1
operation_count = 0
with open('{}_meta.txt'.format(str(start_year)), 'w', encoding='utf-8') as f:
f.write('\n'.join(
[query_dois[i] + '\t' + query_titles[i] + '\t' + query_piis[i] for i in range(len(query_dois))]))
break
query_resp = requests.get('https://api.elsevier.com/content/search/sciencedirect',
params={'Accept': 'application/json', 'apiKey': my_api_key,
'query': '{amorphous alloy}',
'start': operation_count * 100, 'date': str(start_year), 'count': 100,
'subs': 'true'})
query_json = query_resp.json()
try:
batch_records = query_json['search-results']['entry'] # which contains a batch of #count articles metadata
except:
print('{} have been processed - Year {}.'.format(operation_count * 100, start_year))
start_year -= 1
operation_count = 0
with open('{}_meta.txt'.format(str(start_year)), 'w', encoding='utf-8') as f:
f.write('\n'.join(
[query_dois[i] + '\t' + query_titles[i] + '\t' + query_piis[i] for i in range(len(query_dois))]))
for entry in batch_records:
try:
query_dois += [entry['dc:identifier'].replace('DOI:', '')]
except:
query_dois += ['None']
try:
query_titles += [entry['dc:title']]
except:
query_titles += ['None']
try:
query_piis += [entry['pii']]
except:
query_piis += ['None']
operation_count += 1
total_num_articles -= 100
        time.sleep(0.7)  # to avoid exceeding the API quota
# if the total number of articles from the current year is done retrieving, we continue to the next year
# record article identifiers in a txt file
if total_num_articles <= 0:
print('{} have been processed - Year {}.'.format(operation_count * 100, start_year))
start_year -= 1
operation_count = 0
with open('{}_meta.txt'.format(str(start_year)), 'w', encoding='utf-8') as f:
f.write('\n'.join(
[query_dois[i] + '\t' + query_titles[i] + '\t' + query_piis[i] for i in range(len(query_dois))]))
# create a dataframe to store the metafiles
df_meta = pd.DataFrame(columns=['title', 'doi', 'pii'])
meta_files = os.listdir(os.path.join(dir_meta, 'amorphous_alloy'))
for meta_file in meta_files:
with open(os.path.join(os.path.join(dir_meta, 'amorphous_alloy', meta_file)), 'r', encoding='utf-8') as f:
content = f.read()
rows_content = content.split('\n')
for row in rows_content:
row_list = row.split('\t')
try:
article_title = row_list[1]
except:
article_title = 'None'
try:
article_doi = row_list[0]
except:
article_doi = 'None'
try:
article_pii = row_list[2]
except:
article_pii = 'None'
temp_df = {'title': article_title, 'doi': article_doi, 'pii': article_pii}
            df_meta = df_meta.append(temp_df, ignore_index=True)
df_meta = df_meta.drop_duplicates(subset=['doi'], keep='first')
df_meta = df_meta[df_meta['doi'].str.contains("None") == False]
df_meta.to_csv('query_result{}.csv'.format(datetime.now()), index=False)
target_doi = [i.replace('https://doi.org/', '') for i in list(df_meta['doi'])]
titles = list(df_meta['title'])
'''
For using Scopus API to perform abstract retrieval, see below
Before using this script, you will need to get a list of eids (a type of unique identifier of abstracts) for target articles
'''
list_eids = []
list_abstracts = []
count = 0
idx_api = 0
for eid in eids:
# for every 2000 articles, we save the progress
if count % 2000 == 0 and count != 0:
data = pd.DataFrame(columns=['eid', 'abstract'])
data['eid'] = list_eids
data['abstract'] = list_abstracts
data.to_csv('the name of your csv file%d.csv' % (count), index=False)
try:
# here we send request to Scopus
# be aware of API quota
resp = requests.get("https://api.elsevier.com/content/abstract/eid/" + eid,
headers={'Accept': 'application/json',
'X-ELS-APIKey': my_api_key})
abstract = resp.json()['abstracts-retrieval-response']['coredata']['dc:description']
list_eids += [eid]
list_abstracts += [abstract]
time.sleep(0.7)
except:
time.sleep(0.7)
continue
count += 1
if count % 100 == 0 and count != 0:
print("%d have been collected." % (count))
output = | pd.DataFrame(columns=['eid', 'abstract']) | pandas.DataFrame |
import time
import logging
import pandas as pd
logger = logging.getLogger(__name__)
# this function didn't improve the speed so I didn't use it
def convert_xls_to_csv_in_chunks(excel_file, nrows):
""" Covert excel file to csv in chunks"""
chunks = []
i_chunk = 0
csv_file_name = excel_file+'.csv'
# The first row is the header. We have already read it, so we skip it.
skiprows = 1
df_header = pd.read_excel(excel_file, nrows=1)
logger.info(" *** Start converting Excel file {} to CSV ***".format(excel_file))
while True:
df_chunk = | pd.read_excel(excel_file, nrows=nrows, skiprows=skiprows, header=None) | pandas.read_excel |
import pathlib
import os
# os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cuda"
import pandas as pd
import uuid as uuid_m
from typing import Union
from pathlib import Path
import pooch
path_to_cache_default_test = os.path.dirname(__file__) + '/../../test/data/cache_db/'
# To find the cache file in docker.csv
file = Path(path_to_cache_default_test)
if True:
path_to_cache_default = "/home/smartVP/SMART_VP_server/test/data/cache_db/"
class ModelsIndex:
""" Class tha handles the stored _models
"""
def __init__(self, db_name: object, read_cache: object = True,
path_to_cache=0) -> object:
file_not_found = False
if path_to_cache == 0:
self.path_to_cache = path_to_cache_default
elif path_to_cache == 1:
self.path_to_cache = path_to_cache_default_test
else:
self.path_to_cache = path_to_cache
self.db_name = db_name
if read_cache:
try:
self._read_cache(db_name)
except FileNotFoundError:
file_not_found = True
if read_cache is False or file_not_found:
self.df = pd.DataFrame(columns=['name', 'm_state', 'address', 'uuid', 'loaded'])
# TODO: add types str, str, int, str, bool
self.df.rename_axis(index='modelUrn', inplace=True)
def _write_cache(self):
df_aux = self.df.copy()
df_aux.to_csv(self.path_to_cache + self.db_name)
def _read_cache(self, db_name):
self.df = | pd.read_csv(self.path_to_cache + db_name, index_col=0) | pandas.read_csv |
import numpy as np
import pandas as pd
import statsmodels.api as sm
tsa = sm.tsa # as shorthand
mdata = sm.datasets.macrodata.load().data
type(mdata)
endog = np.log(mdata['m1'])
exog = np.column_stack([np.log(mdata['realgdp']), np.log(mdata['cpi'])])
exog = sm.add_constant(exog, prepend=True)
exog
res1 = sm.OLS(endog, exog).fit()
acf, ci, Q, pvalue = tsa.acf(res1.resid, nlags=4,alpha=.05, qstat=True,unbiased=True)
acf
pvalue
tsa.pacf(res1.resid, nlags=4)
#==============================================================================
# FILTER
#==============================================================================
from scipy.signal import lfilter
data = sm.datasets.macrodata.load()
infl = data.data.infl[1:]
data.data.shape
# get 4 qtr moving average
infl = lfilter(np.ones(4)/4, 1, infl)[4:]
unemp = data.data.unemp[1:]
#To apply the Hodrick-Prescott filter to the data 3, we can do
infl_c, infl_t = tsa.filters.hpfilter(infl)
unemp_c, unemp_t = tsa.filters.hpfilter(unemp)
#The Baxter-King filter 4 is applied as
infl_c = tsa.filters.bkfilter(infl)
unemp_c = tsa.filters.bkfilter(unemp)
#The Christiano-Fitzgerald filter is similarly applied 5
infl_c, infl_t = tsa.filters.cfilter(infl)
unemp_c, unemp_t = tsa.filters.cfilter(unemp)
#plot
INFLA=pd.DataFrame(infl_c,columns=['INFLA'])
UNEMP=pd.DataFrame(unemp_c[4:],columns=['UNEMP'])
pd.concat([INFLA,UNEMP],axis=1).plot()
INFLA=pd.DataFrame(infl_t,columns=['INFLA'])
UNEMP=pd.DataFrame(unemp_t[4:],columns=['UNEMP'])
pd.concat([INFLA,UNEMP],axis=1).plot()
#==============================================================================
# BENCHMARKING TO STANDARDISE LOWER FREQ TO HIGHER FREQ
#==============================================================================
iprod_m = np.array([ 87.4510, 86.9878, 85.5359, #INDUSTRIAL PRODUCTION INDEX
84.7761, 83.8658, 83.5261, 84.4347,
85.2174, 85.7983, 86.0163, 86.2137,
86.7197, 87.7492, 87.9129, 88.3915,
88.7051, 89.9025, 89.9970, 90.7919,
90.9898, 91.2427, 91.1385, 91.4039,
92.5646])
gdp_q = np.array([14049.7, 14034.5, 14114.7,14277.3, 14446.4, 14578.7, 14745.1,14871.4])
gdp_m = tsa.interp.dentonm(iprod_m, gdp_q,freq="qm")
a=[]
[a.extend([i]*4) for i in gdp_q]
x=pd.DataFrame([iprod_m,gdp_m],index=['IPROD','GDP MONTHLY']).T
x.plot(secondary_y='IPROD')
| pd.DataFrame([gdp_m,a],index=['monthly','quarterly']) | pandas.DataFrame |
"""
Background:
===========
CTDpNUT_ncgen.py
Purpose:
========
Creates EPIC flavored, merged .nc files downcast ctd and nutrient data.
Data assumes a sparse grid for nutrient data, scales it up to the full 1m grid of
ctd data and then matches on depth. Finally, it writes a new file (mirrored to the
ctd file but with additional variables defined by the nut config file)
Todo: switch from EPIC to CF, copy global attributes and ctd files from CTD/Nut casts instead
of specifying config files.
File Format:
============
- S.Bell - epic ctd and epic nut data
- Pavlof DB for cruise/cast metadata
(Very Long) Example Usage:
==========================
History:
========
Compatibility:
==============
python >=3.6
python 2.7 - ?
"""
from __future__ import absolute_import, division, print_function
import argparse
import datetime
import os
import sys
from shutil import copyfile
import numpy as np
import pandas as pd
from netCDF4 import Dataset
import io_utils.ConfigParserLocal as ConfigParserLocal
import io_utils.EcoFOCI_netCDF_write as EcF_write
from calc.EPIC2Datetime import Datetime2EPIC, get_UDUNITS
from io_utils.EcoFOCI_netCDF_read import EcoFOCI_netCDF
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__created__ = datetime.datetime(2018, 6, 14)
__modified__ = datetime.datetime(2018, 6, 14)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = "netCDF", "meta", "header", "QC", "bottle", "discreet"
"""------------------------------- MAIN--------------------------------------------"""
parser = argparse.ArgumentParser(
description="Merge and archive nutrient csv data and 1m downcast data"
)
parser.add_argument(
"CruiseID", metavar="CruiseID", type=str, help="provide the cruiseid"
)
parser.add_argument(
"ctd_ncpath", metavar="ctd_ncpath", type=str, help="ctd netcdf directory"
)
parser.add_argument(
"nut_ncpath", metavar="nut_ncpath", type=str, help="nutrient netcdf directory"
)
parser.add_argument(
"output",
metavar="output",
type=str,
help="full path to output folder (files will be generated there",
)
parser.add_argument(
"config_file_name",
metavar="config_file_name",
type=str,
default="",
help="full path to config file - ctdpnut_epickeys.yaml",
)
parser.add_argument("-v", "--verbose", action="store_true", help="output messages")
parser.add_argument(
"-csv", "--csv", action="store_true", help="output merged data as csv"
)
args = parser.parse_args()
# Get all netcdf files from the ctd and nutrient directories
ctd_ncfiles = [
args.ctd_ncpath + f for f in os.listdir(args.ctd_ncpath) if f.endswith(".nc")
]
nut_ncfiles = [
args.nut_ncpath + f for f in os.listdir(args.nut_ncpath) if f.endswith(".nc")
]
# get config file for output content
if args.config_file_name.split(".")[-1] in ["json", "pyini"]:
EPIC_VARS_dict = ConfigParserLocal.get_config(args.config_file_name, "json")
elif args.config_file_name.split(".")[-1] in ["yaml"]:
EPIC_VARS_dict = ConfigParserLocal.get_config(args.config_file_name, "yaml")
else:
sys.exit("Exiting: config files must have .pyini, .json, or .yaml endings")
# loop through all ctd files - skip files without downcast for now
for ind, cast in enumerate(ctd_ncfiles):
nut_cast = cast.split("/")[-1].replace("_ctd", "_nut")
print(
"Merging {ctdfile} and {nutfile}".format(
ctdfile=cast, nutfile=(args.nut_ncpath + nut_cast)
)
)
###nc readin/out
df = EcoFOCI_netCDF(cast)
global_atts = df.get_global_atts()
vars_dic = df.get_vars()
ncdata = df.ncreadfile_dic(output="vector")
ncdata_coords = [ncdata.pop(x, "-9999") for x in ["time", "time2", "lat", "lon"]]
df.close()
if "depth" in vars_dic:
ncdata["dep"] = ncdata["depth"]
### read paired nut file
try:
ncdata_nut = {}
dfn = EcoFOCI_netCDF(args.nut_ncpath + nut_cast)
global_atts_nut = dfn.get_global_atts()
vars_dic_nut = dfn.get_vars()
ncdata_nut = dfn.ncreadfile_dic(output="vector")
dfn.close()
    except Exception:
print("No matched Nutrient Data from cast:ctd{}".format(global_atts["CAST"]))
print("Copy CTD file to output dir")
copyfile(cast, args.output + cast.split("/")[-1])
if args.csv:
nc_only = | pd.DataFrame.from_dict(ncdata) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
"""Count the number of :term:`read alignments<alignment>` and calculate
read densities (in :term:`RPKM`) over genes, correcting gene boundaries
for overlap with other genes or regions specified in a :term:`mask file`.
:term:`Counts <counts>` and densities are calculated separately per gene for
exons, 5' UTRs, coding regions, and 3' UTRs. In addition, positions overlapped
by multiple genes are excluded, as are positions annotated in
:term:`mask annotation files<crossmap>`, if one is provided.
The script's operation is divided into three subprograms:
Generate
    The :func:`generate <do_generate>` mode pre-processes a genome annotation
as follows:
#. All genes whose transcripts share exact exons are collapsed to
"merged" genes.
#. Positions covered by more than one merged gene on the same strand
       are excluded from analysis, and subtracted from each merged gene.
#. Remaining positions in each merged gene are then divided
into the following groups:
*exon*
all positions in all transcripts mapping to the merged gene
*CDS*
positions which appear in coding regions in *all* transcript
isoforms mapping to the merged gene. i.e. These positions
are never part of a fiveprime or threeprime UTR in *any*
transcript mapping to the merged gene
*UTR5*
positions which are annotated only as *5' UTR* in all
transcript isoforms mapping to the merged gene
*UTR3*
            positions which are annotated only as *3' UTR* in all
transcript isoforms mapping to the merged gene
*masked*
positions excluded from analyses as directed in an optional
:term:`mask file`
The following files are output, where `OUTBASE` is a name supplied
by the user:
OUTBASE_gene.positions
Tab-delimited text file. Each line is a merged gene, and columns
indicate the genomic coordinates and lengths of each of the position
sets above.
OUTBASE_transcript.positions
Tab-delimited text file. Each line is a transcript, and columns
indicate the genomic coordinates and lengths of each of the position
sets above.
OUTBASE_gene_REGION.bed
`BED`_ files showing position sets for `REGION`,
where `REGION` is one of *exon*, *CDS*, *UTR5*, and *UTR3* or
*masked*. These contain the same information in
``OUTBASE_gene.positions``, but can be visualized easily in a
:term:`genome browser`
Count
The :func:`count <do_count>` mode counts the number and density of
read alignments in each sub-region (*exon*, *CDS*, *UTR5*, and *UTR3*)
of each gene.
Chart
The :func:`chart <do_chart>` mode takes output from one or more samples
run under :func:`count <do_count>` mode and generates several tables and
charts that provide broad overviews of the data.
See command-line help for each subprogram for details on each mode
See also
--------
:mod:`~plastid.bin.counts_in_region` script
Calculate the number and density of :term:`read alignments <alignment>`
covering any set of regions of interest, making no corrections for gene
boundaries.
"""
import os
import sys
import argparse
import itertools
import copy
import inspect
import gc
import warnings
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.stats
from plastid.util.scriptlib.argparsers import AnnotationParser,\
AlignmentParser,\
MaskParser,\
PlottingParser, \
BaseParser
from plastid.util.scriptlib.help_formatters import format_module_docstring
from plastid.genomics.roitools import positions_to_segments, SegmentChain
from plastid.genomics.genome_hash import GenomeHash
from plastid.util.io.openers import opener, get_short_name, argsopener, read_pl_table
from plastid.util.io.filters import NameDateWriter
from plastid.util.services.sets import merge_sets
from plastid.util.services.decorators import skipdoc
from plastid.util.services.exceptions import DataWarning, FileFormatWarning
import numpy.ma as ma
from plastid.plotting.plots import scatterhist_xy, ma_plot, clean_invalid
from plastid.plotting.colors import process_black
from plastid.readers.bigbed import BigBedReader
printer = NameDateWriter(get_short_name(inspect.stack()[-1][1]))
#===============================================================================
# 'generate' subprogram
#===============================================================================
@skipdoc
def write_output_files(table, title, args):
"""Write gene info table from :py:func:`do_generate` to several output files:
OUTBASE_gene.positions
Tab-delimited text file. Each line is a merged gene, and columns
indicate the genomic coordinates and lengths of each of the position
sets above.
OUTBASE_transcript.positions
Tab-delimited text file. Each line is a transcript, and columns
indicate the genomic coordinates and lengths of each of the position
sets above.
OUTBASE_gene_REGION.bed
`BED`_ files showing position sets for `REGION`,
where `REGION` is one of *exon*, *utr5*, *cds*, *utr3*, or
*masked*. These contain the same information in
``OUTBASE_gene.positions``, but can be visualized easily in a
:term:`genome browser`
Parameters
----------
table : :class:`pandas.DataFrame`
Gene info table made in :py:func:`do_generate`
title : str
Title ("gene" or "transcript")
args : :py:class:`argparse.Namespace`
Command-line arguments
"""
keys = ("utr5", "utr3", "cds", "masked", "exon")
bed_columns = ["%s_bed" % K for K in keys]
bedfiles = {X: argsopener("%s_%s_%s.bed" % (args.outbase, title, X), args) for X in keys}
for _, row in table[bed_columns].iterrows():
for k in keys:
bedfiles[k].write(row["%s_bed" % k])
for k in bedfiles:
bedfiles[k].close()
pos_out = argsopener("%s_%s.positions" % (args.outbase, title), args)
table.to_csv(
pos_out,
sep = "\t",
header = True,
index = False,
na_rep = "nan",
float_format = "%.8f",
columns = [
"region", "exon", "utr5", "cds", "utr3", "masked", "exon_unmasked",
"transcript_ids"
]) # yapf: disable
pos_out.close()
def merge_genes(tx_ivcs):
"""Merge genes whose transcripts share exons into a combined, "merged" gene
Parameters
----------
tx_ivcs : dict
Dictionary mapping unique transcript IDs to |Transcripts|
Returns
-------
dict
Dictionary mapping raw gene names to the names of the merged genes
"""
dout = {}
exondicts = {"+": {}, "-": {}}
# backmap exons to genes as tuples
printer.write("Mapping exons to genes ...")
for txid in tx_ivcs.keys():
my_segmentchain = tx_ivcs[txid]
my_gene = my_segmentchain.get_gene()
chrom = my_segmentchain.spanning_segment.chrom
strand = my_segmentchain.spanning_segment.strand
for my_iv in my_segmentchain:
start = my_iv.start
end = my_iv.end
# separate exons by chromosome and strand to reduce
# downstream comparisons in merging
if chrom not in exondicts[strand]:
exondicts[strand][chrom] = {}
try:
exondicts[strand][chrom][(start, end)].append(my_gene)
except KeyError:
exondicts[strand][chrom][(start, end)] = [my_gene]
for strand in exondicts:
for chrom in exondicts[strand]:
exondicts[strand][chrom] = {K: set(V) for K, V in exondicts[strand][chrom].items()}
printer.write("Flattening genes on %s(%s) ..." % (chrom, strand))
gene_groups = merge_sets(exondicts[strand][chrom].values(), printer=printer)
for group in gene_groups:
merged_name = ",".join(sorted(group))
for gene in group:
dout[gene] = merged_name
printer.write("Flattened to %s groups." % len(dout))
return dout
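# Illustration of the naming rule above (gene names are made up): if gene A and
# gene B share one exon, and gene B and gene C share another, all three collapse
# into a single merged gene whose name is the sorted, comma-joined member list:
# >>> ",".join(sorted({"geneC", "geneA", "geneB"}))
# 'geneA,geneB,geneC'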
def process_partial_group(transcripts, mask_hash, printer):
"""Correct boundaries of merged genes, as described in :func:`do_generate`
Parameters
----------
transcripts : dict
Dictionary mapping unique transcript IDs to |Transcripts|.
This set should be complete in the sense that it should contain
all transcripts that have any chance of mutually overlapping
each other (e.g. all on same chromosome and strand).
mask_hash : |GenomeHash|
|GenomeHash| of regions to exclude from analysis
Returns
-------
:class:`pandas.DataFrame`
Table of merged gene positions
:class:`pandas.DataFrame`
Table of adjusted transcript positions
:class:`dict`
Dictionary mapping raw gene names to merged gene names
"""
gene_table = {
"region" : [],
"transcript_ids" : [],
"exon_unmasked" : [],
"exon" : [],
"masked" : [],
"utr5" : [],
"cds" : [],
"utr3" : [],
"exon_bed" : [],
"utr5_bed" : [],
"cds_bed" : [],
"utr3_bed" : [],
"masked_bed" : [],
} # yapf: disable
# data table for transcripts
transcript_table = {
"region" : [],
"exon" : [],
"utr5" : [],
"cds" : [],
"utr3" : [],
"masked" : [],
"exon_unmasked" : [],
"transcript_ids" : [],
"exon_bed" : [],
"utr5_bed" : [],
"cds_bed" : [],
"utr3_bed" : [],
"masked_bed" : [],
} # yapf: disable
keycombos = list(itertools.permutations(("utr5", "cds", "utr3"), 2))
# merge genes that share exons & write output
printer.write("Collapsing genes that share exons ...")
merged_genes = merge_genes(transcripts)
# remap transcripts to merged genes
# and vice-versa
merged_gene_tx = {}
tx_merged_gene = {}
printer.write("Mapping transcripts to merged genes...")
for txid in transcripts:
my_tx = transcripts[txid]
my_gene = my_tx.get_gene()
my_merged = merged_genes[my_gene]
tx_merged_gene[txid] = my_merged
try:
merged_gene_tx[my_merged].append(txid)
except KeyError:
merged_gene_tx[my_merged] = [txid]
# flatten merged genes
printer.write("Flattening merged genes, masking positions, and labeling subfeatures ...")
for n, (gene_id, my_txids) in enumerate(merged_gene_tx.items()):
if n % 1000 == 0 and n > 0:
printer.write(" %s genes ..." % n)
my_gene_positions = []
chroms = []
strands = []
for my_txid in my_txids:
my_segmentchain = transcripts[my_txid]
chroms.append(my_segmentchain.chrom)
strands.append(my_segmentchain.strand)
my_gene_positions.extend(my_segmentchain.get_position_list())
        try:
            assert len(set(chroms)) == 1
        except AssertionError:
            printer.write(
                "Skipping gene %s which contains multiple chromosomes: %s" %
                (gene_id, ",".join(chroms))
            )
            # actually skip the gene, as the message above promises
            continue
        try:
            assert len(set(strands)) == 1
        except AssertionError:
            printer.write(
                "Skipping gene %s which contains multiple strands: %s" %
                (gene_id, ",".join(strands))
            )
            continue
my_gene_positions = set(my_gene_positions)
gene_ivc_raw = SegmentChain(
*positions_to_segments(chroms[0], strands[0], my_gene_positions)
)
gene_table["region"].append(gene_id)
gene_table["transcript_ids"].append(",".join(sorted(my_txids)))
gene_table["exon_unmasked"].append(gene_ivc_raw)
printer.write(" %s genes total." % (n + 1))
# mask genes
printer.write("Masking positions and labeling subfeature positions ...")
gene_hash = GenomeHash(gene_table["exon_unmasked"], do_copy=False)
for n, (gene_id, gene_ivc_raw) in enumerate(zip(gene_table["region"],
gene_table["exon_unmasked"])):
if n % 2000 == 0:
printer.write(" %s genes ..." % n)
my_chrom = gene_ivc_raw.spanning_segment.chrom
my_strand = gene_ivc_raw.spanning_segment.strand
masked_positions = []
nearby_genes = gene_hash[gene_ivc_raw]
# don't mask out positions from identical gene
gene_ivc_raw_positions = gene_ivc_raw.get_position_set()
nearby_genes = [X for X in nearby_genes if X.get_position_set() != gene_ivc_raw_positions]
for gene in nearby_genes:
masked_positions.extend(gene.get_position_list())
nearby_masks = mask_hash[gene_ivc_raw]
for mask in nearby_masks:
masked_positions.extend(mask.get_position_list())
masked_positions = set(masked_positions)
gene_positions_raw = gene_ivc_raw.get_position_set()
mask_ivc_positions = gene_positions_raw & masked_positions
total_mask_ivc = SegmentChain(
*positions_to_segments(my_chrom, my_strand, mask_ivc_positions),
ID=gene_id
)
gene_table["masked"].append(total_mask_ivc)
gene_table["masked_bed"].append(total_mask_ivc.as_bed())
gene_post_mask = gene_positions_raw - masked_positions
gene_post_mask_ivc = SegmentChain(
*positions_to_segments(my_chrom, my_strand, gene_post_mask),
ID=gene_id
)
gene_table["exon"].append(gene_post_mask_ivc)
gene_table["exon_bed"].append(gene_post_mask_ivc.as_bed())
masked_positions = total_mask_ivc.get_position_set()
tmp_positions = {
"utr5" : set(),
"cds" : set(),
"utr3" : set(),
} # yapf: disable
txids = sorted(merged_gene_tx[gene_id])
chrom = gene_post_mask_ivc.chrom
strand = gene_post_mask_ivc.strand
# pool transcript positions
for txid in txids:
transcript = transcripts[txid]
# yapf: disable
utr5pos = transcript.get_utr5().get_position_set()
cdspos = transcript.get_cds().get_position_set()
utr3pos = transcript.get_utr3().get_position_set()
tmp_positions["utr5"] |= utr5pos
tmp_positions["cds"] |= cdspos
tmp_positions["utr3"] |= utr3pos
# yapf: enable
# eliminate positions in which CDS & UTRs overlap from each transcript
for txid in txids:
transcript = transcripts[txid]
transcript_positions = {
"utr5": transcript.get_utr5().get_position_set(),
"cds" : transcript.get_cds().get_position_set(),
"utr3": transcript.get_utr3().get_position_set(),
} # yapf: disable
for key1, key2 in keycombos:
transcript_positions[key1] -= tmp_positions[key2]
transcript_positions[key1] -= masked_positions
transcript_table["region"].append(txid)
# all unmasked positions
my_chain = SegmentChain(
*positions_to_segments(
chrom,
strand,
transcript.get_position_set() - masked_positions
),
ID=txid
)
transcript_table["exon"].append(str(my_chain))
transcript_table["exon_bed"].append(my_chain.as_bed())
# all uniquely-labeled unmasked positions
for k, v in transcript_positions.items():
my_chain = SegmentChain(*positions_to_segments(chrom, strand, v), ID=txid)
transcript_table[k].append(str(my_chain))
transcript_table["%s_bed" % k].append(my_chain.as_bed())
total_mask_ivc.attr["ID"] = txid
transcript_table["masked"].append(str(total_mask_ivc))
transcript_table["masked_bed"].append(total_mask_ivc.as_bed())
transcript_table["exon_unmasked"].append(str(transcript))
transcript_table["transcript_ids"].append(txid)
tmp_positions2 = copy.deepcopy(tmp_positions)
for k1, k2 in keycombos:
tmp_positions[k1] -= tmp_positions2[k2]
tmp_positions[k1] -= masked_positions
for k in (tmp_positions.keys()):
my_chain = SegmentChain(
*positions_to_segments(chrom, strand, tmp_positions[k]), ID=gene_id
)
gene_table[k].append(str(my_chain))
gene_table["%s_bed" % k].append(my_chain.as_bed())
printer.write(" %s genes total." % (n + 1))
# cast SegmentChains/Transcripts to strings to keep numpy from unpacking them
conversion_keys = ["exon", "utr5", "cds", "utr3", "masked", "exon_unmasked"]
for k in conversion_keys:
gene_table[k] = [str(X) for X in gene_table[k]]
transcript_table[k] = [str(X) for X in transcript_table[k]]
gene_df = pd.DataFrame(gene_table)
gene_df.sort_values(["region"], inplace=True)
transcript_df = pd.DataFrame(transcript_table)
transcript_df.sort_values(["region"], inplace=True)
return gene_df, transcript_df, merged_genes
def do_generate(args, annotation_parser, mask_parser):
"""Generate gene position files from gene annotations.
1. Genes whose transcripts share exons are first collapsed into merged
genes.
2. Within merged genes, all positions are classified. All positions are
included in a set called *exon*. All positions that appear as coding
regions in all transcripts (i.e. are never part of a 5'UTR or 3'UTR)
included in a set called *CDS*. Similarly, all positions that appear
as 5' UTR or 3' UTR in all transcripts are included in sets called
*UTR5* or *UTR3*, respectively.
3. Genomic positions that are overlapped by multiple merged genes are
excluded from the position sets for those genes.
4. If a :term:`mask file` is supplied, positions annotated in the mask file
are also excluded
5. Output is given as a series of `BED`_ files and a `positions` file
containing the same data.
Parameters
----------
args : :py:class:`argparse.Namespace`
command-line arguments for ``generate`` subprogram
"""
# variables for transcript <-> merged gene mapping
transcripts = {}
merged_genes = {}
# data table for merged genes
gene_table = pd.DataFrame({
"region" : [],
"transcript_ids" : [],
"exon_unmasked" : [],
"exon" : [],
"masked" : [],
"utr5" : [],
"cds" : [],
"utr3" : [],
"exon_bed" : [],
"utr5_bed" : [],
"cds_bed" : [],
"utr3_bed" : [],
"masked_bed" : [],
}) # yapf: disable
# data table for transcripts
transcript_table = pd.DataFrame({
"region" : [],
"exon" : [],
"utr5" : [],
"cds" : [],
"utr3" : [],
"exon_bed" : [],
"utr5_bed" : [],
"cds_bed" : [],
"utr3_bed" : [],
"masked" : [],
"exon_unmasked" : [],
"transcript_ids" : [],
"masked_bed" : [],
}) # yapf: disable
# data
is_sorted = (args.sorted == True) or \
(args.tabix == True) or \
(args.annotation_format == "BigBed")
annotation_message = (
"`cs` relies upon relationships between transcripts and genes "
"to collapse transcripts to genes for quantitation. "
"Gene-transcript relationships are not generally preserved in BED "
"or BigBed files, and a `gene_id` column could not be found in the "
"input data. This may yield nonsensical results in the output. "
"Consider either (1) using a GTF2 or GFF3 file or (2) creating an "
"extended BED or BigBed file with a `gene_id` column."
)
if args.annotation_format == "BED":
if not isinstance(args.bed_extra_columns, list) \
or 'gene_id' not in args.bed_extra_columns:
warnings.warn(annotation_message, FileFormatWarning)
elif args.annotation_format == "BigBed":
reader = BigBedReader(args.annotation_files[0])
if 'gene_id' not in reader.extension_fields:
warnings.warn(annotation_message, FileFormatWarning)
source = iter(
annotation_parser.get_transcripts_from_args(args, printer=printer)
)
mask_hash = mask_parser.get_genome_hash_from_args(args)
# loop conditions
last_chrom = None
do_loop = True
# to save memory, we process one chromosome at a time if input file is sorted
# knowing that at that moment all transcript parts are assembled
while do_loop == True:
try:
tx = next(source)
except StopIteration:
do_loop = False
try:
# if chromosome is completely processed or EOF
if (is_sorted and tx.spanning_segment.chrom != last_chrom) \
or do_loop == False:
if do_loop == True:
source = itertools.chain([tx], source)
if last_chrom is not None or do_loop == False:
printer.write("Merging genes on chromosome/contig '%s'" % last_chrom)
my_gene_table, my_transcript_table, my_merged_genes = process_partial_group(
transcripts,
mask_hash,
printer,
)
gene_table = pd.concat((gene_table, my_gene_table), axis=0)
transcript_table = pd.concat((transcript_table, my_transcript_table), axis=0)
merged_genes.update(my_merged_genes)
del transcripts
gc.collect()
del gc.garbage[:]
transcripts = {}
# reset last chrom
last_chrom = tx.spanning_segment.chrom
# otherwise, remember transcript
else:
transcripts[tx.get_name()] = tx
# exit gracefully if no transcripts found
except UnboundLocalError:
pass
# write output
printer.write("Writing output ...")
merged_fn = "%s_merged.txt" % args.outbase
number_merged = len(set(merged_genes.values()))
printer.write(
"Collapsed %s genes to %s merged groups. Writing to %s" %
(len(merged_genes), number_merged, merged_fn)
)
fout = argsopener(merged_fn, args, "w")
for gene, merged_name in sorted(merged_genes.items()):
fout.write("%s\t%s\n" % (gene, merged_name))
fout.close()
printer.write("Writing gene table and BED files ...")
write_output_files(gene_table, "gene", args)
printer.write("Writing transcript summary table and BED files ...")
write_output_files(transcript_table, "transcript", args)
printer.write("Done!")
#===============================================================================
# 'count' subprogram
#===============================================================================
def do_count(args, alignment_parser):
"""Count the number and density covering each merged gene in an annotation
made made using the `generate` subcommand).
Parameters
----------
args : :py:class:`argparse.Namespace`
command-line arguments for ``count`` subprogram
"""
    # we expect many zero-length segmentchains, so silence these warnings for now
warnings.filterwarnings(
"ignore",
".*zero-length SegmentChain.*",
)
keys = ("exon", "utr5", "cds", "utr3")
column_order = ["region"]
gene_positions = read_pl_table(args.position_file)
# read count files
ga = alignment_parser.get_genome_array_from_args(args, printer=printer)
total_counts = ga.sum()
normconst = 1000.0 * 1e6 / total_counts
printer.write("Dataset has %s counts in it." % total_counts)
printer.write("Tallying genes ...")
dtmp = {"region": []}
for x in keys:
for y in ("reads", "length", "rpkm"):
label = "%s_%s" % (x, y)
dtmp[label] = []
column_order.append(label)
for i, name in enumerate(gene_positions["region"]):
dtmp["region"].append(name)
if i % 500 == 0:
printer.write("Processed %s genes ..." % i)
for k in keys:
ivc = SegmentChain.from_str(gene_positions[k][i])
total = sum(ivc.get_counts(ga))
length = ivc.length
rpkm = (normconst * total / length) if length > 0 else numpy.nan
dtmp["%s_reads" % k].append(total)
dtmp["%s_length" % k].append(length)
dtmp["%s_rpkm" % k].append(rpkm)
fout = argsopener("%s.txt" % args.outbase, args, "w")
dtmp = | pd.DataFrame(dtmp) | pandas.DataFrame |
import pandas as pd
import fnmatch
import numpy as np
import os, glob
from astropy.table import Table
from astropy.time import Time
import psycopg2
#from psycopg2 import pool
from datetime import datetime
from bokeh.models import ColumnDataSource
conn = psycopg2.connect(host="db.replicator.dev-cattle.stable.spin.nersc.org", port="60042", database="desi_dev", user="desi_reader", password="<PASSWORD>")
petal_loc_to_id = {0:'4',1:'5',2:'6',3:'3',4:'8',5:'10',6:'11',7:'2',8:'7',9:'9'}
def get_dfs(start, end):
exp_cols = ['id','data_location','targtra','targtdec','skyra','skydec','deltara','deltadec','reqtime','exptime','flavor','program','lead','focus','airmass', 'mountha','zd','mountaz','domeaz','spectrographs','s2n','transpar','skylevel','zenith','mjd_obs','date_obs','night','moonra','moondec','parallactic','mountel','dome','telescope','tower','hexapod','adc','sequence','obstype']
exp_df = pd.read_sql_query("SELECT * FROM exposure WHERE date_obs >= '{}' AND date_obs < '{}'".format(start, end), conn)
exp_df_new = exp_df[exp_cols]
exp_df_new = exp_df_new.rename(columns={'id':'EXPID'})
exp_df_base = exp_df_new[['EXPID','date_obs']]
print('get telem data')
dfs = []
for d in ['telescope','tower','dome']:
get_keys = True
i = 0
while get_keys:
try:
t_keys = list(exp_df_new.iloc[i][d].keys())
get_keys = False
except:
i += 1
dd = {}
for t in t_keys:
dd[t] = []
for item in exp_df_new[d]:
if item is not None:
for key in t_keys:
try:
dd[key].append(item[key])
except:
dd[key].append(None)
else:
for key, l in dd.items():
dd[key].append(None)
df = pd.DataFrame.from_dict(dd)
dfs.append(df)
for i, df in enumerate(dfs):
df.reset_index(inplace=True, drop=True)
dfs[i] = df
telem_df = pd.concat(dfs, axis=1)
telem_df = pd.concat([exp_df_base, telem_df], axis=1)
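    # The loop above effectively expands each dict-valued telemetry column into
    # one DataFrame column per key (key names below are illustrative only):
    # >>> pd.DataFrame.from_dict({'mirror_temp': [12.3, None], 'wind_speed': [4.1, 5.0]})
    # yields a two-row frame with columns 'mirror_temp' and 'wind_speed', with
    # None/NaN wherever an exposure had no reading.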
print('get_coord_data')
nights = np.unique(exp_df_new['night'])
dates = [int(d) for d in nights[np.isfinite(nights)]]
coord_dir = '/global/cfs/cdirs/desi/spectro/data/'
coord_df = []
for date in dates:
coord_files = glob.glob(coord_dir+'{}/*/coordinates-*'.format(date))
for f in coord_files:
exposure = int(os.path.splitext(os.path.split(f)[0])[0][-6:])
try:
df = Table.read(f, format='fits').to_pandas()
good = df['OFFSET_0']
df = df[['PETAL_LOC', 'DEVICE_LOC','TARGET_RA', 'TARGET_DEC','FA_X', 'FA_Y','OFFSET_0','OFFSET_1']]
df = df.rename(columns={'OFFSET_1':'OFFSET_FINAL','FA_X':'FIBERASSIGN_X','FA_Y':'FIBERASSIGN_Y'})
df['EXPID'] = exposure
coord_df.append(df)
            except Exception:
pass
coord_df = pd.concat(coord_df)
print('done with dfs')
ptl_dbs = {}
for ptl in petal_loc_to_id.values():
print('Getting data for Petal {}'.format(ptl))
ptl_dbs[ptl] = pd.read_sql_query("SELECT * FROM positioner_moves_p{} WHERE time_recorded >= '{}' AND time_recorded < '{}'".format(ptl, start,end),conn)
return exp_df_base, telem_df, coord_df, ptl_dbs
def get_fiberpos_data(pos, coord_df, fiberpos):
init_df = fiberpos[fiberpos.CAN_ID == pos]
ptl_loc = int(np.unique(init_df.PETAL))
ptl = petal_loc_to_id[ptl_loc]
dev = int(np.unique(init_df.DEVICE))
init_df.drop(['PETAL_LOC','DEVICE_LOC'], axis=1, inplace=True)
pos_df = pd.merge(coord_df, init_df, how='inner',left_on=['PETAL_LOC','DEVICE_LOC'], right_on=['PETAL','DEVICE'])
return ptl, dev, pos_df
def add_posmove_telemetry(ptl, dev, start, end, exp_df_base, ptl_dbs):
df = ptl_dbs[ptl]
df = df[df.device_loc == dev]
idx = []
for time in exp_df_base.date_obs:
ix = np.argmin(np.abs(df['time_recorded'] - time))
idx.append(ix)
df = df.iloc[idx]
df.reset_index(inplace=True, drop=True)
exp_df_base.reset_index(inplace=True, drop=True)
pos_telem = pd.concat([exp_df_base, df], axis=1)
return pos_telem
def save_data(pos, df, mode):
save_dir = os.path.join(os.environ['DATA_DIR'],'positioners')
filen = os.path.join(save_dir, '{}.csv'.format(pos))
if mode == 'new':
final_df = df
elif mode == 'update':
old_df = | pd.read_csv(filen) | pandas.read_csv |
import numpy as np
import cvxpy as cp
import os
import pandas as pd
from mlopt import settings as stg
import joblib
# def args_norms(expr):
# """Calculate norm of the arguments in a cvxpy expression"""
# if expr.args:
# norms = []
# # Expression contains arguments
# for arg in expr.args:
# # norms += args_norms(arg)
# norms += [cp.norm(arg, np.inf).value]
# else:
# norms = [0.]
# return norms
# def tight_components(con):
# """Return which components are tight in the constraints."""
# # rel_norm = np.amax([np.linalg.norm(np.atleast_1d(a.value), np.inf)
# # for a in con.expr.args])
# # If Equality Constraint => all tight
# if type(con) in [Equality, Zero]:
# return np.full(con.shape, True)
#
# # Otherwise return violation
# rel_norm = 1.0
# return np.abs(con.expr.value) <= stg.TIGHT_CONSTRAINTS_TOL * (1 + rel_norm)
def get_n_processes(max_n=np.inf):
"""Get number of processes from current cps number
Parameters
----------
max_n: int
Maximum number of processes.
Returns
-------
float
Number of processes to use.
"""
try:
# Check number of cpus if we are on a SLURM server
n_cpus = int(os.environ["SLURM_NPROCS"])
except KeyError:
n_cpus = joblib.cpu_count()
n_proc = min(max_n, n_cpus)
return n_proc
def n_features(df):
"""
Get number of features in dataframe
where cells contain tuples.
Parameters
----------
df : pandas DataFrame
Dataframe.
Returns
-------
int
Number of features.
"""
return sum(np.atleast_1d(x).size for x in df.iloc[0])
# n = 0
# for c in df.columns.values:
#
# if isinstance(df[c].iloc[0], list): # If list add length
# n += len(df[c].iloc[0])
# else: # If number add 1
# n += 1
# return n
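# Illustrative check of the unrolling behaviour (example data made up):
# >>> import pandas as pd
# >>> n_features(pd.DataFrame({"a": [(1.0, 2.0)], "b": [3.0]}))
# 3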
def pandas2array(X):
"""
Unroll dataframe elements to construct 2d array in case of
cells containing tuples.
"""
if isinstance(X, np.ndarray):
# Already numpy array. Return it.
return X
else:
if isinstance(X, pd.Series):
X = | pd.DataFrame(X) | pandas.DataFrame |
from gym_AlphaBuilding import medOff_env
from dist_util import vav_obs
import numpy as np
import pandas as pd
import os
import argparse
import torch as T
from tensorboardX import SummaryWriter
def train(vavs, env, seed, result_save_path, exp_name):
ResultName = '{}_run{}'.format(exp_name, seed)
writer = SummaryWriter(comment=ResultName)
np.random.seed(seed)
with open(os.path.join(result_save_path, "run{}.log".format(seed)), "a") as f:
ahuSAT = 12.75
ahuSAT_scaled = 2.0 * (ahuSAT - env.action_space.low[0]) / \
(env.action_space.high[0] - env.action_space.low[0]) - 1
tempSP = 22
# For the whole HVAC system
best_score = np.NINF
total_reward_history = []
total_energy_history = []
total_comfort_history = []
total_uncDegHour_history = []
total_crt_loss_history = []
total_act_loss_history = []
# For each VAV, to determine when to save the network
vavs_best_score = []
vavs_reward_history = []
for _ in range(9):
vavs_best_score.append(np.NINF)
vavs_reward_history.append([])
for episode in range(21):
result_all = []
obs_scaled = env.reset()
vavs_s_old, _ = vav_obs(obs_scaled, tempSP, ahuSAT, env)
done = False
total_reward = 0
total_energy = 0
total_comfort = 0
total_uncDegHour = 0
total_crt_loss = 0
total_act_loss = 0
vavs_episode_reward = [0]*9
while not done:
# All the states, acts here are scaled
# step1: Calculate actions
vavs_a = []
acts = [ahuSAT_scaled]
for i, vav in enumerate(vavs):
vav_s = vavs_s_old[i]
vav_a = vav.choose_action(vav_s)
acts.extend(vav_a.tolist())
vavs_a.append(vav_a)
acts = np.array(acts)
# step2: One step simulation
new_obs, reward, done, comments = env.step(acts)
hvacEnergy, hvacComfort, temp_min, temp_max, hvacUncDegHour, fanE, coolE, heatE = comments
total_reward += reward
# step3: Calculate reward and save to the buffer
                ahuEnergy = fanE + coolE + heatE # unit: kWh, already considers gas-electricity conversion
vavs_s_new, vavs_c = vav_obs(
new_obs, tempSP, ahuSAT, env)
acts_raw = env.rescale_action(acts)
ahuFR = acts_raw[1::2].sum()
for i, vav in enumerate(vavs):
comfort_cost = vavs_c[i]
reheatE = acts_raw[2+i*2]/(1000*4) # kWh
ahuE = (acts_raw[1+i*2]/ahuFR)*ahuEnergy # kWh
energy_cost = reheatE + ahuE # kWh
vav_reward = -1 * (10*comfort_cost + energy_cost)
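                    # Worked example of the reward shaping (made-up numbers):
                    # comfort_cost = 0.5 and energy_cost = 1.2 kWh give
                    # vav_reward = -(10 * 0.5 + 1.2) = -6.2, i.e. comfort
                    # violations are weighted 10x against each kWh of energy.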
vav.remember(vavs_s_old[i], vavs_a[i],
vav_reward, vavs_s_new[i], int(done))
vavs_episode_reward[i] += vav_reward
loss = vav.learn()
if loss:
crt_loss, act_loss = loss
total_crt_loss += crt_loss
total_act_loss += act_loss
# Book keeping for the whole HVAC System
total_energy += hvacEnergy
total_comfort += hvacComfort
total_uncDegHour += hvacUncDegHour
vavs_s_old = vavs_s_new
result = np.concatenate(
(env.rescale_state(new_obs), env.rescale_action(acts), comments, np.array([reward])))
result_all.append(result)
# Save the result
result_all = pd.DataFrame(result_all)
result_all.columns = env.states_time + env.states_amb + env.states_temp + env.action_names + \
['cost_energy', 'cost_comfort', 'temp_min', 'temp_max', 'UDH',
'fanEnergy', 'coolEnergy', 'heatEnergy'] + ['reward']
result_all.round(decimals=2)
result_all.to_csv('log/{}/run{}_trainE{}.csv'.format(exp_name, seed, episode))
# Determine whether to save the vav controller or not
for vav_index in range(9):
vavs_reward_history[vav_index].append(
vavs_episode_reward[vav_index])
avg_score = np.mean(vavs_reward_history[vav_index][-3:])
if avg_score > vavs_best_score[vav_index]:
vavs_best_score[vav_index] = avg_score
vavs[vav_index].save_models()
# Save the results of the whole HVAC system
total_reward_history.append(total_reward)
total_energy_history.append(total_energy)
total_comfort_history.append(total_comfort)
total_uncDegHour_history.append(total_uncDegHour)
total_crt_loss_history.append(total_crt_loss)
total_act_loss_history.append(total_act_loss)
f.write("%d,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f\n" % (episode, total_reward, total_energy,
total_comfort, total_uncDegHour, total_crt_loss, total_act_loss))
writer.add_scalar("reward", total_reward, episode)
writer.add_scalar("energy", total_energy, episode)
writer.add_scalar("comfort", total_comfort, episode)
writer.add_scalar("uncDegHour", total_uncDegHour, episode)
writer.add_scalar("crt_loss", total_crt_loss, episode)
writer.add_scalar("act_loss", total_act_loss, episode)
for vav_index in range(9):
writer.add_scalar("VAV{}_reward".format(vav_index), vavs_episode_reward[vav_index], episode)
def test(algorithm, exp_name, Agent, env, seed):
# Load the environment
filepath = 'tmp/{}/{}/run{}'.format(algorithm, exp_name, seed)
vavs = []
for vav_idx in range(9):
chkp_path = '{}/Actor_vav{}'.format(filepath, vav_idx)
checkpoint = T.load(chkp_path)
vav = Agent(checkpoint['input_size'],
checkpoint['output_size'],
checkpoint['hidden_layers'],
chkpt_dir=filepath, name='vav{}'.format(vav_idx))
vav.actor.load_state_dict(checkpoint['state_dict'])
vavs.append(vav)
# Test its performance
result_all = []
obs = env.reset()
done = False
ahuSAT = 12.75
ahuSAT_scaled = 2.0 * (ahuSAT - env.action_space.low[0]) / \
(env.action_space.high[0] - env.action_space.low[0]) - 1
tempSP = 22
for vav in vavs:
vav.load_models()
vav.actor.eval()
while not done:
if algorithm == 'ddpg':
vavs_s, _ = vav_obs(obs, tempSP, ahuSAT, env)
acts = [ahuSAT_scaled]
for i, vav in enumerate(vavs):
vav_s = vavs_s[i]
vav_s = T.tensor(vav_s, dtype=T.float).to(vav.actor.device)
vav_a = vav.actor(vav_s).to(
vav.actor.device).detach().numpy()
acts.extend(vav_a.tolist())
acts = np.array(acts)
# elif test_algorithm == 'sac':
# obs = T.Tensor([obs]).to(agent.actor.device)
# act, _ = agent.actor.sample_normal(obs, reparameterize=False)
# act = act.detach().numpy()[0]
# elif test_algorithm == 'td3':
# obs = T.tensor(obs, dtype=T.float).to(agent.actor.device)
# act = agent.actor.forward(obs).to(
# agent.actor.device).detach().numpy()
new_state, reward, done, comments = env.step(acts)
new_state = env.rescale_state(new_state)
acts = env.rescale_action(acts)
result = np.concatenate(
(new_state, acts, comments, np.array([reward])))
result_all.append(result)
obs = new_state
result_all = | pd.DataFrame(result_all) | pandas.DataFrame |
from __future__ import annotations
from datetime import timedelta
import operator
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
return start.copy(name=name)
elif isinstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(
cls, data: range, name=None, dtype: Dtype | None = None
) -> RangeIndex:
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not isinstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be called with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
result = object.__new__(cls)
assert isinstance(values, range)
result._range = values
result._name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self) -> type[Int64Index]:
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self) -> np.ndarray:
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _cached_int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
@property
def _int64index(self) -> Int64Index:
# wrap _cached_int64index so we can be sure its name matches self.name
res = self._cached_int64index
res._name = self._name
return res
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
if not len(self._range):
return header
first_val_str = str(self._range[0])
last_val_str = str(self._range[-1])
max_length = max(len(first_val_str), len(last_val_str))
return header + [f"{x:<{max_length}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@property
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.format("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@property
def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self) -> int:
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@property
def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_unique(self) -> bool:
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or len(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@property
def inferred_type(self) -> str:
return "integer"
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# -> np.ndarray[np.intp]
if com.any_not_none(method, tolerance, limit):
return super()._get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
if not is_signed_integer_dtype(target):
# checks/conversions/roundings are delegated to general method
return super()._get_indexer(target, method=method, tolerance=tolerance)
target_array = np.asarray(target)
locs = target_array - start
valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = len(self) - 1 - locs[valid]
return ensure_platform_int(locs)
# --------------------------------------------------------------------
def repeat(self, repeats, axis=None) -> Int64Index:
return self._int64index.repeat(repeats, axis=axis)
def delete(self, loc) -> Int64Index: # type: ignore[override]
return self._int64index.delete(loc)
def take(
self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
) -> Int64Index:
with rewrite_exception("Int64Index", type(self).__name__):
return self._int64index.take(
indices,
axis=axis,
allow_fill=allow_fill,
fill_value=fill_value,
**kwargs,
)
def tolist(self) -> list[int]:
return list(self._range)
@doc(Int64Index.__iter__)
def __iter__(self):
yield from self._range
@doc(Int64Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = no_default):
name = self.name if name is no_default else name
if values.dtype.kind == "f":
return Float64Index(values, name=name)
return Int64Index._simple_new(values, name=name)
def _view(self: RangeIndex) -> RangeIndex:
result = type(self)._simple_new(self._range, name=self._name)
result._cache = self._cache
return result
@doc(Int64Index.copy)
def copy(
self,
name: Hashable = None,
deep: bool = False,
dtype: Dtype | None = None,
names=None,
):
name = self._validate_names(name=name, names=names, deep=deep)[0]
new_index = self._rename(name=name)
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def _minmax(self, meth: str):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
return self.start
return self.start + self.step * no_steps
def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return self._minmax("max")
def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
np.ndarray[np.intp]
See Also
--------
numpy.ndarray.argsort
"""
ascending = kwargs.pop("ascending", True) # EA compat
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
result = np.arange(len(self), dtype=np.intp)
else:
result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)
if not ascending:
result = result[::-1]
return result
def factorize(
self, sort: bool = False, na_sentinel: int | None = -1
) -> tuple[np.ndarray, RangeIndex]:
codes = np.arange(len(self), dtype=np.intp)
uniques = self
if sort and self.step < 0:
codes = codes[::-1]
uniques = uniques[::-1]
return codes, uniques
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
return self._range == other._range
return super().equals(other)
# --------------------------------------------------------------------
# Set Operations
def _intersection(self, other: Index, sort=False):
if not isinstance(other, RangeIndex):
# Int64Index
return super()._intersection(other, sort=sort)
if not len(self) or not len(other):
return self._simple_new(_empty_range)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first.start, second.start)
int_high = min(first.stop, second.stop)
if int_high <= int_low:
return self._simple_new(_empty_range)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, _ = self._extended_gcd(first.step, second.step)
# check whether element sets intersect
if (first.start - second.start) % gcd:
return self._simple_new(_empty_range)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
new_step = first.step * second.step // gcd
new_range = range(tmp_start, int_high, new_step)
new_index = self._simple_new(new_range)
# adjust index to limiting interval
new_start = new_index._min_fitting_element(int_low)
new_range = range(new_start, new_index.stop, new_index.step)
new_index = self._simple_new(new_range)
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index
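    # Illustrative example (not part of the pandas source):
    # >>> RangeIndex(0, 10, 2).intersection(RangeIndex(3, 20, 3)).tolist()
    # [6]
    # The shared values are {0, 2, 4, 6, 8} & {3, 6, 9, 12, 15, 18} = {6}, and
    # the step of the result is lcm(2, 3) = 6.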
def _min_fitting_element(self, lower_limit: int) -> int:
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self.start) // abs(self.step))
return self.start + abs(self.step) * no_steps
def _max_fitting_element(self, upper_limit: int) -> int:
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self.start) // abs(self.step)
return self.start + abs(self.step) * no_steps
def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
"""
        Extended Euclidean algorithm to solve Bezout's identity:
            a*x + b*y = gcd(a, b)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
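    # Worked example (added for clarity): _extended_gcd(240, 46) returns
    # (2, -9, 47), since gcd(240, 46) == 2 and 240 * (-9) + 46 * 47 == 2.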
def _union(self, other: Index, sort):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
if isinstance(other, RangeIndex) and sort is None:
start_s, step_s = self.start, self.step
end_s = self.start + self.step * (len(self) - 1)
start_o, step_o = other.start, other.step
end_o = other.start + other.step * (len(other) - 1)
if self.step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other.step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self.start - other.start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if (
(start_s - start_o) % step_s == 0
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
return type(self)(start_r, end_r + step_s, step_s)
if (
(step_s % 2 == 0)
and (abs(start_s - start_o) <= step_s / 2)
and (abs(end_s - end_o) <= step_s / 2)
):
return type(self)(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if (
(start_o - start_s) % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
return type(self)(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if (
(start_s - start_o) % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
def _difference(self, other, sort=None):
# optimized set operation if we have another RangeIndex
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if not isinstance(other, RangeIndex):
return super()._difference(other, sort=sort)
res_name = | ops.get_op_result_name(self, other) | pandas.core.ops.get_op_result_name |
from pytest import raises, approx
import pandas as pd
from ..Lib.types.simple_moving_average import simpleMovingAverage
def test_initialisation_series_failure():
"""
Tests initialisation failure if incorrect input series
"""
notAPandasSeries = [1,2,3,4,5]
with raises(ValueError):
simpleMovingAverage(notAPandasSeries, 5)
def test_initialisation_period_failure():
"""
tests initialisation failure if incorrect input period
"""
data = [1,2,3,4,5]
series = pd.Series(data)
non_postv_int = 0
with raises(ValueError):
simpleMovingAverage(series, non_postv_int)
non_postv_int = -1
with raises(ValueError):
simpleMovingAverage(series, non_postv_int)
non_postv_int = 'Im the incorrect type'
with raises(ValueError):
simpleMovingAverage(series, non_postv_int)
def test_initialisation():
"""
Tests correct initialisation.
"""
data = [1,2,3,4,5]
series = | pd.Series(data) | pandas.Series |
"""
oil price data source: https://www.ppac.gov.in/WriteReadData/userfiles/file/PP_9_a_DailyPriceMSHSD_Metro.pdf
"""
import pandas as pd
import numpy as np
import tabula
import requests
import plotly.express as px
import plotly.graph_objects as go
import time
from pandas.tseries.offsets import MonthEnd
import re
import xmltodict
def process_table(table_df):
print("processing the downloaded PDF from PPAC website.")
cols = ['Date', 'Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol',
'Date_D', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']
table_df.columns = cols
table_df.drop(table_df.index[[0,3]],inplace=True)
table_df.drop('Date_D',axis=1,inplace=True)
table_df.dropna(how='any',inplace=True)
table_df = table_df.astype(str)
table_df = table_df.apply(lambda x: x.str.replace(" ", ""))
table_df[['Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']] = table_df[['Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']].astype(float)
table_df['Date'] = pd.to_datetime(table_df['Date'])
table_petrol = table_df[['Date','Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol','Kolkata_Petrol']]
table_diesel = table_df[['Date','Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']]
new_cols = [i.replace("_Petrol", "") for i in list(table_petrol.columns)]
table_petrol.columns = new_cols
table_diesel.columns = new_cols
return table_petrol, table_diesel
def get_international_exchange_rates(start_date,end_date):
print("sending request for international exchange rates.")
exchange_dates_url = "https://api.exchangeratesapi.io/history?"
params = {"start_at": start_date, "end_at":end_date, "base":"USD", "symbols":"INR"}
try:
req = requests.get(exchange_dates_url,params=params)
except Exception as e:
print(e)
print("request failed. using the saved data.")
dollar_exchange_rates = pd.read_csv("dollar_exhange_rates.csv")
        dollar_exchange_rates['Date'] = pd.to_datetime(dollar_exchange_rates['Date'])
        dollar_exchange_rates = dollar_exchange_rates.set_index('Date').sort_index(ascending=False)
return dollar_exchange_rates
else:
print("request successful. processing the data.")
dollar_exchange_rates = pd.DataFrame(req.json()['rates']).T.reset_index()
dollar_exchange_rates['index'] = pd.to_datetime(dollar_exchange_rates['index'])
        dollar_exchange_rates = dollar_exchange_rates.set_index('index').sort_index(ascending=False)
dollar_exchange_rates.to_csv("dollar_exhange_rates.csv")
return dollar_exchange_rates
# def merge_data(dollar_exchange_rates, international_oil_prices, oil_price_data):
# print("merging the international oil price data, international exchange rate data and domestic oil price data.")
# trim_int = international_oil_prices.loc[international_oil_prices.index.isin(oil_price_data.index)].dropna()
# oil_price_data = oil_price_data.merge(trim_int, left_index=True, right_index=True).sort_index(ascending=False)
# oil_price_data = oil_price_data.merge(dollar_exchange_rates, left_index=True, right_index=True).sort_index(ascending=False)
# oil_price_data['INR'] = oil_price_data['INR'].round(2)
# oil_price_data['INR_pc'] = (((oil_price_data['INR'] - oil_price_data['INR'].iloc[-1])/oil_price_data['INR'].iloc[-1])*100).round(2)
# oil_price_data['rup_lit_crude'] = (oil_price_data['Price'] / 159) * oil_price_data['INR']
# oil_price_data['int_pc'] = (((oil_price_data['Price'] - oil_price_data['Price'].iloc[-1])/oil_price_data['Price'].iloc[-1])*100).round(2)
# oil_price_data['rup_lit_crude_pc'] = (((oil_price_data['rup_lit_crude'] - oil_price_data['rup_lit_crude'].iloc[-1])/oil_price_data['rup_lit_crude'].iloc[-1])*100).round(2)
# return oil_price_data
def download_ppac():
print("sending request for domestic oil price data from PPAC website.")
ppac_url = r"https://www.ppac.gov.in/WriteReadData/userfiles/file/PP_9_a_DailyPriceMSHSD_Metro.pdf"
try:
req = requests.get(ppac_url)
except Exception as e:
print(e)
print("Request unsuccessful. The saved file will be used.")
else:
with open('DATA/price_data.pdf', 'wb') as file:
file.write(req.content)
print('file saved successfully.')
def prepare_downloaded_file():
print("preparing downloaded file for analysis.")
oil_prices = 'DATA/price_data.pdf'
tables = tabula.read_pdf(oil_prices, pages="all")
proc_dfs = [process_table(i) for i in tables]
petrol_df = pd.concat(i[0] for i in proc_dfs)
diesel_df = pd.concat(i[1] for i in proc_dfs)
print(f"Success. Length of Petrol prices {len(petrol_df)}------ diesel prices {len(diesel_df)}")
petrol_df['mean_price'] = (petrol_df['Delhi']+petrol_df['Mumbai']+petrol_df['Chennai']+petrol_df['Kolkata'])/4
diesel_df['mean_price'] = (diesel_df['Delhi']+diesel_df['Mumbai']+diesel_df['Chennai']+diesel_df['Kolkata'])/4
print("Adding percent change columns")
for i in petrol_df.columns[1:]:
petrol_df[f'{i}_pc'] = (((petrol_df[i] - petrol_df[i].iloc[-1])/petrol_df[i].iloc[-1]) * 100).round(2)
for i in diesel_df.columns[1:]:
diesel_df[f'{i}_pc'] = (((diesel_df[i] - diesel_df[i].iloc[-1])/diesel_df[i].iloc[-1]) * 100).round(2)
petrol_df.set_index("Date",inplace=True)
diesel_df.set_index("Date",inplace=True)
return petrol_df, diesel_df
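# Worked example of the percent-change columns added above: each *_pc column is the
# change relative to the last row of the frame, which acts as the baseline.
# The numbers below are made up for illustration.
_pc_example = pd.DataFrame({"Delhi": [81.0, 80.0, 72.0]})
_pc_example["Delhi_pc"] = (((_pc_example["Delhi"] - _pc_example["Delhi"].iloc[-1])
                            / _pc_example["Delhi"].iloc[-1]) * 100).round(2)
# -> Delhi_pc is [12.5, 11.11, 0.0]; 81.0 sits 12.5% above the 72.0 baseline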
def prep_consumption_df(consumption_df,year):
consumption_df.reset_index(inplace=True)
consumption_df.dropna(how='any',inplace=True)
consumption_df.drop('index',axis=1,inplace=True)
#print(consumption_df)
cols = ['products', 'April','May','June','July','August','September','October','November','December','January','February','March','Total']
consumption_df.drop(consumption_df.index[0],inplace=True)
consumption_df.columns = cols
consumption_df = consumption_df.loc[(consumption_df['products']=='MS')|(consumption_df['products']=='HSD')].reset_index().drop(['index','Total'],axis=1)
melt_df = pd.melt(consumption_df, id_vars = 'products',var_name='month',value_name='average_cons')
melt_df.sort_values('products',inplace=True)
melt_df = melt_df.reset_index().drop('index',axis=1)
melt_df['year'] = year
melt_df['year'] = melt_df['year'].apply(lambda x: x.split('-')[0]).astype(int)
melt_df['year'] = np.where((melt_df['month'].isin(['January','February','March'])),melt_df['year']+1,melt_df['year'])
melt_df['average_cons'] = melt_df['average_cons'].astype(float).round(2)
return melt_df
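# Quick illustration of the fiscal-to-calendar year shift used above: a sheet named
# "2018-19" starts in April 2018, so January-March rows belong to calendar year 2019.
# Toy data, for illustration only.
_fy_toy = pd.DataFrame({"month": ["April", "December", "January", "March"], "year": 2018})
_fy_toy["year"] = np.where(_fy_toy["month"].isin(["January", "February", "March"]),
                           _fy_toy["year"] + 1, _fy_toy["year"])
# -> year becomes [2018, 2018, 2019, 2019]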
def prep_consumption_df_present(consumption_df,year):
    consumption_df = consumption_df.reset_index(drop=True)
consumption_df.drop(consumption_df.index[range(0,6)],inplace=True)
consumption_df.reset_index(inplace=True)
consumption_df.drop('index',axis=1,inplace=True)
print(consumption_df)
consumption_df.drop(consumption_df.index[range(14,20)],inplace=True)
consumption_df.reset_index(inplace=True)
consumption_df.drop('index',axis=1,inplace=True)
#print(consumption_df)
cols = ['products', 'April','May','June','July','August','September','October','November','December','January','February','March','Total']
consumption_df.drop(consumption_df.index[0],inplace=True)
consumption_df.columns = cols
consumption_df = consumption_df.loc[(consumption_df['products']=='MS')|(consumption_df['products']=='HSD')].reset_index().drop(['index','Total'],axis=1)
melt_df = pd.melt(consumption_df, id_vars = 'products',var_name='month',value_name='average_cons')
melt_df.sort_values('products',inplace=True)
melt_df = melt_df.reset_index().drop('index',axis=1)
melt_df['year'] = year
melt_df['year'] = melt_df['year'].apply(lambda x: x.split('-')[0]).astype(int)
melt_df['year'] = np.where((melt_df['month'].isin(['January','February','March'])),melt_df['year']+1,melt_df['year'])
melt_df['average_cons'] = melt_df['average_cons'].astype(float).round(2)
return melt_df
def prep_historical_crude(hist_crude_df,year):
cols = ['year', 'April','May','June','July','August','September','October','November','December','January','February','March','Average','Ratio']
hist_crude_df = hist_crude_df.dropna(how='any').reset_index().drop('index',axis=1)
hist_crude_df.columns = cols
hist_crude_df.drop(hist_crude_df.index[0],inplace=True)
hist_crude_df.drop(['Average','Ratio'],axis=1,inplace=True)
melt_df = pd.melt(hist_crude_df, id_vars = 'year',var_name='month',value_name='import_bbl_usd')
melt_df['year'] = melt_df['year'].apply(lambda x: x.split('-')[0]).astype(int)
melt_df['year'] = np.where((melt_df['month'].isin(['January','February','March'])),melt_df['year']+1,melt_df['year'])
melt_df['import_bbl_usd'] = melt_df['import_bbl_usd'].astype(float).round(2)
melt_df = melt_df.loc[melt_df['year']>=year].sort_values(['year','month']).reset_index().drop('index',axis=1)
return melt_df
def prep_current_crude(current_crude_df):
current_crude_df.drop(current_crude_df.index[[i for i in range(0,12)]],inplace=True)
current_crude_df.reset_index(inplace=True)
current_crude_df.drop('index',inplace=True,axis=1)
current_crude_df.drop(current_crude_df.index[[2,3,4]],inplace=True)
cols = ['year', 'April','May','June','July','August','September','October','November','December','January','February','March']
current_crude_df.columns = cols
current_crude_df.drop(current_crude_df.index[0],inplace=True)
melt_df = pd.melt(current_crude_df, id_vars = 'year',var_name='month',value_name='import_bbl_usd')
melt_df['year'] = melt_df['year'].apply(lambda x: x.split('-')[0]).astype(int)
melt_df['year'] = np.where((melt_df['month'].isin(['January','February','March'])),melt_df['year']+1,melt_df['year'])
melt_df.dropna(inplace=True,how='any')
melt_df['import_bbl_usd'] = melt_df['import_bbl_usd'].astype(float).round(2)
return melt_df
def prep_historical_import(historical_import_df, year):
cols = ['product', 'April','May','June','July','August','September','October','November','December','January','February','March','Total']
print(historical_import_df)
historical_import_df.dropna(how='all',inplace=True,axis=1)
historical_import_df.columns = cols
historical_import_df = historical_import_df.dropna(how='any').reset_index().drop('index',axis=1)
historical_import_df = historical_import_df.loc[historical_import_df['product'].str.contains('import oil|ms|hsd|total',flags=re.I)].reset_index()
historical_import_df.drop('index',axis=1,inplace=True)
historical_import_df = historical_import_df[:4]
historical_import_df = historical_import_df.melt(id_vars='product',var_name='month',value_name='import_rs_cr')
historical_import_df['sheetname'] = year
    historical_import_df['year'] = historical_import_df['sheetname'].str.extract(r"(\d+)", expand=False).astype(int)
historical_import_df['year'] = np.where((historical_import_df['month'].isin(['January','February','March'])),historical_import_df['year']+1,historical_import_df['year'])
historical_import_df.drop('sheetname',axis=1,inplace=True)
return historical_import_df
def get_opec_crude(dmin):
opec_url = "https://www.opec.org/basket/basketDayArchives.xml"
req = requests.get(opec_url)
xml_dict = xmltodict.parse(req.content)
opec_df = pd.DataFrame(xml_dict['Basket']['BasketList'],columns=['Date','Price'])
opec_df['Date'] = pd.to_datetime(opec_df["Date"])
return opec_df.loc[opec_df['Date']>=dmin]
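# Hedged usage sketch for get_opec_crude (requires network access plus the requests
# and xmltodict imports assumed at the top of this script); kept commented out so the
# script does not issue an extra request:
# opec_df = get_opec_crude(dmin=pd.Timestamp("2021-01-01"))
# opec_df = opec_df.set_index("Date").sort_index()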
#Downloading the PDF file from PPAC website, saving the file and returning the final dataframes.
# if __name__ == "__main__"
download_ppac()
petrol_df, diesel_df = prepare_downloaded_file()
# int_oil_prices = pd.read_csv(r'oil-prices-master\oil-prices-master\data\brent-daily.csv')
# print(f'loaded international oil prices. Length {len(int_oil_prices)}')
# int_oil_prices["Date"] = pd.to_datetime(int_oil_prices['Date'])
# int_oil_prices.set_index("Date",inplace=True)
# Saving the merged petrol and diesel data
petrol_df['Type'], diesel_df['Type'] = 'Petrol', 'Diesel'
merged_price_data = pd.concat([petrol_df, diesel_df])
merged_price_data.to_csv('price_df_merged.csv')
#Getting the international exchange rates.
start_date = str(petrol_df.index.min())[:10]
end_date = str(petrol_df.index.max())[:10]
dollar_exchange_rates = get_international_exchange_rates(start_date, end_date)
dollar_exchange_rates.set_index('index',inplace=True)
month_avg_dol = dollar_exchange_rates.resample('M').mean()
month_avg_dol['month'] = month_avg_dol.index.month_name()
month_avg_dol['year'] = month_avg_dol.index.year
#creating merged dataframes for international section analysis.
# petrol_df_merged = merge_data(dollar_exchange_rates, int_oil_prices, petrol_df)
# diesel_df_merged = merge_data(dollar_exchange_rates, int_oil_prices, diesel_df)
#loading the historical consumption dataset (MS/petrol and HSD/diesel)
consumption_dict = pd.read_excel("DATA/consumption_historical_original.xls", sheet_name=["2017-18","2018-19","2019-20"])
consumption_hist = pd.concat([prep_consumption_df(df,year) for year,df in consumption_dict.items()]).reset_index()
consumption_hist.drop('index',axis=1,inplace=True)
consumption_present = | pd.read_excel("DATA/PT_consumption.xls") | pandas.read_excel |
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
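# The tests below repeatedly use a parity pattern: run the same operation on a pandas
# frame and its modin counterpart, then require either equal results or the same
# exception type. A minimal sketch of that pattern (illustrative helper name, not part
# of the original suite):
def check_parity(modin_obj, pandas_obj, op, *args, **kwargs):
    try:
        pandas_result = getattr(pandas_obj, op)(*args, **kwargs)
    except Exception as e:
        with pytest.raises(type(e)):
            getattr(modin_obj, op)(*args, **kwargs)
    else:
        df_equals(getattr(modin_obj, op)(*args, **kwargs), pandas_result)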
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
        # Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
            # Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
        # pandas_df is unused, but keeping it avoids confusing list-comprehension
        # logic in the pytest.mark.parametrize call
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
        # Test when axis is None. This gets repeated, but it is easier than adding the case to the parametrize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
        # Test when axis is None. This gets repeated, but it is easier than adding the case to the parametrize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
        # pandas exhibits inconsistent behavior for this case (cumsum over
        # datetime/timedelta data along axis 0), so we only assert that modin
        # raises TypeError.
        # Remove this special case once we can surface error messages from the backend.
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
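    @staticmethod
    def _cumsum_datetime_sketch():
        # Hedged illustration (not part of the original suite) of the special case
        # above: plain pandas typically raises TypeError when cumsum() hits a
        # datetime64 column along axis 0, which is why the datetime/timedelta
        # datasets are short-circuited instead of compared against pandas.
        import pandas  # local import keeps the sketch self-contained

        df = pandas.DataFrame({"when": pandas.to_datetime(["2020-01-01", "2020-01-02"])})
        try:
            df.cumsum(axis=0)
        except TypeError as err:
            return err  # e.g. "... does not support cumsum operations"
        return None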
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
            # We have to do this because we may pick the most frequent value
            # ("top") differently than pandas when several values tie for the
            # highest count. Since there is no guarantee which of the tied values
            # comes first, fall back to checking that at least `count`, `unique`
            # and `freq` match.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
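    @staticmethod
    def _describe_freq_tie_sketch():
        # Hedged illustration (not part of the original suite) of the tie-breaking
        # issue noted above: when two values share the highest frequency, pandas
        # picks ``top`` arbitrarily, so only count/unique/freq are stable enough
        # to compare across implementations.
        import pandas  # local import keeps the sketch self-contained

        desc = pandas.Series(["a", "a", "b", "b"]).describe()
        assert desc["freq"] == 2  # stable even though ``top`` may be "a" or "b"
        return desc.loc[["count", "unique", "freq"]]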
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
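    @staticmethod
    def _drop_duplicated_label_sketch():
        # Hedged illustration (not part of the original suite) of the non-unique
        # case above: ``drop`` on a duplicated column label removes every column
        # carrying that label, leaving only the remaining ones.
        import pandas  # local import keeps the sketch self-contained

        df = pandas.DataFrame([[0, -3, "a"], [1, -2, "b"]], columns=["a", "a", "b"])
        return df.drop("a", axis=1)  # only the "b" column survives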
def test_drop_api_equivalence(self):
        # equivalence of the labels/axis and index/columns APIs
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
        # pandas_df is intentionally unused; it is built here rather than inside
        # pytest.mark.parametrize so the parametrization stays free of confusing
        # list-comprehension workarounds.
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
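    @staticmethod
    def _dot_alignment_sketch():
        # Hedged illustration (not part of the original suite) of why the unaligned
        # Series above is expected to fail: DataFrame.dot requires the Series index
        # to line up with the frame's columns, otherwise pandas raises ValueError.
        import pandas  # local import keeps the sketch self-contained

        df = pandas.DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
        aligned = df.dot(pandas.Series([10, 1], index=["a", "b"]))
        try:
            df.dot(pandas.Series([10, 1]))  # default RangeIndex does not align
        except ValueError:
            pass  # e.g. "matrices are not aligned"
        return aligned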
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
        # We do not test non-positive limits until pandas-27042 gets fixed.
        # We do not test filling along axis=1 / "columns" (i.e. across each row)
        # until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
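    @staticmethod
    def _fillna_limit_sketch():
        # Hedged illustration (not part of the original suite) of the ``limit``
        # parameter exercised above: it caps how many consecutive NaNs a fill
        # method patches before giving up.
        import numpy as np
        import pandas  # local imports keep the sketch self-contained

        s = pandas.Series([1.0, np.nan, np.nan, np.nan, 5.0])
        return s.fillna(method="ffill", limit=2)  # the third consecutive NaN stays NaN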
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
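        # The pandas frame has just been filled in place while the modin frame has
        # not, so the two must differ here; the try/except below asserts exactly that.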
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
        # make sure that fillna works on a frame created without data (all-NaN)
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
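    @staticmethod
    def _fillna_nan_noop_sketch():
        # Hedged illustration (not part of the original suite): filling with NaN is
        # a value-level no-op, so integer and boolean blocks must pass through with
        # their dtypes untouched, which is what the comparison above relies on.
        import numpy as np
        import pandas  # local imports keep the sketch self-contained

        df = pandas.DataFrame({"i": [1, 2], "b": [True, False], "f": [1.0, np.nan]})
        out = df.fillna(np.nan)
        assert out["i"].dtype == df["i"].dtype and out["b"].dtype == df["b"].dtype
        return out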
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
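    @staticmethod
    def _fillna_series_value_sketch():
        # Hedged illustration (not part of the original suite) of the comment above:
        # a Series passed as the fill value is treated like a dict keyed by column
        # label, filling each column with its own scalar.
        import numpy as np
        import pandas  # local imports keep the sketch self-contained

        df = pandas.DataFrame({"a": [np.nan, 1.0], "b": [2.0, np.nan]})
        return df.fillna(pandas.Series({"a": 0.0, "b": 9.0}))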
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
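    @staticmethod
    def _fillna_dataframe_alignment_sketch():
        # Hedged illustration (not part of the original suite): when the fill value
        # is another DataFrame, only positions sharing both an index label and a
        # column label are filled; everything else keeps its NaN and no new columns
        # are introduced.
        import numpy as np
        import pandas  # local imports keep the sketch self-contained

        left = pandas.DataFrame({"a": [np.nan, np.nan]}, index=["x", "y"])
        right = pandas.DataFrame({"a": [1.0], "b": [2.0]}, index=["x"])
        return left.fillna(right)  # only ("x", "a") is filled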
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
def test_infer_objects(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
            # Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
def test_info(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).info(memory_usage="deep")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(self, data, loc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
column = "New Column"
value = modin_df.iloc[:, 0]
try:
pandas_df.insert(loc, column, value)
except Exception as e:
with pytest.raises(type(e)):
modin_df.insert(loc, column, value)
else:
modin_df.insert(loc, column, value)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Bad Column", modin_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Duplicate", modin_df[modin_df.columns[0]])
pandas_df.insert(0, "Duplicate", pandas_df[pandas_df.columns[0]])
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Scalar", 100)
pandas_df.insert(0, "Scalar", 100)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Too Short", list(modin_df[modin_df.columns[0]])[:-1])
with pytest.raises(ValueError):
modin_df.insert(0, modin_df.columns[0], modin_df[modin_df.columns[0]])
with pytest.raises(IndexError):
modin_df.insert(len(modin_df.columns) + 100, "Bad Loc", 100)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(columns=list("ab")).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(columns=list("ab")).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(index=modin_df.index).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(index=pandas_df.index).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.insert(
0, "DataFrame insert", modin_df[[modin_df.columns[0]]]
)
pandas_result = pandas_df.insert(
0, "DataFrame insert", pandas_df[[pandas_df.columns[0]]]
)
df_equals(modin_result, pandas_result)
def test_interpolate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).interpolate()
def test_is_copy(self):
data = test_data_values[0]
with pytest.warns(FutureWarning):
assert pd.DataFrame(data).is_copy == pandas.DataFrame(data).is_copy
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.items()
pandas_items = pandas_df.items()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.iteritems()
pandas_items = pandas_df.iteritems()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iterrows(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterrows = modin_df.iterrows()
pandas_iterrows = pandas_df.iterrows()
for modin_row, pandas_row in zip(modin_iterrows, pandas_iterrows):
modin_index, modin_series = modin_row
pandas_index, pandas_series = pandas_row
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_itertuples(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# test default
modin_it_default = modin_df.itertuples()
pandas_it_default = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_default, pandas_it_default):
np.testing.assert_equal(modin_row, pandas_row)
# test all combinations of custom params
indices = [True, False]
names = [None, "NotPandas", "Pandas"]
for index in indices:
for name in names:
modin_it_custom = modin_df.itertuples(index=index, name=name)
pandas_it_custom = pandas_df.itertuples(index=index, name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.ix()
def test_join(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col5": [0], "col6": [1]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["left", "right", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join(modin_df2, how=how)
pandas_join = pandas_df.join(pandas_df2, how=how)
df_equals(modin_join, pandas_join)
frame_data3 = {"col7": [1, 2, 3, 5, 6, 7, 8]}
modin_df3 = pd.DataFrame(frame_data3)
pandas_df3 = pandas.DataFrame(frame_data3)
join_types = ["left", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join([modin_df2, modin_df3], how=how)
pandas_join = pandas_df.join([pandas_df2, pandas_df3], how=how)
df_equals(modin_join, pandas_join)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
def test_kurt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurt()
def test_kurtosis(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurtosis()
def test_last(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.last("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.last_valid_index() == (pandas_df.last_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
            # Scalar
assert modin_df.loc[0, key1] == pandas_df.loc[0, key1]
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [
True if i % 3 == 0 else False for i in range(len(modin_df.index))
]
columns = [
True if i % 5 == 0 else False for i in range(len(modin_df.columns))
]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
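    @staticmethod
    def _nan_inequality_sketch():
        # Hedged illustration (not part of the original suite) of why the nan
        # datasets are skipped above: NaN never compares equal to itself, so plain
        # element-wise equality cannot be trusted on frames containing NaN.
        import numpy as np  # local import keeps the sketch self-contained

        assert np.nan != np.nan
        assert float("nan") != float("nan")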
def test_loc_multi_index(self):
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(
modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"]
)
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert (
modin_df.loc[("bar", "one"), "col1"]
== pandas_df.loc[("bar", "one"), "col1"]
)
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
def test_lookup(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).lookup([0, 1], ["col1", "col2"])
def test_mad(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).mad()
def test_mask(self):
df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
m = df % 3 == 0
with pytest.warns(UserWarning):
try:
df.mask(~m, -df)
except ValueError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_max(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mean(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_median(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
class TestDFPartTwo:
def test_melt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).melt()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"index", bool_arg_values, ids=arg_keys("index", bool_arg_keys)
)
def test_memory_usage(self, data, index):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
modin_result = modin_df.memory_usage(index=index)
pandas_result = pandas_df.memory_usage(index=index)
df_equals(modin_result, pandas_result)
def test_merge(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = modin_df.merge(modin_df2, how=how)
pandas_result = pandas_df.merge(pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
# Named Series promoted to DF
s = pd.Series(frame_data2.get("col1"))
with pytest.raises(ValueError):
modin_df.merge(s)
s = pd.Series(frame_data2.get("col1"), name="col1")
df_equals(modin_df.merge(s), modin_df.merge(modin_df2[["col1"]]))
with pytest.raises(ValueError):
modin_df.merge("Non-valid type")
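    @staticmethod
    def _merge_named_series_sketch():
        # Hedged illustration (not part of the original suite) of the "Named Series
        # promoted to DF" case above: the Series name becomes the column label of
        # the promoted single-column frame, which is what merge joins on; an
        # unnamed Series has no label to join on and is rejected.
        import pandas  # local import keeps the sketch self-contained

        left = pandas.DataFrame({"col1": [0, 1, 2], "col2": [4, 5, 6]})
        named = pandas.Series([0, 1, 2], name="col1")
        return left.merge(named.to_frame())  # same as merging the named Series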
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_min(self, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mode(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mode(axis=axis, numeric_only=numeric_only)
except Exception:
with pytest.raises(TypeError):
modin_df.mode(axis=axis, numeric_only=numeric_only)
else:
modin_result = modin_df.mode(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.ndim == pandas_df.ndim
def test_nlargest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nlargest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notna(), pandas_df.notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notnull(), pandas_df.notnull())
def test_nsmallest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nsmallest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"dropna", bool_arg_values, ids=arg_keys("dropna", bool_arg_keys)
)
def test_nunique(self, data, axis, dropna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.T.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
def test_pct_change(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).pct_change()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pipe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
n = len(modin_df.index)
a, b, c = 2 % n, 0, 3 % n
col = modin_df.columns[3 % len(modin_df.columns)]
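        # Chaining the helpers through .pipe should be equivalent to calling them
        # directly in nested form.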
def h(x):
return x.drop(columns=[col])
def g(x, arg1=0):
for _ in range(arg1):
x = x.append(x)
return x
def f(x, arg2=0, arg3=0):
return x.drop([arg2, arg3])
df_equals(
f(g(h(modin_df), arg1=a), arg2=b, arg3=c),
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
df_equals(
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
(pandas_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
def test_pivot(self):
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
with pytest.warns(UserWarning):
df.pivot(index="foo", columns="bar", values="baz")
def test_pivot_table(self):
df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
with pytest.warns(UserWarning):
df.pivot_table(values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
# We have to test this way because equality in plots means same object.
zipped_plot_lines = zip(modin_df.plot().lines, pandas_df.plot().lines)
for l, r in zipped_plot_lines:
if isinstance(l.get_xdata(), np.ma.core.MaskedArray) and isinstance(
r.get_xdata(), np.ma.core.MaskedArray
):
assert all((l.get_xdata() == r.get_xdata()).data)
else:
assert np.array_equal(l.get_xdata(), r.get_xdata())
if isinstance(l.get_ydata(), np.ma.core.MaskedArray) and isinstance(
r.get_ydata(), np.ma.core.MaskedArray
):
assert all((l.get_ydata() == r.get_ydata()).data)
else:
                    assert np.array_equal(l.get_ydata(), r.get_ydata())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_prod(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_product(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.product(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("q", quantiles_values, ids=quantiles_keys)
def test_quantile(self, request, data, q):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.quantile(q), pandas_df.quantile(q))
df_equals(modin_df.quantile(q, axis=1), pandas_df.quantile(q, axis=1))
try:
pandas_result = pandas_df.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.quantile(q)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.T.quantile(q), pandas_df.T.quantile(q))
df_equals(modin_df.T.quantile(q, axis=1), pandas_df.T.quantile(q, axis=1))
try:
pandas_result = pandas_df.T.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.T.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.T.quantile(q)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("funcs", query_func_values, ids=query_func_keys)
def test_query(self, data, funcs):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.query("")
with pytest.raises(NotImplementedError):
x = 2 # noqa F841
modin_df.query("col1 < @x")
try:
pandas_result = pandas_df.query(funcs)
except Exception as e:
with pytest.raises(type(e)):
modin_df.query(funcs)
else:
modin_result = modin_df.query(funcs)
df_equals(modin_result, pandas_result)
def test_query_after_insert(self):
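        # query() should still work on a column ("z") that is added after
        # construction via eval().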
modin_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
modin_df["z"] = modin_df.eval("x / y")
modin_df = modin_df.query("z >= 0")
modin_result = modin_df.reset_index(drop=True)
modin_result.columns = ["a", "b", "c"]
        pandas_df = pandas.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
pandas_df["z"] = pandas_df.eval("x / y")
pandas_df = pandas_df.query("z >= 0")
pandas_result = pandas_df.reset_index(drop=True)
pandas_result.columns = ["a", "b", "c"]
df_equals(modin_result, pandas_result)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"na_option", ["keep", "top", "bottom"], ids=["keep", "top", "bottom"]
)
def test_rank(self, data, axis, numeric_only, na_option):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.rank(axis=axis, numeric_only=numeric_only, na_option=na_option)
else:
modin_result = modin_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
df_equals(modin_result, pandas_result)
def test_reindex(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(
index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]
),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like(self):
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity(self):
test_data = TestData()
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
modin_df = pd.DataFrame(test_data.frame)
df_equals(
modin_df.rename(columns=mapping), test_data.frame.rename(columns=mapping)
)
renamed2 = test_data.frame.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(
modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper)
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
tm.assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
tm.assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = test_data.frame.rename(columns={"C": "foo", "D": "bar"})
modin_df = pd.DataFrame(test_data.frame)
tm.assert_index_equal(
modin_df.rename(columns={"C": "foo", "D": "bar"}).index,
test_data.frame.rename(columns={"C": "foo", "D": "bar"}).index,
)
# TODO: Uncomment when transpose works
# other axis
# renamed = test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
# tm.assert_index_equal(
# test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'}).index,
# modin_df.T.rename(index={'C': 'foo', 'D': 'bar'}).index)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
        # without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# function
func = str.upper
renamed = df.rename(columns=func, level=0)
modin_renamed = modin_df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="fizz")
modin_renamed = modin_df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level=1)
modin_renamed = modin_df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="buzz")
modin_renamed = modin_df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# index
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
modin_renamed = modin_df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(modin_renamed.index, renamed.index)
@pytest.mark.skip(reason="Pandas does not pass this test")
def test_rename_nocopy(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
modin_renamed = modin_df.rename(columns={"C": "foo"}, copy=False)
modin_renamed["foo"] = 1
assert (modin_df["C"] == 1).all()
def test_rename_inplace(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
df_equals(
modin_df.rename(columns={"C": "foo"}),
test_data.rename(columns={"C": "foo"}),
)
frame = test_data.copy()
modin_frame = modin_df.copy()
frame.rename(columns={"C": "foo"}, inplace=True)
modin_frame.rename(columns={"C": "foo"}, inplace=True)
df_equals(modin_frame, frame)
def test_rename_bug(self):
# rename set ref_locs, and set_index was not resetting
frame_data = {0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# df = df.set_index(['a', 'b'])
# df.columns = ['2001-01-01']
modin_df = modin_df.rename(columns={0: "a"})
modin_df = modin_df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# modin_df = modin_df.set_index(['a', 'b'])
# modin_df.columns = ['2001-01-01']
df_equals(modin_df, df)
def test_rename_axis(self):
data = {"num_legs": [4, 4, 2], "num_arms": [0, 0, 2]}
index = ["dog", "cat", "monkey"]
modin_df = pd.DataFrame(data, index)
pandas_df = pandas.DataFrame(data, index)
df_equals(modin_df.rename_axis("animal"), pandas_df.rename_axis("animal"))
df_equals(
modin_df.rename_axis("limbs", axis="columns"),
pandas_df.rename_axis("limbs", axis="columns"),
)
modin_df.rename_axis("limbs", axis="columns", inplace=True)
pandas_df.rename_axis("limbs", axis="columns", inplace=True)
df_equals(modin_df, pandas_df)
new_index = pd.MultiIndex.from_product(
[["mammal"], ["dog", "cat", "monkey"]], names=["type", "name"]
)
modin_df.index = new_index
pandas_df.index = new_index
df_equals(
modin_df.rename_axis(index={"type": "class"}),
pandas_df.rename_axis(index={"type": "class"}),
)
df_equals(
modin_df.rename_axis(columns=str.upper),
pandas_df.rename_axis(columns=str.upper),
)
df_equals(
modin_df.rename_axis(
columns=[str.upper(o) for o in modin_df.columns.names]
),
pandas_df.rename_axis(
columns=[str.upper(o) for o in pandas_df.columns.names]
),
)
with pytest.raises(ValueError):
df_equals(
modin_df.rename_axis(str.upper, axis=1),
pandas_df.rename_axis(str.upper, axis=1),
)
def test_rename_axis_inplace(self):
test_frame = TestData().frame
modin_df = pd.DataFrame(test_frame)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("foo", inplace=True)
modin_no_return = modin_result.rename_axis("foo", inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
modin_no_return = modin_result.rename_axis("bar", axis=1, inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
def test_reorder_levels(self):
df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
)
)
df["Value"] = np.random.randint(1, 100, len(df))
with pytest.warns(UserWarning):
df.reorder_levels(["Letter", "Color", "Number"])
def test_replace(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).replace()
def test_resample(self):
d = dict(
{
"price": [10, 11, 9, 13, 14, 18, 17, 19],
"volume": [50, 60, 40, 100, 50, 100, 40, 50],
}
)
df = pd.DataFrame(d)
df["week_starting"] = pd.date_range("01/01/2018", periods=8, freq="W")
with pytest.warns(UserWarning):
df.resample("M", on="week_starting")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_reset_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.reset_index(inplace=False)
pandas_result = pandas_df.reset_index(inplace=False)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pd_df_cp = pandas_df.copy()
modin_df_cp.reset_index(inplace=True)
pd_df_cp.reset_index(inplace=True)
df_equals(modin_df_cp, pd_df_cp)
def test_rolling(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.rolling(2, win_type="triang")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_round(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.round(), pandas_df.round())
df_equals(modin_df.round(1), pandas_df.round(1))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_sample(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.sample(n=3, frac=0.4, axis=axis)
with pytest.raises(KeyError):
modin_df.sample(frac=0.5, weights="CoLuMn_No_ExIsT", axis=0)
with pytest.raises(ValueError):
modin_df.sample(frac=0.5, weights=modin_df.columns[0], axis=1)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5, weights=[0.5 for _ in range(len(modin_df.index[:-1]))], axis=0
)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5,
weights=[0.5 for _ in range(len(modin_df.columns[:-1]))],
axis=1,
)
with pytest.raises(ValueError):
modin_df.sample(n=-3, axis=axis)
with pytest.raises(ValueError):
modin_df.sample(frac=0.2, weights=pandas.Series(), axis=axis)
if isinstance(axis, str):
num_axis = pandas.DataFrame()._get_axis_number(axis)
else:
num_axis = axis
# weights that sum to 1
sums = sum(i % 2 for i in range(len(modin_df.axes[num_axis])))
weights = [i % 2 / sums for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
# weights that don't sum to 1
weights = [i % 2 for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=0, axis=axis)
pandas_result = pandas_df.sample(n=0, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(frac=0.5, random_state=42, axis=axis)
pandas_result = pandas_df.sample(frac=0.5, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=2, random_state=42, axis=axis)
pandas_result = pandas_df.sample(n=2, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
def test_select_dtypes(self):
frame_data = {
"test1": list("abc"),
"test2": np.arange(3, 6).astype("u1"),
"test3": np.arange(8.0, 11.0, dtype="float64"),
"test4": [True, False, True],
"test5": pandas.date_range("now", periods=3).values,
"test6": list(range(5, 8)),
}
df = pandas.DataFrame(frame_data)
rd = pd.DataFrame(frame_data)
include = np.float, "integer"
exclude = (np.bool_,)
r = rd.select_dtypes(include=include, exclude=exclude)
e = df[["test2", "test3", "test6"]]
df_equals(r, e)
r = rd.select_dtypes(include=np.bool_)
e = df[["test4"]]
df_equals(r, e)
r = rd.select_dtypes(exclude=np.bool_)
e = df[["test1", "test2", "test3", "test5", "test6"]]
df_equals(r, e)
try:
pd.DataFrame().select_dtypes()
assert False
except ValueError:
assert True
def test_sem(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).sem()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_set_axis(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
x = pandas.DataFrame()._get_axis_number(axis)
index = modin_df.columns if x else modin_df.index
labels = ["{0}_{1}".format(index[i], i) for i in range(modin_df.shape[x])]
modin_result = modin_df.set_axis(labels, axis=axis, inplace=False)
pandas_result = pandas_df.set_axis(labels, axis=axis, inplace=False)
df_equals(modin_result, pandas_result)
with pytest.warns(FutureWarning):
modin_df.set_axis(axis, labels, inplace=False)
modin_df_copy = modin_df.copy()
modin_df.set_axis(labels, axis=axis, inplace=True)
# Check that the copy and original are different
try:
df_equals(modin_df, modin_df_copy)
except AssertionError:
assert True
else:
assert False
pandas_df.set_axis(labels, axis=axis, inplace=True)
df_equals(modin_df, pandas_df)
with pytest.warns(FutureWarning):
modin_df.set_axis(labels, axis=axis, inplace=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"drop", bool_arg_values, ids=arg_keys("drop", bool_arg_keys)
)
@pytest.mark.parametrize(
"append", bool_arg_values, ids=arg_keys("append", bool_arg_keys)
)
def test_set_index(self, request, data, drop, append):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.set_index(
key, drop=drop, append=append, inplace=False
)
pandas_result = pandas_df.set_index(
key, drop=drop, append=append, inplace=False
)
df_equals(modin_result, pandas_result)
modin_df_copy = modin_df.copy()
modin_df.set_index(key, drop=drop, append=append, inplace=True)
# Check that the copy and original are different
try:
df_equals(modin_df, modin_df_copy)
except AssertionError:
assert True
else:
assert False
pandas_df.set_index(key, drop=drop, append=append, inplace=True)
df_equals(modin_df, pandas_df)
def test_set_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).set_value(0, 0, 0)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_shape(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.shape == pandas_df.shape
def test_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_size(self, data):
modin_df = pd.DataFrame(data)
pandas_df = | pandas.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype
from datetime import datetime
from os import mkdir
from sklearn.preprocessing import OrdinalEncoder, KBinsDiscretizer
import concurrent
# import cProfile
from statistics import mean
# from math import factorial
# from tqdm import tqdm
# from scipy.stats import poisson
# import matplotlib.pyplot as plt
# from numba import jit
'''
This file evaluates the presence of outliers in 3+ dimensions in the openml.org dataset collection
'''
np.random.seed(0)
pd.options.display.max_columns = 1000
pd.options.display.max_rows = 1000
pd.options.display.width = 10000
DIVISOR = 0.25 # todo: loop through different values of this to see how it affects the results.
def flatten(arr):
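    # Repeatedly flattens one nesting level at a time until the elements are no
    # longer iterable (or the list becomes empty).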
flatten_1d = lambda x: [i for row in x for i in row]
if len(arr) == 0:
return arr
try:
while True:
arr = flatten_1d(arr)
if len(arr) == 0:
return arr
except:
pass
return arr
def is_float(v):
if str(v).isdigit():
return False
try:
float(v)
return True
except ValueError:
return False
class CountsOutlierDetector:
def __init__(self, n_bins=7, max_dimensions=5, results_folder="", results_name="", run_parallel=False):
self.n_bins = n_bins
self.max_dimensions = max_dimensions
self.results_folder = results_folder
self.results_name = results_name
self.run_parallel = run_parallel
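        # per-column state (filled in while the detector runs): detected column
        # types and the ordinal encoders used for categorical columns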
self.col_types_arr = []
self.ordinal_encoders_arr = []
def get_col_types_arr(self, X):
col_types_arr = ['N'] * len(X.columns)
for c in range(len(X.columns)):
num_unique = X[X.columns[c]].nunique()
if not is_numeric_dtype(X[X.columns[c]]):
col_types_arr[c] = 'C'
# Even if the values are numeric, if there are few of them, consider them categorical, though if the values
# are all float, the column will be cast to 'N' when collecting the unique values.
elif | is_numeric_dtype(X[X.columns[c]]) | pandas.api.types.is_numeric_dtype |
import pandas as pd
class Session:
def __init__(self, students_df, df_session_chat, meta_data):
self._first_message_time = df_session_chat["time"].sort_values().iloc[0]
self._relevant_chat = self.get_participants_in_session(students_df, df_session_chat, meta_data)
@ staticmethod
def get_participants_in_session(df_students, df_chat, meta_data):
"""
        Finds the students who attended the session. Runs over each mode, where a mode is a different way for a
        student to declare attendance (for example: phone number, ID), and merges this data into the table
        together with the zoom name that sent it.
        :param df_students: table of students with their identifying fields
        :param df_chat: the chat table for the specific session
        :param meta_data: configuration object providing filter_modes and zoom_names_to_ignore
        :return: df of the attendance in the session
"""
final_df = None
for mode in meta_data.filter_modes:
merged_df = pd.merge(df_students, df_chat.reset_index(), left_on=mode, right_on="message", how="left")
final_df = pd.concat([merged_df, final_df])
final_df.sort_values(by="time", inplace=True)
df_participated = final_df.groupby("zoom_name").first().reset_index()
df_participated["index"] = df_participated["index"].astype(int)
df_participated = df_participated.loc[:, ["id", "zoom_name", "time", "message", "index"]].set_index("index")
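        # drop chat messages sent from zoom names listed in meta_data.zoom_names_to_ignore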
filt = df_chat['zoom_name'].str.contains('|'.join(meta_data.zoom_names_to_ignore))
df_relevant_chat = pd.merge(df_chat[~filt], df_participated, how="left")
df_relevant_chat["relevant"] = df_relevant_chat["id"].apply(lambda x: 1 if x == x else 0)
df_relevant_chat["id"] = df_relevant_chat["id"].apply(lambda x: int(x) if x == x else -1)
return df_relevant_chat
def zoom_names_table(self, session_id):
zoom_df = self._relevant_chat.loc[:, ["zoom_name", "id"]].rename(columns={"zoom_name": "name", "id": "student_id"})
zoom_df['session_id'] = pd.Series([session_id] * zoom_df.shape[0])
return zoom_df.sort_values(by="student_id", ascending=False).groupby("name").first().reset_index()
def chat_table(self, zoom_df):
relevant_chat = self._relevant_chat.drop(columns=["id"])
chat_session_table = | pd.merge(relevant_chat, zoom_df, left_on="zoom_name", right_on="name") | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 7 14:21:28 2021
@author: angus
"""
import streamlit as st
import pandas as pd
import plotly.figure_factory as ff
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
import numpy as np
import investpy
import base64
import yfinance as yf
import datetime as dt
from io import BytesIO
st.set_page_config(layout="wide")
st.title('HKEX IPO Performance')
st.write ('All assumptions and further info can be found in [documentation](https://github.com/epiphronquant/HKEX-IPO-app)')
df = pd.read_excel(r'RawData.xlsx')
df_export = df
df = df.loc[df['Count as IPO?'] == 1] ### Filters rows where it is actually an IPO
df['Listing Date▼']= pd.to_datetime(df['Listing Date▼'])### converts listing date to datetime variable
### create dropdown selector
column_1, column_2, column_3 = st.columns(3) ### Divides page into 3 columns
with column_1:
language = st.selectbox(
'Which language should the company names be?',
['English', '中文'])
'You selected: ', language
if language =='中文':
df['Name'] = df['Name CN']
else:
pass
with column_2:### Chart of distribution and Lead 1 Chart
sectors = df['Sector']
sectors = sectors.tolist()
sectors = list(dict.fromkeys(sectors))
healthcare = sectors [0]
sectors.append('All')### adds an option for All IPOs
sectors = sorted(sectors [1:])
sectors.insert(0, healthcare)
sector = st.selectbox(
'Which sector are you interested in?',
sectors)
'You selected: ', sector
if sector == 'All':
df = df[(df['Listing Date▼'] >= '2019-01-01')] ### healthcare data runs from 2018 while all IPO data runs from 2019
else:
df = df.loc[df['Sector'] == sector]
with column_3:
### Dropdown box for median or mean
central_tendancy = ['Average', 'Median']
select_central = st.selectbox(
'Average or Median?',
central_tendancy)
'You selected: ', select_central
### add a slider to filter data by dates
format = 'MMM DD, YYYY' # format output
start_date = df ['Listing Date▼'].iloc [0]
start_date = start_date.date()
end_date = df ['Listing Date▼'].iloc [-1]
end_date = end_date.date()
slider = st.slider('Select date', min_value=start_date, value=(start_date,end_date) ,max_value=end_date, format=format)
start_date = slider [0].strftime('%Y%m%d')
end_date = slider [1].strftime('%Y%m%d')
def clean_time (date): ### presents selected slider time in the same format as the slider
a = date.ctime()
a = a.split()
a = a[1] + ' '+ a[2] + ', '+ a[-1]
return a
st.info('Start: **%s** End: **%s**' % (clean_time(slider[0]),clean_time(slider[1]))) ### info bar
df = df[(df['Listing Date▼'] >= start_date) & (df['Listing Date▼'] <= end_date)] ### filter the data
### create charts
def lead_chart(x, y1, y2, title):
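    # Grouped bar chart with two y-axes: deal count (left axis) and first-day
    # return formatted as a percentage (right axis).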
fig = go.Figure(
data=[go.Bar(name='Count', x=x, y=y1, yaxis='y', offsetgroup=1),
go.Bar(name='% Chg Debut', x=x, y=y2, yaxis='y2', offsetgroup=2)],
layout={'yaxis': {'title': 'Count'},
'yaxis2': {'title': '% Chg Debut', 'overlaying': 'y', 'side': 'right', 'tickformat': ',.0%'}})
fig.update_layout(barmode='group',title={'text': title})
fig.update_xaxes(categoryorder='max descending')
return fig
### Charts for normal distribution, industry performance, lead 1, lead 2
column_1, column_2 = st.columns(2) ### Divides page into 2 columns
with column_1:### Chart of distribution and Lead 1 Chart
### Chart of distribution
x1 = df ['% Chg. on2Debut▼']
x1 = x1.tolist()
x1 = [x1]
label = '% Chg. on Debut'
label = [label]
names = df['Name']
names = names.tolist()
names = [names]
fig = ff.create_distplot(x1, label, rug_text = names, bin_size = .2)
fig.update_layout( xaxis_tickformat = ',.2%',title={'text': "Normal Distribution Plot and Rugplot for First Day Return"})
st.plotly_chart(fig)
#### Lead 1 Chart
lead1 = df [['% Chg. on2Debut▼', 'Industry', 'Name', 'Lead 1', 'Listing Date▼']]
a = lead1.groupby(['Lead 1']).count() ### gathers data by Lead 1
industries = a.index
industries = industries.tolist()
a = a['% Chg. on2Debut▼'] ### data column that shows deal count
a = a.rename('Count')
a = a.to_list()
if select_central == 'Average':
b = lead1.groupby(['Lead 1']).mean()
else:
b = lead1.groupby(['Lead 1']).median()
b = b['% Chg. on2Debut▼'].to_list()
fig = lead_chart(industries, a, b,"Lead 1 Deal Count and " + select_central + " First Day Return" )
st.plotly_chart(fig)
with column_2:
### chart of industry performance and Lead 2 Chart
industry = df [['% Chg. on2Debut▼', 'Industry', 'Name', 'Listing Date▼']]
a = industry.groupby(['Industry']).count()
industries = a.index
industries = industries.tolist()
a = a['% Chg. on2Debut▼']
a = a.rename('Count')
a = a.to_list()
if select_central == 'Average':
b = industry.groupby(['Industry']).mean()
else:
b = industry.groupby(['Industry']).median()
b = b['% Chg. on2Debut▼'].to_list()
fig = go.Figure(
data=[
go.Bar(name='Count', x=industries, y=a, yaxis='y', offsetgroup=1),
go.Bar(name='% Chg Debut', x=industries, y=b, yaxis='y2', offsetgroup=2)],
layout={
'yaxis': {'title': 'Count'},
'yaxis2': {'title': '% Chg Debut', 'overlaying': 'y', 'side': 'right', 'tickformat': ',.0%'}
})
fig.update_layout(barmode='group',legend=dict(yanchor="top",y=1,xanchor="right",x=1.35),
title={'text': "Industry Deal Count and " + select_central + " First Day Return"})
fig.update_xaxes(categoryorder='max descending')
st.plotly_chart(fig)
#### Lead 2 Chart
lead2 = df [['% Chg. on2Debut▼', 'Industry', 'Name', 'Lead 2', 'Listing Date▼']]
a = lead2.groupby(['Lead 2']).count()
industries = a.index
industries = industries.tolist()
a = a['% Chg. on2Debut▼']
a = a.rename('Count')
a = a.to_list()
if select_central == 'Average':
b = lead2.groupby(['Lead 2']).mean()
else:
b = lead2.groupby(['Lead 2']).median()
b = b['% Chg. on2Debut▼'].to_list()
fig = lead_chart(industries, a, b,"Lead 2 Deal Count and " + select_central+ " First Day Return" )
st.plotly_chart(fig)
### Charts for Lead 1&2 Performance
#### combine lead 1 with lead 2
lead12 = df [['% Chg. on2Debut▼', 'Industry', 'Name','Lead 1', 'Lead 2', 'Listing Date▼']]
lead12 ['Lead 1 & 2'] = df ['Lead 1'] + ' & ' + df['Lead 2']
a = lead12.groupby(['Lead 1 & 2']).count()
industries = a.index
industries = industries.tolist()
a = a['% Chg. on2Debut▼']
a = a.rename('Count')
a = a.to_list()
if select_central == 'Average':
b = lead12.groupby(['Lead 1 & 2']).mean()
else:
b = lead12.groupby(['Lead 1 & 2']).median()
# b = lead12.groupby(['Lead 1 & 2']).mean()
b = b['% Chg. on2Debut▼'].to_list()
### graph
fig = lead_chart(industries, a, b,"Lead 1 & 2 Deal Count and "+ select_central+" First Day Return" )
st.plotly_chart(fig, use_container_width=True)
column_1, column_2 = st.columns(2) ### Divides page into 2 columns
with column_1:
### Chart showing first day return performance over time with HSH and HSI
#add a box to select Chg on debut or -1 trading day as primary axis
ret = st.selectbox(
'Which return would you like to analyse?',
['Chg on Debut', 'Return till Today'])
'You selected: ', ret
if ret == 'Chg on Debut':
fdayret = df [['Listing Date▼','% Chg. on2Debut▼']]
else:
fdayret = df [['Listing Date▼','-1 Trading Days']]
# fdayret = df [['Listing Date▼','% Chg. on2Debut▼']]
if select_central == 'Average':
a = fdayret.groupby(['Listing Date▼']).mean()
else:
a = fdayret.groupby(['Listing Date▼']).median()
fig = make_subplots(specs=[[{"secondary_y": True}]])
with column_2:
#add a box to select HSI or HSH as second axis
index = st.selectbox(
'Which index would you like to compare it to?',
['Hang Seng Healthcare', 'Hang Seng Index'])
'You selected: ', index
### Download HSH and HSI data
today = pd.to_datetime('today').strftime('%d/%m/%Y')
start = a.index[0].strftime('%d/%m/%Y')
end = a.index[-1].strftime('%d/%m/%Y')
if index == 'Hang Seng Index':
df_index = investpy.get_index_historical_data(index='Hang Seng',
country='hong kong',
from_date= start,
to_date= end)
else:
df_index = investpy.get_index_historical_data(index='hs healthcare',
country='hong kong',
from_date= start,
to_date= end)
# Add traces
if ret == 'Chg on Debut':
fig.add_trace(go.Scatter(x= a.index, y= a['% Chg. on2Debut▼'], name= ret),
secondary_y=False)
else:
# fdayret = df [['Listing Date▼','-1 HSI Days']]
fig.add_trace(go.Scatter(x= a.index, y= a['-1 Trading Days'], name= ret),
secondary_y=False)
fig.add_trace(go.Scatter(x = df_index.index, y= df_index['Close'], name= index),
secondary_y=True)
# Add figure title
fig.update_layout(title_text= ret + " with Index Level")
# Set x-axis title
fig.update_xaxes(title_text="Date")
# Set y-axes titles
fig.update_yaxes(title_text= ret, secondary_y=False)
fig.update_yaxes(title_text="Index Level", secondary_y=True)
fig.layout.yaxis.tickformat= ',.2%'
# fig.show()
st.plotly_chart(fig, use_container_width=True)
#### trading performance's chart
column_1, column_2 = st.columns(2) ### Divides page into 2 columns
with column_1:
last_x = st.number_input('Number of most recent IPOs to display', value = 10)
last_x = int(last_x)
@st.cache(ttl = 1800)
def chart_7(df):
### add chart showing last 10 IPOs and their detailed trading performances
## gather the last 10 tickers and stock names and listing date and price
df_10 = df [-last_x:]
df_10 = df_10 [['Name', 'Code', 'Listing Price', 'Listing Date▼']]
## Gather stock codes and tickers
df_yf = df_10 ['Code']
df_tickers = df_yf.tolist()
df_yf = yf.download(df_tickers)
df_yf = df_yf ['Close']
df_yf = df_yf [df_tickers] ## reorder the columns so it is in the order as inputted
df_name = df_10 ['Name']
df_name = df_name.tolist()
df_yf.columns = df_name ## rename the column codes to column names
df_yf8 = pd.DataFrame()
for name in df_name:
df_yf2 = df_yf [name]
df_yf2 = df_yf2.dropna()
list_date = df_10.loc [df_10['Name'] == name]
list_date = list_date ['Listing Date▼'].values
list_date = list_date [0]
df_yf2 = df_yf2.reset_index()
df_yf2 = df_yf2[(df_yf2['Date'] >= list_date)]
df_yf2 = df_yf2.set_index('Date')
df_yf2=df_yf2.iloc[:,0]
price = df_10.loc [df_10['Name'] == name]
price = price ['Listing Price'].values
price = price [0]
df_yf2 = df_yf2 / price -1
date = df_yf2.index [0]
date = date - dt.timedelta(days=1)
ser = pd.Series(data= {date : 0}, index=[date], name = 'Stock Name')# df_yf2 = df_yf2.append()
ser = ser.append(df_yf2)
ser = pd.DataFrame(ser, columns = ['Close'])
ser ['Stock Name'] = name
df_yf8 = df_yf8.append(ser)
df_yf8 = df_yf8.reset_index()
df_yf8 = df_yf8.rename({'index':'Date', 'Close':'Return'}, axis = 'columns')
markers = df_yf8 ['Date'].min() ### display of markers settings
markers = markers.date()
markers = markers > dt.date.today() - dt.timedelta(days=120)
return df_yf8, markers
df_yf8, markers = chart_7(df)
fig = px.line(df_yf8, x= 'Date', y= 'Return', color = 'Stock Name', title= 'Last ' + str(last_x)+' ' + sector + ' IPOs Return Post IPO', markers = markers)
fig.layout.yaxis.tickformat = ',.0%'
st.plotly_chart(fig)
with column_2:
### customizable chart for displaying various IPOs
    names = st.text_input('Type in names of stocks that have IPOed in the past 3 years, e.g. TRANSCENTA-B, bioheart-b')
@st.cache(ttl = 1800)
def chart_8(names):
names = names.split(',')
names = map(str.strip, names)
names = map(str.upper, names)
names = list(names)
tickers = []
for name in names:
# name = 'SENSETIME-W'
ticker = df.loc [df['Name'] == name]
ticker = ticker ['Code'].values
ticker = ticker [0]
tickers.append(ticker)
df_yf = yf.download(tickers) ['Close']
if len(tickers) ==1:
df_yf = df_yf.rename(names[0])
else:
df_yf = df_yf [tickers] ## reorder the columns so it is in the order as inputted
df_yf.columns = names ## rename the column codes to column names
df_yf8 = | pd.DataFrame() | pandas.DataFrame |
import json
import pandas as pd
def parse_json(filename):
f = open(filename, encoding="utf8")
data = json.load(f)
return data
def getGrades(filename=r'data\final_grades.csv'):
f = | pd.read_csv(filename) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Home Broker API - Market data downloader
# https://github.com/crapher/pyhomebroker.git
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from . import __user_agent__
from . import online_helper as helper
from .exceptions import DataException, SessionException, ServerException
import requests as rq
import pandas as pd
from signalr import Connection
import urllib.parse
class OnlineSignalR:
def __init__(self, auth, on_open=None, on_personal_portfolio=None,
on_securities=None, on_options=None, on_repos=None, on_order_book=None,
on_error=None, on_close=None, proxy_url=None):
"""
Class constructor.
Parameters
----------
auth : home_broker_session
An object with the authentication information.
on_open : function(), optional
Callable object which is called at opening the signalR connection.
This function has no argument.
on_personal_portfolio : function(quotes), optional
Callable object which is called when personal portfolio data is received.
This function has 1 argument. The argument is the dataframe with the quotes.
on_securities : function(quotes), optional
Callable object which is called when security data is received.
This function has 1 argument. The argument is the dataframe with the quotes.
on_options : function(quotes), optional
Callable object which is called when options data is received.
This function has 1 argument. The argument is the dataframe with the quotes.
on_repos : function(quotes), optional
Callable object which is called when repo data is received.
This function has 1 argument. The argument is the dataframe with the quotes.
on_order_book : function(quotes), optional
Callable object which is called when the order book data (level 2) is received.
This function has 1 argument. The argument is the dataframe with the quotes.
on_error : function(error), optional
            Callable object which is called when an error occurs.
            This function has 1 argument. The argument is the exception object.
on_close : function(), optional
Callable object which is called when closed the connection.
This function has no argument.
proxy_url : str, optional
The proxy URL with one of the following formats:
- scheme://user:pass@hostname:port
- scheme://user:pass@ip:port
- scheme://hostname:port
- scheme://ip:port
Ex. https://john:[email protected]:3128
"""
self._proxies = {'http': proxy_url, 'https': proxy_url} if proxy_url else None
self._auth = auth
self._on_open = on_open
self._on_personal_portfolio = on_personal_portfolio
self._on_securities = on_securities
self._on_options = on_options
self._on_repos = on_repos
self._on_order_book = on_order_book
self._on_error = on_error
self._on_close = on_close
self._connection = None
self._hub = None
self.is_connected = False
########################
#### PUBLIC METHODS ####
########################
def connect(self):
"""
Connects to the signalR server.
Raises
------
pyhomebroker.exceptions.SessionException
If the user is not logged in.
"""
if not self._auth.is_user_logged_in:
raise SessionException('User is not logged in')
url = '{}/signalr/hubs'.format(self._auth.broker['page'])
with rq.Session() as session:
rq.utils.add_dict_to_cookiejar(session.cookies, self._auth.cookies)
if self._proxies:
session.proxies.update(self._proxies)
self._connection = Connection(url, session)
self._hub = self._connection.register_hub('stockpriceshub')
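            # wire the server-side hub events to the internal handlers that parse
            # the payloads and forward them to the user callbacks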
self._hub.client.on('broadcast', self.__internal_securities_options_repos)
self._hub.client.on('sendStartStockFavoritos', self.__internal_personal_portfolio)
self._hub.client.on('sendStockFavoritos', self.__internal_personal_portfolio)
self._hub.client.on('sendStartStockPuntas', self.__internal_order_book)
self._hub.client.on('sendStockPuntas', self.__internal_order_book)
if self._on_error:
self._connection.error += self._on_error
self._connection.exception += self.__on_internal_exception
self._connection.start()
self.is_connected = self._connection.is_open
if self.is_connected and self._on_open:
self._on_open()
def disconnect(self):
"""
Disconnects from the signalR server.
Raises
------
pyhomebroker.exceptions.SessionException
If the user is not logged in.
If the connection or hub is not assigned.
"""
if not self._auth.is_user_logged_in:
raise SessionException('User is not logged in')
if not self._connection or not self._hub:
raise SessionException('Connection or hub is not assigned')
if self._connection.is_open:
self._connection.close()
self._connection = None
self._hub = None
self.is_connected = False
if self._on_close:
self._on_close()
def join_group(self, group_name):
"""
Subscribe to a group to start receiving event notifications.
Raises
------
pyhomebroker.exceptions.SessionException
If the user is not logged in.
If the connection or hub is not assigned.
If the connection is not open.
"""
if not self._auth.is_user_logged_in:
raise SessionException('User is not logged in')
if not self._connection or not self._hub:
raise SessionException('Connection or hub is not assigned')
if not self._connection.is_open:
raise SessionException('Connection is not open')
self._hub.server.invoke('JoinGroup', group_name)
def quit_group(self, group_name):
"""
Unsubscribe from a group to stop receiving event notifications.
Raises
------
pyhomebroker.exceptions.SessionException
If the user is not logged in.
If the connection or hub is not assigned.
If the connection is not open.
"""
if not self._auth.is_user_logged_in:
raise SessionException('User is not logged in')
if not self._connection or not self._hub:
raise SessionException('Connection or hub is not assigned')
if not self._connection.is_open:
raise SessionException('Connection is not open')
self._hub.server.invoke('QuitGroup', group_name)
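    # Minimal usage sketch (illustrative only; the group name below is an assumed
    # placeholder, not a value confirmed by the server):
    #
    #   signalr = OnlineSignalR(auth, on_securities=lambda quotes: print(quotes))
    #   signalr.connect()
    #   signalr.join_group('example-group')
    #   ...
    #   signalr.disconnect()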
#########################
#### PRIVATE METHODS ####
#########################
def __internal_personal_portfolio(self, data):
try: # Handle any exception processing the information or triggered by the user code
if self._on_personal_portfolio:
if data and not isinstance(data, list):
data = [data]
df = pd.DataFrame(data if data else pd.DataFrame())
self._on_personal_portfolio(helper.process_personal_portfolio(df))
except Exception as ex:
if self._on_error:
try: # Catch user exceptions inside the except block (Inception Mode Activated :D)
self._on_error(ex)
except:
pass
def __internal_securities_options_repos(self, data):
try: # Handle any exception processing the information or triggered by the user code
df = pd.DataFrame(data) if data else pd.DataFrame()
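            # split the broadcast payload by group: repos, options, and everything
            # else is treated as securities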
df_repo = df[df.Group == 'cauciones-']
df_options = df[df.Group == 'opciones-']
df_securities = df[(df.Group != 'cauciones-') & (df.Group != 'opciones-')]
if len(df_repo) and self._on_repos:
self._on_repos(helper.process_repos(df_repo))
if len(df_options) and self._on_options:
self._on_options(helper.process_options(df_options))
if len(df_securities) and self._on_securities:
self._on_securities(helper.process_securities(df_securities))
except Exception as ex:
if self._on_error:
try: # Catch user exceptions inside the except block (Inception Mode Activated :D)
self._on_error(ex)
except:
pass
def __internal_order_book(self, data):
try: # Handle any exception processing the information or triggered by the user code
if self._on_order_book and data:
symbol = data['Symbol']
settlement = data['Term']
if data['StockDepthBox'] and data['StockDepthBox']['PriceDepthBox']:
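                    # build the bid (buy) and ask (sell) depth frames from the price depth box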
df_buy = pd.DataFrame(data['StockDepthBox']['PriceDepthBox']['BuySide'])
df_sell = pd.DataFrame(data['StockDepthBox']['PriceDepthBox']['SellSide'])
else:
df_buy = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 18:54:29 2019
@author: suvodeepmajumder
"""
import sys
sys.path.append("..")
from pygit2 import clone_repository
from pygit2 import GIT_SORT_TOPOLOGICAL, GIT_SORT_REVERSE,GIT_MERGE_ANALYSIS_UP_TO_DATE,GIT_MERGE_ANALYSIS_FASTFORWARD,GIT_MERGE_ANALYSIS_NORMAL,GIT_RESET_HARD
from pygit2 import Repository
import shutil,os
import pygit2
from git_log import git2repo
import os
import re
import shlex
import numpy as np
import pandas as pd
from glob2 import glob, iglob
import subprocess as sp
import understand as und
from pathlib import Path
from pdb import set_trace
import sys
from collections import defaultdict
from utils.utils import utils
import platform
from os.path import dirname as up
from multiprocessing import Pool, cpu_count
import threading
from multiprocessing import Queue
from threading import Thread
import random
import string
#from main.utils.utils.utils import printProgressBar
# class ThreadWithReturnValue(Thread):
# def __init__(self, group=None, target=None, name=None,
# args=(), kwargs={}, Verbose=None):
# Thread.__init__(self, group, target, name, args, kwargs)
# self._return = None
# def run(self):
# #print(type(self._target))
# if self._target is not None:
# self._return = self._target(*self._args,
# **self._kwargs)
# def join(self, *args):
# Thread.join(self, *args)
# return self._return
class MetricsGetter(object):
"""
    Generate class, file, function, and object-oriented metrics for a project.
    Parameters
    ----------
    repo_url: str
        URL of the git repository to analyze.
    repo_name: str
        Name of the repository, used to build the local repository and data paths.
    repo_lang: str
        Primary language of the repository.
    code_path: str
        Root directory used to resolve the repository and data file paths.
Notes
-----
The class is designed to run in conjunction with a context manager.
"""
def __init__(self,repo_url,repo_name,repo_lang,code_path):
self.repo_url = repo_url
self.repo_name = repo_name
self.repo_lang = repo_lang
#self.repo_obj = git2repo.git2repo(self.repo_url,self.repo_name)
self.root_dir = code_path
print("root:",self.root_dir)
if platform.system() == 'Darwin' or platform.system() == 'Linux':
self.repo_path = self.root_dir+ '/commit_guru/ingester/CASRepos/git/' + self.repo_name
self.file_path = up(self.root_dir) + '/data/commit_guru/' + self.repo_name + '.csv'
#self.committed_file = up(os.getcwd()) + '/data/committed_files/' + self.repo_name + '_committed_file.pkl'
self.und_file = up(self.root_dir) + '/data/understand_files/' + self.repo_name + '_understand.csv'
else:
self.repo_path = up(os.getcwd()) + '\\temp_repo\\' + self.repo_name
self.file_path = up(os.getcwd()) + '\\data\\commit_guru\\' + self.repo_name + '.pkl'
#self.committed_file = up(os.getcwd()) + '\\data\\committed_files\\' + self.repo_name + '_committed_file.pkl'
self.buggy_clean_pairs = self.read_commits()
# Reference current directory, so we can go back after we are done.
self.cwd = Path('/home/suvodeep/Documents/AI4SE/Data_Miner/github/API_V3/suvodeep/')
self.cores = cpu_count()
#self.repo = self.clone_repo()
# if self.repo == None:
# raise ValueError
# else:
# print(self.repo)
# Generate path to store udb files
print("cwd",self.cwd)
self.udb_path = self.cwd.joinpath("temp", "udb/"+self.repo_name)
print("udb at init",self.udb_path)
# Create a folder to hold the udb files
if not self.udb_path.is_dir():
os.makedirs(self.udb_path)
# Generate source path where the source file exist
#self.source_path = self.cwd.joinpath(
# ".temp", "sources", self.repo_name)
def clone_repo(self):
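        # Reuse an existing local clone if pygit2 can discover one; otherwise
        # self.repo stays None so the caller can decide how to handle it.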
git_path = pygit2.discover_repository(self.repo_path)
if git_path is not None:
self.repo = pygit2.Repository(git_path)
return self.repo
self.repo = None
return self.repo
def read_commits(self):
df = | pd.read_csv(self.file_path) | pandas.read_csv |
# Lint as: python3
"""Tests for main_heatmap."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import metric_history
import numpy as np
import pandas as pd
SAMPLE_LOGS_LINK = 'https://console.cloud.google.com/logs?project=xl-ml-test&advancedFilter=resource.type%3Dk8s_container%0Aresource.labels.project_id%3Dxl-ml-test%0Aresource.labels.location=us-central1-b%0Aresource.labels.cluster_name=xl-ml-test%0Aresource.labels.namespace_name=automated%0Aresource.labels.pod_name:pt-1.5-cpp-ops-func-v2-8-1587398400&dateRangeUnbound=backwardInTime'
class MetricHistoryTest(parameterized.TestCase):
def test_make_plots_nothing_oob(self):
input_df = pd.DataFrame({
'test_name': pd.Series(['test1', 'test1', 'test1', 'test1']),
'metric_name': pd.Series(['acc', 'loss', 'acc', 'loss']),
'run_date': pd.Series(['2020-04-21', '2020-04-20', '2020-04-20',
'2020-04-21']),
'metric_value': pd.Series([99.1, 0.5, 99.2, 0.6]),
'metric_upper_bound': pd.Series([np.nan, 1.0, np.nan, 1.0]),
'metric_lower_bound': pd.Series([99.0, np.nan, 99.0, np.nan]),
'logs_link': pd.Series([SAMPLE_LOGS_LINK] * 4),
'job_status': pd.Series(['success', 'success', 'success', 'success']),
})
# There should be 2 plots: 1 per metric. Neither should be outlined in red
# since neither metric was oob.
plots = metric_history.make_plots('test1', '', input_df)
self.assertEqual(len(plots), 2)
self.assertItemsEqual([plot.title.text for plot in plots], ['loss', 'acc'])
self.assertNotEqual(plots[0].outline_line_color, 'red')
self.assertNotEqual(plots[1].outline_line_color, 'red')
def test_make_plots_with_oob(self):
input_df = pd.DataFrame({
'test_name': | pd.Series(['test1', 'test1', 'test1', 'test1']) | pandas.Series |
import numpy as np
import pandas as pd
import csv
from settings import *
chunks_path=os.path.join(CHUNKS_PATH, 'train')
tracks = os.listdir(chunks_path)
trackwise_energy = | pd.DataFrame(columns=['Name','Vocals','Accompaniment','Drums','Bass','Other']) | pandas.DataFrame |
#
# Copyright (c) 2021, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import matplotlib.pyplot as plt
import pandas as pd
from scikitplot.estimators import plot_learning_curve
from scikitplot.metrics import plot_precision_recall
from sklearn.base import is_regressor, is_classifier
from sklearn.cluster import KMeans
from sklearn.metrics import explained_variance_score, max_error, mean_absolute_error, r2_score, \
precision_recall_fscore_support
from yellowbrick.classifier import ClassificationReport, ConfusionMatrix, ROCAUC, ClassPredictionError
from yellowbrick.cluster import SilhouetteVisualizer, KElbowVisualizer
from yellowbrick.model_selection import FeatureImportances
from yellowbrick.regressor import ResidualsPlot, PredictionError, CooksDistance
import neptune.new as neptune
def create_regressor_summary(regressor, X_train, X_test, y_train, y_test, nrows=1000, log_charts=True):
"""Create sklearn regressor summary.
This method creates a regressor summary that includes:
* all regressor parameters,
* pickled estimator (model),
* test predictions,
* test scores,
* model performance visualizations.
Returned ``dict`` can be assigned to the run's namespace defined by the user (see example below).
Regressor should be fitted before calling this function.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The regression target for training
y_test (:obj:`ndarray`):
| The regression target for testing
nrows (`int`, optional, default is 1000):
| Log first ``nrows`` rows of test predictions.
log_charts (:bool:, optional, default is ``True``):
| If ``True``, calculate and log chart visualizations.
|
| NOTE: calculating visualizations is potentially expensive depending on input data and regressor, and
| may take some time to finish.
|
| This is equivalent to calling ``log_learning_curve_chart``, ``log_feature_importance_chart``,
| ``log_residuals_chart``, ``log_prediction_error_chart``, ``log_cooks_distance_chart``
| functions from this module.
Returns:
``dict`` with all summary items.
Examples:
Log random forest regressor summary.
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['random_forest/summary'] = npt_utils.create_regressor_summary(rfr, X_train, X_test, y_train, y_test)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
reg_summary = dict()
reg_summary['all_params'] = get_estimator_params(regressor)
reg_summary['pickled_model'] = get_pickled_model(regressor)
y_pred = regressor.predict(X_test)
reg_summary['test'] = {'preds': get_test_preds(regressor, X_test, y_test, y_pred=y_pred, nrows=nrows),
'scores': get_scores(regressor, X_test, y_test, y_pred=y_pred)}
if log_charts:
reg_summary['diagnostics_charts'] = {
'learning_curve': create_learning_curve_chart(regressor, X_train, y_train),
'feature_importance': create_feature_importance_chart(regressor, X_train, y_train),
'residuals': create_residuals_chart(regressor, X_train, X_test, y_train, y_test),
'prediction_error': create_prediction_error_chart(regressor, X_train, X_test, y_train, y_test),
'cooks_distance': create_cooks_distance_chart(regressor, X_train, y_train)}
return reg_summary
def create_classifier_summary(classifier, X_train, X_test, y_train, y_test, nrows=1000, log_charts=True):
"""Create sklearn classifier summary.
This method creates a classifier summary that includes:
* all classifier parameters,
* pickled estimator (model),
* test predictions,
* test predictions probabilities,
* test scores,
* model performance visualizations.
Returned ``dict`` can be assigned to the run's namespace defined by the user (see example below).
Classifier should be fitted before calling this function.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The classification target for training
y_test (:obj:`ndarray`):
| The classification target for testing
nrows (`int`, optional, default is 1000):
| Log first ``nrows`` rows of test predictions and predictions probabilities.
log_charts (:bool:, optional, default is ``True``):
| If True, calculate and send chart visualizations.
|
| NOTE: calculating visualizations is potentially expensive depending on input data and classifier, and
| may take some time to finish.
|
| This is equivalent to calling ``log_classification_report_chart``, ``log_confusion_matrix_chart``,
| ``log_roc_auc_chart``, ``log_precision_recall_chart``, ``log_class_prediction_error_chart``
| functions from this module.
Returns:
``dict`` with all summary items.
Examples:
Log random forest classifier summary.
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['random_forest/summary'] = npt_utils.create_classifier_summary(rfc, X_train, X_test, y_train, y_test)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
cls_summary = dict()
cls_summary['all_params'] = get_estimator_params(classifier)
cls_summary['pickled_model'] = get_pickled_model(classifier)
y_pred = classifier.predict(X_test)
cls_summary['test'] = {'preds': get_test_preds(classifier, X_test, y_test, y_pred=y_pred, nrows=nrows),
'preds_proba': get_test_preds_proba(classifier, X_test, nrows=nrows),
'scores': get_scores(classifier, X_test, y_test, y_pred=y_pred)}
if log_charts:
cls_summary['diagnostics_charts'] = {
'classification_report': create_classification_report_chart(classifier, X_train, X_test, y_train, y_test),
'confusion_matrix': create_confusion_matrix_chart(classifier, X_train, X_test, y_train, y_test),
'ROC_AUC': create_roc_auc_chart(classifier, X_train, X_test, y_train, y_test),
'precision_recall': create_precision_recall_chart(classifier, X_test, y_test),
'class_prediction_error': create_class_prediction_error_chart(classifier, X_train, X_test, y_train, y_test)}
return cls_summary
def create_kmeans_summary(model, X, nrows=1000, **kwargs):
"""Create sklearn kmeans summary.
This method fits a KMeans model to the data and logs:
* all kmeans parameters,
* cluster labels,
* clustering visualizations: KMeans elbow chart and silhouette coefficients chart.
Returned ``dict`` can be assigned to the run's namespace defined by the user (see example below).
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
model (:obj:`KMeans`):
| KMeans object.
X (:obj:`ndarray`):
| Training instances to cluster.
nrows (`int`, optional, default is 1000):
| Number of rows to log in the cluster labels.
kwargs:
KMeans parameters.
Returns:
``dict`` with all summary items.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
km = KMeans(n_init=11, max_iter=270)
X, y = make_blobs(n_samples=579, n_features=17, centers=7, random_state=28743)
run = neptune.init(project='my_workspace/my_project')
run['kmeans/summary'] = npt_utils.create_kmeans_summary(km, X)
"""
assert isinstance(model, KMeans), 'model should be sklearn KMeans instance'
kmeans_summary = dict()
model.set_params(**kwargs)
kmeans_summary['all_params'] = get_estimator_params(model)
kmeans_summary['cluster_labels'] = get_cluster_labels(model, X, nrows=nrows, **kwargs)
kmeans_summary['diagnostics_charts'] = {
'kelbow': create_kelbow_chart(model, X, **kwargs),
'silhouette': create_silhouette_chart(model, X, **kwargs)}
return kmeans_summary
def get_estimator_params(estimator):
"""Get estimator parameters.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
estimator (:obj:`estimator`):
| Scikit-learn estimator from which to log parameters.
Returns:
``dict`` with all parameters mapped to their values.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
run = neptune.init(project='my_workspace/my_project')
run['estimator/params'] = npt_utils.get_estimator_params(rfr)
"""
assert is_regressor(estimator) or is_classifier(estimator) or isinstance(estimator, KMeans),\
'Estimator should be sklearn regressor, classifier or kmeans clusterer.'
return estimator.get_params()
def get_pickled_model(estimator):
"""Get pickled estimator.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
estimator (:obj:`estimator`):
| Scikit-learn estimator to pickle.
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
run = neptune.init(project='my_workspace/my_project')
run['estimator/pickled_model'] = npt_utils.get_pickled_model(rfr)
"""
assert is_regressor(estimator) or is_classifier(estimator),\
'Estimator should be sklearn regressor or classifier.'
return neptune.types.File.as_pickle(estimator)
def get_test_preds(estimator, X_test, y_test, y_pred=None, nrows=1000):
"""Get test predictions.
If you pass ``y_pred``, then predictions are not computed from ``X_test`` data.
Estimator should be fitted before calling this function.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
estimator (:obj:`estimator`):
| Scikit-learn estimator to compute predictions.
X_test (:obj:`ndarray`):
| Testing data matrix.
y_test (:obj:`ndarray`):
| Target for testing.
y_pred (:obj:`ndarray`, optional, default is ``None``):
| Estimator predictions on test data.
nrows (`int`, optional, default is 1000):
| Number of rows to log.
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
run = neptune.init(project='my_workspace/my_project')
run['test/preds'] = npt_utils.get_test_preds(rfr, X_test, y_test)
"""
assert is_regressor(estimator) or is_classifier(estimator),\
'Estimator should be sklearn regressor or classifier.'
assert isinstance(nrows, int), 'nrows should be integer, {} was passed'.format(type(nrows))
preds = None
if y_pred is None:
y_pred = estimator.predict(X_test)
# single output
if len(y_pred.shape) == 1:
df = pd.DataFrame(data={'y_true': y_test, 'y_pred': y_pred})
df = df.head(n=nrows)
preds = neptune.types.File.as_html(df)
# multi output
if len(y_pred.shape) == 2:
df = | pd.DataFrame() | pandas.DataFrame |
import requests
import json
import pandas
# To check login status
loggedin = 0
baseurl = 'http://127.0.0.1:8000/api'
# baseurl = 'http://sc17crk.pythonanywhere.com/api'
def main():
while True:
print("\n\nPlease select the command to execute.")
print("1. To register, type 'register'")
print("2. To login, type 'login url', with required argument(s).")
print("3. To logout, type 'logout'")
print("4. To view a list of all module instances and the professors, type 'list'")
print("5. To view the rating of all professors, type 'view'")
print(
"6. To view the average rating of a certain professor in a certain module, type 'average professor_id module_code' with required argument(s).")
print(
"7. To rate the teaching of a certain professor in a cetain module, type 'rate professor_id module_code year semester rating'with required argument(s).")
print("\n")
option = input("Please select the menu : ")
userinput = option.split()
if userinput[0] == 'register':
register()
elif userinput[0] == 'login':
if len(userinput) == 2:
login(userinput[1])
else:
print('Specify login url.')
elif userinput[0] == 'logout':
logout()
elif userinput[0] == 'list':
list()
elif userinput[0] == 'view':
view()
elif userinput[0] == 'average':
if len(userinput) == 3:
average(userinput[1], userinput[2])
else:
print('Specify the professor ID and module code.')
elif userinput[0] == 'rate':
if len(userinput) == 6:
rate(userinput[1], userinput[2], userinput[3], userinput[4], userinput[5])
else:
print('Specify the professor ID, module code, year, semester and rating.')
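# Example commands for the two parameterised options above (the professor ID and
# module code values here are invented for illustration):
#   average JE1 CD1
#   rate JE1 CD1 2018 1 5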
def register():
session = requests.Session()
# prompt the user for details
username = input("Enter username : ")
email = input("Enter email : ")
password = input("Enter password : ")
# send a request to api along with data
# url = 'http://sc17crk.pythonanywhere.com/api/register/'
url = baseurl + '/register/'
post_data = {
'username': username,
'email': email,
'password': password
}
# send a request to api
r = session.post(url, data=post_data)
# print(r.status_code)
print(r.content)
def login(user_url):
global loggedin
session = requests.Session()
# prompt the user for details
username = input("Enter username : ")
password = input("Enter password : ")
# send a request to api along with data
url = user_url+'api/login/'
post_data = {
'username': username,
'password': password
}
# send a request to api
r = session.post(url, data=post_data)
if r.status_code == 200:
loggedin = 1
print(r.content)
# print(r.status_code)
# send logout request
def logout():
global loggedin
session = requests.Session()
url = baseurl + '/logout/'
r = session.get(url)
loggedin = 0
# print(r.status_code)
print(r.content)
def list():
session = requests.Session()
# sending request
url = baseurl + '/list/'
r = session.get(url)
# parsing objects
parsed = json.loads(r.text)
module_list = parsed['module_list']
print_list = []
for i in module_list:
moduleobjects = {
'Code': i.get('module_code'),
'Name': i.get('name'),
'Year': i.get('year'),
'Semester': i.get('semester'),
'Professor': i.get('professor_id') + ', Professor ' + i.get('first_name') + '. ' + i.get('last_name')
}
print_list.append(moduleobjects)
print("=" * 80)
print( | pandas.DataFrame(print_list) | pandas.DataFrame |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
import numpy as np
import warnings
from pandas.core import common as com
from pandas.types.common import (is_integer,
is_float,
is_object_dtype,
is_integer_dtype,
is_float_dtype,
is_scalar,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_period_dtype,
is_bool_dtype,
pandas_dtype,
_ensure_int64,
_ensure_object)
from pandas.types.dtypes import PeriodDtype
from pandas.types.generic import ABCSeries
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
import pandas._period as period
from pandas._period import (Period, IncompatibleFrequency,
get_period_field_arr, _validate_end_alias,
_quarter_to_myear)
from pandas.core.base import _shared_docs
from pandas.indexes.base import _index_shared_docs, _ensure_index
from pandas import compat
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate_kwarg)
from pandas.lib import infer_dtype
import pandas.tslib as tslib
from pandas.compat import zip, u
import pandas.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(target_klass='PeriodIndex or list of Periods'))
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
return get_period_field_arr(alias, self._values, base)
f.__name__ = name
f.__doc__ = docstring
return property(f)
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX
def _period_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self._values, opname)
other_base, _ = _gfc(other.freq)
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = getattr(self._values, opname)(other._values)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is tslib.NaT:
result = np.empty(len(self._values), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
func = getattr(self._values, opname)
result = func(other.ordinal)
if self.hasnans:
result[self._isnan] = nat_result
return result
return wrapper
class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
dtype : str or PeriodDtype, default None
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
_typ = 'periodindex'
_attributes = ['name', 'freq']
_datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'weekday',
'dayofyear', 'quarter', 'qyear', 'freq',
'days_in_month', 'daysinmonth',
'to_timestamp', 'asfreq', 'start_time', 'end_time',
'is_leap_year']
_is_numeric_dtype = False
_infer_as_myclass = True
freq = None
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__', nat_result=True)
__lt__ = _period_index_cmp('__lt__')
__gt__ = _period_index_cmp('__gt__')
__le__ = _period_index_cmp('__le__')
__ge__ = _period_index_cmp('__ge__')
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, copy=False, name=None, tz=None, dtype=None,
**kwargs):
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if name is None and hasattr(data, 'name'):
name = data.name
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError('dtype must be PeriodDtype')
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
msg = 'specified freq and dtype are different'
raise IncompatibleFrequency(msg)
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
data, freq = cls._generate_range(start, end, periods,
freq, kwargs)
else:
ordinal, freq = cls._from_arraylike(data, freq, tz)
data = np.array(ordinal, dtype=np.int64, copy=copy)
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
@classmethod
def _from_arraylike(cls, data, freq, tz):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
if not isinstance(data, (np.ndarray, PeriodIndex,
DatetimeIndex, Int64Index)):
if is_scalar(data) or isinstance(data, Period):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = _ensure_int64(data)
if freq is None:
raise ValueError('freq not specified')
data = np.array([Period(x, freq=freq) for x in data],
dtype=np.int64)
except (TypeError, ValueError):
data = _ensure_object(data)
if freq is None:
freq = period.extract_freq(data)
data = period.extract_ordinals(data, freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data._values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = period.period_asfreq_arr(data._values,
base1, base2, 1)
else:
if is_object_dtype(data):
inferred = infer_dtype(data)
if inferred == 'integer':
data = data.astype(np.int64)
if freq is None and is_object_dtype(data):
# must contain Period instance and thus extract ordinals
freq = period.extract_freq(data)
data = period.extract_ordinals(data, freq)
if freq is None:
msg = 'freq not specified and cannot be inferred'
raise ValueError(msg)
if data.dtype != np.int64:
if np.issubdtype(data.dtype, np.datetime64):
data = dt64arr_to_periodarr(data, freq, tz)
else:
data = _ensure_object(data)
data = period.extract_ordinals(data, freq)
return data, freq
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
if not is_integer_dtype(values):
values = np.array(values, copy=False)
if (len(values) > 0 and is_float_dtype(values)):
raise TypeError("PeriodIndex can't take floats")
else:
return cls(values, name=name, freq=freq, **kwargs)
values = np.array(values, dtype='int64', copy=False)
result = object.__new__(cls)
result._data = values
result.name = name
if freq is None:
raise ValueError('freq is not specified')
result.freq = Period._maybe_convert_freq(freq)
result._reset_identity()
return result
def _shallow_copy_with_infer(self, values=None, **kwargs):
""" we always want to return a PeriodIndex """
return self._shallow_copy(values=values, **kwargs)
def _shallow_copy(self, values=None, **kwargs):
if kwargs.get('freq') is None:
# freq must be provided
kwargs['freq'] = self.freq
if values is None:
values = self._values
return super(PeriodIndex, self)._shallow_copy(values=values, **kwargs)
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
return PeriodIndex([item], **self._get_attributes_dict())
def __contains__(self, key):
if isinstance(key, Period):
if key.freq != self.freq:
return False
else:
return key.ordinal in self._engine
else:
try:
self.get_loc(key)
return True
except Exception:
return False
return False
@property
def asi8(self):
return self._values.view('i8')
@cache_readonly
def _int64index(self):
return Int64Index(self.asi8, name=self.name, fastpath=True)
@property
def values(self):
return self.asobject.values
@property
def _values(self):
return self._data
def __array__(self, dtype=None):
if is_integer_dtype(dtype):
return self.asi8
else:
return self.asobject.values
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc. Needs additional handling as
PeriodIndex stores internal data as int dtype
Replace this to __numpy_ufunc__ in future version
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
if (func is np.add):
pass
elif (func is np.subtract):
name = self.name
left = context[1][0]
right = context[1][1]
if (isinstance(left, PeriodIndex) and
isinstance(right, PeriodIndex)):
name = left.name if left.name == right.name else None
return Index(result, name=name)
elif isinstance(left, Period) or isinstance(right, Period):
return Index(result, name=name)
elif isinstance(func, np.ufunc):
if 'M->M' not in func.types:
msg = "ufunc '{0}' not supported for the PeriodIndex"
# This should be TypeError, but TypeError cannot be raised
# from here because numpy catches.
raise ValueError(msg.format(func.__name__))
if is_bool_dtype(result):
return result
# the result is object dtype array of Period
# cannot pass _simple_new as it is
return PeriodIndex(result, freq=self.freq, name=self.name)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
"""
return self.asobject.values
@property
def _formatter_func(self):
return lambda x: "'%s'" % x
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self._values[mask].searchsorted(where_idx._values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx._values < self._values[first])] = -1
return result
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True, how='start'):
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif is_integer_dtype(dtype):
if copy:
return self._int64index.copy()
else:
return self._int64index
elif is_datetime64_dtype(dtype):
return self.to_timestamp(how=how)
elif is_datetime64tz_dtype(dtype):
return self.to_timestamp(how=how).tz_localize(dtype.tz)
elif is_period_dtype(dtype):
return self.asfreq(freq=dtype.freq)
raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype)
@Substitution(klass='PeriodIndex')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, Period):
if value.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, value.freqstr)
raise IncompatibleFrequency(msg)
value = value.ordinal
elif isinstance(value, compat.string_types):
value = Period(value, freq=self.freq).ordinal
return self._values.searchsorted(value, side=side, sorter=sorter)
@property
def is_all_dates(self):
return True
@property
def is_full(self):
"""
Returns True if there are no missing periods from start to end
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError('Index is not monotonic')
values = self.values
return ((values[1:] - values[:-1]) < 2).all()
def asfreq(self, freq=None, how='E'):
"""
Convert the PeriodIndex to the specified frequency `freq`.
Parameters
----------
freq : str
a frequency
how : str {'E', 'S'}
'E', 'END', or 'FINISH' for end,
'S', 'START', or 'BEGIN' for start.
Whether the elements should be aligned to the end
or start within the period. January 31st ('END') vs.
January 1st ('START') for example.
Returns
-------
new : PeriodIndex with the new frequency
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
<class 'pandas.tseries.period.PeriodIndex'>
[2010, ..., 2015]
Length: 6, Freq: A-DEC
>>> pidx.asfreq('M')
<class 'pandas.tseries.period.PeriodIndex'>
[2010-12, ..., 2015-12]
Length: 6, Freq: M
>>> pidx.asfreq('M', how='S')
<class 'pandas.tseries.period.PeriodIndex'>
[2010-01, ..., 2015-01]
Length: 6, Freq: M
"""
how = _validate_end_alias(how)
freq = Period._maybe_convert_freq(freq)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
asi8 = self.asi8
# mult1 can't be negative or 0
end = how == 'E'
if end:
ordinal = asi8 + mult1 - 1
else:
ordinal = asi8
new_data = period.period_asfreq_arr(ordinal, base1, base2, end)
if self.hasnans:
new_data[self._isnan] = tslib.iNaT
return self._simple_new(new_data, self.name, freq=freq)
def to_datetime(self, dayfirst=False):
"""
DEPRECATED: use :meth:`to_timestamp` instead.
Cast to DatetimeIndex.
"""
warnings.warn("to_datetime is deprecated. Use self.to_timestamp(...)",
FutureWarning, stacklevel=2)
return self.to_timestamp()
year = _field_accessor('year', 0, "The year of the period")
month = _field_accessor('month', 3, "The month as January=1, December=12")
day = _field_accessor('day', 4, "The days of the period")
hour = _field_accessor('hour', 5, "The hour of the period")
minute = _field_accessor('minute', 6, "The minute of the period")
second = _field_accessor('second', 7, "The second of the period")
weekofyear = _field_accessor('week', 8, "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 10,
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = day_of_year = _field_accessor('dayofyear', 9,
"The ordinal day of the year")
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
days_in_month = _field_accessor('days_in_month', 11,
"The number of days in the month")
daysinmonth = days_in_month
@property
def is_leap_year(self):
""" Logical indicating if the date belongs to a leap year """
return tslib._isleapyear_arr(self.year)
@property
def start_time(self):
return self.to_timestamp(how='start')
@property
def end_time(self):
return self.to_timestamp(how='end')
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.asobject.values
def to_timestamp(self, freq=None, how='start'):
"""
Cast to DatetimeIndex
Parameters
----------
freq : string or DateOffset, default 'D' for week or longer, 'S'
otherwise
Target frequency
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeIndex
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = frequencies.get_to_timestamp_base(base)
else:
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
new_data = self.asfreq(freq, how)
new_data = period.periodarr_to_dt64arr(new_data._values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
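# Illustrative usage (values chosen for demonstration):
#   pd.PeriodIndex(['2016-01', '2016-02', '2016-03'], freq='M').to_timestamp(how='start')
#   gives DatetimeIndex(['2016-01-01', '2016-02-01', '2016-03-01'], freq='MS')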
def _maybe_convert_timedelta(self, other):
if isinstance(other, (timedelta, np.timedelta64, offsets.Tick)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
if nanos % offset_nanos == 0:
return nanos // offset_nanos
elif isinstance(other, offsets.DateOffset):
freqstr = other.rule_code
base = frequencies.get_base_alias(freqstr)
if base == self.freq.rule_code:
return other.n
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
elif isinstance(other, np.ndarray):
if is_integer_dtype(other):
return other
elif is_timedelta64_dtype(other):
offset = frequencies.to_offset(self.freq)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
if (nanos % offset_nanos == 0).all():
return nanos // offset_nanos
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
msg = "Input has different freq from PeriodIndex(freq={0})"
raise IncompatibleFrequency(msg.format(self.freqstr))
def _add_delta(self, other):
ordinal_delta = self._maybe_convert_timedelta(other)
return self.shift(ordinal_delta)
def _sub_datelike(self, other):
if other is tslib.NaT:
new_data = np.empty(len(self), dtype=np.int64)
new_data.fill(tslib.iNaT)
return TimedeltaIndex(new_data, name=self.name)
return NotImplemented
def _sub_period(self, other):
if self.freq != other.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
asi8 = self.asi8
new_data = asi8 - other.ordinal
if self.hasnans:
new_data = new_data.astype(np.float64)
new_data[self._isnan] = np.nan
# result must be Int64Index or Float64Index
return Index(new_data, name=self.name)
def shift(self, n):
"""
Specialized shift which produces an PeriodIndex
Parameters
----------
n : int
Periods to shift by
Returns
-------
shifted : PeriodIndex
"""
values = self._values + n * self.freq.n
if self.hasnans:
values[self._isnan] = tslib.iNaT
return PeriodIndex(data=values, name=self.name, freq=self.freq)
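# Illustrative usage (values chosen for demonstration):
#   pd.PeriodIndex(['2016-01', '2016-02'], freq='M').shift(1)
#   gives PeriodIndex(['2016-02', '2016-03'], freq='M')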
@cache_readonly
def dtype(self):
return PeriodDtype.construct_from_string(self.freq)
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return 'period'
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = com._values_from_object(series)
try:
return com._maybe_box(self,
super(PeriodIndex, self).get_value(s, key),
series, key)
except (KeyError, IndexError):
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
grp = frequencies.Resolution.get_freq_group(reso)
freqn = frequencies.get_freq_group(self.freq)
vals = self._values
# if our data is higher resolution than requested key, slice
if grp < freqn:
iv = Period(asdt, freq=(grp, 1))
ord1 = iv.asfreq(self.freq, how='S').ordinal
ord2 = iv.asfreq(self.freq, how='E').ordinal
if ord2 < vals[0] or ord1 > vals[-1]:
raise KeyError(key)
pos = np.searchsorted(self._values, [ord1, ord2])
key = slice(pos[0], pos[1] + 1)
return series[key]
elif grp == freqn:
key = Period(asdt, freq=self.freq).ordinal
return com._maybe_box(self, self._engine.get_value(s, key),
series, key)
else:
raise KeyError(key)
except TypeError:
pass
key = Period(key, self.freq).ordinal
return com._maybe_box(self, self._engine.get_value(s, key),
series, key)
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
target = _ensure_index(target)
if hasattr(target, 'freq') and target.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, target.freqstr)
raise IncompatibleFrequency(msg)
if isinstance(target, PeriodIndex):
target = target.asi8
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance)
return Index.get_indexer(self._int64index, target, method,
limit, tolerance)
def _get_unique_index(self, dropna=False):
"""
wrap Index._get_unique_index to handle NaT
"""
res = super(PeriodIndex, self)._get_unique_index(dropna=dropna)
if dropna:
res = res.dropna()
return res
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
if | is_integer(key) | pandas.types.common.is_integer |
"""Backtester"""
from copy import deepcopy
import unittest
import pandas as pd
import pytest
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.preprocessing import StandardScaler
from soam.constants import (
ANOMALY_PLOT,
DS_COL,
FIG_SIZE,
MONTHLY_TIME_GRANULARITY,
PLOT_CONFIG,
Y_COL,
)
from soam.models.prophet import SkProphet
from soam.plotting.forecast_plotter import ForecastPlotterTask
from soam.workflow import (
Backtester,
BaseDataFrameTransformer,
Forecaster,
Transformer,
compute_metrics,
)
from soam.workflow.backtester import METRICS_KEYWORD, PLOT_KEYWORD, RANGES_KEYWORD
from tests.helpers import sample_data_df # pylint: disable=unused-import
def test_compute_metrics():
"""Function to compute performance metrics."""
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]
expected_output = {'mae': 0.5, 'mse': 0.375}
output = compute_metrics(y_true, y_pred, metrics)
unittest.TestCase().assertDictEqual(expected_output, output)
class SimpleProcessor(BaseDataFrameTransformer):
"""Create a Simple Processor object."""
def __init__(self, **fit_params): # pylint:disable=super-init-not-called
self.preproc = StandardScaler(**fit_params)
def fit(self, df_X):
self.preproc.fit(df_X[Y_COL].values.reshape(-1, 1))
return self
def transform(self, df_X, inplace=True):
if not inplace:
df_X = df_X.copy()
df_X[Y_COL] = self.preproc.transform(df_X[Y_COL].values.reshape(-1, 1)) + 10
return df_X
def assert_backtest_fold_result_common_checks(rv, ranges=None, plots=None):
"""Backtest fold result common checks assertion."""
assert tuple(rv) == (RANGES_KEYWORD, METRICS_KEYWORD, PLOT_KEYWORD)
assert rv[RANGES_KEYWORD] == ranges
assert rv[PLOT_KEYWORD].name == plots
def assert_backtest_fold_result(rv, ranges=None, metrics=None, plots=None):
"""Backtest fold result assertion."""
assert_backtest_fold_result_common_checks(rv, ranges=ranges, plots=plots)
for metric_name, values in metrics.items():
assert metric_name in rv[METRICS_KEYWORD]
if isinstance(values, dict):
for measure_name, value in values.items():
assert value, pytest.approx(rv[METRICS_KEYWORD][measure_name], 0.01)
else:
assert values, pytest.approx(rv[METRICS_KEYWORD][metric_name], 0.01)
def assert_backtest_all_folds_result(rvs, expected_values):
"""Backtest all fold result assertion."""
assert len(rvs) == len(expected_values)
for rv, evs in zip(rvs, expected_values):
assert_backtest_fold_result(rv, **evs)
def assert_backtest_fold_result_aggregated(rv, ranges=None, metrics=None, plots=None):
"""Backtest fold result aggregated assertion."""
assert_backtest_fold_result_common_checks(rv, ranges=ranges, plots=plots)
output_metrics = pd.DataFrame(rv[METRICS_KEYWORD])
expected_metrics = pd.DataFrame(metrics)
pd.testing.assert_frame_equal(output_metrics, expected_metrics, rtol=1e-1)
def assert_backtest_all_folds_result_aggregated(rvs, expected_values):
"""Backtest all fold result aggregated assertion."""
assert len(rvs) == len(expected_values)
for rv, evs in zip(rvs, expected_values):
assert_backtest_fold_result_aggregated(rv, **evs)
def test_integration_backtester_single_fold(
tmp_path, sample_data_df
): # pylint: disable=redefined-outer-name
"""Backtest single fold integration test."""
test_window = 10
train_data = sample_data_df
forecaster = Forecaster(model=SkProphet(), output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2015-07-01 00:00:00'),
pd.Timestamp('2016-05-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 0.19286372252777645, 'mse': 0.07077117049346579},
'plots': '0_forecast_2013020100_2015080100_.png',
},
]
assert_backtest_all_folds_result(rvs, expected_values)
def test_integration_backtester_multi_fold(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2015-07-01 00:00:00'),
pd.Timestamp('2018-01-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 1.140921182444867, 'mse': 2.4605768804352675},
'plots': '0_forecast_2013020100_2015080100_.png',
},
{
RANGES_KEYWORD: (
pd.Timestamp('2015-08-01 00:00:00'),
pd.Timestamp('2018-01-01 00:00:00'),
pd.Timestamp('2020-07-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 1.600049020613293, 'mse': 4.383723067139095},
'plots': '0_forecast_2015080100_2018020100_.png',
},
{
RANGES_KEYWORD: (
pd.Timestamp('2018-02-01 00:00:00'),
pd.Timestamp('2020-07-01 00:00:00'),
pd.Timestamp('2023-01-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 3.1358162976127217, 'mse': 12.666965373730687},
'plots': '0_forecast_2018020100_2020080100_.png',
},
]
assert_backtest_all_folds_result(rvs, expected_values)
# TODO: It may be a good visual aggregation to include all metrics in one plot. This
# TODO: is not possible with the current implementation.
def test_integration_backtester_multi_fold_default_aggregation(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold default aggregation integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
aggregation="default",
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2023-01-01 00:00:00'),
),
METRICS_KEYWORD: {
'mae': {
'avg': 2.0269522786354313,
'max': 3.135813436023453,
'min': 1.344995687583762,
},
'mse': {
'avg': 6.761216280050696,
'max': 12.666927167728852,
'min': 3.233004063171241,
},
},
'plots': '0_forecast_2018020100_2020080100_.png',
}
]
assert_backtest_all_folds_result_aggregated(rvs, expected_values)
def test_integration_backtester_multi_fold_custom_aggregations(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold custom aggregation integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
aggregation = {
METRICS_KEYWORD: {
"weighted_begining": lambda metrics_list: (
sum(
[
3 * val if idx == 0 else val
for idx, val in enumerate(metrics_list)
]
)
/ (len(metrics_list) + 2)
),
"weighted_ending": lambda metrics_list: (
sum(
[
3 * val if idx == len(metrics_list) - 1 else val
for idx, val in enumerate(metrics_list)
]
)
/ (len(metrics_list) + 2)
),
},
PLOT_KEYWORD: 1,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
aggregation=aggregation,
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2023-01-01 00:00:00'),
),
METRICS_KEYWORD: {
'mae': {
'weighted_begining': 1.631725773112123,
'weighted_ending': 2.4296838191792647,
},
'mse': {
'weighted_begining': 4.886483816435117,
'weighted_ending': 8.969039213753284,
},
},
'plots': '0_forecast_2015080100_2018020100_.png',
}
]
assert_backtest_all_folds_result_aggregated(rvs, expected_values)
def test_integration_backtester_multi_fold_custom_metric_aggregation_default_plot(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold custom metric aggregation default plot integration test."""
test_window = 30
train_data = | pd.concat([sample_data_df] * 3) | pandas.concat |
import math
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotly.offline import plot
import networkx as nx
from parsl.monitoring.visualization.utils import timestamp_to_int, num_to_timestamp, DB_DATE_FORMAT
def task_gantt_plot(df_task, df_status, time_completed=None):
# if the workflow is not recorded as completed, then assume
# that tasks should continue in their last state until now,
# rather than the workflow end time.
if not time_completed:
time_completed = df_status['timestamp'].max()
df_task = df_task.sort_values(by=['task_id'], ascending=False)
parsl_tasks = []
for i, task in df_task.iterrows():
task_id = task['task_id']
description = "Task ID: {}, app: {}".format(task['task_id'], task['task_func_name'])
statuses = df_status.loc[df_status['task_id'] == task_id].sort_values(by=['timestamp'])
last_status = None
for j, status in statuses.iterrows():
if last_status is not None:
last_status_bar = {'Task': description,
'Start': last_status['timestamp'],
'Finish': status['timestamp'],
'Resource': last_status['task_status_name']
}
parsl_tasks.extend([last_status_bar])
last_status = status
# TODO: factor with above?
if last_status is not None:
last_status_bar = {'Task': description,
'Start': last_status['timestamp'],
'Finish': time_completed,
'Resource': last_status['task_status_name']
}
parsl_tasks.extend([last_status_bar])
# colours must assign a colour value for every state name defined
# in parsl/dataflow/states.py
colors = {'unsched': 'rgb(240, 240, 240)',
'pending': 'rgb(168, 168, 168)',
'launched': 'rgb(100, 255, 255)',
'running': 'rgb(0, 0, 255)',
'dep_fail': 'rgb(255, 128, 255)',
'failed': 'rgb(200, 0, 0)',
'exec_done': 'rgb(0, 200, 0)',
'memo_done': 'rgb(64, 200, 64)',
'fail_retryable': 'rgb(200, 128,128)'
}
fig = ff.create_gantt(parsl_tasks,
title="",
colors=colors,
group_tasks=True,
show_colorbar=True,
index_col='Resource',
)
fig['layout']['yaxis']['title'] = 'Task'
fig['layout']['yaxis']['showticklabels'] = False
fig['layout']['xaxis']['title'] = 'Time'
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
def task_per_app_plot(task, status):
try:
task['epoch_time_running'] = (pd.to_datetime(
task['task_try_time_running']) - pd.Timestamp("1970-01-01")) // | pd.Timedelta('1s') | pandas.Timedelta |
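# A minimal sketch of the epoch-seconds idiom used above; the frame and column
# names below are invented for illustration only.
import pandas as pd
demo = pd.DataFrame({'task_try_time_running': ['2021-01-01 00:00:05']})
epoch_s = (pd.to_datetime(demo['task_try_time_running']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
# epoch_s now holds whole seconds since the Unix epoch (1609459205 for this row).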
import datetime
import os
import numpy as np
import pandas as pd
import pytz
from tqdm import tqdm
import bb_utils
from bb_utils.meta import BeeMetaInfo
def load_circadian_df(curta_scratch_path):
import slurmhelper
META = BeeMetaInfo()
data = slurmhelper.SLURMJob(
"circadiansine5", os.path.join(curta_scratch_path, "dormagen", "slurm")
)
circadian_df = []
for kwargs, results in tqdm(data.items(ignore_open_jobs=True)):
if results is None:
continue
for (subsample, date, bee_id), bee_data in results.items():
bee_row = dict()
def add_dict(d, prefix=""):
for key, val in d.items():
if type(val) is not dict:
bee_row[prefix + key] = val
if key == "parameters":
if len(val) == 3:
amplitude, phase, offset = val
bee_row[prefix + "amplitude"] = amplitude
bee_row[prefix + "phase"] = phase
bee_row[prefix + "offset"] = offset
bee_row[prefix + "base_activity"] = offset - abs(amplitude)
elif len(val) == 2:
amplitude, phase = val
bee_row[prefix + "amplitude"] = amplitude
bee_row[prefix + "phase"] = phase
if key == "constant_parameters":
mean = val[0]
bee_row[prefix + "mean"] = mean
continue
else:
add_dict(val, prefix=prefix + key + "_")
add_dict(bee_data)
circadian_df.append(bee_row)
circadian_df = pd.DataFrame(circadian_df)
circadian_df.describe()
circadian_df.subsample.fillna(0, inplace=True)
circadian_df = circadian_df[circadian_df.date < datetime.datetime(2016, 9, 1, tzinfo=pytz.UTC)]
circadian_df["is_good_fit"] = (circadian_df.goodness_of_fit > 0.1).astype(np.float)
circadian_df["is_circadian"] = (circadian_df.p_value < 0.05).astype(np.float)
circadian_df["well_tested_circadianess"] = circadian_df.is_circadian * circadian_df.is_good_fit
circadian_df = circadian_df[~pd.isnull(circadian_df.amplitude)]
circadian_df = circadian_df[~ | pd.isnull(circadian_df.r_squared) | pandas.isnull |
import pandas as pd
import ast
import sys
import os.path
from pandas.core.algorithms import isin
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import dateutil.parser as parser
from utils.mysql_utils import separator
from utils.io import read_json
from utils.scraping_utils import remove_html_tags
from utils.user_utils import infer_role
from graph.arango_utils import *
import pgeocode
def cast_to_float(v):
try:
return float(v)
except ValueError:
return v
def convert_to_iso8601(text):
date = parser.parse(text)
return date.isoformat()
def load_member_summaries(
source_dir="data_for_graph/members",
filename="company_check",
# concat_uk_sector=False
):
'''
LOAD FLAT FILES OF MEMBER DATA
'''
dfs = []
for membership_level in ("Patron", "Platinum", "Gold", "Silver", "Bronze", "Digital", "Freemium"):
summary_filename = os.path.join(source_dir, membership_level, f"{membership_level}_{filename}.csv")
print ("reading summary from", summary_filename)
dfs.append(pd.read_csv(summary_filename, index_col=0).rename(columns={"database_id": "id"}))
summaries = pd.concat(dfs)
# if concat_uk_sector:
# member_uk_sectors = pd.read_csv(f"{source_dir}/members_to_sector.csv", index_col=0)
# # for col in ("sectors", "divisions", "groups", "classes"):
# # member_uk_sectors[f"UK_{col}"] = member_uk_sectors[f"UK_{col}"].map(ast.literal_eval)
# summaries = summaries.join(member_uk_sectors, on="member_name", how="left")
return summaries
def populate_sectors(
source_dir="data_for_graph",
db=None):
'''
CREATE AND ADD SECTOR (AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Sectors", db)
sectors = pd.read_csv(f"{source_dir}/all_sectors.csv", index_col=0)
i = 0
for _, row in sectors.iterrows():
sector_name = row["sector_name"]
print ("creating document for sector", sector_name)
document = {
"_key": str(i),
"name": sector_name,
"sector_name": sector_name,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_commerces(
data_dir="data_for_graph",
db=None):
'''
CREATE AND ADD COMMERCE (AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Commerces", db)
commerces = pd.read_csv(f"{data_dir}/all_commerces_with_categories.csv", index_col=0)
commerces = commerces.drop_duplicates("commerce_name")
i = 0
for _, row in commerces.iterrows():
commerce = row["commerce_name"]
category = row["commerce_category"]
print ("creating document for commerce", commerce)
document = {
"_key": str(i),
"name": commerce,
"commerce": commerce,
"category": category,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_members(
cols_of_interest=[
"id",
"member_name",
"website",
"about_company",
"membership_level",
"tenancies",
"badges",
"accreditations",
"sectors", # add to member as list
"buys",
"sells",
"sic_codes",
"directors",
"Cash_figure",
"NetWorth_figure",
"TotalCurrentAssets_figure",
"TotalCurrentLiabilities_figure",
],
db=None):
'''
CREATE AND POPULATE MEMBER NODES
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Members", db, )
members = load_member_summaries()
members = members[cols_of_interest]
members = members.drop_duplicates("member_name") # ensure no accidental duplicates
members = members.loc[~pd.isnull(members["tenancies"])]
members["about_company"] = members["about_company"].map(remove_html_tags, na_action="ignore")
members = members.sort_values("member_name")
i = 0
for _, row in members.iterrows():
member_name = row["member_name"]
if pd.isnull(member_name):
continue
document = {
"_key" : str(i),
"name": member_name,
**{
k: (row[k].split(separator) if not pd.isnull(row[k]) and k in {"sectors", "buys", "sells"}
else ast.literal_eval(row[k]) if not pd.isnull(row[k]) and k in {
"UK_sectors",
"UK_divisions",
"UK_groups",
"UK_classes",
"sic_codes",
"directors",
}
else cast_to_float(row[k]) if k in {"Cash_figure","NetWorth_figure","TotalCurrentAssets_figure","TotalCurrentLiabilities_figure"}
else row[k] if not pd.isnull(row[k])
else None)
for k in cols_of_interest
},
}
if not pd.isnull(row["directors"]):
directors_ = ast.literal_eval(row["directors"])
directors = []
for director in directors_:
if pd.isnull(director["director_name"]):
continue
if not pd.isnull(director["director_date_of_birth"]):
director["director_date_of_birth"] = insert_space(director["director_date_of_birth"], 3)
directors.append(director)
else:
directors = []
document["directors"] = directors
assert not | pd.isnull(row["tenancies"]) | pandas.isnull |
import pandas as pd
import time
from collections import OrderedDict
class StatsOperation(object):
SUB = "substract"
ADD = "add"
class StatsCollectorException(Exception):
pass
class StatsCollector(object):
def __init__(self, iters=None, disable_instance=False):
self._iters = iters
self._iteration_mode = True if iters is not None else False
self._enabled = False
self._iter_dict = OrderedDict()
self._stats_df = None
self._report_cols = []
self._disable_instance = disable_instance
self._report_tuples = []
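# Inferred from disable() below: each entry of self._report_tuples is a 4-tuple
# (derived_key, left_key, StatsOperation, right_key); for example
# ("fit_time", "fit_end", StatsOperation.SUB, "fit_start") records
# iter_dict["fit_end"] - iter_dict["fit_start"] under "fit_time". The key names in
# this example are invented for illustration.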
def enable(self):
if self._disable_instance:
return
self._iter_dict.clear()
self._enabled = True
def disable(self):
if self._disable_instance:
return
for tup in self._report_tuples:
if tup[2] == StatsOperation.SUB:
self._iter_dict[tup[0]] = self._iter_dict[tup[1]] - self._iter_dict[tup[3]]
elif tup[2] == StatsOperation.ADD:
self._iter_dict[tup[0]] = self._iter_dict[tup[1]] + self._iter_dict[tup[3]]
if self._stats_df is None:
self._stats_df = | pd.DataFrame(self._iter_dict, index=[0]) | pandas.DataFrame |
import streamlit as st
from src.implem.orchester import AdmissionGroup
from src.implem.plots import DistPlots
from pandas import DataFrame, concat
from .utils import EXP_UNIT_GROUPS, create_batches, show_ledger
def show_coparison_params():
col1, col2 = st.columns(2)
with col1:
batch_size = st.number_input(
'admission batch size', value=30, min_value=1,
max_value=100
)
with col2:
bootstrap_n_iterations = st.number_input('number of iterations', value=100, min_value=1)
return bootstrap_n_iterations, batch_size
def calc_ward_ledger(ward_num, admissions_init: int, iterations_init: int, bootstrap_n_iterations: int, batch_size: int, roll):
admissions = AdmissionGroup(ward_num)
admissions.calc_init_parameters(
iterations_init=iterations_init,
admissions_init=admissions_init,
)
create_batches(
admissions,
admission_add=0,
batch_size=batch_size,
batches_cnt=bootstrap_n_iterations
)
df = DataFrame(admissions.ledger).T
df["ward_num"] = ward_num
df["rolling_sd"] = df["estimated_mean"].rolling(roll).std()
df["rolling_mean"] = df["estimated_mean"].rolling(roll).mean()
df["rolling_cv"] = df["rolling_sd"] / df["rolling_mean"] * 100
admissions_size = len(admissions.admissions)
return df.query("admissions_total_size < @admissions_size")
def ward_comparison(admissions_init: int, iterations_init: int):
ledger_df = | DataFrame() | pandas.DataFrame |
#Python wrapper / library for Einstein Analytics API
import sys
import browser_cookie3
import requests
import json
import time
import datetime
from dateutil import tz
import pandas as pd
import numpy as np
import re
from pandas import json_normalize
from decimal import Decimal
import base64
import csv
import unicodecsv
from unidecode import unidecode
import math
class salesforceEinsteinAnalytics(object):
def __init__(self, env_url, browser):
self.env_url = env_url
try:
if browser == 'chrome':
cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since browser cookie does not expect "https://"
my_cookies = requests.utils.dict_from_cookiejar(cj)
self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'}
elif browser == 'firefox':
cj = browser_cookie3.firefox(domain_name=env_url[8:])
my_cookies = requests.utils.dict_from_cookiejar(cj)
self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'}
else:
print('Please select a valid browser (chrome or firefox)')
sys.exit(1)
except:
print('ERROR: Could not get session ID. Make sure you are logged into a live Salesforce session (chrome/firefox).')
sys.exit(1)
#set timezone for displayed operation start time
def get_local_time(self, add_sec=None, timeFORfile=False):
curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())
if add_sec is not None:
return (curr_time + datetime.timedelta(seconds=add_sec)).strftime("%I:%M:%S %p")
elif timeFORfile == True:
return curr_time.strftime("%m_%d_%Y__%I%p")
else:
return curr_time.strftime("%I:%M:%S %p")
def get_dataset_id(self, dataset_name, search_type='API Name', verbose=False):
params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name}
dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params)
dataset_df = json_normalize(json.loads(dataset_json.text)['datasets'])
		#check if the user wants to search by API name or label name
if search_type == 'UI Label':
dataset_df = dataset_df[dataset_df['label'] == dataset_name]
else:
dataset_df = dataset_df[dataset_df['name'] == dataset_name]
		#show the user how many matches they got; consider using the exact API name if a label search returns multiple matches
if verbose == True:
print('Found '+str(dataset_df.shape[0])+' matching datasets.')
#if dataframe is empty then return not found message or return the dataset ID
if dataset_df.empty == True:
print('Dataset not found. Please check name or API name in Einstein Analytics.')
sys.exit(1)
else:
dsnm = dataset_df['name'].tolist()[0]
dsid = dataset_df['id'].tolist()[0]
#get dataset version ID
r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header)
dsvid = json.loads(r.text)['currentVersionId']
return dsnm, dsid, dsvid
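	# Usage sketch (the dataset name is a placeholder; `ea` is a hypothetical instance of this class):
	# returns the API name, dataset id and current version id, which run_saql_query() substitutes
	# into load statements.
	#   dsnm, dsid, dsvid = ea.get_dataset_id('My_Dataset_API_Name', verbose=True)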
def run_saql_query(self, saql, save_path=None, verbose=False):
'''
		This function takes a SAQL query as an argument and returns a dataframe, or saves the result to CSV if save_path is given.
		The query can be in JSON form or in the UI SAQL form.
		load statements must have the appropriate spaces: =_load_\"datasetname\";
'''
if verbose == True:
start = time.time()
print('Checking SAQL and Finding Dataset IDs...')
print('Process started at: '+str(self.get_local_time()))
saql = saql.replace('\"','\\"') #convert UI saql query to JSON format
	#find all load statements (datasets) used in the query and resolve their IDs
load_stmt_old = re.findall(r"(= load )(.*?)(;)", saql)
load_stmt_new = load_stmt_old.copy()
for ls in range(0,len(load_stmt_new)):
load_stmt_old[ls] = ''.join(load_stmt_old[ls])
dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\"',''), verbose=verbose)
load_stmt_new[ls] = ''.join(load_stmt_new[ls])
load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid)
#update saql with dataset ID and version ID
for i in range(0,len(load_stmt_new)):
saql = saql.replace(load_stmt_old[i], load_stmt_new[i])
saql = saql.replace('\\"','\"')
if verbose == True:
print('Running SAQL Query...')
#run query and return dataframe or save as csv
payload = {"query":saql}
r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) )
df = json_normalize(json.loads(r.text)['results']['records'])
if save_path is not None:
if verbose == True:
print('Saving result to CSV...')
df.to_csv(save_path, index=False)
if verbose == True:
end = time.time()
print('Dataframe saved to CSV...')
print('Completed in '+str(round(end-start,3))+'sec')
return df
else:
if verbose == True:
end = time.time()
print('Completed in '+str(round(end-start,3))+'sec')
return df
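	# Usage sketch (the org URL, dataset name and query are illustrative placeholders only):
	#   ea = salesforceEinsteinAnalytics(env_url='https://yourorg.my.salesforce.com', browser='chrome')
	#   saql = 'q = load "MyDataset"; q = group q by all; q = foreach q generate count() as n;'
	#   df = ea.run_saql_query(saql, verbose=True)          # returns a dataframe
	#   ea.run_saql_query(saql, save_path='result.csv')     # or writes the result to CSV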
def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None):
'''
		Version numbers count backwards: 0 is the current version and 20 is the oldest retained version.
		Best practice is to call this first without a version_num to view the history, then call it again with the chosen version.
		Passing save_json_path together with version_num downloads that version's JSON instead of reverting.
'''
#get broken dashboard version history
r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header)
history_df = json_normalize(json.loads(r.text)['histories'])
if save_json_path is not None and version_num is not None:
preview_link = history_df['previewUrl'].tolist()[version_num]
r_restore = requests.get(self.env_url+preview_link, headers=self.header)
with open(save_json_path, 'w', encoding='utf-8') as f:
json.dump(r_restore.json(), f, ensure_ascii=False, indent=4)
elif version_num is not None:
payload = { "historyId": history_df['id'].tolist()[version_num] }
fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload))
else:
return history_df
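	# Usage sketch (the dashboard id is a placeholder): call once without version_num to inspect
	# the history dataframe, then again with the chosen version to revert:
	#   history = ea.restore_previous_dashboard_version('0FK000000000000')
	#   ea.restore_previous_dashboard_version('0FK000000000000', version_num=1)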
def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3):
if verbose == True:
start = time.time()
progress_counter = 0
print('Getting app user list and access details...')
print('Process started at: '+str(self.get_local_time()))
if app_id is None:
'''ALERT: CURRENTLY GETTING AN ERROR FOR ALL APP REQUEST
ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF')
Proposed Solution is to add a try/except block to handle the error
'''
attempts = 0
while attempts < max_request_attempts:
try:
r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header)
response = json.loads(r.text)
total_size = response['totalSize']
next_page = response['nextPageUrl']
app_user_df = pd.DataFrame()
break
except:
attempts += 1
if verbose == True:
print("Unexpected error:", sys.exc_info()[0])
print("Trying again...")
for app in response['folders']:
attempts = 0
while attempts < max_request_attempts:
try:
r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app["id"], headers=self.header)
users = json.loads(r.text)['shares']
for u in users:
app_user_df = app_user_df.append( { "AppId": app['id'],
"AppName": app['name'],
"UserId": u['sharedWithId'],
"UserName": u['sharedWithLabel'],
"AccessType": u['accessType'],
"UserType": u['shareType']
}, ignore_index=True)
break
except:
attempts += 1
if verbose == True:
print("Unexpected error:", sys.exc_info()[0])
print("Trying again...")
#continue to pull data from next page
attempts = 0 # reset attempts for additional pages
while next_page is not None:
if verbose == True:
progress_counter += 25
print('Progress: '+str(round(progress_counter/total_size*100,1))+'%')
while attempts < max_request_attempts:
try:
						next_page_resp = requests.get(self.env_url+next_page, headers=self.header)
						response = json.loads(next_page_resp.text)
next_page = response['nextPageUrl']
break
except KeyError:
next_page = None
print(sys.exc_info()[0])
break
except:
attempts += 1
if verbose == True:
print("Unexpected error:", sys.exc_info()[0])
print("Trying again...")
while attempts < max_request_attempts:
try:
for app in response['folders']:
r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app["id"], headers=self.header)
users = json.loads(r.text)['shares']
for u in users:
app_user_df = app_user_df.append( { "AppId": app['id'],
"AppName": app['name'],
"UserId": u['sharedWithId'],
"UserName": u['sharedWithLabel'],
"AccessType": u['accessType'],
"UserType": u['shareType']
}, ignore_index=True)
break
except:
attempts += 1
if verbose == True:
print("Unexpected error:", sys.exc_info()[0])
print("Trying again...")
elif app_id is not None:
if type(app_id) is list or type(app_id) is tuple:
for app in app_id:
					app_user_df = pd.DataFrame()
'''
convert json.gz file to dataframe
http://jmcauley.ucsd.edu/data/amazon/links.html
'''
file_paths = ['loc_Clothing_Shoes_and_Jewelry.json.gz',
'loc_Toys_and_Games.json.gz',
'loc_Cell_Phones_and_Accessories.json.gz',
'globF_Grocery_and_Gourmet_Food.json.gz'
]
print(f"\nwe will process the following files...\n")
print(*file_paths, sep="\n")
print()
def process_data(file_path):
    # local imports used by the nested parse/getDF helpers below
    import pandas as pd
    import gzip
    import json
    #
def parse(path):
g = gzip.open(path, 'rb')
for l in g:
yield json.loads(l)
#
def getDF(path):
i = 0
df = {}
for d in parse(path):
df[i] = d
i += 1
return pd.DataFrame.from_dict(df, orient='index')
#
df = getDF(file_path)
df = df.dropna()
#
    df_to_save = pd.DataFrame()
# Inputs: Database containing:
# - origin-destination pairs (table)
# - a subset of the destinations that contain services of interest (table)
# Maximum duration of walking
# Output: Table containing O-D pairs only for the destinations of interest
import pandas as pd
import numpy as np
import sqlite3
import code
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# file names and parameters
    # alternative database (kept for reference; overridden by the line below)
    # db_fn = '../query_results/combined-data_5km_with_hssa.db'
    db_fn = '../query_results/sea_hospital_5km.db'
max_dur = 30*60 # 30 minutes
# run main function
subsetDatabase(db_fn, max_dur)
def subsetDatabase(db_fn, max_dur):
# create connection to the database
logger.info('subsetting the database')
db = sqlite3.connect(db_fn)
cursor = db.cursor()
# create a dataframe with only the relevant O-D pairs
if 'destsubset' not in getTabNames(db):
createSubsetDataframe(cursor, max_dur)
db.commit()
# calculate walk scores for origins
calcWalkScores(cursor, db, max_dur)
    print('finished!!')
    db.close()
def calcWalkScores(cursor, db, max_dur):
# calculate the walk score for each origin
# get np.DataFrame of orig ids
logger.info('calculating walk scores')
orig_ids = getTable(cursor, 'orig', [0, 4], ['orig_id', 'pop_over_65'])
scores_dict = {}
# initialize the amount of people for each contract
contract_per_cap = {}
contract_data = getTable(cursor, 'contracts', [0, 3], ['ContractNo', 'TotalBudgt'])
for i in range(contract_data.shape[0]):
contract_per_cap[contract_data.ix[i,'ContractNo']] = {'amt' : contract_data.ix[i,'TotalBudgt'], 'ppl' : 0}
# initialize dictionary to store contracts for each origin
orig_contracts = {}
# Loop through each origin id
for i in range(orig_ids.shape[0]):
if i % 100 == 0:
print('i = {} / {}'.format(i, orig_ids.shape[0]))
# find all services within 30min of this orig
services_pd = getVendorsForOrig(orig_ids.ix[i, 'orig_id'], cursor).drop_duplicates()
# initialize contract list for orig i
orig_contracts[orig_ids.ix[i,'orig_id']] = {'contracts' : [], 'pop' : orig_ids.ix[i, 'pop_over_65']}
# loop through the services
for j in range(services_pd.shape[0]):
# get the duration to this service
tmp = cursor.execute('''SELECT walking_time FROM destsubset
WHERE dest_id={} AND orig_id={}'''
.format(services_pd.ix[j, 'dest_id'], orig_ids.ix[i, 'orig_id']))
duration = tmp.fetchall()
# add to data frame
services_pd.ix[j, 'walking_time'] = duration[0][0]
# add origin pop to the services funding count
contract_per_cap[services_pd.ix[j, 'ContractNo']]['ppl'] += orig_ids.ix[i,'pop_over_65']
# add contract id to the origin's contracts
orig_contracts[orig_ids.ix[i,'orig_id']]['contracts'].append(services_pd.ix[j, 'ContractNo'])
# CALCULATE WALKING SCORE
score = calcHSSAScore(services_pd, cursor, max_dur)
scores_dict[orig_ids.ix[i,'orig_id']] = {'HSSA' : score}
# code.interact(local=locals())
# calculate per capita spending for each contract
contract_per_cap = calcPerCapSpending(contract_data, contract_per_cap)
# calculate spending per origin (update the scores dictionary with this data)
scores_dict = calcOrigFunding(orig_contracts, contract_per_cap, scores_dict)
# add scores to database
HSSAs = [val['HSSA'] for val in scores_dict.values()]
investments = [val['investment'] for val in scores_dict.values()]
# scores_pd = pd.DataFrame({'orig_id' : list(scores_dict.keys()), 'score' : HSSAs, 'investment' : investments})
scores_pd = pd.DataFrame({'orig_id' : list(scores_dict.keys()), 'investment' : investments})
# normalize the scores
# scores_pd['score'] = (100 * scores_pd['score'].divide(max(scores_pd['score']))).astype(int)
print('...normalized the scores')
code.interact(local=locals())
WriteDB(scores_pd, db, 'investment')
db.commit()
def calcOrigFunding(orig_contracts, contract_per_cap, scores_dict):
'''
calculate the amount of funding (per capita) to be apportioned to each origin
using the contracts that each orign has within their walkshed
and the per capita funding of each service
add this to scores_dict
'''
output_dict = {}
for orig_id, orig_data in orig_contracts.items():
orig_spending = 0
for contract_id in orig_data['contracts']:
per_cap_spend = contract_per_cap[contract_id]['per_cap']
orig_spending += per_cap_spend * orig_data['pop']
scores_dict[orig_id].update({'investment' : orig_spending})
return(scores_dict)
def calcPerCapSpending(contract_data, contract_per_cap):
# for each contract, create key for per capita spending and add to dictionary
for i in range(contract_data.shape[0]):
dict_i = contract_per_cap[contract_data.ix[i,'ContractNo']]
# calculate per capita spending
if dict_i['ppl']:
d_per_cap = {'per_cap' : dict_i['amt'] / dict_i['ppl']}
else:
d_per_cap = {'per_cap' : 0}
dict_i.update(d_per_cap)
return(contract_per_cap)
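# e.g. a contract with TotalBudgt 50000 whose services are reachable by 2500 people
# gets per_cap = 20.0; contracts reaching nobody keep per_cap = 0 instead of dividing by zero.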
def WriteDB(df, db, col_name):
'''
Add table to db
'''
logger.info('Writing to DB')
#Initialize connections to .db
cursor = db.cursor()
# code.interact(local=locals())
# add column
# col_name = attr['field']
add_col_str = "ALTER TABLE orig ADD COLUMN {} REAL".format(col_name)
db.execute(add_col_str)
for i in range(len(df)):
add_data_str = "UPDATE orig SET {} =(?) WHERE orig_id=(?)".format(col_name)
value = df.values[i][1]
idx = df.values[i][0]
db.execute(add_data_str, (value, idx))
# commit
db.commit()
# logger.info('Complete')
def calcHSSAScore(services, cursor, max_dur):
'''
Calculate the HSSA score for a given origin
Note: this code is adapted from Logan Noel's code
https://github.com/GeoDaCenter/contracts/blob/master/analytics/ScoreModel.py
'''
WEIGHTS = [.1, .25, .5, .75, 1]
weight_dict = {}
score = 0
for i in range(services.shape[0]):
# cat = VendorLookup(cursor, services.ix[i, 'ContractNo'], 'Project')
cat = services.ix[i, 'Project']
        if cat not in weight_dict:
            # copy so popping weights for one category does not drain the shared WEIGHTS list
            weight_dict[cat] = list(WEIGHTS)
if len(weight_dict[cat]) > 0:
variety_weight = weight_dict[cat].pop()
else:
variety_weight = 0
distance_weight = linearDecayFunction(services.ix[i, 'walking_time'], max_dur)
# calculate score
score += variety_weight * distance_weight * services.ix[i,'TotalBudgt']
return(score)
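# Worked example (illustrative numbers, not from the data): two services of the same
# Project category, budgets 1000 and 2000, at 600 s and 1200 s walking with max_dur = 1800 s:
#   first:  variety weight 1.00, distance weight (1800-600)/1800  ~= 0.667 -> 1.00 * 0.667 * 1000 ~= 667
#   second: variety weight 0.75, distance weight (1800-1200)/1800 ~= 0.333 -> 0.75 * 0.333 * 2000 ~= 500
#   HSSA score ~= 1167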
def linearDecayFunction(time, upper):
# penalty function for distance
# taken from https://github.com/GeoDaCenter/contracts/blob/master/analytics/ScoreModel.py
upper = float(upper)
time = float(time)
if time > upper:
return 0
else:
return (upper - time) / upper
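# e.g. linearDecayFunction(900, 1800) == 0.5 (halfway to the cap);
# anything beyond the cap scores 0: linearDecayFunction(2000, 1800) == 0.0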
# def VendorLookup(cursor, id, kind):
# # look up the value for a specific record, such as Project or TotalBudgt
# # Note: this code is adapted from <NAME>'s code
# # https://github.com/GeoDaCenter/contracts/blob/master/analytics/ScoreModel.py
# query = "SELECT {} FROM contracts WHERE ContractNo is {}".format(kind, id)
# data = cursor.execute(query).fetchone()
# return(data)
def getVendorsForOrig(orig_id, cursor):
# get all of the vendors within reach of a given origin point
# note - doesn't actually get the duration (creates a column with 'None')
tmp = cursor.execute('''SELECT * FROM contracts
WHERE dest_id IN
(SELECT dest_id FROM destsubset WHERE orig_id={})'''
.format(orig_id))
services_tuple = tmp.fetchall()
# convert to pandas data frame
services_list = [x for x in services_tuple]
services_pd = pd.DataFrame(services_list, columns=getColNames(cursor, 'contracts'))
# add column for duration
services_pd['walking_time'] = None
return(services_pd)
def createSubsetDataframe(cursor, max_dur):
# create a pandas dataframe containing the O-D pairs for destinations that contain services
# and adds it to the database
# # get list of dest id's that contain the services of interest
# tmp = cursor.execute("SELECT dest_id FROM contracts")
# service_dest_ids_tuple = tmp.fetchall()
# service_dest_ids = [x[0] for x in service_dest_ids_tuple]
# # filter the database to O-D pairs with duration < specified time
# tmp = cursor.execute("SELECT * FROM origxdest WHERE walking_time < {}".format(max_dur))
logger.info('subsetting the walking results table')
tmp = cursor.execute('''SELECT * FROM walking
WHERE duration < {}
AND dest_id IN (SELECT dest_id FROM contracts)'''.format(max_dur))
#
od_pairs = tmp.fetchall()
#
# create pandas dataframe
data_list = [[row[0], row[1], row[2]] for row in od_pairs]
od_pairs = pd.DataFrame(data_list, columns=['orig_id', 'dest_id', 'walking_time'])
# write this as a table in the database...
# strings
cols_str = "orig_id VARCHAR (15), dest_id VARCHAR (15), walking_time INT"
col_names = ['orig_id', 'dest_id', 'walking_time']
# convert index back to column and format data frame
# od_pairs_subset['dest_id'] = od_pairs_subset.index
# od_pairs_subset = od_pairs_subset[['orig_id', 'dest_id', 'walking_time']]
# add to data base
addPdToDb(od_pairs, cursor, 'destsubset', cols_str, col_names)
return
def addPdToDb(d_frame, cursor, new_table_name, cols_str, col_names):
# add a pandas dataframe (d_frame) to a database (db)
# NOTE: this code is not generalizable (it adds the 3rd column as an int)
# create new table
add_table_str = "CREATE TABLE {}({})".format(new_table_name, cols_str)
cursor.execute(add_table_str)
# add data
add_data_str = "INSERT INTO {}({}) VALUES(?,?,?)".format(new_table_name, ', '.join(col_names))
for i in range(d_frame.shape[0]):
# cursor.execute(add_data_str, (d_frame.ix[i,:]))
cursor.execute(add_data_str, (d_frame.ix[i,0],d_frame.ix[i,1],int(d_frame.ix[i,2])))
def addLatLon(d_frame, cursor, table_name):
# add lat and lon columns to the data
# retrieve the lat and lon from table_name
# match the lat/lon to the d_frame using 'orig_id' column name
# NOTE: this assumes there are three columns in 'table_name' corresponding to id, lon, and lat
# get the lat/lon data
lat_lon_pd = getTable(cursor, table_name, [0,1,2], ['id', 'lon', 'lat'])
# tmp = cursor.execute("SELECT * FROM {}".format(table_name))
# tuple_data = tmp.fetchall()
# # turn into pandas dataframe
# data_list = [[row[0], row[1], row[2]] for row in tuple_data]
# lat_lon_pd = pd.DataFrame(data_list, columns=['id', 'lon', 'lat'])
lat_lon_pd.set_index('id', inplace=True)
# match to the input dataframe
# CHECK THIS! -- does it work with 'id' as index
    d_frame_combined = pd.merge(d_frame, lat_lon_pd, left_on='orig_id', right_on='id')
# define all classes for image and result showing and labeling
import copy
import json
from numbers import Integral
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas
from PyQt5.QtCore import pyqtSignal, Qt
from PyQt5.QtWidgets import QApplication
from matplotlib import path, patches, colors
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.image import AxesImage
from matplotlib.lines import Line2D
from matplotlib.patches import Ellipse, Rectangle, PathPatch, Patch
from matplotlib.widgets import ToolHandles, AxesWidget
from skimage import color
CURSOR_DEFAULT = Qt.ArrowCursor
CURSOR_DRAW = Qt.CrossCursor
CURSOR_SELECT = Qt.ClosedHandCursor
class Canvas(FigureCanvas):
update_data = pyqtSignal(list)
gray_data = pyqtSignal(list)
new_page = pyqtSignal()
slice_link = pyqtSignal(int)
grey_link = pyqtSignal(list)
zoomRequest = pyqtSignal(int)
scrollRequest = pyqtSignal(int, int)
newShape = pyqtSignal()
selectionChanged = pyqtSignal(bool)
deleteEvent = pyqtSignal()
MARK, SELECT = list(range(2))
def __init__(self, param, parent=None):
self.figure = plt.figure()
FigureCanvas.__init__(self, self.figure)
self.setParent(parent)
        self.figure.set_facecolor("black")  # color of the region outside the (ax) dicom image
self.voxel = param.get('image')
self.shape = param.get('shape')
self.Y = param.get('color')
self.Z = param.get('hatch')
self.mode = param.get('mode')
self.cmap = param.get('cmap')
self.hmap = param.get('hmap')
self.trans = param.get('trans')
self.param = param
with open('configGUI/lastWorkspace.json', 'r') as json_data:
lastState = json.load(json_data)
self.dim = lastState["Dim"][0]
if self.mode == 1 or self.mode == 4 or self.mode == 7:
self.slices = self.voxel.shape[-1]
self.ind = self.slices // 2
elif self.mode == 2 or self.mode == 5 or self.mode == 8:
self.slices = self.voxel.shape[-3]
self.ind = self.slices // 2
elif self.mode == 3 or self.mode == 6 or self.mode == 9:
self.slices = self.voxel.shape[-2]
self.ind = self.slices // 2
self.time = 0
try:
self.timemax = self.voxel.shape[-5]
except:
self.timemax = 1
self.depth = 0
try:
self.depthmax = self.voxel.shape[-4]
except:
self.depthmax = 1
self.x_clicked = None
self.y_clicked = None
self.wheel_clicked = False
self.wheel_roll = False
self.ax1 = self.figure.add_subplot(111)
self.artist_list = []
self.mask_class = None
# for marking labels
# 1 = left mouse button
# 2 = center mouse button(scroll wheel)
# 3 = right mouse button
self.cursor2D = Cursor(self.ax1, useblit=True, color='blue', linestyle='dashed')
self.cursor2D.set_active(True)
self.toggle_selector_RS = RectangleSelector(self.ax1, self.rec_onselect, button=[1], drawtype='box',
useblit=True,
minspanx=5, minspany=5, spancoords='pixels')
self.toggle_selector_ES = EllipseSelector(self.ax1, self.ell_onselect, drawtype='box', button=[1], minspanx=5,
useblit=True, minspany=5, spancoords='pixels')
self.toggle_selector_LS = LassoSelector(self.ax1, self.lasso_onselect, useblit=True, button=[1])
self.toggle_selector_ES.set_active(False)
self.toggle_selector_RS.set_active(False)
self.toggle_selector_LS.set_active(False)
self.toggle_selector()
self.figure.canvas.mpl_connect('key_press_event', self.press_event)
self.figure.canvas.mpl_connect('button_press_event', self.mouse_clicked)
self.figure.canvas.mpl_connect('motion_notify_event', self.mouse_move)
self.figure.canvas.mpl_connect('button_release_event', self.mouse_release)
self.figure.canvas.mpl_connect('scroll_event', self.on_scroll)
self.figure.canvas.mpl_connect('pick_event', self.selectShape)
self.emitlist = []
self.emitlist.append(self.ind)
self.emitlist.append(self.slices)
self.emitlist.append(self.time)
self.emitlist.append(self.timemax)
self.emitlist.append(self.depth)
self.emitlist.append(self.depthmax)
self.gchange = []
self.gchange.append(0)
self.gchange.append(0)
# 2 elements
self.labelmode = self.SELECT
self.current = None
self.selectedShape = None # save the selected shape here
self._cursor = CURSOR_DEFAULT
self.is_open_dialog = False
self.shapeList = []
self.background = None
self.to_draw = None
self.picked = False
self.moveEvent = None
self.pressEvent = None
self._corner_order = ['NW', 'NE', 'SE', 'SW']
self._edge_order = ['W', 'N', 'E', 'S']
self.active_handle = None
self._extents_on_press = None
self.maxdist = 10
self.labelon = False
self.legendon = True
self.selectedshape_name = None
self.aspect = 'equal' # auto or equal
self.image_array = None
self.flipimage = 0
if bool(self.flipimage % 4):
self.view_flip_image()
else:
self.view_image()
def set_open_dialog(self, value):
self.is_open_dialog = value
def get_open_dialog(self):
return self.is_open_dialog
def on_scroll(self, event):
self.wheel_roll = True
if event.button == 'up':
self.ind = (self.ind + 1)
self.slice_link.emit(0)
else:
self.ind = (self.ind - 1)
self.slice_link.emit(1)
self.after_scroll()
def after_scroll(self):
if self.ind >= self.slices:
self.ind = 0
if self.ind <= -1:
self.ind = self.slices - 1
self.ax1.clear()
self.shapeList.clear()
self.emitlist[0] = self.ind
self.update_data.emit(self.emitlist)
if bool(self.flipimage % 4):
self.view_flip_image()
else:
self.view_image()
def timechange(self):
if self.time >= self.timemax:
self.time = 0
if self.time <= -1:
self.time = self.timemax - 1
self.emitlist[2] = self.time
self.update_data.emit(self.emitlist)
if bool(self.flipimage % 4):
self.view_flip_image()
else:
self.view_image()
def depthchange(self):
if self.depth >= self.depthmax:
self.depth = 0
if self.depth <= -1:
self.depth = self.depthmax - 1
self.emitlist[4] = self.depth
self.update_data.emit(self.emitlist)
if bool(self.flipimage % 4):
self.view_flip_image()
else:
self.view_image()
def press_event(self, event):
self.v_min, self.v_max = self.pltc.get_clim()
if event.key == 'w':
self.wheel_roll = True
self.ind = (self.ind + 1)
self.slice_link.emit(0)
self.after_scroll()
elif event.key == 'q':
self.wheel_roll = True
self.ind = (self.ind - 1)
self.slice_link.emit(1)
self.after_scroll()
elif event.key == 'left':
self.wheel_clicked = True
self.factor1 = -20
self.factor2 = -20
self.after_adjust()
elif event.key == 'right':
self.wheel_clicked = True
self.factor1 = 20
self.factor2 = 20
self.after_adjust()
elif event.key == 'down':
self.wheel_clicked = True
self.factor1 = 20
self.factor2 = -20
self.after_adjust()
elif event.key == 'up':
self.wheel_clicked = True
self.factor1 = -20
self.factor2 = 20
self.after_adjust()
elif event.key == '1':
# keyboard 1
self.time = self.time - 1
self.timechange()
elif event.key == '2':
# keyboard 2
self.time = self.time + 1
self.timechange()
elif event.key == '3':
# keyboard 3
self.depth = self.depth - 1
self.depthchange()
elif event.key == '4':
# keyboard 4
self.depth = self.depth + 1
self.depthchange()
elif event.key == 'enter':
self.is_open_dialog = True
shapelist = []
if self.label_shape() == 1:
shapelist.append(self.toggle_selector_RS.get_select())
            elif self.label_shape() == 2:
                shapelist.append(self.toggle_selector_ES.get_select())
            elif self.label_shape() == 3:
                shapelist.append(self.toggle_selector_LS.get_select())
shapelist.append(self.toggle_selector_LS.get_select())
for j in shapelist:
for i in j:
if i not in shapelist:
self.shapeList.append(i)
for i in self.ax1.patches:
if type(i) is Rectangle or Ellipse:
i.set_picker(True)
else:
i.set_picker(False)
self.newShape.emit()
elif event.key == 'delete':
self.to_draw.set_visible(False)
self._center_handle = None
self._edge_handles = None
self._corner_handles = None
try:
canvas = self.selectedShape.get_figure().canvas
canvas.draw_idle()
except:
pass
self.df = pandas.read_csv('Markings/marking_records.csv')
self.df = self.df.drop(self.df.index[self.selectind])
self.df.to_csv('Markings/marking_records.csv', index=False)
self.deSelectShape()
self.deleteEvent.emit()
def after_adjust(self):
if (float(self.factor1 - self.factor2)) / (self.v_max - self.factor1 + 0.001) > 1:
nmb = (float(self.factor1 - self.factor2)) / (self.v_max - self.factor1 + 0.001) + 1
self.factor1 = (float(self.factor1 - self.factor2)) / nmb * (self.factor1 / (self.factor1 - self.factor2))
self.factor2 = (float(self.factor1 - self.factor2)) / nmb * (self.factor2 / (self.factor1 - self.factor2))
self.v_min += self.factor1
self.v_max += self.factor2
self.gchange[0] = self.factor1
self.gchange[1] = self.factor2
self.grey_link.emit(self.gchange) ###
self.pltc.set_clim(vmin=self.v_min, vmax=self.v_max)
self.graylist[0] = self.v_min
self.graylist[1] = self.v_max
self.gray_data.emit(self.graylist)
self.figure.canvas.draw()
self.wheel_clicked = False
def set_aspect(self, aspect):
self.aspect = aspect
if bool(self.flipimage % 4):
self.view_flip_image()
else:
self.view_image()
def get_aspect(self):
return self.aspect
def view_image(self):
if self.mode == 1:
self.ax1.axis('off')
try:
img = np.swapaxes(self.voxel[self.time, self.depth, :, :, self.ind], 0, 1)
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
self.image_array = self.voxel[self.time, self.depth, :, :, self.ind]
except:
img = np.swapaxes(self.voxel[:, :, self.ind], 0, 1)
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
self.image_array = self.voxel[:, :, self.ind]
self.draw_idle()
elif self.mode == 2:
self.ax1.axis('off')
try:
img = np.swapaxes(self.voxel[self.time, self.depth, self.ind, :, :], 0, 1)
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.image_array = self.voxel[self.time, self.depth, self.ind, :, :]
except:
img = np.swapaxes(self.voxel[self.ind, :, :], 0, 1)
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.image_array = self.voxel[self.ind, :, :]
self.draw_idle()
elif self.mode == 3:
self.ax1.axis('off')
try:
img = np.swapaxes(self.voxel[self.time, self.depth, :, self.ind, :], 0, 1)
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.image_array = self.voxel[self.time, self.depth, :, self.ind, :]
except:
img = np.swapaxes(self.voxel[:, self.ind, :], 0, 1)
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.image_array = self.voxel[:, self.ind, :]
self.draw_idle()
elif self.mode == 4:
self.ax1.axis('off')
try:
if len(self.cmap) > 1:
artists = []
patch_color_df = pandas.read_csv('configGUI/patch_color.csv')
num_classes = patch_color_df['class'].count()
labels = list(patch_color_df.iloc[0:num_classes]['class'])
img = self.voxel[self.time, self.depth, :, :, self.ind]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
self.image_array = self.voxel[self.time, self.depth, :, :, self.ind]
if not self.Y == []:
mask_shape = list(self.voxel[self.time, self.depth, :, :, self.ind].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Y[i][self.time, self.depth, :, :, self.ind])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
img = self.total_mask
self.im2 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
mask_shape = list(self.voxel[self.time, self.depth, :, :, self.ind].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Z[i][self.time, self.depth, :, :, self.ind])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
artists.append(Patch(facecolor=self.cmap[i], label=labels[i]))
img = self.total_mask
self.im3 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
if self.legendon:
self.ax1.legend(handles=artists, fontsize='x-small')
else:
img = self.voxel[self.time, self.depth, :, :, self.ind]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
local_cmap = matplotlib.colors.ListedColormap(self.cmap[0])
if not self.Y == []:
img = self.Y[-1][self.time, self.depth, :, :, self.ind]
self.im2 = self.ax1.imshow(img, aspect=self.aspect,
cmap=local_cmap, alpha=self.trans,
extent=[0, img.shape[1], img.shape[0], 0])
img = self.Z[-1][self.time, self.depth, :, :, self.ind]
self.im3 = self.ax1.contourf(img,
cmap=local_cmap, alpha=self.trans,
extent=[0, img.shape[1], 0, img.shape[0]])
except:
if len(self.cmap) > 1:
artists = []
patch_color_df = pandas.read_csv('configGUI/patch_color.csv')
num_classes = patch_color_df['class'].count()
labels = list(patch_color_df.iloc[0:num_classes]['class'])
img = self.voxel[:, :, self.ind]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect, extent=[0, img.shape[1], img.shape[0], 0])
self.image_array = self.voxel[:, :, self.ind]
if not self.Y == []:
mask_shape = list(self.voxel[:, :, self.ind].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Y[i][:, :, self.ind])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
img = self.total_mask
self.im2 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect, extent=[0, img.shape[1], img.shape[0], 0])
mask_shape = list(self.voxel[:, :, self.ind].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Z[i][:, :, self.ind])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
artists.append(Patch(facecolor=self.cmap[i], label=labels[i]))
img = self.total_mask
self.im3 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect, extent=[0, img.shape[1], img.shape[0], 0])
if self.legendon:
self.ax1.legend(handles=artists, fontsize='x-small')
else:
img = self.voxel[:, :, self.ind]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
local_cmap = matplotlib.colors.ListedColormap(self.cmap[0])
if not self.Y == []:
img = self.Y[-1][:, :, self.ind]
self.im2 = self.ax1.imshow(img, cmap=local_cmap, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
img = self.Z[-1][:, :, self.ind]
self.im3 = self.ax1.contourf(img, cmap=local_cmap, alpha=self.trans,
extent=[0, img.shape[1], 0, img.shape[0]])
self.draw_idle()
elif self.mode == 5:
self.ax1.axis('off')
try:
if len(self.cmap) > 1:
artists = []
patch_color_df = pandas.read_csv('configGUI/patch_color.csv')
num_classes = patch_color_df['class'].count()
labels = list(patch_color_df.iloc[0:num_classes]['class'])
img = self.voxel[self.time, self.depth, self.ind, :, :]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.image_array = self.voxel[self.time, self.depth, self.ind, :, :]
if not self.Y == []:
mask_shape = list(self.voxel[self.time, self.depth, self.ind, :, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Y[i][self.time, self.depth, self.ind, :, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
img = self.total_mask
self.im2 = self.ax1.imshow(img, extent=[0, img.shape[1], img.shape[0], 0], aspect=self.aspect, alpha=self.trans)
mask_shape = list(self.voxel[self.time, self.depth, self.ind, :, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Z[i][self.time, self.depth, self.ind, :, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
artists.append(Patch(facecolor=self.cmap[i], label=labels[i]))
img = self.total_mask
self.im3 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect, extent=[0, img.shape[1], img.shape[0], 0])
if self.legendon:
self.ax1.legend(handles=artists, fontsize='x-small')
else:
img = self.voxel[self.time, self.depth, self.ind, :, :]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.image_array = self.voxel[self.time, self.depth, self.ind, :, :]
local_cmap = matplotlib.colors.ListedColormap(self.cmap[0])
if not self.Y == []:
img = self.Y[-1][self.time, self.depth, self.ind, :, :]
self.im2 = self.ax1.imshow(img, cmap=local_cmap, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
img = self.Z[-1][self.time, self.depth, self.ind, :, :]
self.im3 = self.ax1.contourf(img, cmap=local_cmap,
alpha=self.trans, extent=[0, img.shape[1], 0, img.shape[0]])
except:
if len(self.cmap) > 1:
artists = []
patch_color_df = pandas.read_csv('configGUI/patch_color.csv')
num_classes = patch_color_df['class'].count()
labels = list(patch_color_df.iloc[0:num_classes]['class'])
img = self.voxel[self.ind, :, :]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.image_array = self.voxel[self.ind, :, :]
if not self.Y == []:
mask_shape = list(self.voxel[self.ind, :, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Y[i][self.ind, :, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
img = self.total_mask
self.im2 = self.ax1.imshow(img, extent=[0, img.shape[1], img.shape[0], 0], aspect=self.aspect, alpha=self.trans)
mask_shape = list(self.voxel[self.ind, :, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Z[i][self.ind, :, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
artists.append(Patch(facecolor=self.cmap[i], label=labels[i]))
img = self.total_mask
self.im3 = self.ax1.imshow(img, aspect=self.aspect, alpha=self.trans, extent=[0, img.shape[1], img.shape[0], 0])
if self.legendon:
self.ax1.legend(handles=artists, fontsize='x-small')
else:
img = self.voxel[self.ind, :, :]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.image_array = self.voxel[self.ind, :, :]
local_cmap = matplotlib.colors.ListedColormap(self.cmap[0])
if not self.Y == []:
img = self.Y[-1][self.ind, :, :]
self.im2 = self.ax1.imshow(img, cmap=local_cmap,
alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
img = self.Z[-1][self.ind, :, :]
self.im3 = self.ax1.contourf(img, cmap=local_cmap, alpha=self.trans,
extent=[0, img.shape[1], 0, img.shape[0]])
self.draw_idle()
elif self.mode == 6:
self.ax1.axis('off')
try:
if len(self.cmap) > 1:
artists = []
patch_color_df = pandas.read_csv('configGUI/patch_color.csv')
num_classes = patch_color_df['class'].count()
labels = list(patch_color_df.iloc[0:num_classes]['class'])
img = self.voxel[self.time, self.depth, :, self.ind, :]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.image_array = self.voxel[self.time, self.depth, :, self.ind, :]
if not self.Y == []:
mask_shape = list(self.voxel[self.time, self.depth, :, self.ind, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Y[i][self.time, self.depth, :, self.ind, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
img = self.total_mask
self.im2 = self.ax1.imshow(img, extent=[0, img.shape[1], img.shape[0], 0], aspect=self.aspect, alpha=self.trans)
mask_shape = list(self.voxel[self.time, self.depth, :, self.ind, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Z[i][self.time, self.depth, :, self.ind, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
artists.append(Patch(facecolor=self.cmap[i], label=labels[i]))
img = self.total_mask
self.im3 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect, extent=[0, img.shape[1], img.shape[0], 0])
if self.legendon:
self.ax1.legend(handles=artists, fontsize='x-small')
else:
img = self.voxel[self.time, self.depth, :, self.ind, :]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.image_array = self.voxel[self.time, self.depth, :, self.ind, :]
local_cmap = matplotlib.colors.ListedColormap(self.cmap[0])
if not self.Y == []:
img = self.Y[-1][self.time, self.depth, :, self.ind, :]
self.im2 = self.ax1.imshow(img, cmap=local_cmap, aspect=self.aspect,
alpha=self.trans,
extent=[0, img.shape[1], img.shape[0], 0])
img = self.Z[-1][self.time, self.depth, :, self.ind, :]
self.im3 = self.ax1.contourf(img, cmap=local_cmap,
alpha=self.trans, extent=[0, img.shape[1], 0, img.shape[0]])
except:
if len(self.cmap) > 1:
artists = []
patch_color_df = pandas.read_csv('configGUI/patch_color.csv')
num_classes = patch_color_df['class'].count()
labels = list(patch_color_df.iloc[0:num_classes]['class'])
img = self.voxel[:, self.ind, :]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.image_array = self.voxel[:, self.ind, :]
if not self.Y == []:
mask_shape = list(self.voxel[:, self.ind, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Y[i][:, self.ind, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
img = self.total_mask
self.im2 = self.ax1.imshow(img, extent=[0, img.shape[1], img.shape[0], 0], aspect=self.aspect, alpha=self.trans)
mask_shape = list(self.voxel[:, self.ind, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Z[i][:, self.ind, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
artists.append(Patch(facecolor=self.cmap[i], label=labels[i]))
img = self.total_mask
self.im3 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect, extent=[0, img.shape[1], img.shape[0], 0])
if self.legendon:
self.ax1.legend(handles=artists, fontsize='x-small')
else:
img = self.voxel[:, self.ind, :]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.image_array = self.voxel[:, self.ind, :]
local_cmap = matplotlib.colors.ListedColormap(self.cmap[0])
if not self.Y == []:
img = self.Y[-1][:, self.ind, :]
self.im2 = self.ax1.imshow(img, cmap=local_cmap, aspect=self.aspect,
alpha=self.trans,
extent=[0, img.shape[1], img.shape[0], 0])
img = self.Z[-1][:, self.ind, :]
self.im3 = self.ax1.contourf(img, cmap=local_cmap, alpha=self.trans,
extent=[0, img.shape[1], 0, img.shape[0]])
self.draw_idle()
elif self.mode == 7:
self.ax1.axis('off')
try:
img = self.voxel[self.time, self.depth, :, :, self.ind]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
if not self.Y == []:
img = self.Y[-1][self.time, self.depth, self.ind, :, :]
self.im2 = self.ax1.imshow(img, cmap=self.cmap, aspect=self.aspect, alpha=self.trans,
extent=[0, img.shape[1], img.shape[0], 0])
except:
img = self.voxel[:, :, self.ind]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
if not self.Y == []:
img = self.Y[-1][:, :, self.ind]
self.im2 = self.ax1.imshow(img, cmap=self.cmap, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
self.draw_idle()
elif self.mode == 8:
self.ax1.axis('off')
try:
img = self.voxel[self.time, self.depth, self.ind, :, :]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
if not self.Y == []:
img = self.Y[-1][self.time, self.depth, self.ind, :, :]
self.im2 = self.ax1.imshow(img, cmap=self.cmap, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
except:
img = self.voxel[self.ind, :, :]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
if not self.Y == []:
img = self.Y[-1][self.ind, :, :]
self.im2 = self.ax1.imshow(img, cmap=self.cmap, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
self.draw_idle()
elif self.mode == 9:
self.ax1.axis('off')
try:
img = self.voxel[self.time, self.depth, :, self.ind, :]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
if not self.Y == []:
img = self.Y[-1][self.time, self.depth, :, self.ind, :]
self.im2 = self.ax1.imshow(img, cmap=self.cmap, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
except:
img = self.voxel[:, self.ind, :]
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
if not self.Y == []:
img = self.Y[-1][:, self.ind, :, 0, 0]
self.im2 = self.ax1.imshow(img, cmap=self.cmap, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
self.draw_idle()
self.wheel_roll = False
v_min, v_max = self.pltc.get_clim()
self.graylist = []
self.graylist.append(v_min)
self.graylist.append(v_max)
self.new_page.emit()
def view_flip_image(self):
if self.mode == 1:
self.ax1.axis('off')
try:
img = np.swapaxes(self.image_array, 0, 1)
self.pltc = self.ax1.imshow(img,
cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
except:
img = np.swapaxes(self.image_array, 0, 1)
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
self.draw_idle()
elif self.mode == 2:
self.ax1.axis('off')
try:
img = np.swapaxes(self.image_array, 0, 1)
self.pltc = self.ax1.imshow(img,
cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
except:
img = np.swapaxes(self.image_array, 0, 1)
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.draw_idle()
elif self.mode == 3:
self.ax1.axis('off')
try:
img = np.swapaxes(self.image_array, 0, 1)
self.pltc = self.ax1.imshow(img,
cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
except:
img = np.swapaxes(self.image_array, 0, 1)
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
self.draw_idle()
elif self.mode == 4:
self.ax1.axis('off')
try:
if len(self.cmap) > 1:
artists = []
patch_color_df = pandas.read_csv('configGUI/patch_color.csv')
num_classes = patch_color_df['class'].count()
labels = list(patch_color_df.iloc[0:num_classes]['class'])
img = self.image_array
self.pltc = self.ax1.imshow(img,
cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
if not self.Y == []:
mask_shape = list(self.voxel[self.time, self.depth, :, :, self.ind].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Y[i][self.time, self.depth, :, :, self.ind])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
img = self.total_mask
self.im2 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
mask_shape = list(self.voxel[self.time, self.depth, :, :, self.ind].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Z[i][self.time, self.depth, :, :, self.ind])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
artists.append(Patch(facecolor=self.cmap[i], label=labels[i]))
img = self.total_mask
self.im3 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
if self.legendon:
self.ax1.legend(handles=artists, fontsize='x-small')
else:
img = self.image_array
self.pltc = self.ax1.imshow(img, aspect=self.aspect,
cmap='gray',
extent=[0, img.shape[1], img.shape[0], 0])
local_cmap = matplotlib.colors.ListedColormap(self.cmap[0])
if not self.Y == []:
img = self.Y[-1][self.time, self.depth, :, :, self.ind]
self.im2 = self.ax1.imshow(img, aspect=self.aspect,
cmap=local_cmap, alpha=self.trans,
extent=[0, img.shape[1], img.shape[0], 0])
img = self.Z[-1][self.time, self.depth, :, :, self.ind]
self.im3 = self.ax1.contourf(img,
cmap=local_cmap, alpha=self.trans,
extent=[0, img.shape[1], 0, img.shape[0]])
except:
if len(self.cmap) > 1:
artists = []
patch_color_df = pandas.read_csv('configGUI/patch_color.csv')
num_classes = patch_color_df['class'].count()
labels = list(patch_color_df.iloc[0:num_classes]['class'])
img = self.image_array
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
if not self.Y == []:
mask_shape = list(self.voxel[:, :, self.ind].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Y[i][:, :, self.ind])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
img = self.total_mask
self.im2 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
mask_shape = list(self.voxel[:, :, self.ind].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Z[i][:, :, self.ind])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
artists.append(Patch(facecolor=self.cmap[i], label=labels[i]))
img = self.total_mask
self.im3 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect,
extent=[0, self.shape[-2], 0, self.shape[-3]])
if self.legendon:
self.ax1.legend(handles=artists, fontsize='x-small')
else:
img = self.image_array
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
local_cmap = matplotlib.colors.ListedColormap(self.cmap[0])
if not self.Y == []:
img = self.Y[-1][:, :, self.ind]
self.im2 = self.ax1.imshow(img, cmap=local_cmap, aspect=self.aspect,
alpha=self.trans,
extent=[0, img.shape[1], img.shape[0], 0])
self.im3 = self.ax1.contourf(self.Z[-1][:, :, self.ind], cmap=local_cmap,
alpha=self.trans,
extent=[0, img.shape[1], 0, img.shape[0]])
self.draw_idle()
elif self.mode == 5:
self.ax1.axis('off')
try:
if len(self.cmap) > 1:
artists = []
patch_color_df = pandas.read_csv('configGUI/patch_color.csv')
num_classes = patch_color_df['class'].count()
labels = list(patch_color_df.iloc[0:num_classes]['class'])
img = self.image_array
self.pltc = self.ax1.imshow(img, aspect=self.aspect,
cmap='gray',
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
if not self.Y == []:
mask_shape = list(self.voxel[self.time, self.depth, self.ind, :, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Y[i][self.time, self.depth, self.ind, :, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
img = self.total_mask
self.im2 = self.ax1.imshow(img, aspect=self.aspect, extent=[0, img.shape[1], img.shape[0], 0], alpha=self.trans)
mask_shape = list(self.voxel[self.time, self.depth, self.ind, :, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Z[i][self.time, self.depth, self.ind, :, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
artists.append(Patch(facecolor=self.cmap[i], label=labels[i]))
img = self.total_mask
self.im3 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect,
extent=[0, self.shape[-2], 0, self.shape[-1]])
if self.legendon:
self.ax1.legend(handles=artists, fontsize='x-small')
else:
img = self.image_array
self.pltc = self.ax1.imshow(img, aspect=self.aspect,
cmap='gray',
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
local_cmap = matplotlib.colors.ListedColormap(self.cmap[0])
if not self.Y == []:
img = self.Y[-1][self.time, self.depth, self.ind, :, :]
self.im2 = self.ax1.imshow(img, aspect=self.aspect,
cmap=local_cmap, alpha=self.trans,
extent=[0, img.shape[1], img.shape[0], 0])
img = self.Z[-1][self.time, self.depth, self.ind, :, :]
self.im3 = self.ax1.contourf(img,
cmap=local_cmap,
alpha=self.trans, extent=[0, img.shape[1], 0, img.shape[0]])
except:
if len(self.cmap) > 1:
artists = []
patch_color_df = pandas.read_csv('configGUI/patch_color.csv')
num_classes = patch_color_df['class'].count()
labels = list(patch_color_df.iloc[0:num_classes]['class'])
img = self.image_array
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
if not self.Y == []:
mask_shape = list(self.voxel[self.ind, :, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Y[i][self.ind, :, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
img = self.total_mask
self.im2 = self.ax1.imshow(img, aspect=self.aspect, extent=[0, img.shape[1], img.shape[0], 0], alpha=self.trans)
mask_shape = list(self.voxel[self.ind, :, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Z[i][self.ind, :, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
artists.append(Patch(facecolor=self.cmap[i], label=labels[i]))
img = self.total_mask
self.im3 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
if self.legendon:
self.ax1.legend(handles=artists, fontsize='x-small')
else:
img = self.image_array
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
local_cmap = matplotlib.colors.ListedColormap(self.cmap[0])
if not self.Y == []:
img = self.Y[-1][self.ind, :, :]
self.im2 = self.ax1.imshow(img, cmap=local_cmap,
alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
img = self.Z[-1][self.ind, :, :]
self.im3 = self.ax1.contourf(img, cmap=local_cmap, alpha=self.trans,
extent=[0, img.shape[1], 0, img.shape[0]])
self.draw_idle()
elif self.mode == 6:
self.ax1.axis('off')
try:
if len(self.cmap) > 1:
artists = []
patch_color_df = pandas.read_csv('configGUI/patch_color.csv')
num_classes = patch_color_df['class'].count()
labels = list(patch_color_df.iloc[0:num_classes]['class'])
img = self.image_array
self.pltc = self.ax1.imshow(img, aspect=self.aspect,
cmap='gray',
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
if not self.Y == []:
mask_shape = list(self.voxel[self.time, self.depth, :, self.ind, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Y[i][self.time, self.depth, :, self.ind, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
img = self.total_mask
self.im2 = self.ax1.imshow(img, extent=[0, img.shape[1], img.shape[0], 0], aspect=self.aspect, alpha=self.trans)
mask_shape = list(self.voxel[self.time, self.depth, :, self.ind, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Z[i][self.time, self.depth, :, self.ind, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
artists.append(Patch(facecolor=self.cmap[i], label=labels[i]))
img = self.total_mask
self.im3 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
if self.legendon:
self.ax1.legend(handles=artists, fontsize='x-small')
else:
img = self.image_array
self.pltc = self.ax1.imshow(img,
cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
local_cmap = matplotlib.colors.ListedColormap(self.cmap[0])
if not self.Y == []:
img = self.image_array
self.im2 = self.ax1.imshow(img,
cmap=local_cmap,
alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
img = self.Z[-1][self.time, self.depth, :, self.ind, :]
self.im3 = self.ax1.contourf(img,
cmap=local_cmap,
alpha=self.trans, extent=[0, img.shape[1], 0, img.shape[0]])
except:
if len(self.cmap) > 1:
artists = []
patch_color_df = pandas.read_csv('configGUI/patch_color.csv')
num_classes = patch_color_df['class'].count()
labels = list(patch_color_df.iloc[0:num_classes]['class'])
img = self.image_array
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
if not self.Y == []:
mask_shape = list(self.voxel[:, self.ind, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Y[i][:, self.ind, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
img = self.total_mask
self.im2 = self.ax1.imshow(img, aspect=self.aspect, extent=[0, img.shape[1], img.shape[0], 0], alpha=self.trans)
mask_shape = list(self.voxel[:, self.ind, :].shape)
mask_shape.append(3)
self.total_mask = np.zeros(mask_shape)
for i in range(len(self.cmap)):
mask = color.gray2rgb(self.Z[i][:, self.ind, :])
self.cmap[i] = matplotlib.colors.to_rgb(self.cmap[i])
self.total_mask += mask * self.cmap[i]
artists.append(Patch(facecolor=self.cmap[i], label=labels[i]))
img = self.total_mask
self.im3 = self.ax1.imshow(img, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
if self.legendon:
self.ax1.legend(handles=artists, fontsize='x-small')
else:
img = self.image_array
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
local_cmap = matplotlib.colors.ListedColormap(self.cmap[0])
if not self.Y == []:
img = self.Y[-1][:, self.ind, :]
self.im2 = self.ax1.imshow(img, cmap=local_cmap,
alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
img = self.Z[-1][:, self.ind, :]
self.im3 = self.ax1.contourf(img, cmap=local_cmap, alpha=self.trans,
extent=[0, img.shape[1], 0, img.shape[0]])
self.draw_idle()
elif self.mode == 7:
self.ax1.axis('off')
try:
img = self.image_array
self.pltc = self.ax1.imshow(img,
cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
if not self.Y == []:
img = self.Y[-1][self.time, self.depth, self.ind, :, :]
self.im2 = self.ax1.imshow(img,
cmap=self.cmap, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
except:
img = self.image_array
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
if not self.Y == []:
img = self.Y[-1][:, :, self.ind]
self.im2 = self.ax1.imshow(img, cmap=self.cmap, aspect=self.aspect,
alpha=self.trans,
extent=[0, img.shape[1], img.shape[0], 0])
self.draw_idle()
elif self.mode == 8:
self.ax1.axis('off')
try:
img = self.image_array
self.pltc = self.ax1.imshow(img,
cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
if not self.Y == []:
img = self.Y[-1][self.time, self.depth, self.ind, :, :]
self.im2 = self.ax1.imshow(img,
cmap=self.cmap, alpha=self.trans, aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0])
except:
img = self.image_array
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
if not self.Y == []:
img = self.Y[-1][self.ind, :, :]
self.im2 = self.ax1.imshow(img, cmap=self.cmap, aspect=self.aspect,
alpha=self.trans,
extent=[0, img.shape[1], img.shape[0], 0])
self.draw_idle()
elif self.mode == 9:
self.ax1.axis('off')
try:
img = self.image_array
self.pltc = self.ax1.imshow(img,
cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
if not self.Y == []:
img = self.Y[-1][self.time, self.depth, :, self.ind, :]
self.im2 = self.ax1.imshow(img,
cmap=self.cmap, alpha=self.trans,
extent=[0, img.shape[1], img.shape[0], 0])
except:
img = self.image_array
self.pltc = self.ax1.imshow(img, cmap='gray', aspect=self.aspect,
extent=[0, img.shape[1], img.shape[0], 0], interpolation='sinc')
if not self.Y == []:
img = self.Y[-1][:, self.ind, :, 0, 0]
self.im2 = self.ax1.imshow(img, cmap=self.cmap, aspect=self.aspect,
alpha=self.trans,
extent=[0, img.shape[1], img.shape[0], 0])
self.draw_idle()
self.wheel_roll = False
def get_image_array(self):
if self.image_array is not None:
return self.image_array
def set_image_array(self, array):
self.flipimage += 1
self.image_array = array
if bool(self.flipimage % 4):
self.view_flip_image()
else:
self.view_image()
def set_selected(self, shape):
self.deSelectShape()
self.picked = True
self.toggle_selector_ES.set_active(False)
self.toggle_selector_RS.set_active(False)
self.toggle_selector_LS.set_active(False)
self.selectedShape = shape
if isinstance(self.selectedShape, (Rectangle, Ellipse)):
self.selectedShape.set_edgecolor('black')
self.draw_idle()
self.set_state(2)
self.edit_selectedShape(self.selectedShape)
elif type(self.selectedShape) is PathPatch:
self.selectedShape.set_edgecolor('black')
self.draw_idle()
self.set_state(2)
self.selectionChanged.emit(True)
def get_selected(self):
return self.selectedShape
def selectShape(self, event):
self.deSelectShape()
self.picked = True
self.toggle_selector_ES.set_active(False)
self.toggle_selector_RS.set_active(False)
self.toggle_selector_LS.set_active(False)
self.selectedShape = event.artist
if isinstance(self.selectedShape, (Rectangle, Ellipse)):
self.selectedShape.set_edgecolor('black')
self.draw_idle()
self.set_state(2)
self.edit_selectedShape(self.selectedShape)
elif type(self.selectedShape) is PathPatch:
self.selectedShape.set_edgecolor('black')
self.draw_idle()
self.set_state(2)
self.selectionChanged.emit(True)
def update_selectedShape(self):
self.selectedShape = self.to_draw
plist = self.selectedShape.get_path().vertices.tolist()
plist = ', '.join(str(x) for x in plist)
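# Look up this artist in the marking-records CSV and write the edited vertex
# list back to its 'path' column so the stored annotation stays up to date.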
self.df = pandas.read_csv('Markings/marking_records.csv')
matches = self.df[self.df['artist'] == str(self.selectedShape)].index.values.astype(int)
if len(matches) > 0:
self.selectind = matches[0]
else:
self.selectind = 0
color = self.df.iloc[self.selectind]['labelcolor']
if self.labelon:
self.setToolTip(self.selectedshape_name)
if pandas.isna(color):
color = colors.to_hex('b', keep_alpha=True)
self.df.loc[self.selectind, 'path'] = plist
self.df.to_csv('Markings/marking_records.csv', index=False)
self.selectedShape.set_facecolor(color)
self.selectedShape.set_alpha(0.5)
self.selectionChanged.emit(True)
try:
canvas = self.selectedShape.get_figure().canvas
axes = self.selectedShape.axes
self.background = canvas.copy_from_bbox(self.selectedShape.axes.bbox)
canvas.restore_region(self.background)
axes.draw_artist(self.selectedShape)
axes.draw_artist(self._corner_handles.artist)
axes.draw_artist(self._edge_handles.artist)
axes.draw_artist(self._center_handle.artist)
# blit just the redrawn area
canvas.blit(axes.bbox)
except:
pass
def deSelectShape(self):
self.picked = False
if self.selectedShape is not None:
self.selectedShape.set_edgecolor(None)
self.selectedShape = None
self.toggle_selector_ES.set_active(True)
self.toggle_selector_RS.set_active(True)
self.toggle_selector_LS.set_active(True)
self.draw_idle()
self.selectionChanged.emit(False)
def edit_selectedShape(self, artist):
self.to_draw = artist
xc, yc = self.get_corners()
self._corner_handles = ToolHandles(self.ax1, xc, yc, marker='o',
marker_props=self.toggle_selector_RS.get_props(), useblit=True)
self._corner_handles.set_visible(True)
xe, ye = self.get_edge_centers()
self._edge_handles = ToolHandles(self.ax1, xe, ye, marker='o', marker_props=self.toggle_selector_RS.get_props(),
useblit=True)
self._edge_handles.set_visible(True)
xc, yc = self.get_center()
self._center_handle = ToolHandles(self.ax1, [xc], [yc], marker='o',
marker_props=self.toggle_selector_RS.get_props(), useblit=True)
self._center_handle.set_visible(True)
if self.pressEvent is not None:
c_idx, c_dist = self._corner_handles.closest(self.pressEvent.x, self.pressEvent.y)
e_idx, e_dist = self._edge_handles.closest(self.pressEvent.x, self.pressEvent.y)
m_idx, m_dist = self._center_handle.closest(self.pressEvent.x, self.pressEvent.y)
if m_dist < self.maxdist * 2:
self.active_handle = 'C'
self._extents_on_press = self.extents
elif c_dist > self.maxdist and e_dist > self.maxdist:
self.active_handle = None
return
elif c_dist < e_dist:
self.active_handle = self._corner_order[c_idx]
else:
self.active_handle = self._edge_order[e_idx]
# Save coordinates of rectangle at the start of handle movement.
x1, x2, y1, y2 = self.extents
# Switch variables so that only x2 and/or y2 are updated on move.
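# e.g. grabbing the west ('W') edge pins x1 to the old right edge (x2) and lets x2 follow the cursor.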
if self.active_handle in ['W', 'SW', 'NW']:
x1, x2 = x2, self.pressEvent.xdata
if self.active_handle in ['N', 'NW', 'NE']:
y1, y2 = y2, self.pressEvent.ydata
self._extents_on_press = x1, x2, y1, y2
if self.selectedShape is not None:
self.update_selectedShape()
@property
def _rect_bbox(self):
if type(self.selectedShape) is Rectangle:
x0 = self.to_draw.get_x()
y0 = self.to_draw.get_y()
width = self.to_draw.get_width()
height = self.to_draw.get_height()
return x0, y0, width, height
elif type(self.selectedShape) is Ellipse:
x, y = self.to_draw.center
width = self.to_draw.width
height = self.to_draw.height
return x - width / 2., y - height / 2., width, height
def get_corners(self):
"""Corners of rectangle from lower left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
xc = x0, x0 + width, x0 + width, x0
yc = y0, y0, y0 + height, y0 + height
return xc, yc
def get_edge_centers(self):
"""Midpoint of rectangle edges from left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
w = width / 2.
h = height / 2.
xe = x0, x0 + w, x0 + width, x0 + w
ye = y0 + h, y0, y0 + h, y0 + height
return xe, ye
def get_center(self):
"""Center of rectangle"""
x0, y0, width, height = self._rect_bbox
return x0 + width / 2., y0 + height / 2.
@property
def extents(self):
"""Return (xmin, xmax, ymin, ymax)."""
x0, y0, width, height = self._rect_bbox
xmin, xmax = sorted([x0, x0 + width])
ymin, ymax = sorted([y0, y0 + height])
return xmin, xmax, ymin, ymax
def set_extents(self, extents):
# Update displayed shape
self.draw_shape(extents)
# Update displayed handles
self._corner_handles.set_data(self.get_corners())
self._edge_handles.set_data(self.get_edge_centers())
self._center_handle.set_data(self.get_center())
try:
canvas = self.to_draw.get_figure().canvas
axes = self.to_draw.axes
canvas.restore_region(self.background)
axes.draw_artist(self.to_draw)
axes.draw_artist(self._corner_handles.artist)
axes.draw_artist(self._edge_handles.artist)
axes.draw_artist(self._center_handle.artist)
# blit just the redrawn area
canvas.blit(axes.bbox)
except:
pass
self.df = pandas.read_csv('Markings/marking_records.csv')
if isinstance(self.to_draw, (Rectangle, Ellipse)):
self.df.loc[self.selectind, 'artist'] = str(self.to_draw)
self.df.to_csv('Markings/marking_records.csv', index=False)
def draw_shape(self, extents):
if type(self.selectedShape) is Rectangle:
x0, x1, y0, y1 = extents
xmin, xmax = sorted([x0, x1])
ymin, ymax = sorted([y0, y1])
xlim = sorted(self.ax1.get_xlim())
ylim = sorted(self.ax1.get_ylim())
xmin = max(xlim[0], xmin)
ymin = max(ylim[0], ymin)
xmax = min(xmax, xlim[1])
ymax = min(ymax, ylim[1])
self.to_draw.set_x(xmin)
self.to_draw.set_y(ymin)
self.to_draw.set_width(xmax - xmin)
self.to_draw.set_height(ymax - ymin)
elif type(self.selectedShape) is Ellipse:
x1, x2, y1, y2 = extents
xmin, xmax = sorted([x1, x2])
ymin, ymax = sorted([y1, y2])
center = [x1 + (x2 - x1) / 2., y1 + (y2 - y1) / 2.]
a = (xmax - xmin) / 2.
b = (ymax - ymin) / 2.
self.to_draw.center = center
self.to_draw.width = 2 * a
self.to_draw.height = 2 * b
@property
def geometry(self):
"""
Returns numpy.ndarray of shape (2,5) containing
x (``RectangleSelector.geometry[1,:]``) and
y (``RectangleSelector.geometry[0,:]``)
coordinates of the four corners of the rectangle starting
and ending in the top left corner.
"""
if hasattr(self.to_draw, 'get_verts'):
xfm = self.ax1.transData.inverted()
y, x = xfm.transform(self.to_draw.get_verts()).T
return np.array([x, y])
else:
return np.array(self.to_draw.get_data())
##
def mouse_clicked(self, event):
if event.button == 2:
self.x_clicked = event.x
self.y_clicked = event.y
self.wheel_clicked = True
elif event.button == 3:
if self.editing():
self.set_state(1)
elif self.drawing():
self.set_state(2)
elif event.button == 1:
if self.picked:
try:
self._edit_on_press(event)
except:
pass
def _edit_on_press(self, event):
self.pressEvent = event
contains, attrd = self.selectedShape.contains(event)
if not contains: return
# draw everything but the selected rectangle and store the pixel buffer
try:
canvas = self.selectedShape.get_figure().canvas
axes = self.selectedShape.axes
self.to_draw.set_animated(True)
canvas.draw()
self.background = canvas.copy_from_bbox(self.selectedShape.axes.bbox)
# canvas.restore_region(self.background)
axes.draw_artist(self.to_draw)
axes.draw_artist(self._corner_handles.artist)
axes.draw_artist(self._edge_handles.artist)
axes.draw_artist(self._center_handle.artist)
# blit just the redrawn area
canvas.blit(axes.bbox)
except:
pass
self.df = pandas.read_csv('Markings/marking_records.csv')
from operator import itemgetter
from math import sqrt
import random
import time
from pympler import asizeof
import numpy as np
import pandas as pd
from math import log10
import scipy.sparse
from scipy.sparse import csc_matrix
import theano
import theano.tensor as T
import keras.layers as kl
import keras.models as km
import keras.backend as K
from datetime import datetime as dt
from datetime import timedelta as td
from keras.layers.embeddings import Embedding
import keras
from keras.regularizers import l2
from keras.utils.vis_utils import plot_model
class RankNetNeuralMF:
'''
RankNetNeuralMF( factors=16, layers=[64,32,16,8], batch=100, optimizer='adam', learning_rate=0.01, reg=0.01, emb_reg=0.01, dropout=0.0, epochs=10, add_dot=False, include_artist=False, order='random', session_key = 'playlist_id', item_key= 'track_id', user_key= 'playlist_id', artist_key= 'artist_id', time_key= 'pos' )
Parameters
-----------
'''
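# Illustrative usage sketch (not part of the original code; assumes 'train_df' and
# 'test_df' are action DataFrames with the default playlist_id / track_id /
# artist_id / pos columns):
#
#   model = RankNetNeuralMF(factors=16, layers=[64, 32, 16, 8], epochs=10)
#   model.train({'actions': train_df}, {'actions': test_df})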
def __init__( self, factors=16, layers=[64,32,16,8], batch=100, optimizer='adam', learning_rate=0.01, reg=0.01, emb_reg=0.01, dropout=0.0, epochs=10, add_dot=False, include_artist=False, order='random', session_key = 'playlist_id', item_key= 'track_id', user_key= 'playlist_id', artist_key= 'artist_id', time_key= 'pos' ):
self.factors = factors
self.layers = layers
self.batch = batch
self.learning_rate = learning_rate
self.optimizer = optimizer
self.regularization = reg
self.dropout = dropout
self.epochs = epochs
self.order = order
self.include_artist = include_artist
self.add_dot = add_dot
self.session_key = session_key
self.item_key = item_key
self.user_key = user_key
self.time_key = time_key
self.artist_key = artist_key
self.emb_reg = emb_reg
self.final_reg = reg
self.floatX = theano.config.floatX
self.intX = 'int32'
def train(self, train, test=None):
'''
Trains the predictor.
Parameters
--------
train: dict
Training data. The 'actions' entry is a pandas.DataFrame containing the transactions of the sessions, with one column for session IDs, one for item IDs and one for the position/timestamp of the events.
It must have a header. Column names are arbitrary, but must correspond to the ones you set during the initialization of the network (session_key, item_key, time_key properties).
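A minimal sketch of the expected layout, with illustrative values and the default column names:
playlist_id track_id artist_id pos
17 4521 87 0
17 980 12 1
17 4521 87 2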
'''
data = train['actions']
datat = test['actions']
data = pd.concat( [data, datat] )
start = time.time()
self.unique_items = data[self.item_key].unique().astype( self.intX )
self.num_items = data[self.item_key].nunique()
self.num_users = data[self.user_key].nunique()
self.num_artists = data[self.artist_key].nunique()
#idx = [data[self.item_key].max()+1] + list( data[self.item_key].unique() )
self.itemmap = pd.Series( data=np.arange(self.num_items), index=data[self.item_key].unique() ).astype( self.intX )
self.usermap = pd.Series( data=np.arange(self.num_users), index=data[self.user_key].unique() ).astype( self.intX )
self.artistmap = pd.Series( data=np.arange(self.num_artists), index=data[self.artist_key].unique() ).astype( self.intX )
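# Each map turns raw ids into contiguous 0-based indices for the embedding layers,
# e.g. pd.Series(np.arange(3), index=[42, 7, 99]) maps raw id 7 -> index 1.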
print( 'finished init item and user map in {}'.format( ( time.time() - start ) ) )
train = data
start = time.time()
self.num_sessions = train[self.session_key].nunique()
train = pd.merge(train, pd.DataFrame({self.item_key: self.itemmap.index, 'ItemIdx': self.itemmap[self.itemmap.index].values}), on=self.item_key, how='inner')
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2016, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import os.path
from math import isnan
import abc
import pandas as pd
import matplotlib.pyplot as plt
from devlib.target import KernelVersion
from lisa.wlgen.rta import Periodic, Ramp, Step
from lisa.analysis.rta import PerfAnalysis
from lisa.tests.base import ResultBundle, CannotCreateError, RTATestBundle
from lisa.utils import ArtifactPath
from lisa.datautils import series_integrate
from lisa.energy_model import EnergyModel
from lisa.trace import requires_events
from lisa.target import Target
from lisa.trace import FtraceCollector
class EASBehaviour(RTATestBundle):
"""
Abstract class for EAS behavioural testing.
:param nrg_model: The energy model of the platform the synthetic workload
was run on
:type nrg_model: EnergyModel
This class provides :meth:`test_task_placement` to validate the basic
behaviour of EAS. The implementations of this class have been developed to
verify patches supporting Arm's big.LITTLE in the Linux scheduler. You can
see these test results being published
`here <https://developer.arm.com/open-source/energy-aware-scheduling/eas-mainline-development>`_.
"""
@property
def nrg_model(self):
return self.plat_info['nrg-model']
@classmethod
def check_from_target(cls, target):
for domain in target.cpufreq.iter_domains():
if "schedutil" not in target.cpufreq.list_governors(domain[0]):
raise CannotCreateError(
"Can't set schedutil governor for domain {}".format(domain))
if 'nrg-model' not in target.plat_info:
raise CannotCreateError("Energy model not available")
@classmethod
def _from_target(cls, target, res_dir, ftrace_coll=None):
plat_info = target.plat_info
rtapp_profile = cls.get_rtapp_profile(plat_info)
# EAS doesn't make a lot of sense without schedutil,
# so make sure this is what's being used
with target.disable_idle_states():
with target.cpufreq.use_governor("schedutil"):
cls._run_rtapp(target, res_dir, rtapp_profile, ftrace_coll=ftrace_coll)
return cls(res_dir, plat_info)
@classmethod
def from_target(cls, target:Target, res_dir:ArtifactPath=None, ftrace_coll:FtraceCollector=None) -> 'EASBehaviour':
"""
Factory method to create a bundle using a live target
This will execute the rt-app workload described in
:meth:`lisa.tests.base.RTATestBundle.get_rtapp_profile`
"""
return super().from_target(target, res_dir, ftrace_coll=ftrace_coll)
def _get_expected_task_utils_df(self, nrg_model):
"""
Get a DataFrame with the *expected* utilization of each task over time
:param nrg_model: EnergyModel used to computed the expected utilization
:type nrg_model: EnergyModel
:returns: A Pandas DataFrame with a column for each task, showing how
the utilization of that task varies over time
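Example (illustrative; assumes a capacity scale of 1024): one task with
delay_s=1 and a single 50%-duty phase lasting 10 s gives::
task_a
1.0 512.0
11.0 0.0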
"""
util_scale = nrg_model.capacity_scale
transitions = {}
def add_transition(time, task, util):
if time not in transitions:
transitions[time] = {task: util}
else:
transitions[time][task] = util
# First we'll build a dict D {time: {task_name: util}} where D[t][n] is
# the expected utilization of task n from time t.
for task, params in self.rtapp_profile.items():
# time = self.get_start_time(experiment) + params.get('delay', 0)
time = params.delay_s
add_transition(time, task, 0)
for _ in range(params.loops):
for phase in params.phases:
util = (phase.duty_cycle_pct * util_scale / 100)
add_transition(time, task, util)
time += phase.duration_s
add_transition(time, task, 0)
index = sorted(transitions.keys())
df = pd.DataFrame([transitions[k] for k in index], index=index)
return df.fillna(method='ffill')
def _get_task_cpu_df(self):
"""
Get a DataFrame mapping task names to the CPU they ran on
Use the sched_switch trace event to find which CPU each task ran
on. Does not reflect idleness - tasks not running are shown as running
on the last CPU they woke on.
:returns: A Pandas DataFrame with a column for each task, showing the
CPU that the task was "on" at each moment in time
"""
tasks = self.rtapp_tasks
df = self.trace.df_events('sched_switch')[['next_comm', '__cpu']]
df = df[df['next_comm'].isin(tasks)]
df = df.pivot(index=df.index, columns='next_comm').fillna(method='ffill')
cpu_df = df['__cpu']
# Drop consecutive duplicates
cpu_df = cpu_df[(cpu_df.shift(+1) != cpu_df).any(axis=1)]
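# keeps the first row plus every row where at least one task changed CPU,
# e.g. a single column [2, 2, 0, 0, 1] is reduced to the rows holding 2, 0, 1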
return cpu_df
def _sort_power_df_columns(self, df, nrg_model):
"""
Helper method to re-order the columns of a power DataFrame
This has no significance for the code itself, but DataFrames examined by hand
are easier to understand when the columns are in a logical order.
:param nrg_model: EnergyModel used to get the CPU from
:type nrg_model: EnergyModel
"""
node_cpus = [node.cpus for node in nrg_model.root.iter_nodes()]
return pd.DataFrame(df, columns=[c for c in node_cpus if c in df])
def _plot_expected_util(self, util_df, nrg_model):
"""
Create a plot of the expected per-CPU utilization for the experiment
The plot is then output to the test results directory.
:param experiment: The :class:Experiment to examine
:param util_df: A Pandas Dataframe with a column per CPU giving their
(expected) utilization at each timestamp.
:param nrg_model: EnergyModel used to get the CPU from
:type nrg_model: EnergyModel
"""
fig, ax = plt.subplots(
len(nrg_model.cpus), 1, figsize=(16, 1.8 * len(nrg_model.cpus))
)
fig.suptitle('Per-CPU expected utilization')
for cpu in nrg_model.cpus:
tdf = util_df[cpu]
ax[cpu].set_ylim((0, 1024))
tdf.plot(ax=ax[cpu], drawstyle='steps-post', title="CPU{}".format(cpu), color='red')
ax[cpu].set_ylabel('Utilization')
# Grey-out areas where utilization == 0
ffill = False
prev = 0.0
for time, util in tdf.items():
if ffill:
ax[cpu].axvspan(prev, time, facecolor='gray', alpha=0.1, linewidth=0.0)
ffill = False
if util == 0.0:
ffill = True
prev = time
figname = os.path.join(self.res_dir, 'expected_placement.png')
plt.savefig(figname, bbox_inches='tight')
plt.close()
def _get_expected_power_df(self, nrg_model, capacity_margin_pct):
"""
Estimate *optimal* power usage over time
Examine a trace and use :meth:get_optimal_placements and
:meth:EnergyModel.estimate_from_cpu_util to get a DataFrame showing the
estimated power usage over time under ideal EAS behaviour.
:meth:get_optimal_placements returns several optimal placements. They
are usually equivalent, but can be drastically different in some cases.
Currently only one of those placements is used (the first in the list).
:param nrg_model: EnergyModel used compute the optimal placement
:type nrg_model: EnergyModel
:param capacity_margin_pct: Capacity margin, in percent, used when computing the optimal placements.
:returns: A Pandas DataFrame with a column for each node in the energy model
(keyed with a tuple of the CPUs contained by that node) and a
"power" column with the sum of other columns. Shows the
estimated *optimal* power over time.
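A minimal sketch of the underlying calls (utilization values are illustrative)::
task_utils = {'task_wmig0': 102.4, 'task_wmig1': 102.4}
placement = nrg_model.get_optimal_placements(task_utils, capacity_margin_pct)[0]
power = nrg_model.estimate_from_cpu_util(placement)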
"""
task_utils_df = self._get_expected_task_utils_df(nrg_model)
data = []
index = []
def exp_power(row):
task_utils = row.to_dict()
expected_utils = nrg_model.get_optimal_placements(task_utils, capacity_margin_pct)[0]
power = nrg_model.estimate_from_cpu_util(expected_utils)
columns = list(power.keys())
# Assemble a dataframe to plot the expected utilization
data.append(expected_utils)
index.append(row.name)
return pd.Series([power[c] for c in columns], index=columns)
res_df = self._sort_power_df_columns(
task_utils_df.apply(exp_power, axis=1), nrg_model)
self._plot_expected_util(pd.DataFrame(data, index=index), nrg_model)
return res_df
def _get_estimated_power_df(self, nrg_model):
"""
Considering only the task placement, estimate power usage over time
Examine a trace and use :meth:EnergyModel.estimate_from_cpu_util to get
a DataFrame showing the estimated power usage over time. This assumes
perfect cpuidle and cpufreq behaviour. Only the CPU on which the tasks
are running is extracted from the trace; all other signals are guessed.
:param nrg_model: EnergyModel used compute the optimal placement and
CPUs
:type nrg_model: EnergyModel
:returns: A Pandas DataFrame with a column for each node in the energy model
(keyed with a tuple of the CPUs contained by that node). Shows
the estimated power over time.
"""
task_cpu_df = self._get_task_cpu_df()
task_utils_df = self._get_expected_task_utils_df(nrg_model)
task_utils_df.index = [time + self.trace.start for time in task_utils_df.index]
tasks = self.rtapp_tasks
# Create a combined DataFrame with the utilization of a task and the CPU
# it was running on at each moment. Looks like:
# utils cpus
# task_wmig0 task_wmig1 task_wmig0 task_wmig1
# 2.375056 102.4 102.4 NaN NaN
# 2.375105 102.4 102.4 2.0 NaN
df = pd.concat([task_utils_df, task_cpu_df],
axis=1, keys=['utils', 'cpus'])
df = df.sort_index().fillna(method='ffill')
# Now make a DataFrame with the estimated power at each moment.
def est_power(row):
cpu_utils = [0 for cpu in nrg_model.cpus]
for task in tasks:
cpu = row['cpus'][task]
util = row['utils'][task]
if not isnan(cpu):
cpu_utils[int(cpu)] += util
power = nrg_model.estimate_from_cpu_util(cpu_utils)
columns = list(power.keys())
return pd.Series([power[c] for c in columns], index=columns)