| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19-1.03M | stringlengths 4-2.12k | stringlengths 8-90 |
import dask.dataframe as dd
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
DEFAULT_WINDOW = 7
DEFAULT_TAKE_LOGS = True
DEFAULT_CENTER = False
DEFAULT_MIN_PERIODS = 1
def calculate_weekly_incidences_from_results(
results,
outcome,
groupby=None,
):
"""Create the weekly incidences from a list of simulation runs.
Args:
results (list): list of dask DataFrames with the time series data from sid
simulations.
Returns:
weekly_incidences (pandas.DataFrame): every column is the
weekly incidence over time for one simulation run.
The index are the dates of the simulation period if groupby is None, else
the index is a MultiIndex with date and the groups.
"""
weekly_incidences = []
for res in results:
daily_smoothed = smoothed_outcome_per_hundred_thousand_sim(
df=res,
outcome=outcome,
take_logs=False,
window=7,
center=False,
groupby=groupby,
)
weekly_smoothed = daily_smoothed * 7
if groupby is None:
full_index = pd.date_range(
weekly_smoothed.index.min(), weekly_smoothed.index.max()
)
else:
groups = weekly_smoothed.index.get_level_values(groupby).unique()
dates = weekly_smoothed.index.get_level_values("date").unique()
full_index = pd.MultiIndex.from_product(iterables=[dates, groups])
expanded = weekly_smoothed.reindex(full_index).fillna(0)
weekly_incidences.append(expanded)
df = pd.concat(weekly_incidences, axis=1)
df.columns = range(len(results))
assert not df.index.duplicated().any()
if groupby is not None:
assert is_categorical_dtype(df.index.levels[1])
return df
def smoothed_outcome_per_hundred_thousand_sim(
df,
outcome,
groupby=None,
window=DEFAULT_WINDOW,
min_periods=DEFAULT_MIN_PERIODS,
take_logs=DEFAULT_TAKE_LOGS,
center=DEFAULT_CENTER,
):
"""Calculate a daily smoothed outcome on the per 100 000 people level on simulated data.
Args:
df (pandas.DataFrame or dask.dataframe): Simulated time series.
outcome (str): Selects a column in df.
groupby (list, str or None): Defines the subgroups for which the outcome is
calculated.
window (int): Over how many days results are averaged to smooth the outcome.
min_periods (int): Minimum number of days that need to be present in the
smoothing window for the outcome to be not NaN.
take_logs (bool): Whether the log of the outcome should be returned. If True,
smoothing is already done in logs.
center (bool): Whether the smoothing window is centered or forward looking.
Returns:
pd.Series: Series with a smoothed outcome. The first index level is date. If
groupby is specified, there are additional index levels.
"""
df = df.reset_index()
window, min_periods, groupby = _process_inputs(window, min_periods, groupby)
per_individual = (
df.groupby([pd.Grouper(key="date", freq="D")] + groupby)[outcome]
.mean()
.fillna(0)
)
if isinstance(df, dd.core.DataFrame):
per_individual = per_individual.compute()
out = _smooth_and_scale_daily_outcome_per_individual(
per_individual, window, min_periods, groupby, take_logs, center=center
)
return out
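# A minimal usage sketch (hypothetical column names and data; the smoothing and
# per-100,000 scaling happen in this module's private helper, not shown here):
#
#     toy = pd.DataFrame({
#         "date": pd.to_datetime(["2020-03-01"] * 3 + ["2020-03-02"] * 3),
#         "newly_infected": [0, 1, 0, 1, 1, 0],
#         "age_group": ["young", "old", "young", "old", "young", "old"],
#     })
#     daily = smoothed_outcome_per_hundred_thousand_sim(
#         toy, outcome="newly_infected", groupby="age_group", window=7
#     )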
def calculate_period_outcome_sim(df, outcome, groupby=None):
"""Calculate an outcome on a dataset of one period.
This uses a groupby over the date column such that the date is preserved as the
first index level of the result. Only meant to be used during the msm estimation.
Args:
df (pandas.DataFrame): Simulated states DataFrame for one period.
outcome (str): Selects a column in df.
groupby (list, str or None): Defines the subgroups for which the outcome is
calculated.
Returns:
pd.Series: Series with an unsmoothed outcome for one day. The first index
level is date, even though it is meant to be the same for all entries.
If groupby is specified, there are additional index levels.
"""
if groupby is None:
groupby = []
elif isinstance(groupby, str):
groupby = [groupby]
out = (
df.groupby([pd.Grouper(key="date", freq="D")] + groupby)[outcome]
.mean()
.fillna(0)
)
if isinstance(df, dd.core.DataFrame):
out = out.compute()
return out
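# A runnable sketch with a toy states frame (column names purely illustrative):
#
#     states = pd.DataFrame({
#         "date": pd.to_datetime(["2020-03-01"] * 4),
#         "age_group": ["young", "young", "old", "old"],
#         "newly_infected": [0, 1, 1, 1],
#     })
#     calculate_period_outcome_sim(states, "newly_infected", groupby="age_group")
#     # -> Series indexed by (date, age_group) with the daily mean per group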
def aggregate_and_smooth_period_outcome_sim(
simulate_result,
outcome,
groupby=None,
window=DEFAULT_WINDOW,
min_periods=DEFAULT_MIN_PERIODS,
take_logs=DEFAULT_TAKE_LOGS,
center=DEFAULT_CENTER,
):
"""Aggregate and smooth a list of per period outcomes in simulate_results.
Args:
simulate_results (dict): Dictionary with a "period_outputs" entry.
outcome (str): The name of the outcome in simulate_result["period_outputs"]
that should be selected.
groupby (list, str or None): Defines the subgroups for which the outcome is
calculated.
window (int): Over how many days results are averaged to smooth the outcome.
min_periods (int): Minimum number of days that need to be present in the
smoothing window for the outcome to be not NaN.
take_logs (bool): Whether the log of the outcome should be returned. If True,
smoothing is already done in logs.
center (bool): Whether the smoothing window is centered or forward looking.
Returns:
pd.Series: Series with a smoothed outcome. The first index level is date. If
groupby is specified, there are additional index levels.
"""
period_outcomes = simulate_result["period_outputs"][outcome]
per_individual = pd.concat(period_outcomes)
"""Script to add interval on GVA and population file
Run script on 'data' folder in scenarios_not_extracted folder
"""
import os
import pandas as pd
import numpy as np
from energy_demand.basic import lookup_tables
from energy_demand.basic import basic_functions
def run(
path_to_folder,
path_MSOA_baseline,
MSOA_calculations=False,
geography_name='region',
scenarios_to_generate=[]
):
"""
path_to_folder : str
Path to data folder
path_MSOA_baseline : str
Path to MSOA file with correct geography in csv
"""
sectors_to_generate = [2, 3, 4, 5, 6, 8, 9, 29, 11, 12, 10, 15, 14, 19, 17, 40, 41, 28, 35, 23, 27]
# Get all folders with scenario run results
all_csv_folders = basic_functions.get_all_folders_files(path_to_folder)
# Lookup of economic sectors
LAD_MSOA_lu = lookup_tables.lad_msoa_mapping()
base_yr = 2015
end_yr = 2050
# ---------------------------------------------------------------------------------------------------
# Create scenario with CONSTANT (2015) population and CONSTANT GVA
# ---------------------------------------------------------------------------------------------------
'''
empty_folder_name = os.path.join(path_to_folder, "constant_pop_gva")
basic_functions.delete_folder(empty_folder_name)
os.makedirs(empty_folder_name)
wrote_out_pop, wroute_out_GVA = False, False #Do not change
# Creat empty dataframe
columns = ['timestep', 'sector', 'lad_uk_2016', 'value']
# Get folder with standard scenario to get data for constant scenario
for folder_name in ['pop-baseline16_econ-c16_fuel-c16']:
all_files = os.listdir(os.path.join(path_to_folder, folder_name))
# Scale for every year according to this distribution
for file_name in all_files:
filename_split = file_name.split("__")
if (filename_split[0] == "gva_per_head" and filename_split[1] == 'lad_sector.csv') or (
filename_split[0] == "population" and filename_split[1] == 'lad.csv'):
file_path = os.path.join(path_to_folder, folder_name, file_name)
print("Change file: " + str(file_path))
# Read csv file
gp_file = pd.read_csv(file_path)
# Replace future pop with 2015 pop
gp_file_selection_2015 = gp_file.loc[gp_file['year'] == 2015] #Data of 2015
list_with_all_vals = []
for year in range(base_yr, end_yr + 1):
gp_file_selection_yr = gp_file_selection_2015
gp_file_selection_yr['year'] = year
list_with_all_vals += gp_file_selection_yr.values.tolist()
# Save as file
new_dataframe = pd.DataFrame(list_with_all_vals, columns=gp_file.columns)
file_path_out = os.path.join(empty_folder_name, file_name)
new_dataframe.to_csv(file_path_out, index=False) #Index prevents writing index rows
# ---
# MSOA pop calculation
# ----
if MSOA_calculations:
if (filename_split[0] == "population" and filename_split[1] == 'lad.csv'):
# Calculate relative pop percentage of ONS scenarios
msoa_principalDF = pd.read_csv(path_MSOA_baseline)
msoa_principalDF_selection_2015 = msoa_principalDF.loc[msoa_principalDF['year'] == 2015]
# LADs and calculate factor per MSOA
factor_msoas = {}
for lad, msoas in LAD_MSOA_lu.items():
tot_pop_lad = 0
for msoa in msoas:
tot_pop_lad += float(msoa_principalDF_selection_2015.loc[msoa_principalDF_selection_2015['region'] == msoa]['value'])
for msoa in msoas:
pop_msoa = float(msoa_principalDF_selection_2015.loc[msoa_principalDF_selection_2015['region'] == msoa]['value'])
factor_msoas[msoa] = pop_msoa / tot_pop_lad # calculate factor
list_with_all_vals = []
# READ csv file
gp_file = pd.read_csv(file_path)
pop_LADs_2015 = gp_file.loc[gp_file['year'] == 2015]
for index, row_lad in gp_file.iterrows():
lad = row_lad['region']
try:
corresponding_msoas = LAD_MSOA_lu[lad]
except KeyError:
# No match for northern ireland
corresponding_msoas = [lad]
# Calculate population according to ONS 2015 #pop_LAD = row_lad['value']
pop_LAD_2015 = float(pop_LADs_2015.loc[gp_file['region'] == lad]['value']) #Base year pop
for msoa_name in corresponding_msoas:
try:
pop_ONS_scale_factor = factor_msoas[msoa_name]
except:
pop_ONS_scale_factor = 1 # If not mapped
pop_MSOA_ONS_scaled = pop_LAD_2015 * pop_ONS_scale_factor
new_row = {
'region': msoa_name,
"year": row_lad['year'],
"value": pop_MSOA_ONS_scaled,
"interval": row_lad['interval']}
list_with_all_vals.append(new_row)
msoaDF = pd.DataFrame(list_with_all_vals, columns=gp_file.columns)
file_path_MSOA_out = os.path.join(empty_folder_name, "{}_{}.csv".format(file_name[:-4], "MSOA"))
msoaDF.to_csv(file_path_MSOA_out, index=False)
wrote_out_pop = True
elif (filename_split[0] == "gva_per_head" and filename_split[1] == 'lad.csv'):
file_path = os.path.join(path_to_folder, folder_name, file_name)
print("Change file: " + str(file_path))
gp_file = pd.read_csv(file_path)
# Add new column
gp_file['value'] = 1000
# Replace future pop with 2015 pop
gp_file_selection_2015 = gp_file.loc[gp_file['year'] == 2015] #Data of 2015
list_with_all_vals = []
for year in range(base_yr, end_yr + 1):
gp_file_selection_yr = gp_file_selection_2015
gp_file_selection_yr['year'] = year
list_with_all_vals += gp_file_selection_yr.values.tolist()
new_dataframe = pd.DataFrame(list_with_all_vals, columns=gp_file.columns)
# Save as file
file_path_out = os.path.join(empty_folder_name, file_name)
new_dataframe.to_csv(file_path_out, index=False) #Index prevents writing index rows
# -----------------------------------------
# MSOA GVA calculations
# -----------------------------------------
if MSOA_calculations:
lads = list(gp_file.loc[gp_file['year'] == 2015]['region'])
list_with_all_vals = []
for lad in lads:
try:
corresponding_msoas = LAD_MSOA_lu[lad]
except KeyError:
corresponding_msoas = lad # No match for northern ireland
rows_msoa = gp_file.loc[gp_file['region'] == lad].values
for row_msoa in rows_msoa:
for msoa_name in corresponding_msoas:
#row_msoa[0] = msoa_name
new_row = {
"region": msoa_name,
"year": row_msoa[1],
"value": row_msoa[2],
"interval": row_msoa[3]}
list_with_all_vals.append(new_row)
#msoaDF = msoaDF.append(new_row, ignore_index=True)
# Convert list to dataframe
msoaDF = pd.DataFrame(list_with_all_vals, columns=gp_file.columns)
file_path_MSOA_out = os.path.join(empty_folder_name, "{}_{}.csv".format(file_name[:-4], "MSOA"))
msoaDF.to_csv(file_path_MSOA_out, index=False)
wroute_out_GVA = True
else:
pass
if wrote_out_pop == True and wroute_out_GVA == True:
break
print("... finished generating CONSTANT scenario")'''
# ---------------------------------------------------------------------------------------------------
# Add interval and create individual GVA data for selected sectors
# ---------------------------------------------------------------------------------------------------
columns = ['timestep', 'sectors', geography_name]
# Get all folders with scenario run results (name of folder is scenario)
all_csv_folders_walk = os.walk(path_to_folder)
for root, dirnames, filenames in all_csv_folders_walk:
all_csv_folders = dirnames
break
for folder_name in all_csv_folders:
all_files = os.listdir(os.path.join(path_to_folder, folder_name))
if (scenarios_to_generate == []) or (folder_name in scenarios_to_generate):
print("folder name: " + str(folder_name), flush=True)
for file_name in all_files:
filename_split = file_name.split("__")
var_name = filename_split[0]
if (var_name == "gva_per_head" and filename_split[1] == 'lad_sector.csv') or (
var_name == "population" and filename_split[1] == 'lad.csv') or (
var_name == "gva_per_head" and filename_split[1] == 'lad.csv'):
try:
file_path = os.path.join(path_to_folder, folder_name, file_name)
print("file_path " + str(file_path))
gp_file = pd.read_csv(file_path)
""" Model for output of general/metadata data, useful for a batch """
import logging
from pathlib import Path
from typing import List, Optional, Union
import pandas as pd
from pydantic import BaseModel, Field, validator
from nowcasting_dataset.consts import SPATIAL_AND_TEMPORAL_LOCATIONS_OF_EACH_EXAMPLE_FILENAME
from nowcasting_dataset.filesystem.utils import check_path_exists
from nowcasting_dataset.utils import get_start_and_end_example_index
logger = logging.getLogger(__name__)
class SpaceTimeLocation(BaseModel):
"""Location of the example"""
t0_datetime_utc: pd.Timestamp = Field(
...,
description="The t0 of one example ",
)
x_center_osgb: float = Field(
...,
description="The x center of one example in OSGB coordinates",
)
y_center_osgb: float = Field(
...,
description="The y center of one example in OSGB coordinates",
)
id: Optional[int] = Field(
None,
description="The id of the GSP or the PV system. This is optional so can be None",
)
id_type: Optional[str] = Field(
None,
description="The type of the id. Should be either None, 'gsp' or 'pv_system'",
)
@validator("t0_datetime_utc")
def v_t0_datetime_utc(cls, t0_datetime_utc):
"""Make sure t0_datetime_utc is pandas Timestamp"""
return pd.Timestamp(t0_datetime_utc)
@validator("id_type")
def v_id_type(cls, id_type):
"""Make sure id_type is either None, 'gsp' or 'pv_system'"""
if id_type == "None":
id_type = None
assert id_type in [
None,
"gsp",
"pv_system",
], f"{id_type=} should be None, 'gsp' or 'pv_system'"
return id_type
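# A construction sketch (all values hypothetical): the validators coerce the
# timestamp to a pandas Timestamp and turn the string "None" into None for id_type.
#
#     location = SpaceTimeLocation(
#         t0_datetime_utc="2021-06-01 12:00",
#         x_center_osgb=123456.0,
#         y_center_osgb=654321.0,
#         id=42,
#         id_type="gsp",
#     )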
class Metadata(BaseModel):
"""Class to store metadata data"""
batch_size: int = Field(
...,
ge=0,
description="The size of this batch. If the batch size is 0, "
"then this item stores one data item",
)
space_time_locations: List[SpaceTimeLocation]
@property
def t0_datetimes_utc(self) -> list:
"""Return all the t0"""
return [location.t0_datetime_utc for location in self.space_time_locations]
@property
def x_centers_osgb(self) -> List[float]:
"""List of all the x centers from all the locations"""
return [location.x_center_osgb for location in self.space_time_locations]
@property
def y_centers_osgb(self) -> List[float]:
"""List of all the x centers from all the locations"""
return [location.y_center_osgb for location in self.space_time_locations]
@property
def ids(self) -> List[float]:
"""List of all the ids from all the locations"""
return [location.id for location in self.space_time_locations]
def save_to_csv(self, path):
"""
Save metadata to a csv file
Args:
path: the path where the file should be save
"""
filename = f"{path}/{SPATIAL_AND_TEMPORAL_LOCATIONS_OF_EACH_EXAMPLE_FILENAME}"
metadata_dict = [location.dict() for location in self.space_time_locations]
# metadata_dict.pop("batch_size")
# if file exists, add to it
try:
check_path_exists(filename)
except FileNotFoundError:
metadata_df = pd.DataFrame(metadata_dict)
else:
metadata_df = pd.read_csv(filename)
from datetime import datetime
import pandas as pd
import numpy as np
from extract import PreProcess
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import geopandas as gpd
import fun_logger
log = fun_logger.init_log()
prepros = PreProcess()
fmt = '%Y-%m-%d %H:%M:%S'
class AnalyzeDf():
def search_ais_gaps(self, df, column, time):
"""
Search for (large) gaps in time.
Search for (large) gaps per the current and last row in the df.
When a gap is larger then the specified time it will be flagged.
Args:
----
df (df): Pandas dataframe.
column (str): Column to use for the comparison.
time (int): Time to check the gap with.
"""
# Make the column a proper time_stamp instead of str/int
AIS_Gap = time
df_ret = df
df_t = pd.DataFrame()
import numpy as np
import astropy.units as u
import pandas as pd
def deg2mas(value):
'''
Converts value from degree to milliarcseconds
value: a value in degree
'''
value_mas = (value * u.degree).to(u.mas).value
return value_mas
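# Quick check of the conversion (1 degree = 3600 arcsec = 3.6e6 mas):
#
#     deg2mas(1.0)   # -> 3600000.0
#     deg2mas(0.25)  # -> 900000.0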
def time_diff(catalog):
"""
Calculates the time difference between different epochs and adds it to the
component catalog
CATALOG: Component catalog with different observation epochs
"""
dates = (pd.to_datetime(catalog['date'], format='%Y-%m-%d'))
delta_days = ((dates - dates.min()) / np.timedelta64(1, 'D'))
delta_days = pd.DataFrame({'delta_days': delta_days})
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.base import BaseEstimator
from sklearn.pipeline import Pipeline
from sklearn.metrics import (accuracy_score, precision_score, recall_score, roc_auc_score,
f1_score, roc_curve, precision_recall_curve)
import pickle
from pycaret.classification import *
class ClassifierPyCaret(BaseEstimator):
def __init__(self, metric="F1", numeric_features=None, categorical_features=None, numeric_imputation='mean',
normalize=True, normalize_method='zscore', handle_unknown_categorical=True,
unknown_categorical_method='least_frequent', feature_selection=False, feature_selection_threshold=0.8,
feature_interaction=False, folds=5, **kwargs):
self.numeric_features = numeric_features
self.categorical_features = categorical_features
self.numeric_imputation = numeric_imputation
self.normalize = normalize
self.normalize_method = normalize_method
self.handle_unknown_categorical = handle_unknown_categorical
self.unknown_categorical_method = unknown_categorical_method
self.feature_selection = feature_selection
self.feature_selection_threshold = feature_selection_threshold
self.feature_interaction = feature_interaction
self.metric = metric
self.folds = folds
self.params = kwargs
self.trained = False
self.__model_loaded = None
self.MODELS_TO_STR = {
'K Neighbors Classifier': 'knn',
'Ada Boost Classifier': 'ada',
'Extreme Gradient Boosting': 'xgboost',
'CatBoost Classifier': 'catboost',
'Quadratic Discriminant Analysis': 'qda',
'Extra Trees Classifier': 'et',
'Naive Bayes': 'nb',
'Random Forest Classifier': 'rf',
'Light Gradient Boosting Machine': 'lightgbm',
'Gradient Boosting Classifier': 'gbc',
'Decision Tree Classifier': 'dt',
'Logistic Regression': 'lr',
'SVM - Linear Kernel': 'svm',
'Linear Discriminant Analysis': 'lda',
'Ridge Classifier': 'ridge',
'Multi Level Perceptron': 'mlp',
'SVM (Linear)': 'svm',
'SVM (RBF)': 'rbfsvm',
'Gaussian Process': 'gpc'
}
def fit(self, X, y, *args):
if not isinstance(X, pd.DataFrame):
cols = ['X_'+str(i+1) for i in range(X.shape[1])]
df = pd.DataFrame(X, columns=cols)
else:
df = X.copy()
if isinstance(y, pd.Series):
name = "target"
y.name=name
elif isinstance(y, pd.DataFrame):
y = y.iloc[:,0]
name = y.name
else:
name = "target"
y = pd.Series(y, name=name)
df[name] = y
if self.__model_loaded is not None:
return self.__model_loaded
if self.categorical_features is None:
if self.numeric_features is None:
self.categorical_features = [i for i in df.dtypes[df.dtypes=='object'].index.tolist() if i!=name]
self.numeric_features = [i for i in df.dtypes[df.dtypes!='object'].index if i!=name]
else:
self.categorical_features = [i for i in df.columns if i!=name and i not in self.numeric_features]
else:
if self.numeric_features is None:
self.numeric_features = [i for i in df.columns if i!=name and i not in self.categorical_features]
self.env_setup = setup(data=df,
target=name,
train_size=0.9,
numeric_features = self.numeric_features,
categorical_features = self.categorical_features,
numeric_imputation = self.numeric_imputation,
normalize = self.normalize,
normalize_method = self.normalize_method,
handle_unknown_categorical = self.handle_unknown_categorical,
unknown_categorical_method = self.unknown_categorical_method,
feature_selection = self.feature_selection,
feature_selection_threshold = self.feature_selection_threshold,
feature_interaction = self.feature_interaction,
**self.params)
from pycaret.classification import prep_pipe
self.all_models = compare_models(fold=self.folds, sort=self.metric)
self.__name_model = self.all_models.data.iloc[0, 0]
str_model = self.MODELS_TO_STR[self.__name_model]
best_model = tune_model(str_model, fold=self.folds, n_iter=50, optimize=self.metric)
self.best_model = finalize_model(best_model)
self.preprocessor = prep_pipe
self.trained = True
return self.all_models
def predict(self, X):
if not self.trained:
raise ValueError("Model not fitted yet!")
if not isinstance(X, pd.DataFrame):
cols = ['X_'+str(i+1) for i in range(X.shape[1])]
X = pd.DataFrame(X, columns=cols)
X_transformed = self.preprocessor.transform(X)
return self.best_model.predict(X_transformed)
def predict_proba(self, X):
if not self.trained:
raise ValueError("Model not fitted yet!")
if not isinstance(X, pd.DataFrame):
cols = ['X_'+str(i+1) for i in range(X.shape[1])]
X = pd.DataFrame(X, columns=cols)
X_transformed = self.preprocessor.transform(X)
try:
output = self.best_model.predict_proba(X_transformed)
except:
raise ValueError("Model does not return probabilities!")
return output
def preprocess(self, X):
if not isinstance(X, pd.DataFrame):
cols = ['X_'+str(i+1) for i in range(X.shape[1])]
X = pd.DataFrame(X, columns=cols)
return self.preprocessor.transform(X)
def evaluate(self, X, y_true):
if not self.trained:
raise ValueError("Model not fitted yet!")
METRICS = ["Accuracy", "Recall", "Precision", "F1-Score", "AUC-ROC"]
metrics = {}
y_pred = self.predict(X)
try:
y_proba = self.predict_proba(X)[:,1]
except:
y_proba = None
if len(np.unique(y_true))==2:
for metric in METRICS:
if metric=="Accuracy":
metrics[metric] = accuracy_score(y_true, y_pred)
elif metric=="Precision":
metrics[metric] = precision_score(y_true, y_pred)
elif metric=="Recall":
metrics[metric] = recall_score(y_true, y_pred)
elif metric=="F1-Score":
metrics[metric] = f1_score(y_true, y_pred)
elif metric=="AUC-ROC" and y_proba is not None:
metrics[metric] = roc_auc_score(y_true, y_proba)
else:
for metric in METRICS:
if metric=="Accuracy":
metrics[metric] = accuracy_score(y_true, y_pred)
elif metric=="Precision":
metrics[metric] = precision_score(y_true, y_pred, average="weighted")
elif metric=="Recall":
metrics[metric] = recall_score(y_true, y_pred, average="weighted")
elif metric=="F1-Score":
metrics[metric] = f1_score(y_true, y_pred, average="weighted")
return pd.DataFrame(metrics, index=[self.__name_model])
def binary_evaluation_plot(self, X, y_true):
if not self.trained:
raise ValueError("Model not fitted yet!")
try:
y_proba = self.predict_proba(X)[:,1]
except:
raise ValueError("Model does not return probabilities!")
if len(np.unique(y_true))!=2:
raise ValueError("Multiclass Problem!")
fig, ax = plt.subplots(2,2,figsize=(12,8))
self._plot_roc(y_true, y_proba, ax[0][0])
self._plot_pr(y_true, y_proba, ax[0][1])
self._plot_cap(y_true, y_proba, ax[1][0])
self._plot_ks(y_true, y_proba, ax[1][1])
plt.tight_layout()
plt.show()
def _plot_cap(self, y_test, y_proba, ax):
cap_df = pd.DataFrame(data=y_test, index=y_test.index)
cap_df["Probability"] = y_proba
total = cap_df.iloc[:, 0].sum()
perfect_model = (cap_df.iloc[:, 0].sort_values(ascending=False).cumsum()/total).values
current_model = (cap_df.sort_values(by="Probability", ascending=False).iloc[:, 0].cumsum()/total).values
max_area = 0
covered_area = 0
h = 1/len(perfect_model)
random = np.linspace(0, 1, len(perfect_model))
for i, (am, ap) in enumerate(zip(current_model, perfect_model)):
try:
max_area += (ap-random[i]+perfect_model[i+1]-random[i+1])*h/2
covered_area += (am-random[i]+current_model[i+1]-random[i+1])*h/2
except:
continue
accuracy_ratio = covered_area/max_area
ax.plot(np.linspace(0, 1, len(current_model)), current_model,
color="green", label=f"{self.__name_model}: AR = {accuracy_ratio:.3f}")
ax.plot(np.linspace(0, 1, len(perfect_model)), perfect_model, color="red", label="Perfect Model")
ax.plot([0,1], [0,1], color="navy")
ax.set_xlabel("Individuals", fontsize=12)
ax.set_ylabel("Target Individuals", fontsize=12)
ax.set_xlim((0,1))
ax.set_ylim((0,1.01))
ax.legend(loc=4, fontsize=10)
ax.set_title("CAP Analysis", fontsize=13)
def _plot_roc(self, y_test, y_proba, ax):
fpr, tpr, _ = roc_curve(y_test, y_proba)
ax.plot(fpr, tpr, color="red", label=f"{self.__name_model} (AUC = {roc_auc_score(y_test, y_proba):.3f})")
ax.plot([0,1], [0,1], color="navy")
ax.set_xlabel("FPR")
ax.set_ylabel("TPR")
ax.set_xlim((0,1))
ax.set_ylim((0,1.001))
ax.legend(loc=4)
ax.set_title("ROC Analysis", fontsize=13)
def _plot_pr(self, y_test, y_proba, ax):
precision, recall, _ = precision_recall_curve(y_test, y_proba)
ax.plot(recall, precision, color="red", label=f"{self.__name_model}")
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
ax.set_xlim((0,1))
ax.set_ylim((0,1.001))
ax.legend(loc=4)
ax.set_title("Precision-Recall Analysis", fontsize=13)
def _plot_ks(self, y_test, y_proba, ax):
prediction_labels = pd.DataFrame(y_test.values, columns=["True Label"])
import os
import logging
import copy
import numpy as np
import pandas as pd
from oemof.solph import EnergySystem, Bus, Sink, Source
import oemof.tabular.tools.postprocessing as pp
from oemof.tools.economics import annuity
from oemof_flexmex.helpers import delete_empty_subdirs, load_elements, load_scalar_input_data,\
load_yaml
from oemof_flexmex.parametrization_scalars import get_parameter_values
from oemof_flexmex.facades import TYPEMAP
basic_columns = ['region', 'name', 'type', 'carrier', 'tech']
# Path definitions
module_path = os.path.abspath(os.path.dirname(__file__))
MODEL_CONFIG = 'model_config'
PATH_MAPPINGS_REL = '../flexmex_config'
path_mappings = os.path.abspath(os.path.join(module_path, PATH_MAPPINGS_REL))
path_map_output_timeseries = os.path.join(path_mappings, 'mapping-output-timeseries.yml')
path_map_input_scalars = os.path.join(path_mappings, 'mapping-input-scalars.yml')
# Load mappings
map_output_timeseries = load_yaml(path_map_output_timeseries)
FlexMex_Parameter_Map = load_yaml(path_map_input_scalars)
def create_postprocessed_results_subdirs(postprocessed_results_dir):
for parameters in map_output_timeseries.values():
for subdir in parameters.values():
path = os.path.join(postprocessed_results_dir, subdir)
if not os.path.exists(path):
os.makedirs(path)
def get_capacities(es):
r"""
Calculates the capacities of all components.
Adapted from oemof.tabular.tools.postprocessing.write_results()
Parameters
----------
es : oemof.solph.EnergySystem
EnergySystem containing the results.
Returns
-------
capacities : pd.DataFrame
DataFrame containing the capacities.
"""
def get_facade_attr(attr):
# Function constructor for getting a specific property from
# the Facade object in bus_results() DataFrame columns "from" or "to"
def fnc(flow):
# Get property from the Storage object in "from" for the discharge device
if isinstance(flow['from'], (TYPEMAP["storage"],
TYPEMAP["asymmetric storage"])):
return getattr(flow['from'], attr, np.nan)
# Get property from the Storage object in "to" for the charge device
if isinstance(flow['to'], (TYPEMAP["storage"],
TYPEMAP["asymmetric storage"])):
return getattr(flow['to'], attr, np.nan)
# Get property from other object in "from"
return getattr(flow['from'], attr, np.nan)
return fnc
def get_parameter_name(flow):
if isinstance(flow['from'], (TYPEMAP["storage"],
TYPEMAP["asymmetric storage"])):
return "capacity_discharge_invest"
if isinstance(flow['to'], (TYPEMAP["storage"],
TYPEMAP["asymmetric storage"])):
return "capacity_charge_invest"
return np.nan
try:
flows = pp.bus_results(es, es.results, select="scalars", concat=True)
flows.name = "var_value"
endogenous = flows.reset_index()
# Results already contain a column named "type". Call this "var_name" to
# preserve its content ("invest" for now)
endogenous.rename(columns={"type": "var_name"}, inplace=True)
# Update "var_name" with Storage specific parameter names for charge and discharge devices
df = pd.DataFrame({'var_name': endogenous.apply(get_parameter_name, axis=1)})
endogenous.update(df)
endogenous["region"] = endogenous.apply(get_facade_attr('region'), axis=1)
endogenous["name"] = endogenous.apply(get_facade_attr('label'), axis=1)
endogenous["type"] = endogenous.apply(get_facade_attr('type'), axis=1)
endogenous["carrier"] = endogenous.apply(get_facade_attr('carrier'), axis=1)
endogenous["tech"] = endogenous.apply(get_facade_attr('tech'), axis=1)
endogenous.drop(['from', 'to'], axis=1, inplace=True)
endogenous.set_index(
["region", "name", "type", "carrier", "tech", "var_name"], inplace=True
)
except ValueError:
endogenous = pd.DataFrame()
d = dict()
for node in es.nodes:
if not isinstance(node, (Bus, Sink, TYPEMAP["shortage"], TYPEMAP["link"])):
# Specify which parameters to read depending on the technology
parameters_to_read = []
if isinstance(node, TYPEMAP["storage"]):
# TODO for brownfield optimization
# parameters_to_read = ['capacity', 'storage_capacity']
# WORKAROUND Skip 'capacity' to safe some effort in aggregation and elsewhere
# possible because storages are greenfield optimized only: 'capacity' = 0
parameters_to_read = ['storage_capacity']
elif isinstance(node, TYPEMAP["asymmetric storage"]):
parameters_to_read = ['capacity_charge', 'capacity_discharge', 'storage_capacity']
elif getattr(node, "capacity", None) is not None:
parameters_to_read = ['capacity']
# Update dict with values in oemof's parameter->value structure
for p in parameters_to_read:
key = (
node.region,
node.label,
# [n for n in node.outputs.keys()][0],
node.type,
node.carrier,
node.tech, # tech & carrier are oemof-tabular specific
p
) # for oemof logic
d[key] = {'var_value': getattr(node, p)}
exogenous = pd.DataFrame.from_dict(d).T # .dropna()
if not exogenous.empty:
exogenous.index = exogenous.index.set_names(
['region', 'name', 'type', 'carrier', 'tech', 'var_name']
)
# Read storage capacities (from oemof.heat)
# only component_results() knows about 'storage_capacity'
try:
components = pd.concat(pp.component_results(es, es.results, select='scalars'))
components.name = 'var_value'
storage = components.reset_index()
storage.drop('level_0', 1, inplace=True)
storage.columns = ['name', 'to', 'var_name', 'var_value']
storage['region'] = [
getattr(t, "region", np.nan) for t in components.index.get_level_values('from')
]
storage['type'] = [
getattr(t, "type", np.nan) for t in components.index.get_level_values('from')
]
storage['carrier'] = [
getattr(t, "carrier", np.nan) for t in components.index.get_level_values('from')
]
storage['tech'] = [
getattr(t, "tech", np.nan) for t in components.index.get_level_values('from')
]
storage = storage.loc[storage['to'].isna()]
storage.drop('to', 1, inplace=True)
storage = storage[['region', 'name', 'type', 'carrier', 'tech', 'var_name', 'var_value']]
# Delete unused 'init_cap' rows - parameter name misleading! (oemof issue)
storage.drop(storage.loc[storage['var_name'] == 'init_cap'].index, axis=0, inplace=True)
storage.replace(
['invest'],
['storage_capacity_invest'],
inplace=True
)
storage.set_index(
['region', "name", "type", "carrier", "tech", "var_name"], inplace=True
)
except ValueError:
storage = pd.DataFrame()
capacities = pd.concat([endogenous, exogenous, storage])
return capacities
def format_capacities(oemoflex_scalars, capacities):
df = pd.DataFrame(columns=oemoflex_scalars.columns)
df.loc[:, 'name'] = capacities.reset_index().loc[:, 'name']
df.loc[:, 'tech'] = capacities.reset_index().loc[:, 'tech']
df.loc[:, 'carrier'] = capacities.reset_index().loc[:, 'carrier']
df.loc[:, 'var_name'] = capacities.reset_index().loc[:, 'var_name']
df.loc[:, 'var_value'] = capacities.reset_index().loc[:, 'var_value']
df.loc[:, 'type'] = capacities.reset_index().loc[:, 'type']
df.loc[:, 'region'] = capacities.reset_index().loc[:, 'region']
df['var_unit'] = 'MW'
return df
def get_sequences_by_tech(results):
r"""
Collects the sequences of the components from the optimization results,
keyed by region, carrier-tech and variable name.
Parameters
----------
results : dict
Dictionary containing oemof.solph.Model results.
Returns
-------
sequences_by_tech : pd.DataFrame
DataFrame of sequences with a (region, carrier_tech, var_name) column MultiIndex.
"""
# copy to avoid manipulating the data in es.results
sequences = copy.deepcopy({key: value['sequences'] for key, value in results.items()})
sequences_by_tech = []
# Get internal busses for all 'ReservoirWithPump' and 'Bev' nodes to be ignored later
internal_busses = get_subnodes_by_type(sequences, Bus)
# Get inflows for all 'ReservoirWithPump' nodes
reservoir_inflows = get_subnodes_by_type(sequences, Source)
for key, df in sequences.items():
if isinstance(key[0], Bus):
component = key[1]
bus = key[0]
if isinstance(component, TYPEMAP["link"]):
if bus == component.from_bus:
var_name = 'flow_gross_forward'
elif bus == component.to_bus:
var_name = 'flow_gross_backward'
elif isinstance(component, (TYPEMAP["extraction"], TYPEMAP["backpressure"])):
var_name = 'flow_fuel'
else:
var_name = 'flow_in'
if isinstance(key[1], Bus):
bus = key[1]
component = key[0]
if isinstance(component, TYPEMAP["link"]):
if bus == component.to_bus:
var_name = 'flow_net_forward'
elif bus == component.from_bus:
var_name = 'flow_net_backward'
elif isinstance(component, (TYPEMAP["extraction"], TYPEMAP["backpressure"])):
if bus == component.electricity_bus:
var_name = 'flow_electricity'
elif bus == component.heat_bus:
var_name = 'flow_heat'
elif component in reservoir_inflows:
var_name = 'flow_inflow'
else:
var_name = 'flow_out'
if key[1] is None:
component = key[0]
var_name = 'storage_content'
# Ignore sequences FROM internal busses (concerns ReservoirWithPump, Bev)
if bus in internal_busses and component not in reservoir_inflows:
continue
carrier_tech = component.carrier + '-' + component.tech
if isinstance(component, TYPEMAP["link"]):
# Replace AT-DE by AT_DE to be ready to be merged with DataFrames from preprocessing
region = component.label.replace('-', '_')
else:
# Take AT from AT-ch4-gt, string op since sub-nodes lack of a 'region' attribute
region = component.label.split('-')[0]
df.columns = pd.MultiIndex.from_tuples([(region, carrier_tech, var_name)])
df.columns.names = ['region', 'carrier_tech', 'var_name']
sequences_by_tech.append(df)
sequences_by_tech = pd.concat(sequences_by_tech, axis=1)
return sequences_by_tech
def get_subnodes_by_type(sequences, cls):
r"""
Get all the subnodes of type 'cls' in the <to> nodes of 'sequences'
Parameters
----------
sequences : dict (special format, see get_sequences_by_tech() and before)
key: tuple of 'to' node and 'from' node: (from, to)
value: timeseries DataFrame
cls : Class
Class to check against
Returns
-------
A list of all subnodes of type 'cls'
"""
# Get a list of all the components
to_nodes = []
for k in sequences.keys():
# It's sufficient to look into one side of the flows ('to' node, k[1])
to_nodes.append(k[1])
subnodes_list = []
for component in to_nodes:
if hasattr(component, 'subnodes'):
# Only get subnodes of type 'cls'
subnodes_per_component = [n for n in component.subnodes if isinstance(n, cls)]
subnodes_list.extend(subnodes_per_component)
return subnodes_list
def get_summed_sequences(sequences_by_tech, prep_elements):
# Put component definitions into one DataFrame - drops 'carrier_tech' information in the keys
base = pd.concat(prep_elements.values())
df = base.loc[:, basic_columns]
sum = sequences_by_tech.sum()
sum.name = 'var_value'
sum_df = sum.reset_index()
# Form helper column for proper merging with component definition
df['carrier_tech'] = df['carrier'] + '-' + df['tech']
summed_sequences = pd.merge(df, sum_df, on=['region', 'carrier_tech'])
# Drop helper column
summed_sequences.drop('carrier_tech', axis=1, inplace=True)
summed_sequences = summed_sequences.loc[summed_sequences['var_name'] != 'storage_content']
summed_sequences['var_unit'] = 'MWh'
return summed_sequences
def get_re_generation(oemoflex_scalars):
renewable_carriers = ['solar', 'wind']
re_generation = pd.DataFrame(columns=oemoflex_scalars.columns)
re_flow = oemoflex_scalars.loc[(oemoflex_scalars['carrier'].isin(renewable_carriers)) &
(oemoflex_scalars['var_name'] == 'flow_out')]
curtailment = oemoflex_scalars.loc[(oemoflex_scalars['carrier'] == 'electricity') &
(oemoflex_scalars['tech'] == 'curtailment') &
(oemoflex_scalars['var_name'] == 'flow_in')]
sum = re_flow.groupby('region').sum() - curtailment.groupby('region').sum()
re_generation['region'] = sum.index
re_generation['carrier'] = 're'
re_generation['type'] = 'none'
re_generation['tech'] = 'none'
re_generation['var_name'] = 're_generation'
re_generation = re_generation.drop('var_value', 1)
re_generation = pd.merge(re_generation, sum['var_value'], on='region')
re_generation['var_unit'] = 'MWh'
return re_generation
def get_transmission_losses(oemoflex_scalars):
r"""Calculates losses_forward losses_backward for each link."""
def gross_minus_net_flow(direction):
flow_gross = oemoflex_scalars.loc[
oemoflex_scalars['var_name'] == f'flow_gross_{direction}'].set_index('name')
flow_net = oemoflex_scalars.loc[
oemoflex_scalars['var_name'] == f'flow_net_{direction}'].set_index('name')
loss = flow_gross.copy()
loss['var_name'] = f'loss_{direction}'
loss['var_value'] = flow_gross['var_value'] - flow_net['var_value']
return loss
losses = []
for direction in ['forward', 'backward']:
loss = gross_minus_net_flow(direction)
losses.append(loss)
losses = pd.concat(losses)
losses = losses.reset_index()
return losses
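# A sketch with a toy scalars frame (columns as used throughout this module,
# values hypothetical); the single forward loss below comes out as 5.0:
#
#     toy = pd.DataFrame({
#         'name': ['AT-DE-link', 'AT-DE-link'],
#         'var_name': ['flow_gross_forward', 'flow_net_forward'],
#         'var_value': [105.0, 100.0],
#     })
#     get_transmission_losses(toy)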
def get_storage_losses(oemoflex_scalars):
storage_data = oemoflex_scalars.loc[
oemoflex_scalars['type'].isin(['storage', 'asymmetric storage'])
]
flow_in = storage_data.loc[storage_data['var_name'] == 'flow_in'].set_index('name')
flow_out = storage_data.loc[storage_data['var_name'] == 'flow_out'].set_index('name')
losses = flow_in.copy()
losses['var_name'] = 'loss'
losses['var_value'] = flow_in['var_value'] - flow_out['var_value']
losses = losses.reset_index()
return losses
def get_reservoir_losses(oemoflex_scalars):
reservoir_data = oemoflex_scalars.loc[
oemoflex_scalars['type'].isin(['reservoir'])
]
flow_in = reservoir_data.loc[reservoir_data['var_name'] == 'flow_in'].set_index('name')
flow_out = reservoir_data.loc[reservoir_data['var_name'] == 'flow_out'].set_index('name')
flow_inflow = reservoir_data.loc[reservoir_data['var_name'] == 'flow_inflow'].set_index('name')
losses = flow_in.copy()
losses['var_name'] = 'losses'
losses['var_value'] = flow_inflow['var_value'] - (flow_out['var_value'] - flow_in['var_value'])
losses = losses.reset_index()
return losses
def aggregate_storage_capacities(oemoflex_scalars):
storage = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['storage_capacity', 'storage_capacity_invest'])].copy()
# Make sure that values in columns used to group on are strings and thus equatable
storage[basic_columns] = storage[basic_columns].astype(str)
storage = storage.groupby(by=basic_columns, as_index=False).sum()
storage['var_name'] = 'storage_capacity_sum'
storage['var_value'] = storage['var_value'] * 1e-3 # MWh -> GWh
storage['var_unit'] = 'GWh'
charge = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['capacity_charge', 'capacity_charge_invest'])]
charge = charge.groupby(by=basic_columns, as_index=False).sum()
charge['var_name'] = 'capacity_charge_sum'
charge['var_unit'] = 'MW'
discharge = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['capacity_discharge', 'capacity_discharge_invest'])]
discharge = discharge.groupby(by=basic_columns, as_index=False).sum()
discharge['var_name'] = 'capacity_discharge_sum'
discharge['var_unit'] = 'MW'
return pd.concat([storage, charge, discharge])
def aggregate_other_capacities(oemoflex_scalars):
capacities = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['capacity', 'invest'])
].copy()
# Make sure that values in columns used to group on are strings and thus equatable
capacities[basic_columns] = capacities[basic_columns].astype(str)
capacities = capacities.groupby(by=basic_columns, as_index=False).sum()
capacities['var_name'] = 'capacity_sum'
capacities['var_unit'] = 'MW'
return capacities
def get_emissions(oemoflex_scalars, scalars_raw):
try:
emissions = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'cost_emission'].copy()
except KeyError:
logging.info("No key 'cost_emissions' found to calculate 'emissions'.")
return None
price_emission = get_parameter_values(scalars_raw, 'Energy_Price_CO2')
emissions['var_value'] *= 1/price_emission
emissions['var_name'] = 'emissions'
emissions['var_unit'] = 'tCO2'
return emissions
def map_link_direction(oemoflex_scalars):
r"""Swaps name and region for backward flows of links."""
backward = (
(oemoflex_scalars['type'] == 'link') &
(oemoflex_scalars['var_name'].str.contains('backward'))
)
def swap(series, delimiter):
return series.str.split(delimiter).apply(lambda x: delimiter.join(x[::-1]))
def drop_regex(series, regex):
return series.str.replace(regex, '', regex=True)
oemoflex_scalars.loc[backward, 'name'] = swap(oemoflex_scalars.loc[backward, 'name'], '-')
oemoflex_scalars.loc[backward, 'region'] = swap(oemoflex_scalars.loc[backward, 'region'], '_')
oemoflex_scalars.loc[:, 'var_name'] = drop_regex(
oemoflex_scalars.loc[:, 'var_name'], '.backward|.forward'
)
return oemoflex_scalars
def map_to_flexmex_results(oemoflex_scalars, flexmex_scalars_template, mapping, scenario):
mapping = mapping.set_index('Parameter')
flexmex_scalars = flexmex_scalars_template.copy()
oemoflex_scalars = oemoflex_scalars.set_index(['region', 'carrier', 'tech', 'var_name'])
oemoflex_scalars.loc[oemoflex_scalars['var_unit'] == 'MWh', 'var_value'] *= 1e-3 # MWh to GWh
for i, row in flexmex_scalars.loc[flexmex_scalars['UseCase'] == scenario].iterrows():
try:
select = mapping.loc[row['Parameter'], :]
except KeyError:
continue
try:
value = oemoflex_scalars.loc[
(row['Region'],
select['carrier'],
select['tech'],
select['var_name']), 'var_value']
except KeyError:
logging.info(
f"No key "
f"{(row['Region'], select['carrier'], select['tech'], select['var_name'])}"
f"found to be mapped to FlexMex."
)
continue
if isinstance(value, float):
flexmex_scalars.loc[i, 'Value'] = np.around(value)
flexmex_scalars.loc[:, 'Modell'] = 'oemof'
return flexmex_scalars
def get_varom_cost(oemoflex_scalars, prep_elements):
r"""
Calculates the VarOM cost by multiplying consumption by marginal cost.
Which value is taken as consumption depends on the actual technology type.
Parameters
----------
oemoflex_scalars
prep_elements
Returns
-------
"""
varom_cost = []
for prep_el in prep_elements.values():
if 'marginal_cost' in prep_el.columns:
df = prep_el[basic_columns]
if prep_el['type'][0] == 'excess':
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_in']
elif prep_el['type'][0] in ['backpressure', 'extraction']:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_electricity']
elif prep_el['type'][0] in ['link', 'electrical line']:
net_flows = ['flow_net_forward', 'flow_net_backward']
flow = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(net_flows)]
flow = flow.groupby(basic_columns, as_index=False).sum()
else:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_out']
df = pd.merge(
df, flow,
on=basic_columns
)
df['var_value'] = df['var_value'] * prep_el['marginal_cost']
df['var_name'] = 'cost_varom'
varom_cost.append(df)
varom_cost = pd.concat(varom_cost)
varom_cost['var_unit'] = 'Eur'
return varom_cost
def get_carrier_cost(oemoflex_scalars, prep_elements):
carrier_cost = []
for prep_el in prep_elements.values():
if 'carrier_cost' in prep_el.columns:
df = prep_el[basic_columns]
if prep_el['type'][0] in ['backpressure', 'extraction']:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_fuel']
else:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_in']
df = pd.merge(
df, flow,
on=basic_columns
)
df['var_value'] = df['var_value'] * prep_el['carrier_cost']
df['var_name'] = 'cost_carrier'
carrier_cost.append(df)
if carrier_cost:
carrier_cost = pd.concat(carrier_cost)
import streamlit as st
import pandas as pd
import altair as alt
import numpy as np
from datetime import datetime, timedelta
from dateutil import parser
from utils import suffix, custom_strftime
population = 68134973
alt.themes.enable('fivethirtyeight')
latest_date = parser.parse("2021-04-07")
dose1 = pd.read_csv(f"data/data_{latest_date.strftime('%Y-%b-%d')}-dose1.csv")
dose2 = pd.read_csv(f"data/data_{latest_date.strftime('%Y-%b-%d')}-dose2.csv")
df = pd.merge(dose1, dose2, on=["date", "areaName", "areaType", "areaCode"])
df.loc[:, "totalByDay"] = df.newPeopleVaccinatedSecondDoseByPublishDate + df.newPeopleVaccinatedFirstDoseByPublishDate
df.loc[:, "percentageFirstDose"] = 100.0* df.newPeopleVaccinatedFirstDoseByPublishDate / df.totalByDay
cols = ["date", "newPeopleVaccinatedSecondDoseByPublishDate", "newPeopleVaccinatedFirstDoseByPublishDate", "totalByDay", "percentageFirstDose"]
all_df = df[df.areaName == "United Kingdom"]
all_df = all_df.loc[~pd.isna(all_df.totalByDay)]
first_dose = all_df['cumPeopleVaccinatedFirstDoseByPublishDate'].max()
second_dose = all_df['cumPeopleVaccinatedSecondDoseByPublishDate'].max()
all_df = all_df.rename(columns={
"newPeopleVaccinatedFirstDoseByPublishDate": "firstDose",
"newPeopleVaccinatedSecondDoseByPublishDate": "secondDose",
"cumPeopleVaccinatedFirstDoseByPublishDate": "firstDoseCumulative",
"cumPeopleVaccinatedSecondDoseByPublishDate": "secondDoseCumulative"
})
all_df.loc[:, "totalDoses"] = all_df.firstDose + all_df.secondDose
melted_df = all_df.melt(value_vars=["firstDose", "secondDose", "firstDoseCumulative", "secondDoseCumulative", "totalDoses"], id_vars=["date", "areaName"])
melted_df = melted_df[melted_df.areaName == "United Kingdom"]
melted_df = melted_df.rename(columns={"value": "vaccinations", "variable": "dose"})
melted_daily_doses = melted_df.loc[(melted_df["dose"] == "firstDose") | (melted_df["dose"] == "secondDose") | (melted_df["dose"] == "totalDoses")]
melted_daily_doses = melted_daily_doses.loc[~pd.isna(melted_daily_doses.vaccinations)]
melted_daily_doses = melted_daily_doses.sort_values(["date"])
melted_daily_doses.loc[:, "dateWeek"] = pd.to_datetime(melted_daily_doses.date).dt.strftime('%Y-%U')
melted_daily_doses.loc[melted_daily_doses.dose == "firstDose", "rollingAverage"] = melted_daily_doses.loc[melted_daily_doses.dose == "firstDose"]["vaccinations"].rolling(7).mean()
melted_daily_doses.loc[melted_daily_doses.dose == "secondDose", "rollingAverage"] = melted_daily_doses.loc[melted_daily_doses.dose == "secondDose"]["vaccinations"].rolling(7).mean()
melted_daily_doses.loc[melted_daily_doses.dose == "totalDoses", "rollingAverage"] = melted_daily_doses.loc[melted_daily_doses.dose == "totalDoses"]["vaccinations"].rolling(7).mean()
melted_daily_doses.loc[:, "dayOfWeek"] = melted_daily_doses.date.apply(lambda item: parser.parse(item).strftime("%A"))
melted_daily_doses.loc[:, "dayOfWeekIndex"] = melted_daily_doses.date.apply(lambda item: parser.parse(item).strftime("%w"))
melted_first_second_daily_doses = melted_daily_doses.loc[(melted_daily_doses.dose.isin(["firstDose", "secondDose"]))]
melted_cumulative_doses = melted_df.loc[(melted_df["dose"] == "firstDoseCumulative") | (melted_df["dose"] == "secondDoseCumulative"), :]
melted_cumulative_doses.loc[:,"dateWeek"] = pd.to_datetime(melted_cumulative_doses.date).dt.strftime('%Y-%U')
summary_df = pd.DataFrame({
"Description": ["Population", "1st Dose", "2nd Dose"],
"Value": [f"{population:,}", f"{int(first_dose):,}", f"{int(second_dose):,}"],
"Percentage": ["", f"{round(np.divide(first_dose, population) * 100, 2)}", f"{round(np.divide(second_dose, population) * 100, 2)}"]
})
summary_df.set_index('Description', inplace=True)
st.set_page_config(layout="wide")
st.title("Coronavirus Vaccines in the UK")
st.write(f"As of {custom_strftime('{S} %B %Y', latest_date)}")
st.markdown("This app contains charts showing how the Coronavirus vaccination program is going in the UK. It's based on data from [coronavirus.data.gov.uk/details/vaccinations](https://coronavirus.data.gov.uk/details/vaccinations)")
st.header("Overview")
st.table(summary_df)
st.header("Cumulative Vaccine Doses")
st.write("This chart shows the total number of doses done at the end of each week.")
cumulative_first_doses_chart = alt.Chart(melted_cumulative_doses, padding={"left": 10, "top": 10, "right": 10, "bottom": 10}).mark_line(point=True).encode(
# x=alt.X('dateWeek', axis=alt.Axis(title='Week Ending', format=("%b %d"))),
x=alt.X('dateWeek', axis=alt.Axis(title='Week Ending'), scale=alt.Scale(padding=0)),
tooltip=['max(vaccinations)'],
y=alt.Y('max(vaccinations)', axis=alt.Axis(title='Vaccinations')),
color=alt.Color('dose', legend=alt.Legend(orient='bottom')),
).properties(title='Cumulative doses', height=500)
st.altair_chart(cumulative_first_doses_chart, use_container_width=True)
dose1.loc[:, "dateWeek"] = | pd.to_datetime(dose1.date) | pandas.to_datetime |
#!/usr/bin/env python3
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from itertools import product
import statsmodels.api as sm
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.arima_process import arma_generate_sample
from rom_plots import detrend
import warnings
BASE_DIR = Path(__file__).resolve().parent.parent
TRAIN_DIR = BASE_DIR.joinpath('train')
def load_data(file_path, segments):
df = pd.read_csv(file_path)
year = pd.unique(df.YEAR)
"""
Utils to plot graphs with arrows
"""
import matplotlib.transforms
import matplotlib.patches
import matplotlib.colors
import matplotlib.cm
import numpy as np
import pandas as pd
import logging
from tctx.util import plot
def _clip_arrows(arrows, tail_offset, head_offset):
"""
shorten head & tail so the arrows don't overlap with markers
:param arrows: a pd.DataFrame with columns: 'source_x', 'source_y', 'target_x', 'target_y'
:param tail_offset: how much shorter to make the tail (so it doesn't overlap with the markers)
:param head_offset: how much shorter to make the head (so it doesn't overlap with the markers)
:return: 2 numpy arrays of shape Nx2
"""
source_pos = arrows[['source_x', 'source_y']].values
target_pos = arrows[['target_x', 'target_y']].values
direction = target_pos - source_pos
length = np.sqrt(np.sum(np.square(direction), axis=1))
direction = direction / length[:, np.newaxis]
source_pos = source_pos + direction * tail_offset
target_pos = target_pos + direction * (-1 * head_offset)
return source_pos, target_pos
def plot_arrows_cmap(
ax, arrows, c, cmap=None, norm=None,
tail_offset=0, head_offset=0, head_length=4, head_width=1.25, **kwargs):
"""
Draw multiple arrows using a colormap.
:param ax: matplotlib.axes.Axes
:param arrows: a pd.DataFrame with columns: 'source_x', 'source_y', 'target_x', 'target_y'
:param c: a pd.Series with the same index as arrows or a string that identifies a column in it.
:param tail_offset: how much shorter to make the tail (so it doesn't overlap with the markers)
:param head_offset: how much shorter to make the head (so it doesn't overlap with the markers)
:param kwargs: args for matplotlib.patches.FancyArrowPatch
:return: matplotlib.cm.Mappable that can be used for a colorbar
:param cmap:
:param norm:
:param head_length:
:param head_width:
:return:
"""
if cmap is None:
cmap = 'default'
if isinstance(cmap, str):
cmap = plot.lookup_cmap(cmap)
if isinstance(c, str):
c = arrows[c]
if norm is None:
norm = matplotlib.colors.Normalize(vmin=c.min(), vmax=c.max())
arrowstyle = matplotlib.patches.ArrowStyle.CurveFilledB(head_length=head_length, head_width=head_width)
kwargs.setdefault('linewidth', .75)
source_pos, target_pos = _clip_arrows(arrows, tail_offset, head_offset)
for i, idx in enumerate(arrows.index):
color = cmap(norm(c[idx]))
_plot_single_arrow(ax, source_pos[i], target_pos[i], arrowstyle, color, **kwargs)
sm = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array(c.values)
return sm
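# Usage sketch (hypothetical arrow table; the cmap fallback assumes a 'default'
# entry is registered in tctx.util.plot's lookup):
#
#     import matplotlib.pyplot as plt
#     arrows = pd.DataFrame({
#         'source_x': [0.0, 1.0], 'source_y': [0.0, 0.0],
#         'target_x': [1.0, 2.0], 'target_y': [1.0, 0.5],
#     })
#     fig, ax = plt.subplots()
#     sm = plot_arrows_cmap(ax, arrows, c=pd.Series([0.2, 0.8], index=arrows.index))
#     fig.colorbar(sm, ax=ax)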
def _plot_single_arrow(ax, source_pos, target_pos, arrowstyle, color, **kwargs):
patch_kwargs = kwargs.copy()
patch_kwargs.setdefault('edgecolor', color)
patch_kwargs.setdefault('facecolor', color)
patch = matplotlib.patches.FancyArrowPatch(
posA=source_pos,
posB=target_pos,
arrowstyle=arrowstyle,
**patch_kwargs,
)
ax.add_artist(patch)
def plot_arrows_solid(
ax, arrows, color=None,
tail_offset=0, head_offset=0, head_length=4, head_width=1.25, **kwargs):
"""
Draw multiple arrows using a solid color.
:param ax: matplotlib.axes.Axes
:param arrows: a pd.DataFrame with columns: 'source_x', 'source_y', 'target_x', 'target_y'
:param tail_offset: how much shorter to make the tail (so it doesn't overlap with the markers)
:param head_offset: how much shorter to make the head (so it doesn't overlap with the markers)
:param kwargs: args for matplotlib.patches.FancyArrowPatch
:param color:
:param head_length:
:param head_width:
:param kwargs:
:return:
"""
arrowstyle = matplotlib.patches.ArrowStyle.CurveFilledB(head_length=head_length, head_width=head_width)
kwargs.setdefault('linewidth', .75)
source_pos, target_pos = _clip_arrows(arrows, tail_offset, head_offset)
for i, idx in enumerate(arrows.index):
_plot_single_arrow(ax, source_pos[i], target_pos[i], arrowstyle, color, **kwargs)
class Graph:
"""
A class to plot graphs with per-node and per-edge styles
"""
def __init__(self, nodes, edges, styles=None, transform=None, kwargs_nodes=None, kwargs_edges=None):
"""
:param nodes: a pd.DataFrame with columns ['x', 'y'] representing the 2d position and
column 'style' that can be indexed into the styles DF
:param edges: a pd.DataFrame with columns ['source', 'target'] that can be indexed into the nodes DF and
column 'style' that can be indexed into the styles DF
:param styles: pd.DataFrame with columns for different cmaps ('cmap_from_white', etc),
color levels ('light', 'dark', etc). By default: plot.styles_df
:param kwargs_nodes: default kwargs to nodes plotting
:param kwargs_edges: default kwargs to edges plotting
:param transform: the transform to apply to the graph. Useful when drawing an inset.
"""
assert np.all(edges['source'] != edges['target']), 'self edges'
assert np.all([np.issubdtype(nodes[c].dtype, np.number) for c in ['x', 'y']])
if styles is None:
styles = plot.styles_df.copy()
self.styles = styles
self.nodes = nodes
self.edges = edges
self.transform = transform
self.default_kwargs_nodes = dict(
cmap='cmap',
marker='marker_time',
linewidth=.5,
facecolor='light',
edgecolor='darker',
)
self.default_kwargs_nodes.update(kwargs_nodes or {})
self.default_kwargs_edges = dict(
cmap='cmap',
facecolor='main',
edgecolor='main',
)
self.default_kwargs_edges.update(kwargs_edges or {})
edge_len = self.get_edge_lengths()
too_short = np.count_nonzero(np.isclose(edge_len, 0))
if too_short:
logging.warning(f'{too_short}/{len(edge_len)} edges of zero length')
# pandas complains when editing categories which is inconvenient
if self.nodes['style'].dtype.name == 'category':
self.nodes['style'] = self.nodes['style'].astype(str)
if self.edges['style'].dtype.name == 'category':
self.edges['style'] = self.edges['style'].astype(str)
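# Construction sketch (node/edge tables and style labels are hypothetical; the
# labels must exist as rows of the styles DataFrame, plot.styles_df by default):
#
#     nodes = pd.DataFrame({'x': [0.0, 1.0], 'y': [0.0, 1.0], 'style': ['exc', 'inh']})
#     edges = pd.DataFrame({'source': [0], 'target': [1], 'style': ['e2i']})
#     graph = Graph(nodes, edges)
#     graph.plot_nodes_solid(ax)
#     graph.plot_edges_solid(ax)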
def copy(self):
return Graph(
nodes=self.nodes.copy(),
edges=self.edges.copy(),
styles=self.styles.copy(),
transform=None if self.transform is None else self.transform.copy(),
kwargs_nodes=self.default_kwargs_nodes.copy(),
kwargs_edges=self.default_kwargs_edges.copy(),
)
def get_edge_lengths(self):
xy0 = self.nodes.loc[self.edges['source'], ['x', 'y']].values
xy1 = self.nodes.loc[self.edges['target'], ['x', 'y']].values
edge_len = np.sqrt(np.sum(np.square(xy0 - xy1), axis=1))
return pd.Series(edge_len, index=self.edges.index)
def _get_arrows(self, selection=None):
if selection is None:
selection = self.edges
if isinstance(selection, (np.ndarray, pd.Index)):
selection = self.edges.loc[selection]
arrows = [selection]
for end in ['source', 'target']:
pos = self.nodes[['x', 'y']].reindex(selection[end])
pos.index = selection.index
pos.columns = [end + '_' + c for c in pos.columns]
arrows.append(pos)
arrows = pd.concat(arrows, axis=1)
return arrows
def _lookup_style_kwargs(self, style, kwargs):
kwargs = kwargs.copy()
if 'style' in kwargs:
specific = kwargs.pop('style')
if style in specific:
kwargs.update(specific[style])
styled_kwargs = kwargs.copy()
for k, v in kwargs.items():
if isinstance(v, str) and v in self.styles.columns:
styled_kwargs[k] = self.styles.loc[style, v]
if self.transform is not None:
styled_kwargs['transform'] = self.transform
return styled_kwargs
def plot_nodes_solid(self, ax, selection=None, **kwargs):
"""
Plot all of the nodes with a flat color
:param ax:
:param selection: an array, index or boolean series that
can be used on self.nodes.loc to draw a subset of the known nodes
:param kwargs: scatter params
:return:
"""
final_kwargs = self.default_kwargs_nodes.copy()
final_kwargs.update(kwargs)
nodes_to_draw = self.nodes
if selection is not None:
assert isinstance(selection, (np.ndarray, pd.Index, pd.Series))
nodes_to_draw = self.nodes.loc[selection]
for style, nodes in nodes_to_draw.groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'cmap' in style_kwargs:
style_kwargs.pop('cmap')
ax.scatter(
nodes.x,
nodes.y,
**style_kwargs,
)
def plot_nodes_cmap(self, ax, c=None, selection=None, **kwargs):
"""
Plot all of the nodes with a color map
:param ax:
:param c: series or array matching length of self.nodes,
if none indicated, we expect a column 'c' in self.nodes
:param selection: an array, index or boolean series that
can be used on self.nodes.loc to draw a subset of the known nodes
:param kwargs: scatter params
:return: a dict of style to mappable for use in colorbars
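        Example (sketch; assumes ``fig, ax = plt.subplots()`` and a numeric
        column 'c' in self.nodes)::

            mappables = graph.plot_nodes_cmap(ax)
            for style, sm in mappables.items():
                fig.colorbar(sm, ax=ax, label=style)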
"""
final_kwargs = self.default_kwargs_nodes.copy()
final_kwargs.update(kwargs)
nodes_to_draw = self.nodes
if selection is not None:
assert isinstance(selection, (np.ndarray, pd.Index, pd.Series))
nodes_to_draw = self.nodes.loc[selection]
if c is None:
c = 'c'
if isinstance(c, str):
c = self.nodes[c]
if isinstance(c, np.ndarray):
c = pd.Series(c, index=self.nodes.index)
all_sm = {}
for style, nodes in nodes_to_draw.groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'facecolor' in style_kwargs:
style_kwargs.pop('facecolor')
all_sm[style] = ax.scatter(
nodes.x,
nodes.y,
c=c.loc[nodes.index],
**style_kwargs,
)
return all_sm
def plot_nodes_labels(self, ax, nodes=None, va='center', ha='center', fmt='{index}', fontsize=6, **kwargs):
"""
        Plot a descriptive text label for each node.
        By default the index is shown; modify fmt to use something else.
"""
# TODO allow the style column in the fmt to color by dark of the "label" column.
if nodes is None:
nodes = self.nodes
else:
nodes = self.nodes.loc[nodes]
for idx, row in nodes.iterrows():
ax.text(row['x'], row['y'], fmt.format(index=idx, **row), va=va, ha=ha, fontsize=fontsize, **kwargs)
def plot_edges_cmap(self, ax, c=None, **kwargs):
"""
        Plot all of the edges with a color map
:param ax:
:param c: series or array matching length of self.edges,
if none indicated, we expect a column 'c' in self.edges
:param kwargs: params to plot_arrows_cmap
:return: a dict of style to mappable for use in colorbars
"""
final_kwargs = self.default_kwargs_edges.copy()
final_kwargs.update(kwargs)
if c is None:
c = self.edges['c']
all_sm = {}
for style, arrows in self._get_arrows().groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'facecolor' in style_kwargs:
style_kwargs.pop('facecolor')
if 'edgecolor' in style_kwargs:
style_kwargs.pop('edgecolor')
all_sm[style] = plot_arrows_cmap(
ax, arrows, c,
**style_kwargs
)
return all_sm
def plot_edges_solid(self, ax, selection=None, **kwargs):
"""
Plot all of the edges with a flat color
:param ax:
:param selection:
:param kwargs:
:return:
"""
final_kwargs = self.default_kwargs_edges.copy()
final_kwargs.update(kwargs)
for style, arrows in self._get_arrows(selection=selection).groupby('style'):
style_kwargs = self._lookup_style_kwargs(style, final_kwargs)
if 'cmap' in style_kwargs:
style_kwargs.pop('cmap')
plot_arrows_solid(
ax, arrows,
**style_kwargs
)
@classmethod
def from_conns(cls, conns, cells, node_style='ei_type', edge_style='con_type'):
"""plot the connections in XY space"""
all_gids = np.unique(conns[['source_gid', 'target_gid']].values.flatten())
nodes = cells.loc[all_gids, ['x', 'y']].copy()
nodes['style'] = cells.loc[nodes.index, node_style]
edges = conns[['source_gid', 'target_gid']].copy()
edges.columns = ['source', 'target']
edges['style'] = conns.loc[edges.index, edge_style]
return cls(nodes, edges)
@classmethod
def from_conn_jumps(
cls, selected_jumps, detailed_spikes, node_keys, edge_style,
**kwargs):
"""plot spike jumps"""
assert 'x' in node_keys and 'y' in node_keys and 'style' in node_keys
nodes = {}
for k, v in node_keys.items():
if isinstance(v, str):
v = detailed_spikes[v]
else:
assert isinstance(v, (tuple, list, pd.Series, np.ndarray))
nodes[k] = v
nodes = pd.DataFrame(nodes)
edges = selected_jumps[['source_spike', 'target_spike']].copy()
edges.columns = ['source', 'target']
edges['style'] = selected_jumps.loc[edges.index, edge_style]
return cls(nodes, edges, **kwargs)
def get_floating_nodes(self) -> pd.Index:
"""
:return: the index of nodes with no connections in or out
"""
return self.nodes.index[
~self.nodes.index.isin(self.edges['source']) &
~self.nodes.index.isin(self.edges['target'])
]
def get_linked_nodes(self) -> pd.Index:
"""
:return: the index of nodes with at least a connection in or out
"""
return self.nodes.index[~self.nodes.index.isin(self.get_floating_nodes())]
def drop_nodes(self, drop_gids: pd.Index):
"""
remove the given nodes from the graph. This will also remove edges to/from those nodes
:param drop_gids: either a list of node ids or a boolean mask (True == remove)
:return:
"""
if drop_gids.dtype == 'bool':
if isinstance(drop_gids, pd.Series):
drop_gids = drop_gids.reindex(self.nodes.index, fill_value=False)
assert len(drop_gids) == len(self.nodes)
drop_gids = self.nodes.index[drop_gids]
drop_gids = pd.Index(np.asarray(drop_gids))
remaining_gids = self.nodes.index.difference(drop_gids)
self.nodes = self.nodes.loc[remaining_gids].copy()
bad_edges = (
self.edges['source'].isin(drop_gids) |
self.edges['target'].isin(drop_gids)
)
self.edges = self.edges.loc[~bad_edges].copy()
def drop_edges(self, drop_gids: pd.Index):
"""
remove the given edges from the graph
example:
graph.drop_edges(graph.edges['weight'] < .75 * 70)
:param drop_gids: either a list of edge ids or a boolean mask (True == remove)
:return:
"""
if drop_gids.dtype == 'bool':
if isinstance(drop_gids, pd.Series):
drop_gids = drop_gids.reindex(self.edges.index, fill_value=False)
assert len(drop_gids) == len(self.edges)
drop_gids = self.edges.index[drop_gids]
drop_gids = pd.Index(np.asarray(drop_gids))
remaining_gids = self.edges.index.difference(drop_gids)
self.edges = self.edges.loc[remaining_gids].copy()
def add_edges(self, new_edges: pd.DataFrame, **overwrite_cols):
"""
Add edges to this graph.
Inplace.
:param overwrite_cols: pairs of <column, value> to assign to new_edges before adding them.
For example, to set a style.
"""
new_edges = new_edges.copy()
for c, v in overwrite_cols.items():
new_edges[c] = v
missing_cols = self.edges.columns.difference(new_edges.columns)
if len(missing_cols) > 0:
logging.error(f'Missing columns: {list(missing_cols)}. Got: {list(new_edges.columns)}')
return
repeated = self.edges.index.intersection(new_edges.index)
if len(repeated):
logging.warning(f'Repeated edges will be ignored: {repeated}')
new_edges = new_edges.drop(repeated)
valid = (
new_edges['source'].isin(self.nodes.index) &
new_edges['target'].isin(self.nodes.index)
)
if np.any(~valid):
logging.warning(f'{np.count_nonzero(~valid):,g} edges without source or target will be ignored')
new_edges = new_edges[valid]
all_edges = pd.concat([self.edges, new_edges], axis=0, sort=False)
assert all_edges.index.is_unique
self.edges = all_edges
def add_nodes(self, new_nodes: pd.DataFrame, **overwrite_cols):
"""
        Add nodes to this graph.
Inplace.
:param overwrite_cols: pairs of <column, value> to assign to new_nodes before adding them.
For example, to set a style.
"""
new_nodes = new_nodes.copy()
for c, v in overwrite_cols.items():
new_nodes[c] = v
missing_cols = self.nodes.columns.difference(new_nodes.columns)
if len(missing_cols) > 0:
logging.warning(f'Missing columns: {list(missing_cols)}. Got: {list(new_nodes.columns)}')
repeated = self.nodes.index.intersection(new_nodes.index)
if len(repeated):
logging.warning(f'Repeated nodes will be ignored: {repeated}')
new_nodes = new_nodes.drop(repeated)
all_nodes = pd.concat([self.nodes, new_nodes], axis=0, sort=False)
assert all_nodes.index.is_unique
self.nodes = all_nodes
def add_graph(self, other):
"""
Add another graph to this one.
Inplace.
"""
self.add_nodes(other.nodes)
self.add_edges(other.edges)
def drop_edges_orphan(self):
"""remove edges without a known source or target"""
mask_edges = (
self.edges['source'].isin(self.nodes.index) &
self.edges['target'].isin(self.nodes.index)
)
self.edges = self.edges[mask_edges].copy()
def layout_spring(self, edges_idx=None, iterations=100, source_gid=None, **kwargs):
"""
modify inplace the XY positions of the graph using a spring force algorithm
if source_gid is provided, it will be fixed at coordinate (0, 0)
initial position are taken from the current XY.
"""
fixed = kwargs.pop('fixed', None)
if source_gid is not None:
if fixed is None:
fixed = {}
fixed[source_gid] = (0, 0)
from networkx import spring_layout
pos = spring_layout(
self._get_as_networkx_digraph(edges_idx),
pos={i: (x, y) for i, x, y in self.nodes[['x', 'y']].itertuples()},
fixed=fixed,
iterations=iterations,
**kwargs,
)
self._set_node_xy(pd.DataFrame.from_dict(pos, orient='index', columns=['x', 'y']))
def layout_graphviz(self, edges_idx=None, **kwargs):
"""
modify inplace the XY positions of the graph using a one of the graphviz algorithms
see https://stackoverflow.com/questions/21978487/improving-python-networkx-graph-layout
"""
from networkx.drawing.nx_agraph import graphviz_layout
pos = graphviz_layout(
self._get_as_networkx_digraph(edges_idx),
**kwargs)
        self._set_node_xy(pd.DataFrame.from_dict(pos, orient='index', columns=['x', 'y']))
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 28 22:50:43 2018
@author: kennedy
"""
"""
Credit:
https://www.quantopian.com/posts/technical-analysis-indicators-without-talib-code
Bug Fix by Kennedy:
Works fine for library import.
Returns only the column(s) of the indicator result.
Can be used as a predictor for forecasting
stock returns using predictive modeling
in Machine Learning.
I configured it to meet my demand for multiple predictive modelling.
"""
import pandas as pd
import numpy as np
class TechnicalIndicators:
def moving_average(df, n):
"""Calculate the moving average for the given data.
:param df: pandas.DataFrame
:param n: window
:return: pandas.DataFrame
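        Example (sketch; ``df`` is assumed to contain a 'Close' column)::

            ma10 = TechnicalIndicators.moving_average(df, 10)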
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean(), name='MA_{}'.format(n))
return MA
def exponential_moving_average(df, n):
"""
:param df: pandas.DataFrame
:param n: window of data to take moving exponent mean
:return: pandas.DataFrame
"""
EMA = pd.Series(df['Close'].ewm(span=n, min_periods=n).mean(), name='EMA_' + str(n))
return EMA
def momentum(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
M = pd.Series(df['Close'].diff(n), name='Momentum_' + str(n))
return M
def rate_of_change(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
M = df['Close'].diff(n - 1)
N = df['Close'].shift(n - 1)
ROC = pd.Series(M / N, name='ROC_' + str(n))
return ROC
def average_true_range(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean(), name='ATR_' + str(n))
return ATR
def bollinger_bands(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean())
MSD = pd.Series(df['Close'].rolling(n, min_periods=n).std())
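        # b1: Bollinger bandwidth, the width of the +/-2 std band relative to the moving average.
        # b2: %b, the position of the close within the +/-2 std band (0 = lower band, 1 = upper band).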
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
b2 = (df['Close'] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
return pd.concat([B1, B2], axis = 1)
def ppsr(df):
"""Calculate Pivot Points, Supports and Resistances for given data
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
psr = {'PP': PP, 'R1': R1, 'S1': S1, 'R2': R2, 'S2': S2, 'R3': R3, 'S3': S3}
PSR = pd.DataFrame(psr)
return PSR
def stochastic_oscillator_k(df):
"""Calculate stochastic oscillator %K for given data.
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
return SOk
def stochastic_oscillator_d(df, n):
"""Calculate stochastic oscillator %D for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
SOd = pd.Series(SOk.ewm(span=n, min_periods=n).mean(), name='SO%d_' + str(n))
return SOd
def trix(df, n):
"""Calculate TRIX for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
EX1 = df['Close'].ewm(span=n, min_periods=n).mean()
EX2 = EX1.ewm(span=n, min_periods=n).mean()
EX3 = EX2.ewm(span=n, min_periods=n).mean()
i = 0
ROC_l = [np.nan]
while i + 1 <= df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
Trix = pd.Series(ROC_l, name='Trix_' + str(n))
return Trix
def average_directional_movement_index(df, n, n_ADX):
"""Calculate the Average Directional Movement Index for given data.
:param df: pandas.DataFrame
:param n: data window
:param n_ADX:
:return: pandas.DataFrame
"""
i = 0
UpI = []
DoI = []
while i + 1 <= df.index[-1]:
UpMove = df.loc[i + 1, 'High'] - df.loc[i, 'High']
DoMove = df.loc[i, 'Low'] - df.loc[i + 1, 'Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean())
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(UpI.ewm(span=n, min_periods=n).mean() / ATR)
NegDI = pd.Series(DoI.ewm(span=n, min_periods=n).mean() / ATR)
ADX = pd.Series((abs(PosDI - NegDI) / (PosDI + NegDI)).ewm(span=n_ADX, min_periods=n_ADX).mean(),
name='ADX_' + str(n) + '_' + str(n_ADX))
return ADX
def macd(df, n_fast, n_slow):
"""Calculate MACD, MACD Signal and MACD difference
:param df: pandas.DataFrame
:param n_fast:
:param n_slow:
:return: pandas.DataFrame
"""
EMAfast = pd.Series(df['Close'].ewm(span=n_fast, min_periods=n_slow).mean())
EMAslow = pd.Series(df['Close'].ewm(span=n_slow, min_periods=n_slow).mean())
MACD = pd.Series(EMAfast - EMAslow, name='MACD_' + str(n_fast) + '_' + str(n_slow))
MACDsign = pd.Series(MACD.ewm(span=9, min_periods=9).mean(), name='MACDsign_' + str(n_fast) + '_' + str(n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_' + str(n_fast) + '_' + str(n_slow))
return pd.concat([MACD, MACDsign, MACDdiff], axis = 1)
def mass_index(df, n):
"""Calculate the Mass Index for given data.
        :param df: pandas.DataFrame
        :param n: data window
        :return: pandas.DataFrame
"""
Range = df['High'] - df['Low']
EX1 = Range.ewm(span=9, min_periods=9).mean()
EX2 = EX1.ewm(span=9, min_periods=9).mean()
Mass = EX1 / EX2
MassI = pd.Series(Mass.rolling(n).sum(), name='Mass Index')
return MassI
def vortex_indicator(df, n):
"""Calculate the Vortex Indicator for given data.
Vortex Indicator described here:
http://www.vortexindicator.com/VFX_VORTEX.PDF
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
i = 0
TR = [0]
while i < df.index[-1]:
Range = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR.append(Range)
i = i + 1
i = 0
VM = [0]
while i < df.index[-1]:
Range = abs(df.loc[i + 1, 'High'] - df.loc[i, 'Low']) - abs(df.loc[i + 1, 'Low'] - df.loc[i, 'High'])
VM.append(Range)
i = i + 1
VI = pd.Series(pd.Series(VM).rolling(n).sum() / pd.Series(TR).rolling(n).sum(), name='Vortex_' + str(n))
return VI
def kst_oscillator(df, r1, r2, r3, r4, n1, n2, n3, n4):
"""Calculate KST Oscillator for given data.
:param df: pandas.DataFrame
:param r1:
:param r2:
:param r3:
:param r4:
:param n1:
:param n2:
:param n3:
:param n4:
:return: pandas.DataFrame
"""
M = df['Close'].diff(r1 - 1)
N = df['Close'].shift(r1 - 1)
ROC1 = M / N
M = df['Close'].diff(r2 - 1)
N = df['Close'].shift(r2 - 1)
ROC2 = M / N
M = df['Close'].diff(r3 - 1)
N = df['Close'].shift(r3 - 1)
ROC3 = M / N
M = df['Close'].diff(r4 - 1)
N = df['Close'].shift(r4 - 1)
ROC4 = M / N
KST = pd.Series(
ROC1.rolling(n1).sum() + ROC2.rolling(n2).sum() * 2 + ROC3.rolling(n3).sum() * 3 + ROC4.rolling(n4).sum() * 4,
name='KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(
n2) + '_' + str(n3) + '_' + str(n4))
return KST
def relative_strength_index(df, n):
"""Calculate Relative Strength Index(RSI) for given data.
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
i = 0
UpI = [0]
DoI = [0]
while i + 1 <= df.index[-1]:
UpMove = df.loc[i + 1, 'High'] - df.loc[i, 'High']
DoMove = df.loc[i, 'Low'] - df.loc[i + 1, 'Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
UpI = pd.Series(UpI)
        DoI = pd.Series(DoI)
        # Complete the standard RSI computation from the smoothed up/down moves
        # (same smoothing pattern as in average_directional_movement_index above).
        PosDI = pd.Series(UpI.ewm(span=n, min_periods=n).mean())
        NegDI = pd.Series(DoI.ewm(span=n, min_periods=n).mean())
        RSI = pd.Series(PosDI / (PosDI + NegDI), name='RSI_' + str(n))
        return RSI
import blpapi
import logging
import datetime
import pandas as pd
import contextlib
from collections import defaultdict
from pandas import DataFrame
@contextlib.contextmanager
def bopen(debug=False):
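    """Context manager that starts a BCon session and stops it on exit.

    Example (sketch; requires a running Bloomberg terminal/API endpoint,
    ticker and field names are illustrative)::

        with bopen() as con:
            px = con.bdh('SPY US Equity', 'PX_LAST', '20190101', '20190301')
    """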
con = BCon(debug=debug)
con.start()
try:
yield con
finally:
con.stop()
class BCon(object):
def __init__(self, host='localhost', port=8194, debug=False):
"""
Create an object which manages connection to the Bloomberg API session
Parameters
----------
host: str
Host name
port: int
Port to connect to
debug: Boolean {True, False}
Boolean corresponding to whether to log Bloomberg Open API request
and response messages to stdout
"""
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
sessionOptions.setServerHost(host)
sessionOptions.setServerPort(port)
self._sessionOptions = sessionOptions
# Create a Session
self.session = blpapi.Session(sessionOptions)
# initialize logger
self.debug = debug
@property
def debug(self):
"""
When True, print all Bloomberg Open API request and response messages
to stdout
"""
return self._debug
@debug.setter
def debug(self, value):
"""
Set whether logging is True or False
"""
self._debug = value
root = logging.getLogger()
if self._debug:
# log requests and responses
root.setLevel(logging.DEBUG)
else:
# log only failed connections
root.setLevel(logging.INFO)
def start(self):
"""
start connection and init service for refData
"""
# Start a Session
if not self.session.start():
logging.info("Failed to start session.")
return
self.session.nextEvent()
# Open service to get historical data from
if not self.session.openService("//blp/refdata"):
logging.info("Failed to open //blp/refdata")
return
self.session.nextEvent()
# Obtain previously opened service
self.refDataService = self.session.getService("//blp/refdata")
self.session.nextEvent()
def restart(self):
"""
Restart the blp session
"""
# Recreate a Session
self.session = blpapi.Session(self._sessionOptions)
self.start()
def _create_req(self, rtype, tickers, flds, ovrds, setvals):
# flush event queue in case previous call errored out
while(self.session.tryNextEvent()):
pass
request = self.refDataService.createRequest(rtype)
for t in tickers:
request.getElement("securities").appendValue(t)
for f in flds:
request.getElement("fields").appendValue(f)
for name, val in setvals:
request.set(name, val)
overrides = request.getElement("overrides")
for ovrd_fld, ovrd_val in ovrds:
ovrd = overrides.appendElement()
ovrd.setElement("fieldId", ovrd_fld)
ovrd.setElement("value", ovrd_val)
return request
def bdh(self, tickers, flds, start_date, end_date, elms=[],
ovrds=[], longdata=False):
"""
        Get tickers and fields, return a pandas DataFrame with a column MultiIndex
        of tickers and fields if multiple fields are given, an Index otherwise.
        If a single field is given the DataFrame columns are ordered the same as
        the tickers, otherwise the MultiIndex is sorted.
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
start_date: string
String in format YYYYmmdd
end_date: string
String in format YYYYmmdd
elms: list of tuples
List of tuples where each tuple corresponds to the other elements
to be set, e.g. [("periodicityAdjustment", "ACTUAL")]
Refer to A.2.4 HistoricalDataRequest in the Developers Guide for
more info on these values
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value
longdata: boolean
Whether data should be returned in long data format or pivoted
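        Example (sketch; requires an active Bloomberg session, ticker and
        field names are illustrative)::

            df = con.bdh('SPY US Equity', 'PX_LAST', '20150629', '20150710')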
"""
elms = list(elms)
data = self._bdh_list(tickers, flds, start_date, end_date,
elms, ovrds)
df = DataFrame(data)
df.columns = ["date", "ticker", "field", "value"]
df.loc[:, "date"] = pd.to_datetime(df.loc[:, "date"])
if not longdata:
cols = ['ticker', 'field']
df = df.set_index(['date'] + cols).unstack(cols)
df.columns = df.columns.droplevel(0)
return df
def _bdh_list(self, tickers, flds, start_date, end_date, elms,
ovrds):
if type(tickers) is not list:
tickers = [tickers]
if type(flds) is not list:
flds = [flds]
setvals = elms
setvals.append(("startDate", start_date))
setvals.append(("endDate", end_date))
request = self._create_req("HistoricalDataRequest", tickers, flds,
ovrds, setvals)
logging.debug("Sending Request:\n %s" % request)
# Send the request
self.session.sendRequest(request)
data = []
# Process received events
while(True):
# We provide timeout to give the chance for Ctrl+C handling:
ev = self.session.nextEvent(500)
for msg in ev:
logging.debug("Message Received:\n %s" % msg)
if msg.getElement('securityData').hasElement('securityError') or (msg.getElement('securityData').getElement("fieldExceptions").numValues() > 0): # NOQA
raise Exception(msg)
ticker = msg.getElement('securityData').getElement('security').getValue() # NOQA
fldDatas = msg.getElement('securityData').getElement('fieldData') # NOQA
for fd in fldDatas.values():
dt = fd.getElement('date').getValue()
for element in fd.elements():
fname = str(element.name())
if fname == "date":
continue
val = element.getValue()
data.append((dt, ticker, fname, val))
if ev.eventType() == blpapi.Event.RESPONSE:
# Response completely received, so we could exit
break
return data
def ref(self, tickers, flds, ovrds=[]):
"""
Make a reference data request, get tickers and fields, return long
pandas Dataframe with columns [ticker, field, value]
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value
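        Example (sketch; requires an active Bloomberg session, ticker and
        field names are illustrative)::

            df = con.ref('AUD Curncy', 'SETTLE_DT')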
"""
data = self._ref(tickers, flds, ovrds)
data = DataFrame(data)
data.columns = ["ticker", "field", "value"]
return data
def _ref(self, tickers, flds, ovrds):
if type(tickers) is not list:
tickers = [tickers]
if type(flds) is not list:
flds = [flds]
request = self._create_req("ReferenceDataRequest", tickers, flds,
ovrds, [])
logging.debug("Sending Request:\n %s" % request)
# Send the request
self.session.sendRequest(request)
data = []
# Process received events
while(True):
# We provide timeout to give the chance for Ctrl+C handling:
ev = self.session.nextEvent(500)
for msg in ev:
logging.debug("Message Received:\n %s" % msg)
fldData = msg.getElement('securityData')
for i in range(fldData.numValues()):
ticker = (fldData.getValue(i).getElement("security").getValue()) # NOQA
reqFldsData = (fldData.getValue(i).getElement('fieldData'))
for j in range(reqFldsData.numElements()):
fld = flds[j]
# this is for dealing with requests which return arrays
# of values for a single field
if reqFldsData.getElement(fld).isArray():
lrng = reqFldsData.getElement(fld).numValues()
for k in range(lrng):
elms = (reqFldsData.getElement(fld).getValue(k).elements()) # NOQA
# if the elements of the array have multiple
# subelements this will just append them all
# into a list
for elm in elms:
data.append([ticker, fld, elm.getValue()])
else:
val = reqFldsData.getElement(fld).getValue()
data.append([ticker, fld, val])
if ev.eventType() == blpapi.Event.RESPONSE:
# Response completely received, so we could exit
break
return data
def ref_hist(self, tickers, flds, start_date,
end_date=datetime.date.today().strftime('%Y%m%d'),
timeout=2000, longdata=False):
"""
Get tickers and fields, periodically override REFERENCE_DATE to create
        a time series. Return a pandas DataFrame with a column MultiIndex
        of tickers and fields if multiple fields are given, an Index otherwise.
        If a single field is given the DataFrame columns are ordered the same as
        the tickers, otherwise the MultiIndex is sorted.
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
start_date: string
String in format YYYYmmdd
end_date: string
String in format YYYYmmdd
timeout: int
Passed into nextEvent(timeout), number of milliseconds before
timeout occurs
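        Example (sketch; requires an active Bloomberg session, ticker and
        field names are illustrative)::

            df = con.ref_hist('AUD Curncy', 'SETTLE_DT',
                              start_date='20150625', end_date='20150629')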
"""
# correlationIDs should be unique to a session so rather than
# managing unique IDs for the duration of the session just restart
# a session for each call
self.restart()
if type(tickers) is not list:
tickers = [tickers]
if type(flds) is not list:
flds = [flds]
# Create and fill the request for the historical data
request = self.refDataService.createRequest("ReferenceDataRequest")
for t in tickers:
request.getElement("securities").appendValue(t)
for f in flds:
request.getElement("fields").appendValue(f)
overrides = request.getElement("overrides")
dates = pd.date_range(start_date, end_date, freq='b')
ovrd = overrides.appendElement()
for dt in dates:
ovrd.setElement("fieldId", "REFERENCE_DATE")
ovrd.setElement("value", dt.strftime('%Y%m%d'))
# CorrelationID used to keep track of which response coincides with
# which request
cid = blpapi.CorrelationId(dt)
logging.debug("Sending Request:\n %s" % request)
self.session.sendRequest(request, correlationId=cid)
data = []
# Process received events
while(True):
ev = self.session.nextEvent(timeout)
for msg in ev:
logging.debug("Message Received:\n %s" % msg)
corrID = msg.correlationIds()[0].value()
fldData = msg.getElement('securityData')
for i in range(fldData.numValues()):
tckr = (fldData.getValue(i).getElement("security").getValue()) # NOQA
reqFldsData = (fldData.getValue(i).getElement('fieldData'))
for j in range(reqFldsData.numElements()):
fld = flds[j]
val = reqFldsData.getElement(fld).getValue()
data.append((fld, tckr, val, corrID))
if ev.eventType() == blpapi.Event.TIMEOUT:
# All events processed
if (len(data) / len(flds) / len(tickers)) == len(dates):
break
else:
raise(RuntimeError("Timeout, increase timeout parameter"))
data = pd.DataFrame(data)
data.columns = ['field', 'ticker', 'value', 'date']
data = data.sort_values(by='date')
data = data.reset_index(drop=True)
data = data.loc[:, ['date', 'field', 'ticker', 'value']]
if not longdata:
cols = ['ticker', 'field']
data = data.set_index(['date'] + cols).unstack(cols)
data.columns = data.columns.droplevel(0)
return data
def bdib(self, ticker, start_datetime, end_datetime, event_type, interval,
elms=[]):
"""
Get Open, High, Low, Close, Volume, and numEvents for a ticker.
Return pandas dataframe
Parameters
----------
ticker: string
String corresponding to ticker
start_datetime: string
UTC datetime in format YYYY-mm-ddTHH:MM:SS
end_datetime: string
UTC datetime in format YYYY-mm-ddTHH:MM:SS
event_type: string {TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID,
BEST_ASK}
Requested data event type
interval: int {1... 1440}
Length of time bars
elms: list of tuples
List of tuples where each tuple corresponds to the other elements
to be set, refer to A.2.8 IntradayBarRequest in the
Developers Guide for more info on these values
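        Example (sketch; requires an active Bloomberg session, values are
        illustrative)::

            df = con.bdib('SPY US Equity', '2018-02-06T10:00:00',
                          '2018-02-06T12:00:00', event_type='TRADE', interval=5)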
"""
# flush event queue in case previous call errored out
while(self.session.tryNextEvent()):
pass
# Create and fill the request for the historical data
request = self.refDataService.createRequest("IntradayBarRequest")
request.set("security", ticker)
request.set("eventType", event_type)
request.set("interval", interval) # bar interval in minutes
request.set("startDateTime", start_datetime)
request.set("endDateTime", end_datetime)
for name, val in elms:
request.set(name, val)
logging.debug("Sending Request:\n %s" % request)
# Send the request
self.session.sendRequest(request)
# defaultdict - later convert to pandas
data = defaultdict(dict)
# Process received events
flds = ['open', 'high', 'low', 'close', 'volume', 'numEvents']
while(True):
# We provide timeout to give the chance for Ctrl+C handling:
ev = self.session.nextEvent(500)
for msg in ev:
logging.debug("Message Received:\n %s" % msg)
barTick = (msg.getElement('barData')
.getElement('barTickData'))
for i in range(barTick.numValues()):
for fld in flds:
dt = barTick.getValue(i).getElement(0).getValue()
val = (barTick.getValue(i).getElement(fld).getValue())
data[(fld)][dt] = val
if ev.eventType() == blpapi.Event.RESPONSE:
                # Response completely received, so we could exit
break
data = DataFrame(data)
if not data.empty:
data.index = pd.to_datetime(data.index)
data = data[flds]
return data
def stop(self):
"""
Close the blp session
"""
self.session.stop()
def beqs(self, screen_name, screen_type='PRIVATE', group='General', language_id='ENGLISH', asof_date=None):
"""
The beqs() function allows users to retrieve a table of data for a selected equity screen
that was created using the Equity Screening (EQS) function.
See https://data.bloomberglp.com/professional/sites/4/BLPAPI-Core-Developer-Guide.pdf.
Make a Beqs request, get tickers and fields, return long
pandas Dataframe with columns [ticker, field, value]
Parameters
----------
screen_name: string
String corresponding to name of screen
screen_type: 'GLOBAL' or 'PRIVATE'
Indicates that the screen is a Bloomberg-created sample screen (GLOBAL) or
a saved custom screen that users have created (PRIVATE).
group: string
If the screens are organized into groups, allows users to define the name of the group that
contains the screen. If the users use a Bloomberg sample screen,
they must use this parameter to specify the name of the folder in which the screen appears.
For example, group="Investment Banking" when importing the “Cash/Debt Ratio” screen.
language_id: string
Allows users to override the EQS report header language
asof_date: datetime or None
Allows users to backdate the screen, so they can analyze the historical results on the screen
"""
data = self._beqs(screen_name, screen_type, group, language_id, asof_date)
        data = DataFrame(data)
        data.columns = ["ticker", "field", "value"]
        return data
from __future__ import annotations
import numbers
from typing import TYPE_CHECKING
import warnings
import numpy as np
from pandas._libs import (
lib,
missing as libmissing,
)
from pandas._typing import (
ArrayLike,
Dtype,
type_t,
)
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_bool_dtype,
is_float,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
ExtensionDtype,
register_extension_dtype,
)
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from pandas.core.arrays.masked import (
BaseMaskedArray,
BaseMaskedDtype,
)
if TYPE_CHECKING:
import pyarrow
@register_extension_dtype
class BooleanDtype(BaseMaskedDtype):
"""
Extension dtype for boolean data.
.. versionadded:: 1.0.0
.. warning::
BooleanDtype is considered experimental. The implementation and
parts of the API may change without warning.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> pd.BooleanDtype()
BooleanDtype
"""
name = "boolean"
# https://github.com/python/mypy/issues/4125
# error: Signature of "type" incompatible with supertype "BaseMaskedDtype"
@property
def type(self) -> type: # type: ignore[override]
return np.bool_
@property
def kind(self) -> str:
return "b"
@property
def numpy_dtype(self) -> np.dtype:
return np.dtype("bool")
@classmethod
def construct_array_type(cls) -> type_t[BooleanArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return BooleanArray
def __repr__(self) -> str:
return "BooleanDtype"
@property
def _is_boolean(self) -> bool:
return True
@property
def _is_numeric(self) -> bool:
return True
def __from_arrow__(
self, array: pyarrow.Array | pyarrow.ChunkedArray
) -> BooleanArray:
"""
Construct BooleanArray from pyarrow Array/ChunkedArray.
"""
import pyarrow
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
# pyarrow.ChunkedArray
chunks = array.chunks
results = []
for arr in chunks:
# TODO should optimize this without going through object array
bool_arr = BooleanArray._from_sequence(np.array(arr))
results.append(bool_arr)
return BooleanArray._concat_same_type(results)
def coerce_to_array(
values, mask=None, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask.
Parameters
----------
values : 1D list-like
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
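    Examples
    --------
    A minimal sketch of the coercion behaviour (the mask flags the missing entry)::

        values, mask = coerce_to_array([True, False, None])
        # values -> array([ True, False, False]); mask -> array([False, False,  True])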
"""
if isinstance(values, BooleanArray):
if mask is not None:
raise ValueError("cannot pass mask for BooleanArray input")
values, mask = values._data, values._mask
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
mask_values = None
if isinstance(values, np.ndarray) and values.dtype == np.bool_:
if copy:
values = values.copy()
elif isinstance(values, np.ndarray) and is_numeric_dtype(values.dtype):
mask_values = isna(values)
values_bool = np.zeros(len(values), dtype=bool)
values_bool[~mask_values] = values[~mask_values].astype(bool)
if not np.all(
values_bool[~mask_values].astype(values.dtype) == values[~mask_values]
):
raise TypeError("Need to pass bool-like values")
values = values_bool
else:
values_object = np.asarray(values, dtype=object)
inferred_dtype = lib.infer_dtype(values_object, skipna=True)
integer_like = ("floating", "integer", "mixed-integer-float")
if inferred_dtype not in ("boolean", "empty") + integer_like:
raise TypeError("Need to pass bool-like values")
mask_values = isna(values_object)
values = np.zeros(len(values), dtype=bool)
values[~mask_values] = values_object[~mask_values].astype(bool)
# if the values were integer-like, validate it were actually 0/1's
if (inferred_dtype in integer_like) and not (
np.all(
values[~mask_values].astype(float)
== values_object[~mask_values].astype(float)
)
):
raise TypeError("Need to pass bool-like values")
if mask is None and mask_values is None:
mask = np.zeros(len(values), dtype=bool)
elif mask is None:
mask = mask_values
else:
if isinstance(mask, np.ndarray) and mask.dtype == np.bool_:
if mask_values is not None:
mask = mask | mask_values
else:
if copy:
mask = mask.copy()
else:
mask = np.array(mask, dtype=bool)
if mask_values is not None:
mask = mask | mask_values
if values.ndim != 1:
raise ValueError("values must be a 1D list-like")
if mask.ndim != 1:
raise ValueError("mask must be a 1D list-like")
return values, mask
class BooleanArray(BaseMaskedArray):
"""
Array of boolean (True/False) data with missing values.
This is a pandas Extension array for boolean data, under the hood
represented by 2 numpy arrays: a boolean array with the data and
a boolean array with the mask (True indicating missing).
BooleanArray implements Kleene logic (sometimes called three-value
logic) for logical operations. See :ref:`boolean.kleene` for more.
    To construct a BooleanArray from generic array-like input, use
:func:`pandas.array` specifying ``dtype="boolean"`` (see examples
below).
.. versionadded:: 1.0.0
.. warning::
BooleanArray is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
values : numpy.ndarray
A 1-d boolean-dtype array with the data.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values (True
indicates missing).
copy : bool, default False
Whether to copy the `values` and `mask` arrays.
Attributes
----------
None
Methods
-------
None
Returns
-------
BooleanArray
Examples
--------
    Create a BooleanArray with :func:`pandas.array`:
>>> pd.array([True, False, None], dtype="boolean")
<BooleanArray>
[True, False, <NA>]
Length: 3, dtype: boolean
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = False
_TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
_FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
raise TypeError(
"values should be boolean numpy array. Use "
"the 'pd.array' function instead"
)
self._dtype = BooleanDtype()
super().__init__(values, mask, copy=copy)
@property
def dtype(self) -> BooleanDtype:
return self._dtype
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
) -> BooleanArray:
if dtype:
assert dtype == "boolean"
values, mask = coerce_to_array(scalars, copy=copy)
return BooleanArray(values, mask)
@classmethod
def _from_sequence_of_strings(
cls,
strings: list[str],
*,
dtype: Dtype | None = None,
copy: bool = False,
true_values: list[str] | None = None,
false_values: list[str] | None = None,
) -> BooleanArray:
true_values_union = cls._TRUE_VALUES.union(true_values or [])
false_values_union = cls._FALSE_VALUES.union(false_values or [])
def map_string(s):
if isna(s):
return s
elif s in true_values_union:
return True
elif s in false_values_union:
return False
else:
raise ValueError(f"{s} cannot be cast to bool")
scalars = [map_string(x) for x in strings]
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
# For BooleanArray inputs, we apply the ufunc to ._data
# and mask the result.
if method == "reduce":
# Not clear how to handle missing values in reductions. Raise.
raise NotImplementedError("The 'reduce' method is not supported.")
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (BooleanArray,)):
return NotImplemented
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
mask = np.zeros(len(self), dtype=bool)
inputs2 = []
for x in inputs:
if isinstance(x, BooleanArray):
mask |= x._mask
inputs2.append(x._data)
else:
inputs2.append(x)
def reconstruct(x):
# we don't worry about scalar `x` here, since we
# raise for reduce up above.
if is_bool_dtype(x.dtype):
m = mask.copy()
return BooleanArray(x, m)
else:
x[mask] = np.nan
return x
result = getattr(ufunc, method)(*inputs2, **kwargs)
if isinstance(result, tuple):
            return tuple(reconstruct(x) for x in result)
else:
return reconstruct(result)
def _coerce_to_array(self, value) -> tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value)
def astype(self, dtype, copy: bool = True) -> ArrayLike:
"""
Cast to a NumPy array or ExtensionArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
ndarray or ExtensionArray
NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
if incompatible type with an BooleanDtype, equivalent of same_kind
casting
"""
dtype = pandas_dtype(dtype)
if isinstance(dtype, ExtensionDtype):
return super().astype(dtype, copy)
if is_bool_dtype(dtype):
# astype_nansafe converts np.nan to True
if self._hasna:
raise ValueError("cannot convert float NaN to bool")
else:
return self._data.astype(dtype, copy=copy)
# for integer, error if there are missing values
if is_integer_dtype(dtype) and self._hasna:
raise ValueError("cannot convert NA to integer")
# for float dtype, ensure we use np.nan before casting (numpy cannot
# deal with pd.NA)
na_value = self._na_value
if is_float_dtype(dtype):
na_value = np.nan
# coerce
return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)
def _values_for_argsort(self) -> np.ndarray:
"""
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort : Return the indices that would sort this array.
"""
data = self._data.copy()
data[self._mask] = -1
return data
def any(self, *, skipna: bool = True, **kwargs):
"""
Return whether any element is True.
Returns False unless there is at least one element that is True.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be False, as for an empty array.
If `skipna` is False, the result will still be True if there is
at least one element that is True, otherwise NA will be returned
if there are NA's present.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.any : Numpy version of this method.
BooleanArray.all : Return whether all elements are True.
Examples
--------
The result indicates whether any element is True (and by default
skips NAs):
>>> pd.array([True, False, True]).any()
True
>>> pd.array([True, False, pd.NA]).any()
True
>>> pd.array([False, False, pd.NA]).any()
False
>>> pd.array([], dtype="boolean").any()
False
>>> pd.array([pd.NA], dtype="boolean").any()
False
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, False, pd.NA]).any(skipna=False)
True
>>> pd.array([False, False, pd.NA]).any(skipna=False)
<NA>
"""
kwargs.pop("axis", None)
nv.validate_any((), kwargs)
values = self._data.copy()
np.putmask(values, self._mask, False)
result = values.any()
if skipna:
return result
else:
if result or len(self) == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
def all(self, *, skipna: bool = True, **kwargs):
"""
Return whether all elements are True.
Returns True unless there is at least one element that is False.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be True, as for an empty array.
If `skipna` is False, the result will still be False if there is
at least one element that is False, otherwise NA will be returned
if there are NA's present.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.all : Numpy version of this method.
BooleanArray.any : Return whether any element is True.
Examples
--------
        The result indicates whether all elements are True (and by default
skips NAs):
>>> pd.array([True, True, pd.NA]).all()
True
>>> pd.array([True, False, pd.NA]).all()
False
>>> pd.array([], dtype="boolean").all()
True
>>> pd.array([pd.NA], dtype="boolean").all()
True
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, True, pd.NA]).all(skipna=False)
<NA>
>>> pd.array([True, False, pd.NA]).all(skipna=False)
False
"""
kwargs.pop("axis", None)
nv.validate_all((), kwargs)
values = self._data.copy()
np.putmask(values, self._mask, True)
result = values.all()
if skipna:
return result
else:
if not result or len(self) == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
def _logical_method(self, other, op):
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
other_is_booleanarray = isinstance(other, BooleanArray)
other_is_scalar = lib.is_scalar(other)
mask = None
if other_is_booleanarray:
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other, dtype="bool")
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
other, mask = coerce_to_array(other, copy=False)
elif isinstance(other, np.bool_):
other = other.item()
if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other):
raise TypeError(
"'other' should be pandas.NA or a bool. "
f"Got {type(other).__name__} instead."
)
if not other_is_scalar and len(self) != len(other):
raise ValueError("Lengths must match to compare")
if op.__name__ in {"or_", "ror_"}:
result, mask = ops.kleene_or(self._data, other, self._mask, mask)
elif op.__name__ in {"and_", "rand_"}:
result, mask = ops.kleene_and(self._data, other, self._mask, mask)
elif op.__name__ in {"xor", "rxor"}:
result, mask = ops.kleene_xor(self._data, other, self._mask, mask)
# error: Argument 2 to "BooleanArray" has incompatible type "Optional[Any]";
# expected "ndarray"
return BooleanArray(result, mask) # type: ignore[arg-type]
def _cmp_method(self, other, op):
from pandas.arrays import (
FloatingArray,
IntegerArray,
)
if isinstance(other, (IntegerArray, FloatingArray)):
return NotImplemented
mask = None
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
if other is libmissing.NA:
# numpy does not handle pd.NA well as "other" scalar (it returns
# a scalar False instead of an array)
result = np.zeros_like(self._data)
mask = np.ones_like(self._data)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all="ignore"):
result = op(self._data, other)
# nans propagate
if mask is None:
mask = self._mask.copy()
else:
mask = self._mask | mask
return BooleanArray(result, mask, copy=False)
def _arith_method(self, other, op):
mask = None
op_name = op.__name__
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if len(self) != len(other):
raise ValueError("Lengths must match")
# nans propagate
if mask is None:
mask = self._mask
if other is libmissing.NA:
mask |= True
else:
mask = self._mask | mask
if other is libmissing.NA:
# if other is NA, the result will be all NA and we can't run the
# actual op, so we need to choose the resulting dtype manually
if op_name in {"floordiv", "rfloordiv", "mod", "rmod", "pow", "rpow"}:
dtype = "int8"
else:
dtype = "bool"
result = np.zeros(len(self._data), dtype=dtype)
else:
if op_name in {"pow", "rpow"} and isinstance(other, np.bool_):
# Avoid DeprecationWarning: In future, it will be an error
# for 'np.bool_' scalars to be interpreted as an index
other = bool(other)
with np.errstate(all="ignore"):
result = op(self._data, other)
# divmod returns a tuple
if op_name == "divmod":
div, mod = result
return (
self._maybe_mask_result(div, mask, other, "floordiv"),
self._maybe_mask_result(mod, mask, other, "mod"),
)
return self._maybe_mask_result(result, mask, other, op_name)
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if name in {"any", "all"}:
return getattr(self, name)(skipna=skipna, **kwargs)
return super()._reduce(name, skipna=skipna, **kwargs)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
----------
result : array-like
mask : array-like bool
other : scalar or array-like
op_name : str
"""
# if we have a float operand we are by-definition
# a float result
# or our op is a divide
if (is_float_dtype(other) or is_float(other)) or (
op_name in ["rtruediv", "truediv"]
):
from pandas.core.arrays import FloatingArray
return FloatingArray(result, mask, copy=False)
        elif is_bool_dtype(result):
            return BooleanArray(result, mask, copy=False)
import numpy as np
import pandas as pd
import pickle
import scipy.sparse
import tensorflow as tf
from typing import Union, List
import os
from tcellmatch.models.models_ffn import ModelBiRnn, ModelSa, ModelConv, ModelLinear, ModelNoseq
from tcellmatch.models.model_inception import ModelInception
from tcellmatch.estimators.additional_metrics import pr_global, pr_label, auc_global, auc_label, \
deviation_global, deviation_label
from tcellmatch.estimators.estimator_base import EstimatorBase
from tcellmatch.estimators.losses import WeightedBinaryCrossentropy
from tcellmatch.estimators.metrics import custom_r2, custom_logr2
class EstimatorFfn(EstimatorBase):
model: tf.keras.Model
model_hyperparam: dict
train_hyperparam: dict
history: dict
evaluations: dict
evaluations_custom: dict
def __init__(
self,
model_name=None
):
EstimatorBase.__init__(self=self)
self.model_name = model_name
self.model_hyperparam = None
self.train_hyperparam = None
self.wbce_weight = None
# Training and evaluation output containers.
self.history = None
self.results_test = None
self.predictions = None
self.evaluations = None
self.evaluations_custom = None
def _out_activation(self, loss) -> str:
""" Decide whether network output activation
This decision is based on the loss function.
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
        :return: How the network output is transformed:
- "categorical_crossentropy", "cce": softmax
- "binary_crossentropy", "bce": sigmoid
- "weighted_binary_crossentropy", "wbce": sigmoid
- "mean_squared_error", "mse": linear
- "mean_squared_logarithmic_error", "msle": exp
- "poisson", "pois": exp
"""
if loss.lower() in ["categorical_crossentropy", "cce"]:
return "softmax"
elif loss.lower() in ["binary_crossentropy", "bce"]:
return "sigmoid"
elif loss.lower() in ["weighted_binary_crossentropy", "wbce"]:
return "linear" # Cost function expect logits.
elif loss.lower() in ["mean_squared_error", "mse"]:
return "linear"
elif loss.lower() in ["mean_squared_logarithmic_error", "msle"]:
return "exponential"
elif loss.lower() in ["poisson", "pois"]:
return "exponential"
else:
raise ValueError("Loss %s not recognized." % loss)
def set_wbce_weight(self, weight):
""" Overwrites automatically computed weight that is chosen based on training data.
:param weight: Weight to use.
:return:
"""
self.wbce_weight = weight
def build_bilstm(
self,
topology: List[int],
split: bool = False,
aa_embedding_dim: Union[None, int] = None,
depth_final_dense: int = 1,
residual_connection: bool = False,
dropout: float = 0.0,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
optimize_for_gpu: bool = True,
dtype: str = "float32"
):
""" Build a BiLSTM-based feed-forward model to use in the estimator.
:param topology: The depth of each bilstm layer (length of feature vector)
:param residual_connection: apply residual connection or not.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param depth_final_dense: Number of final densely connected layers. They all have labels_dim number of units
and relu activation functions, apart from the last, which has either linear or sigmoid activation,
depending on out_probabilities.
:param dropout: Drop-out rate for training.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
            from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param optimize_for_gpu: Whether to choose implementation optimized for GPU.
:param dtype:
:return:
"""
self._build_sequential(
model="bilstm",
topology=topology,
split=split,
aa_embedding_dim=aa_embedding_dim,
depth_final_dense=depth_final_dense,
residual_connection=residual_connection,
dropout=dropout,
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing,
optimize_for_gpu=optimize_for_gpu,
dtype=dtype
)
def build_bigru(
self,
topology: List[int],
split: bool = False,
aa_embedding_dim: Union[None, int] = None,
depth_final_dense: int = 1,
residual_connection: bool = False,
dropout: float = 0.0,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
optimize_for_gpu: bool = True,
dtype: str = "float32"
):
""" Build a BiGRU-based feed-forward model to use in the estimator.
:param topology: The depth of each bilstm layer (length of feature vector)
:param residual_connection: apply residual connection or not.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param depth_final_dense: Number of final densely connected layers. They all have labels_dim number of units
and relu activation functions, apart from the last, which has either linear or sigmoid activation,
depending on out_probabilities.
:param dropout: Drop-out rate for training.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.s
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
            from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param optimize_for_gpu: Whether to choose implementation optimized for GPU.
:param dtype:
:return:
"""
self._build_sequential(
model="bigru",
topology=topology,
split=split,
aa_embedding_dim=aa_embedding_dim,
depth_final_dense=depth_final_dense,
residual_connection=residual_connection,
dropout=dropout,
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing,
optimize_for_gpu=optimize_for_gpu,
dtype=dtype
)
def _build_sequential(
self,
model: str,
topology: List[int],
split: bool,
aa_embedding_dim: Union[None, int],
depth_final_dense: int,
residual_connection: bool,
dropout: float,
optimizer: str,
lr: float,
loss: str,
label_smoothing: float,
optimize_for_gpu: bool,
dtype: str = "float32"
):
""" Build a BiLSTM-based feed-forward model to use in the estimator.
:param topology: The depth of each bilstm layer (length of feature vector)
:param residual_connection: apply residual connection or not.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param depth_final_dense: Number of final densely connected layers. They all have labels_dim number of units
and relu activation functions, apart from the last, which has either linear or sigmoid activation,
depending on out_probabilities.
:param dropout: Drop-out rate for training.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param optimize_for_gpu: Whether to choose implementation optimized for GPU.
:param dtype:
:return:
"""
# Save model settings:
self.model_hyperparam = {
"model": model,
"topology": topology,
"split": split,
"aa_embedding_dim": aa_embedding_dim,
"depth_final_dense": depth_final_dense,
"residual_connection": residual_connection,
"dropout": dropout,
"optimizer": optimizer,
"lr": lr,
"loss": loss,
"label_smoothing": label_smoothing,
"optimize_for_gpu": optimize_for_gpu,
"dtype": dtype
}
self.model = ModelBiRnn(
input_shapes=(
self.x_train.shape[1],
self.x_train.shape[2],
self.x_train.shape[3],
self.covariates_train.shape[1],
self.tcr_len
),
model=model.lower(),
labels_dim=self.y_train.shape[1],
topology=topology,
split=split,
residual_connection=residual_connection,
aa_embedding_dim=aa_embedding_dim,
depth_final_dense=depth_final_dense,
out_activation=self._out_activation(loss=loss),
dropout=dropout
)
self._compile_model(
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing
)
def build_self_attention(
self,
attention_size: List[int],
attention_heads: List[int],
aa_embedding_dim: Union[None, int] = None,
depth_final_dense: int = 1,
residual_connection: bool = False,
dropout: float = 0.0,
split: bool = False,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
dtype: str = "float32"
):
""" Build a self-attention-based feed-forward model to use in the estimator.
:param attention_size: hidden size for attention, could be divided by attention_heads.
:param attention_heads: number of heads in attention.
:param residual_connection: apply residual connection or not.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param depth_final_dense: Number of final densely connected layers. They all have labels_dim number of units
and relu activation functions, apart from the last, which has either linear or sigmoid activation,
depending on out_probabilities.
:param dropout: Drop-out rate for training.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param dtype:
:return:
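Example (illustrative sketch only; ``ffn`` stands for an estimator instance with data loaded
and the values are placeholders; each attention_size entry should be divisible by the
corresponding attention_heads entry):

    ffn.build_self_attention(
        attention_size=[64, 64],
        attention_heads=[4, 4],
        depth_final_dense=1,
        dropout=0.1,
        loss="bce"
    )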
"""
# Save model settings:
self.model_hyperparam = {
"model": "selfattention",
"attention_size": attention_size,
"attention_heads": attention_heads,
"split": split,
"aa_embedding_dim": aa_embedding_dim,
"depth_final_dense": depth_final_dense,
"residual_connection": residual_connection,
"dropout": dropout,
"optimizer": optimizer,
"lr": lr,
"loss": loss,
"label_smoothing": label_smoothing,
"dtype": dtype
}
# Build model.
self.model = ModelSa(
input_shapes=(
self.x_train.shape[1],
self.x_train.shape[2],
self.x_train.shape[3],
self.covariates_train.shape[1],
self.tcr_len
),
labels_dim=self.y_train.shape[1],
attention_size=attention_size,
attention_heads=attention_heads,
residual_connection=residual_connection,
split=split,
aa_embedding_dim=aa_embedding_dim,
out_activation=self._out_activation(loss=loss),
depth_final_dense=depth_final_dense,
dropout=dropout
)
self._compile_model(
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing
)
def build_conv(
self,
activations: List[str],
filter_widths: List[int],
filters: List[int],
strides: Union[List[Union[int, None]], None] = None,
pool_sizes: Union[List[Union[int, None]], None] = None,
pool_strides: Union[List[Union[int, None]], None] = None,
batch_norm: bool = False,
aa_embedding_dim: Union[None, int] = None,
depth_final_dense: int = 1,
dropout: float = 0.0,
split: bool = False,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
dtype: str = "float32"
):
""" Build a self-attention-based feed-forward model to use in the estimator.
:param activations: Activation function. Refer to documentation of tf.keras.layers.Conv2D
:param filter_widths: Number of neurons per filter. Refer to documentation of tf.keras.layers.Conv2D
:param filters: NUmber of filters / output channels. Refer to documentation of tf.keras.layers.Conv2D
:param strides: Stride size for convolution on sequence. Refer to documentation of tf.keras.layers.Conv2D
:param pool_sizes: Size of max-pooling, ie. number of output nodes to pool over.
Refer to documentation of tf.keras.layers.MaxPool2D:pool_size
:param pool_strides: Stride of max-pooling.
Refer to documentation of tf.keras.layers.MaxPool2D:strides
:param batch_norm: Whether to perform batch normalization.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param depth_final_dense: Number of final densely connected layers. They all have labels_dim number of units
and relu activation functions, apart from the last, which has either linear or sigmoid activation,
depending on out_probabilities.
:param dropout: Drop-out rate for training.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param dtype:
:return:
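Example (illustrative sketch only; ``ffn`` stands for an estimator instance with data loaded,
the values are placeholders, and the per-layer lists are assumed to be of equal length):

    ffn.build_conv(
        activations=["relu", "relu"],
        filter_widths=[3, 3],
        filters=[32, 64],
        strides=[1, 1],
        pool_sizes=[2, None],      # pool only after the first convolutional layer
        pool_strides=[2, None],
        batch_norm=True,
        loss="bce"
    )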
"""
# Save model settings.
self.model_hyperparam = {
"model": "conv",
"activations": activations,
"filter_widths": filter_widths,
"filters": filters,
"strides": strides,
"pool_sizes": pool_sizes,
"pool_strides": pool_strides,
"batch_norm": batch_norm,
"split": split,
"aa_embedding_dim": aa_embedding_dim,
"depth_final_dense": depth_final_dense,
"dropout": dropout,
"optimizer": optimizer,
"lr": lr,
"loss": loss,
"label_smoothing": label_smoothing,
"dtype": dtype
}
# Build model.
self.model = ModelConv(
input_shapes=(
self.x_train.shape[1],
self.x_train.shape[2],
self.x_train.shape[3],
self.covariates_train.shape[1],
self.tcr_len
),
labels_dim=self.y_train.shape[1],
activations=activations,
filter_widths=filter_widths,
filters=filters,
strides=strides,
pool_sizes=pool_sizes,
pool_strides=pool_strides,
batch_norm=batch_norm,
split=split,
aa_embedding_dim=aa_embedding_dim,
out_activation=self._out_activation(loss=loss),
depth_final_dense=depth_final_dense,
dropout=dropout
)
self._compile_model(
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing
)
def build_inception(
self,
n_filters_1x1: List[int],
n_filters_out: List[int],
n_hidden: int = 10,
residual_connection: bool = True,
aa_embedding_dim: Union[None, int] = None,
depth_final_dense: int = 1,
final_pool: str = "average",
dropout: float = 0.0,
split: bool = False,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
dtype: str = "float32"
):
""" Build a self-attention-based feed-forward model to use in the estimator.
:param n_filters_1x1:
:param n_filters_out:
:param n_filters_final:
:param n_hidden:
:param residual_connection: apply residual connection or not.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param depth_final_dense: Number of final densely connected layers. They all have labels_dim number of units
and relu activation functions, apart from the last, which has either linear or sigmoid activation,
depending on out_probabilities.
:param final_pool:
:param dropout: Drop-out rate for training.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param dtype:
:return:
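Example (illustrative sketch only; ``ffn`` stands for an estimator instance with data loaded,
the values are placeholders, and the two filter lists are assumed to have one entry per
inception module):

    ffn.build_inception(
        n_filters_1x1=[16, 16],
        n_filters_out=[32, 32],
        n_hidden=10,
        final_pool="average",
        residual_connection=True,
        loss="bce"
    )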
"""
# Save model settings.
self.model_hyperparam = {
"model": "inception",
"n_filters_1x1": n_filters_1x1,
"n_filters_out": n_filters_out,
"n_hidden": n_hidden,
"split": split,
"final_pool": final_pool,
"residual_connection": residual_connection,
"aa_embedding_dim": aa_embedding_dim,
"depth_final_dense": depth_final_dense,
"dropout": dropout,
"optimizer": optimizer,
"lr": lr,
"loss": loss,
"dtype": dtype
}
# Build model.
self.model = ModelInception(
input_shapes=(
self.x_train.shape[1],
self.x_train.shape[2],
self.x_train.shape[3],
self.covariates_train.shape[1],
self.tcr_len
),
labels_dim=self.y_train.shape[1],
n_filters_1x1=n_filters_1x1,
n_filters_out=n_filters_out,
n_hidden=n_hidden,
split=split,
final_pool=final_pool,
residual_connection=residual_connection,
aa_embedding_dim=aa_embedding_dim,
depth_final_dense=depth_final_dense,
out_activation=self._out_activation(loss=loss),
dropout=dropout
)
self._compile_model(
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing
)
def build_linear(
self,
aa_embedding_dim: Union[None, int] = None,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
dtype: str = "float32"
):
""" Build a linear feed-forward model to use in the estimator.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param dtype:
:return:
"""
# Save model settings.
self.model_hyperparam = {
"model": "linear",
"aa_embedding_dim": aa_embedding_dim,
"optimizer": optimizer,
"lr": lr,
"loss": loss,
"label_smoothing": label_smoothing,
"dtype": dtype
}
# Build model.
self.model = ModelLinear(
input_shapes=(
self.x_train.shape[1],
self.x_train.shape[2],
self.x_train.shape[3],
self.covariates_train.shape[1],
self.tcr_len
),
labels_dim=self.y_train.shape[1],
aa_embedding_dim=aa_embedding_dim,
out_activation=self._out_activation(loss=loss)
)
self._compile_model(
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing
)
def build_noseq(
self,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
dtype: str = "float32"
):
""" Build a dense feed-forward model to use in the estimator that does not include the sequence data.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param dtype:
:return:
"""
# Save model settings.
self.model_hyperparam = {
"model": "noseq",
"optimizer": optimizer,
"lr": lr,
"loss": loss,
"label_smoothing": label_smoothing,
"dtype": dtype
}
# Build model.
self.model = ModelNoseq(
input_shapes=(
self.x_train.shape[1],
self.x_train.shape[2],
self.x_train.shape[3],
self.covariates_train.shape[1],
self.tcr_len
),
labels_dim=self.y_train.shape[1],
out_activation=self._out_activation(loss=loss)
)
self._compile_model(
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing
)
def _compile_model(
self,
optimizer,
lr,
loss,
label_smoothing: float = 0
):
""" Shared model building code across model classes.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for multiple boolean binding events with categorical crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:return:
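Worked example of the smoothing transform: with label_smoothing=0.1, a hard label of 0 is
mapped to f(0) = 0*(1-0.1) + 0.5*0.1 = 0.05 and a hard label of 1 to f(1) = 1*0.9 + 0.05 = 0.95,
i.e. labels are pulled from {0, 1} into [0.05, 0.95].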
"""
# Instantiate loss.
if loss.lower() in ["categorical_crossentropy", "cce"]:
tf_loss = tf.keras.losses.CategoricalCrossentropy(
from_logits=False,
label_smoothing=label_smoothing
)
metric_class = "categorical_crossentropy"
elif loss.lower() in ["binary_crossentropy", "bce"]:
tf_loss = tf.keras.losses.BinaryCrossentropy(
from_logits=False,
label_smoothing=label_smoothing
)
metric_class = "binary_crossentropy"
elif loss.lower() in ["weighted_binary_crossentropy", "wbce"]:
tf_loss = WeightedBinaryCrossentropy(
weight_positives=1./self.frac_positives - 1. if self.wbce_weight is None else self.wbce_weight,
label_smoothing=label_smoothing
)
metric_class = "binary_crossentropy"
elif loss.lower() in ["mean_squared_error", "mse"]:
tf_loss = tf.keras.losses.MeanSquaredError()
metric_class = "real"
elif loss.lower() in ["mean_squared_logarithmic_error", "msle"]:
tf_loss = tf.keras.losses.MeanSquaredLogarithmicError()
metric_class = "real"
elif loss.lower() in ["poisson", "pois"]:
tf_loss = tf.keras.losses.Poisson() # only in tf>=1.14.1
metric_class = "real"
else:
raise ValueError("Loss %s not recognized." % loss)
# Assemble metrics.
if metric_class == "categorical_crossentropy":
metrics = [
tf.keras.metrics.CategoricalAccuracy(name="keras_acc"),
tf.keras.metrics.Precision(name="keras_precision"),
tf.keras.metrics.Recall(name="keras_recall"),
tf.keras.metrics.AUC(name="keras_auc"),
tf.keras.metrics.FalseNegatives(name="keras_fn"),
tf.keras.metrics.FalsePositives(name="keras_fp"),
tf.keras.metrics.TrueNegatives(name="keras_tn"),
tf.keras.metrics.TruePositives(name="keras_tp"),
tf.keras.metrics.CategoricalCrossentropy(name="keras_ce", from_logits=False, label_smoothing=0)
]
elif metric_class == "binary_crossentropy":
metrics = [
tf.keras.metrics.BinaryAccuracy(name="keras_acc"),
tf.keras.metrics.Precision(name="keras_precision"),
tf.keras.metrics.Recall(name="keras_recall"),
tf.keras.metrics.AUC(name="keras_auc"),
tf.keras.metrics.FalseNegatives(name="keras_fn"),
tf.keras.metrics.FalsePositives(name="keras_fp"),
tf.keras.metrics.TrueNegatives(name="keras_tn"),
tf.keras.metrics.TruePositives(name="keras_tp"),
tf.keras.metrics.BinaryCrossentropy(name="keras_ce", from_logits=False, label_smoothing=0)
]
elif metric_class == "real":
metrics = [
tf.keras.metrics.MeanSquaredError(name="keras_mse"),
tf.keras.metrics.RootMeanSquaredError(name="keras_rmse"),
tf.keras.metrics.MeanSquaredLogarithmicError(name="keras_msle"),
tf.keras.metrics.Poisson(name="keras_poisson"),
tf.keras.metrics.CosineSimilarity(name="keras_cosine"),
custom_r2,
custom_logr2
]
else:
assert False
# Build optimizer:
if optimizer.lower() == "adam":
optimizer = tf.keras.optimizers.Adam(lr=lr)
else:
raise ValueError("optimizer %s not recognized" % optimizer)
# Compile model.
self.model.training_model.compile(
loss=tf_loss,
optimizer=optimizer,
metrics=metrics
)
def train(
self,
epochs: int = 1000,
batch_size: int = 128,
max_steps_per_epoch: int = 100,
validation_split=0.1,
validation_batch_size: int = 256,
max_validation_steps: int = 100,
patience: int = 20,
lr_schedule_min_lr: float = 1e-5,
lr_schedule_factor: float = 0.2,
lr_schedule_patience: int = 5,
log_dir: Union[str, None] = None,
use_existing_eval_partition: bool = False
):
""" Train model.
Uses validation loss and maximum number of epochs as termination criteria.
:param epochs: refer to tf.keras.models.Model.fit() documentation
:param max_steps_per_epoch: Maximum number of training steps per epoch; caps the steps_per_epoch passed to tf.keras.models.Model.fit()
:param batch_size: refer to tf.keras.models.Model.fit() documentation
:param validation_split: refer to tf.keras.models.Model.fit() documentation
:param validation_batch_size: Number of validation data observations to evaluate evaluation metrics on.
:param max_validation_steps: Maximum number of validation steps per epoch; caps the validation_steps passed to tf.keras.models.Model.fit()
:param patience: refer to tf.keras.models.Model.fit() documentation
:param lr_schedule_min_lr: Minimum learning rate for learning rate reduction schedule.
:param lr_schedule_factor: Factor to reduce learning rate by within learning rate reduction schedule
when a plateau is reached.
:param lr_schedule_patience: Patience for learning rate reduction in learning rate reduction schedule.
:param log_dir: Directory to save tensorboard callback to. Disabled if None.
:param use_existing_eval_partition: Whether to use the existing training-evaluation partition of the data. The index
vectors are expected in self.idx_train and self.idx_val.
:return:
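Example (illustrative sketch only; ``ffn`` stands for an estimator instance on which one of the
build_* methods has already been called, and the values are placeholders):

    ffn.train(
        epochs=200,
        batch_size=128,
        validation_split=0.1,
        patience=20
    )
    ffn.evaluate()    # fills ffn.evaluations for train, val and test data
    ffn.predict()     # fills ffn.predictions on the test data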
"""
# Save training settings to allow model restoring.
self.train_hyperparam = {
"epochs": epochs,
"batch_size": batch_size,
"validation_split": validation_split,
"validation_batch_size": validation_batch_size,
"patience": patience,
"lr_schedule_min_lr": lr_schedule_min_lr,
"lr_schedule_factor": lr_schedule_factor,
"lr_schedule_patience": lr_schedule_patience,
"log_dir": log_dir
}
# Set callbacks.
cbs = [
tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=patience,
restore_best_weights=True
),
tf.keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=lr_schedule_factor,
patience=lr_schedule_patience,
min_lr=lr_schedule_min_lr
)
]
if log_dir is not None:
cbs.append(tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
histogram_freq=0,
write_graph=False,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
embeddings_data=None,
update_freq='epoch'
))
# Split data into training and evaluation.
if use_existing_eval_partition:
idx_val = np.array([self.idx_train_val.tolist().index(x)
for x in self.idx_train_val if x in self.idx_val])
idx_train = np.array([self.idx_train_val.tolist().index(x)
for x in self.idx_train_val if x in self.idx_train])
else:
# Split training data into training and evaluation.
# Perform this splitting based on clonotypes.
clones = np.unique(self.clone_train)
clones_eval = clones[np.random.choice(
a=np.arange(0, clones.shape[0]),
size=round(clones.shape[0] * validation_split),
replace=False
)]
clones_train = np.array([x for x in clones if x not in clones_eval])
# Collect observations by clone partition:
idx_val = np.where([x in clones_eval for x in self.clone_train])[0]
idx_train = np.where([x in clones_train for x in self.clone_train])[0]
# Save partitions in terms of original indexing.
self.idx_train = self.idx_train_val[idx_train]
self.idx_val = self.idx_train_val[idx_val]
# Assert that split is exclusive and complete:
assert len(set(clones_eval).intersection(set(clones_train))) == 0, \
"ERROR: train-test assignment was not exclusive on level of clones"
assert len(set(idx_val).intersection(set(idx_train))) == 0, \
"ERROR: train-test assignment was not exclusive on level of cells"
assert len(clones_eval) + len(clones_train) == len(clones), \
"ERROR: train-test split was not complete on the level of clones"
assert len(idx_val) + len(idx_train) == len(self.clone_train), \
"ERROR: train-test split was not complete on the level of cells"
print("Number of observations in evaluation data: %i" % len(idx_val))
print("Number of observations in training data: %i" % len(idx_train))
# Build tf.data Datasets for the training and evaluation partitions to feed into model fitting.
train_dataset = tf.data.Dataset.from_tensor_slices((
(self.x_train[idx_train], self.covariates_train[idx_train]),
self.y_train[idx_train]
#self.sample_weight_train[idx_train]
)).shuffle(buffer_size=len(idx_train), reshuffle_each_iteration=True).\
repeat().batch(batch_size).prefetch(1)
eval_dataset = tf.data.Dataset.from_tensor_slices((
(self.x_train[idx_val], self.covariates_train[idx_val]),
self.y_train[idx_val]
)).shuffle(buffer_size=len(idx_val), reshuffle_each_iteration=True).\
repeat().batch(validation_batch_size).prefetch(1)
steps_per_epoch = min(max(len(idx_train) // batch_size, 1), max_steps_per_epoch)
validation_steps = min(max(len(idx_val) // validation_batch_size, 1), max_validation_steps)
# Fit model and save summary of fitting.
if len(self.x_train.shape) != 4:
raise ValueError("input shape should be [?,1,pos,feature]")
self.history = self.model.training_model.fit(
x=train_dataset,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_data=eval_dataset,
validation_steps=validation_steps,
callbacks=cbs,
verbose=2
).history
@property
def idx_train_in_train_val(self):
return np.intersect1d(self.idx_train_val, self.idx_train, return_indices=True)[1]
@property
def idx_val_in_train_val(self):
return np.intersect1d(self.idx_train_val, self.idx_val, return_indices=True)[1]
def evaluate(
self,
batch_size: int = 1024
):
""" Evaluate loss on test data.
:param batch_size: Batch size for evaluation.
:return:
"""
results_test = self.evaluate_any(
x=self.x_test,
covar=self.covariates_test,
y=self.y_test,
batch_size=batch_size
)
results_val = self.evaluate_any(
x=self.x_train[self.idx_val_in_train_val],
covar=self.covariates_train[self.idx_val_in_train_val],
y=self.y_train[self.idx_val_in_train_val],
batch_size=batch_size,
)
results_train = self.evaluate_any(
x=self.x_train[self.idx_train_in_train_val],
covar=self.covariates_train[self.idx_train_in_train_val],
y=self.y_train[self.idx_train_in_train_val],
batch_size=batch_size,
)
self.evaluations = {
"test": results_test,
"val": results_val,
"train": results_train
}
def evaluate_any(
self,
x,
covar,
y,
batch_size: int = 1024,
):
""" Evaluate loss on supplied data.
:param batch_size: Batch size for evaluation.
:return: Dictionary of metrics
"""
results = self.model.training_model.evaluate(
x=(x, covar),
y=y,
batch_size=batch_size,
verbose=0
)
return dict(zip(self.model.training_model.metrics_names, results))
def evaluate_custom(
self,
classification_metrics: bool = True,
regression_metrics: bool = False,
transform: str = None
):
""" Obtain custom evaluation metrics for classification task on train, val and test data.
"""
results_test = self.evaluate_custom_any(
yhat=self.predict_any(x=self.x_test, covar=self.covariates_test, batch_size=1024),
yobs=self.y_test,
nc=self.nc_test,
labels=np.asarray(self.peptide_seqs_test),
labels_unique=self.peptide_seqs_unique,
classification_metrics=classification_metrics,
regression_metrics=regression_metrics,
transform_flavour=transform
)
results_val = self.evaluate_custom_any(
yhat=self.predict_any(
x=self.x_train[self.idx_val_in_train_val],
covar=self.covariates_train[self.idx_val_in_train_val],
batch_size=1024
),
yobs=self.y_train[self.idx_val_in_train_val],
nc=self.nc_train[self.idx_val_in_train_val] if self.nc_train is not None else None,
labels=np.asarray(self.peptide_seqs_train)[self.idx_val_in_train_val] \
if self.peptide_seqs_train is not None else None,
labels_unique=self.peptide_seqs_unique,
classification_metrics=classification_metrics,
regression_metrics=regression_metrics,
transform_flavour=transform
)
results_train = self.evaluate_custom_any(
yhat=self.predict_any(
x=self.x_train[self.idx_train_in_train_val],
covar=self.covariates_train[self.idx_train_in_train_val],
batch_size=1024
),
yobs=self.y_train[self.idx_train_in_train_val],
nc=self.nc_train[self.idx_train_in_train_val] if self.nc_train is not None else None,
labels=np.asarray(self.peptide_seqs_train)[self.idx_train_in_train_val] \
if self.peptide_seqs_train is not None else None,
labels_unique=self.peptide_seqs_unique,
classification_metrics=classification_metrics,
regression_metrics=regression_metrics,
transform_flavour=transform
)
self.evaluations_custom = {
"test": results_test,
"val": results_val,
"train": results_train
}
def _evaluate_custom_any(
self,
yhat,
yobs,
nc,
classification_metrics: bool,
regression_metrics: bool,
labels=None,
labels_unique=None,
transform_flavour: str = None
):
""" Obtain custom evaluation metrics for classification task on any data.
"""
metrics_global = {}
metrics_local = {}
if regression_metrics:
mse_global, msle_global, r2_global, r2log_global = deviation_global(
y_hat=[yhat], y_obs=[yobs]
)
mse_label, msle_label, r2_label, r2log_label = deviation_label(
y_hat=[yhat], y_obs=[yobs], labels=[labels], labels_unique=labels_unique
)
metrics_global.update({
"mse": mse_global,
"msle": msle_global,
"r2": r2_global,
"r2log": r2log_global
})
metrics_local.update({
"mse": mse_label,
"msle": msle_label,
"r2": r2_label,
"r2log": r2log_label
})
if classification_metrics:
if transform_flavour is not None:
yhat, yobs = self.transform_predictions_any(
yhat=yhat,
yobs=yobs,
nc=nc,
flavour=transform_flavour
)
score_auc_global = auc_global(y_hat=[yhat], y_obs=[yobs])
prec_global, rec_global, tp_global, tn_global, fp_global, fn_global = pr_global(
y_hat=[yhat], y_obs=[yobs]
)
score_auc_label = auc_label(
y_hat=[yhat], y_obs=[yobs], labels=[labels], labels_unique=labels_unique
)
prec_label, rec_label, tp_label, tn_label, fp_label, fn_label = pr_label(
y_hat=[yhat], y_obs=[yobs], labels=[labels], labels_unique=labels_unique
)
metrics_global.update({
"auc": score_auc_global,
"prec": prec_global,
"rec": rec_global,
"tp": tp_global,
"tn": tn_global,
"fp": fp_global,
"fn": fn_global
})
metrics_local.update({
"auc": score_auc_label,
"prec": prec_label,
"rec": rec_label,
"tp": tp_label,
"tn": tn_label,
"fp": fp_label,
"fn": fn_label
})
return {
"global": metrics_global,
"local": metrics_local
}
def evaluate_custom_any(
self,
yhat,
yobs,
nc,
labels=None,
labels_unique=None,
classification_metrics: bool = True,
regression_metrics: bool = False,
transform_flavour: str = None
):
"""
Obtain custom evaluation metrics for classification task.
Ignores labels, as samples are not structured by labels (i.e. one sample contains observations on all labels).
:param yhat:
:param yobs:
:param nc:
:param labels:
:param transform_flavour:
:return:
"""
return self._evaluate_custom_any(
yhat=yhat,
yobs=yobs,
nc=nc,
classification_metrics=classification_metrics,
regression_metrics=regression_metrics,
transform_flavour=transform_flavour,
labels=None,
labels_unique=None
)
def predict(
self,
batch_size: int = 128
):
""" Predict labels on test data.
:param batch_size: Batch size for evaluation.
:return:
"""
self.predictions = self.model.training_model.predict(
x=(self.x_test, self.covariates_test),
batch_size=batch_size
)
def predict_any(
self,
x,
covar,
batch_size: int = 128
):
""" Predict labels on any data.
:param batch_size: Batch size for evaluation.
:return:
"""
return self.model.training_model.predict(
x=(x, covar),
batch_size=batch_size,
verbose=0
)
def transform_predictions_any(
self,
yhat,
yobs,
nc,
flavour="10x_cd8_v1"
):
""" Transform model predictions and ground truth labels on test data.
Transform predictions and self.y_test
- "10x_cd8" Use this setting to transform the real valued output of a network trained with MSE loss
into probability space by using the bound/unbound classifier published with the 10x data set:
An antigen is bound if it has (1) at least 10 counts and (2) at least 5 times more counts
than the highest observed negative control and (3) is the highest count pMHC.
Requires negative controls to be defined during reading.
:param flavour: Type of transform to use, see function description.
:return:
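Worked example (illustrative): for a cell with predicted counts yhat[i] = [20., 3., 0.] and a
highest negative-control count of 2, only the first pMHC is called bound, because 20 > 10,
20 is the per-cell maximum and 20 > 5 * 2; the corresponding output row becomes [1., 0., 0.].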
"""
if flavour == "10x_cd8_v1":
if self.model_hyperparam["loss"] not in ["mse", "msle", "poisson"]:
raise ValueError("Do not use transform_predictions with flavour=='10x_cd8_v1' on a model fit "
"with a loss that is not mse, msle or poisson.")
if nc.shape[1] == 0:
raise ValueError("Negative controls were not set, supply these during data reading.")
predictions_new = np.zeros(yhat.shape)
idx_bound_predictions = [np.where(np.logical_and(
np.logical_and(x > 10., np.max(x) == x), # At least 10 UMIs and is maximum element of cell.
x > 5. * np.max(nc[i, :])
# At least 5x as many UMIs as highest negative control UMI count in cell.
))[0] for i, x in enumerate(yhat)]
for i, j in enumerate(idx_bound_predictions):
if len(j) > 0:
predictions_new[i, j[-1]] = 1.  # Choose the last label if two labels are called.
yhat = predictions_new
y_test_new = np.zeros(yobs.shape)
idx_bound_y = [np.where(np.logical_and(
np.logical_and(x > 10., np.max(x) == x), # At least 10 UMIs and is maximum element of cell.
x > 5. * np.max(nc[i, :])
# At least 5x as many UMIs as highest negative control UMI count in cell.
))[0] for i, x in enumerate(yobs)]
for i, j in enumerate(idx_bound_y):
if len(j) > 0:
y_test_new[i, j[-1]] = 1.  # Choose the last label if two labels are called.
yobs = y_test_new
else:
raise ValueError("flavour %s not recognized" % flavour)
return yhat, yobs
def transform_predictions(
self,
flavour="10x_cd8_v1"
):
""" Transform model predictions and ground truth labels on test data.
Transform predictions and self.y_test
- "10x_cd8" Use this setting to transform the real valued output of a network trained with MSE loss
into probability space by using the bound/unbound classifier published with the 10x data set:
An antigen is bound if it has (1) at least 10 counts and (2) at least 5 times more counts
than the highest observed negative control and (3) is the highest count pMHC.
Requires negative controls to be defined during reading.
:param flavour: Type of transform to use, see function description.
:return:
"""
if flavour == "10x_cd8_v1":
if self.model_hyperparam["loss"] not in ["mse", "msle", "poisson"]:
raise ValueError("Do not use transform_predictions with flavour=='10x_cd8_v1' on a model fit "
"with a loss that is not mse, msle or poisson.")
if self.nc_test.shape[1] == 0:
raise ValueError("Negative controls were not set, supply these during data reading.")
predictions_new = np.zeros(self.predictions.shape)
idx_bound_predictions = [np.where(np.logical_and(
np.logical_and(x > 10., np.max(x) == x), # At least 10 UMIs and is maximum element of cell.
x > 5. * np.max(self.nc_test[i, :]) # At least 5x as many UMIs as highest negative control UMI count in cell.
))[0] for i, x in enumerate(self.predictions)]
for i, j in enumerate(idx_bound_predictions):
if len(j) > 0:
predictions_new[i, j[-1]] = 1.  # Choose the last label if two labels are called.
self.predictions = predictions_new
y_test_new = np.zeros(self.y_test.shape)
idx_bound_y = [np.where(np.logical_and(
np.logical_and(x > 10., np.max(x) == x), # At least 10 UMIs and is maximum element of cell.
x > 5. * np.max(self.nc_test[i, :]) # At least 5x as many UMIs as highest negative control UMI count in cell.
))[0] for i, x in enumerate(self.y_test)]
for i, j in enumerate(idx_bound_y):
if len(j) > 0:
y_test_new[i, j[-1]] = 1.  # Choose the last label if two labels are called.
self.y_test = y_test_new
else:
raise ValueError("flavour %s not recognized" % flavour)
def save_results(
self,
fn
):
""" Save training history, test loss and test predictions.
Will generate the following files:
- fn+"history.pkl": training history dictionary
- fn+"evaluations.npy": loss on test data
- fn+"evaluations_custom.npy": loss on test data
:param self:
:param fn: Path and file name prefix to write to.
:param save_labels: Whether to save ground truth labels. Use this for saving disk space.
:return:
"""
with open(fn + "_history.pkl", 'wb') as f:
pickle.dump(self.history, f)
with open(fn + "_evaluations.pkl", 'wb') as f:
pickle.dump(self.evaluations, f)
with open(fn + "_evaluations_custom.pkl", 'wb') as f:
pickle.dump(self.evaluations_custom, f)
if self.label_ids is not None:
pd.DataFrame({"label": self.label_ids})
from stockscore.data import Stocks, return_top
import pandas as pd
import pytest
symbols = ["FB", "AAPL", "AMZN", "NFLX", "GOOGL"]
stocks = Stocks(symbols)
tdata = {
"Score": [6, 5, 4, 3, 2],
"Value Score": [1, 2, 3, 1, 0],
"Growth Score": [3, 2, 0, 1, 2],
"Momentum Score": [2, 1, 1, 1, 0],
}
tscores = pd.DataFrame(tdata, index=symbols)
from xml.parsers.expat import model
import numpy as np
import pandas as pd
import plotly.express as px
import streamlit as st
import os
from joblib import dump, load
import sklearn
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
import pickle
# App title
st.write("""
# Prevendo ocorrência de doenças cardio vasculares
Aplicativo que utiliza Machine Learning para prever possíveis ocorrências de doenças do coração. \n
Legenda dos atributos:\n
Gênero: 1 - Feminino 2 - Masculino /
Colesterol: 1 - Normal 2 - Acima do normal 3 - Bem acima do normal /
Glicose: 1 - Normal 2 - Acima do normal 3 - Bem acima do normal /
Fumante: 0 - Não 1 - Sim /
Consumo de bebiba Alcoólica: 0 - Não 1 - Sim /
Praticante de atividade física: 0 - Não 1 - Sim
""")
# Header
st.subheader("Características do Paciente")
# Patient name
user_input = st.sidebar.text_input("Digite seu nome")
st.write("Paciente: ", user_input)
st.image('cardio_img.png')
# Patient data
def user_input_features():
with st.form('user_input_variebles'):
gender = st.sidebar.selectbox("Gênero", [1, 2], 1)
age = st.sidebar.slider("Idade", 15, 80, 20)
height = st.sidebar.slider("Altura", 0.5, 2.5, 1.70)
weight = st.sidebar.slider("Peso", 10, 200, 60)
ap_hi = st.sidebar.slider("Pressão sistólica", 0, 500, 100)
ap_lo = st.sidebar.slider("Pressão diastólica", 0, 500, 100)
cholesterol = st.sidebar.selectbox("Colesterol", [1,2 ,3], 1)
gluc = st.sidebar.selectbox("Glicose",[1,2, 3] ,1)
smoke = st.sidebar.selectbox("Fumante", [0, 1], 0)
alco = st.sidebar.selectbox("Bebida Alcoólica", [0 ,1], 0)
active = st.sidebar.selectbox("Praticante de atividade física", [0, 1], 0)
st.form_submit_button('Fazer previsão')
user_data = {"Gênero": gender,
"Idade": age,
"Altura": height,
"Peso": weight,
"Pressão sistólica": ap_hi,
"Pressão diastólica": ap_lo,
"Colesterol": cholesterol,
"Glicose": gluc,
"Fumante": smoke,
"Bebida Alcoólica": alco,
"Praticante de atividade física": active}
features = pd.DataFrame(user_data, index = [0])
return features
user_input_varieables = user_input_features()
# Loading the data
cardio = pd.read_csv('cardio_app2.csv')
# -*- coding: utf-8 -*-
# (c) <NAME>, see LICENSE.rst.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from ciso8601 import parse_datetime
from lxml import etree
from pytz import FixedOffset
import numpy as np
import pandas as pd
import xmltodict
from tslib.readers.ts_reader import TimeSeriesReader
logger = logging.getLogger(__name__)
NS = 'http://www.wldelft.nl/fews/PI'
TIMEZONE = '{%s}timeZone' % NS
SERIES = '{%s}series' % NS
EVENT = '{%s}event' % NS
COMMENT = '{%s}comment' % NS
def fast_iterparse(source, **kwargs):
""" A version of lxml.etree.iterparse that cleans up its own memory usage
See also:
https://www.ibm.com/developerworks/xml/library/x-hiperfparse/
"""
for event, elem in etree.iterparse(source, **kwargs):
yield event, elem
# It's safe to call clear here because no descendants will be accessed
elem.clear()
# Also eliminate now-empty references from the root node to `elem`
while elem.getprevious() is not None:
del elem.getparent()[0]
class PiXmlReader(TimeSeriesReader):
"""Read a PI XML file.
See: https://publicwiki.deltares.nl/display/FEWSDOC/Delft-
Fews+Published+Interface+timeseries+Format+(PI)+Import
Time series are returned as a Pandas dataframe and a
dictionary containing metadata. An effort has been
made to achieve a fair balance between speed and
resource consumption.
"""
def __init__(self, source):
"""docstring"""
self.source = source
def get_tz(self):
"""Return the offset in hours from UTC as a float.
Note that pi xml deals with fixed offsets only. See:
http://fews.wldelft.nl/schemas/version1.0/pi-schemas/pi_sharedtypes.xsd
A return value of None means that no `timeZone` element is present.
An empty `timeZone` will return the default value, i.e. `0.0`.
"""
for _, element in etree.iterparse(self.source):
if element.tag == TIMEZONE:
return float(element.text or 0.0)
def get_series(self):
"""Return a (metadata, dataframe) tuple.
Metadata is returned as a dict:
https://github.com/martinblech/xmltodict
http://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html
The dataframe's DatetimeIndex has the same time zone (offset)
as the PI XML, which is generally not UTC. If you need UTC,
the returned data frame can be converted as follows:
df.tz_convert('UTC', copy=False)
Caveat: the PI XML timeZone element is optional. In that
case, the DatetimeIndex has no time zone information.
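Example (illustrative sketch; "timeseries.xml" is a placeholder path):

    reader = PiXmlReader("timeseries.xml")
    for metadata, df in reader.get_series():
        if df is not None:
            print(metadata['header']['parameterId'], len(df))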
"""
for _, series in etree.iterparse(self.source, tag=SERIES):
header = series[0]
metadata = xmltodict.parse(etree.tostring(header))
missVal = metadata['header']['missVal']
datetimes = []
values = []
flags = []
flag_sources = []
comments = []
users = []
iterator = series.iterchildren(tag=EVENT)
for event in iterator:
d = event.attrib['date']
t = event.attrib['time']
datetimes.append(parse_datetime("{}T{}".format(d, t)))
value = event.attrib['value']
values.append(value if value != missVal else "NaN")
flags.append(event.attrib.get('flag', None))
flag_sources.append(event.attrib.get('flagSource', None))
comments.append(event.attrib.get('comment', None))
users.append(event.attrib.get('user', None))
if values:
# Construct a pandas DataFrame from the events.
# NB: np.float is shorthand for np.float64. This matches the
# "double" type of the "value" attribute in the XML Schema
# (an IEEE double-precision 64-bit floating-point number).
data = {'value': np.array(values, np.float)}
# The "flag" attribute in the XML Schema is of type "int".
# This corresponds to a signed 32-bit integer. NB: this
# is not the same as the "integer" type, which is an
# infinite set. TODO: should we bother casting or
# leave flags as strings?
if any(flags):
data['flag'] = flags
# The other attributes are of type "string".
if any(flag_sources):
data['flagSource'] = flag_sources
if any(comments):
data['comment'] = comments
if any(users):
data['user'] = users
dataframe = pd.DataFrame(data=data, index=datetimes)
if series.getparent()[0].tag == TIMEZONE:
offset = float(series.getparent()[0].text or 0)
tz_localize(dataframe, offset, copy=False)
else:
# No events. The `minOccurs` attribute of the
# `event` element is 0, so this valid XML.
dataframe = None
if series[-1].tag == COMMENT:
comment = series[-1]
if comment.text is not None:
metadata[u'comment'] = unicode(comment.text)
series.clear()
yield metadata, dataframe
def bulk_get_series(self, chunk_size=250000):
"""Return a (metadata, dataframe) tuple.
Metadata is returned as a dict:
https://github.com/martinblech/xmltodict
http://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html
The dataframe's DatetimeIndex has the same time zone (offset)
as the PI XML, which is generally not UTC. If you need UTC,
the returned data frame can be converted as follows:
df.tz_convert('UTC', copy=False)
Caveat: the PI XML timeZone element is optional. In that
case, the DatetimeIndex has no time zone information.
"""
duplicate_check_set = set()
meta_data = []
# initialize a counter to index into the 'bulk_data' array
i = 0
# by default, do not localize
tz_offset = None
for series_i, (_, series) in enumerate(
fast_iterparse(self.source, tag=SERIES)):
header = xmltodict.parse(etree.tostring(series[0]))['header']
series_code = get_code(header)
miss_val = header['missVal']
location_code = header['locationId']
if (series_code, location_code) in duplicate_check_set:
logger.info(
'PiXML import skipped an entry because of duplicate for '
'timeseries_code "%s", location_code "%s" and file "%s".',
series_code, location_code, self.source
)
continue
duplicate_check_set.add((series_code, location_code))
# get the timezone offset, only for the first entry
if series_i == 0 and series.getparent()[0].tag == TIMEZONE:
tz_offset = FixedOffset(
float(series.getparent()[0].text or 0) * 60)
if series[-1].tag == COMMENT:
comment = series[-1].text
else:
comment = None
meta_data.append({
"code": series_code,
"location_code": location_code,
"pru": header['parameterId'],
"unit": header.get('units', None),
"name": header['parameterId'],
"location_name": (header.get('stationName', '') or '')[:80],
"lat": float(header.get('lat', np.nan)),
"lon": float(header.get('lon', np.nan)),
"comment": comment,
})
for event in series.iterchildren(tag=EVENT):
if i == 0:
# the first in this chunk: init the bulk_data
# NB: np.float is shorthand for np.float64. This matches the
# "double" type of the "value" attribute in the XML Schema
# (an IEEE double-precision 64-bit floating-point number).
bulk_data = {
"code": np.empty(chunk_size, dtype=object),
"comment": np.empty(chunk_size, dtype=object),
"timestamp": np.empty(chunk_size,
dtype='datetime64[ms]'),
"flag_source": np.empty(chunk_size, dtype=object),
# use float64 to allow np.nan values
"flag": np.empty(chunk_size, dtype=np.float64),
"location_code": np.empty(chunk_size, dtype=object),
"user": np.empty(chunk_size, dtype=object),
"value": np.empty(chunk_size, dtype=np.float64),
}
# check if we need the leftover metadata from the prev iter
if len(meta_data) > 0:
if meta_data[0]['code'] != series_code:
meta_data = meta_data[1:]
# create timestamp and code rows, these will form the index
d = event.attrib['date']
t = event.attrib['time']
bulk_data["timestamp"][i] = "{}T{}".format(d, t)
bulk_data["code"][i] = series_code
bulk_data["location_code"][i] = location_code
# add the data
value = event.attrib['value']
bulk_data["value"][i] = value if value != miss_val else np.nan
bulk_data["flag"][i] = event.attrib.get('flag', np.nan)
bulk_data["flag_source"][i] = \
event.attrib.get('flagSource', None)
bulk_data["comment"][i] = event.attrib.get('comment', None)
bulk_data["user"][i] = event.attrib.get('user', None)
i += 1
if i >= chunk_size:
i = 0 # for next iter
# Construct a pandas DataFrame from the events.
dataframe = dataframe_from_bulk(bulk_data, tz_offset)
yield pd.DataFrame(meta_data)
"""Dynamic file checks."""
from dataclasses import dataclass
from datetime import date, timedelta
from typing import Dict, Set
import re
import pandas as pd
import numpy as np
from .errors import ValidationFailure, APIDataFetchError
from .datafetcher import get_geo_signal_combos, threaded_api_calls
from .utils import relative_difference_by_min, TimeWindow, lag_converter
class DynamicValidator:
"""Class for validation of static properties of individual datasets."""
@dataclass
class Parameters:
"""Configuration parameters."""
# data source name, one of
# https://cmu-delphi.github.io/delphi-epidata/api/covidcast_signals.html
data_source: str
# span of time over which to perform checks
time_window: TimeWindow
# date that this df_to_test was generated; typically 1 day after the last date in df_to_test
generation_date: date
# number of days back to perform sanity checks, starting from the last date appearing in
# df_to_test
max_check_lookbehind: timedelta
# names of signals that are smoothed (7-day avg, etc)
smoothed_signals: Set[str]
# maximum number of days behind do we expect each signal to be
max_expected_lag: Dict[str, int]
# minimum number of days behind do we expect each signal to be
min_expected_lag: Dict[str, int]
def __init__(self, params):
"""
Initialize object and set parameters.
Arguments:
- params: dictionary of user settings; if empty, defaults will be used
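Example of the expected structure (illustrative sketch; only the keys read here are shown
and all values are placeholders):

    params = {
        "common": {
            "data_source": "usa-facts",
            "end_date": "2020-09-01",
            "span_length": 14,
            "min_expected_lag": {"all": 1},
            "max_expected_lag": {"all": 10},
        },
        "dynamic": {
            "ref_window_size": 14,
            "smoothed_signals": ["confirmed_7dav_incidence_num"],
        },
    }
    validator = DynamicValidator(params)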
"""
common_params = params["common"]
dynamic_params = params.get("dynamic", dict())
self.test_mode = dynamic_params.get("test_mode", False)
self.params = self.Parameters(
data_source=common_params["data_source"],
time_window=TimeWindow.from_params(common_params["end_date"],
common_params["span_length"]),
generation_date=date.today(),
max_check_lookbehind=timedelta(
days=max(7, dynamic_params.get("ref_window_size", 14))),
smoothed_signals=set(dynamic_params.get("smoothed_signals", [])),
min_expected_lag=lag_converter(common_params.get(
"min_expected_lag", dict())),
max_expected_lag=lag_converter(common_params.get(
"max_expected_lag", dict()))
)
def validate(self, all_frames, report):
"""
Perform all checks over the combined data set from all files.
Parameters
----------
all_frames: pd.DataFrame
combined data from all input files
report: ValidationReport
report to which the results of these checks will be added
"""
# Get 14 days prior to the earliest list date
outlier_lookbehind = timedelta(days=14)
# Get all expected combinations of geo_type and signal.
geo_signal_combos = get_geo_signal_combos(self.params.data_source)
all_api_df = threaded_api_calls(self.params.data_source,
self.params.time_window.start_date - outlier_lookbehind,
self.params.time_window.end_date,
geo_signal_combos)
# Keeps script from checking all files in a test run.
kroc = 0
# Comparison checks
# Run checks for recent dates in each geo-sig combo vs semirecent (previous
# week) API data.
for geo_type, signal_type in geo_signal_combos:
geo_sig_df = all_frames.query(
"geo_type == @geo_type & signal == @signal_type")
# Drop unused columns.
geo_sig_df.drop(columns=["geo_type", "signal"])
report.increment_total_checks()
if geo_sig_df.empty:
report.add_raised_error(ValidationFailure(check_name="check_missing_geo_sig_combo",
geo_type=geo_type,
signal=signal_type,
message="file with geo_type-signal combo "
"does not exist"))
continue
max_date = geo_sig_df["time_value"].max()
self.check_min_allowed_max_date(
max_date, geo_type, signal_type, report)
self.check_max_allowed_max_date(
max_date, geo_type, signal_type, report)
# Get relevant reference data from API dictionary.
api_df_or_error = all_api_df[(geo_type, signal_type)]
report.increment_total_checks()
if isinstance(api_df_or_error, APIDataFetchError):
report.add_raised_error(api_df_or_error)
continue
# Only do outlier check for cases and deaths signals
if (signal_type in ["confirmed_7dav_cumulative_num", "confirmed_7dav_incidence_num",
"confirmed_cumulative_num", "confirmed_incidence_num",
"deaths_7dav_cumulative_num",
"deaths_cumulative_num"]):
# Outlier dataframe
earliest_available_date = geo_sig_df["time_value"].min()
source_df = geo_sig_df.query(
'time_value <= @self.params.time_window.end_date & '
'time_value >= @self.params.time_window.start_date'
)
# These variables are interpolated into the call to `api_df_or_error.query()`
# below but pylint doesn't recognize that.
# pylint: disable=unused-variable
outlier_start_date = earliest_available_date - outlier_lookbehind
outlier_end_date = earliest_available_date - timedelta(days=1)
outlier_api_df = api_df_or_error.query(
'time_value <= @outlier_end_date & time_value >= @outlier_start_date')
# pylint: enable=unused-variable
self.check_positive_negative_spikes(
source_df, outlier_api_df, geo_type, signal_type, report)
# Check data from a group of dates against recent (previous 7 days,
# by default) data from the API.
for checking_date in self.params.time_window.date_seq:
create_dfs_or_error = self.create_dfs(
geo_sig_df, api_df_or_error, checking_date, geo_type, signal_type, report)
if not create_dfs_or_error:
continue
recent_df, reference_api_df = create_dfs_or_error
self.check_max_date_vs_reference(
recent_df, reference_api_df, checking_date, geo_type, signal_type, report)
self.check_rapid_change_num_rows(
recent_df, reference_api_df, checking_date, geo_type, signal_type, report)
if not re.search("cumulative", signal_type):
self.check_avg_val_vs_reference(
recent_df, reference_api_df, checking_date, geo_type,
signal_type, report)
# Keeps script from checking all files in a test run.
kroc += 1
if self.test_mode and kroc == 2:
break
def check_min_allowed_max_date(self, max_date, geo_type, signal_type, report):
"""Check if time since data was generated is reasonable or too long ago.
The most recent data should be at least max_expected_lag from generation date
Arguments:
- max_date: date of most recent data to be validated; datetime format.
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- None
"""
min_thres = timedelta(days = self.params.max_expected_lag.get(
signal_type, self.params.max_expected_lag.get('all', 10)))
if max_date < self.params.generation_date - min_thres:
report.add_raised_error(
ValidationFailure("check_min_max_date",
geo_type=geo_type,
signal=signal_type,
message="date of most recent generated file seems too long ago"))
report.increment_total_checks()
def check_max_allowed_max_date(self, max_date, geo_type, signal_type, report):
"""Check if time since data was generated is reasonable or too recent.
The most recent data should be at most min_expected_lag from generation date
Arguments:
- max_date: date of most recent data to be validated; datetime format.
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- None
"""
max_thres = timedelta(days = self.params.min_expected_lag.get(
signal_type, self.params.min_expected_lag.get('all', 1)))
if max_date > self.params.generation_date - max_thres:
report.add_raised_error(
ValidationFailure("check_max_max_date",
geo_type=geo_type,
signal=signal_type,
message="date of most recent generated file seems too recent"))
report.increment_total_checks()
def create_dfs(self, geo_sig_df, api_df_or_error, checking_date, geo_type, signal_type, report):
"""Create recent_df and reference_api_df from params.
Raises error if recent_df is empty.
Arguments:
- geo_sig_df: Pandas dataframe of test data
- api_df_or_error: pandas dataframe of reference data, either from the
COVIDcast API or semirecent data
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- False if recent_df is empty, else (recent_df, reference_api_df)
(after reference_api_df has been padded if necessary)
"""
# recent_lookbehind: start from the check date and working backward in time,
# how many days at a time do we want to check for anomalies?
# Choosing 1 day checks just the daily data.
recent_lookbehind = timedelta(days=1)
recent_cutoff_date = checking_date - \
recent_lookbehind + timedelta(days=1)
recent_df = geo_sig_df.query(
'time_value <= @checking_date & time_value >= @recent_cutoff_date')
report.increment_total_checks()
if recent_df.empty:
min_thres = timedelta(days = self.params.max_expected_lag.get(
signal_type, self.params.max_expected_lag.get('all', 10)))
if checking_date < self.params.generation_date - min_thres:
report.add_raised_error(
ValidationFailure("check_missing_geo_sig_date_combo",
checking_date,
geo_type,
signal_type,
"test data for a given checking date-geo type-signal type"
" combination is missing. Source data may be missing"
" for one or more dates"))
return False
# Reference dataframe runs backwards from the recent_cutoff_date
#
# These variables are interpolated into the call to `api_df_or_error.query()`
# below but pylint doesn't recognize that.
# pylint: disable=unused-variable
reference_start_date = recent_cutoff_date - self.params.max_check_lookbehind
if signal_type in self.params.smoothed_signals:
# Add an extra 7 days to the reference period.
reference_start_date = reference_start_date - \
timedelta(days=7)
reference_end_date = recent_cutoff_date - timedelta(days=1)
# pylint: enable=unused-variable
# Subset API data to relevant range of dates.
reference_api_df = api_df_or_error.query(
"time_value >= @reference_start_date & time_value <= @reference_end_date")
report.increment_total_checks()
if reference_api_df.empty:
report.add_raised_error(
ValidationFailure("empty_reference_data",
checking_date,
geo_type,
signal_type,
"reference data is empty; comparative checks could not "
"be performed"))
return False
reference_api_df = self.pad_reference_api_df(
reference_api_df, geo_sig_df, reference_end_date)
return (geo_sig_df, reference_api_df)
def pad_reference_api_df(self, reference_api_df, geo_sig_df, reference_end_date):
"""Check if API data is missing, and supplement from test data.
Arguments:
- reference_api_df: API data within lookbehind range
- geo_sig_df: Test data
- reference_end_date: Supposed end date of reference data
Returns:
- reference_api_df: Supplemented version of original
"""
reference_api_df_max_date = reference_api_df.time_value.max()
if reference_api_df_max_date < reference_end_date:
# Querying geo_sig_df, only taking relevant rows
geo_sig_df_supplement = geo_sig_df.query(
'time_value <= @reference_end_date & time_value > \
@reference_api_df_max_date')[[
"geo_id", "val", "se", "sample_size", "time_value"]]
# Matching time_value format
geo_sig_df_supplement["time_value"] = \
pd.to_datetime(geo_sig_df_supplement["time_value"],
format = "%Y-%m-%d %H:%M:%S")
reference_api_df = pd.concat(
[reference_api_df, geo_sig_df_supplement])
return reference_api_df
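# --- Hedged illustration (standalone sketch, not part of the validator class) ----------
# A small sketch of the padding behaviour above: when the API reference data stops before
# the intended reference_end_date, the missing days are copied over from the test data.
# The frames below are toy data, not real COVIDcast output.
import pandas as pd

_api = pd.DataFrame({"geo_id": ["01000"], "val": [1.0], "se": [0.1],
                     "sample_size": [100], "time_value": [pd.Timestamp("2021-05-01")]})
_test = pd.DataFrame({"geo_id": ["01000"] * 3, "val": [1.0, 2.0, 3.0], "se": [0.1] * 3,
                      "sample_size": [100] * 3,
                      "time_value": pd.to_datetime(["2021-05-01", "2021-05-02", "2021-05-03"])})
_end = pd.Timestamp("2021-05-03")
_api_max = _api["time_value"].max()
_supplement = _test.query("time_value <= @_end & time_value > @_api_max")[
    ["geo_id", "val", "se", "sample_size", "time_value"]]
_padded = pd.concat([_api, _supplement])
assert _padded["time_value"].max() == _end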
def check_max_date_vs_reference(self, df_to_test, df_to_reference, checking_date,
geo_type, signal_type, report):
"""
Check if reference data is more recent than test data.
Arguments:
- df_to_test: pandas dataframe of a single CSV of source data
(one day-signal-geo_type combo)
- df_to_reference: pandas dataframe of reference data, either from the
COVIDcast API or semirecent data
- checking_date: datetime date; the date whose data is being checked
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- None
"""
if df_to_test["time_value"].max() < df_to_reference["time_value"].max():
report.add_raised_error(
ValidationFailure("check_max_date_vs_reference",
checking_date,
geo_type,
signal_type,
"reference df has days beyond the max date in the =df_to_test="))
report.increment_total_checks()
def check_rapid_change_num_rows(self, df_to_test, df_to_reference, checking_date,
geo_type, signal_type, report):
"""
Compare the number of observations per day in the test dataframe vs the reference dataframe.
Arguments:
- df_to_test: pandas dataframe of CSV source data
- df_to_reference: pandas dataframe of reference data, either from the
COVIDcast API or semirecent data
- checking_date: datetime date
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- None
"""
test_rows_per_reporting_day = df_to_test[df_to_test['time_value']
== checking_date].shape[0]
reference_rows_per_reporting_day = df_to_reference.shape[0] / len(
set(df_to_reference["time_value"]))
try:
compare_rows = relative_difference_by_min(
test_rows_per_reporting_day,
reference_rows_per_reporting_day)
except ZeroDivisionError as e:
print(checking_date, geo_type, signal_type)
raise e
if abs(compare_rows) > 0.35:
report.add_raised_error(
ValidationFailure("check_rapid_change_num_rows",
checking_date,
geo_type,
signal_type,
"Number of rows per day seems to have changed rapidly (reference "
"vs test data)"))
report.increment_total_checks()
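# --- Hedged illustration (standalone sketch, not part of the validator class) ----------
# The check above relies on a helper `relative_difference_by_min` defined elsewhere in the
# package; the stand-in below assumes it computes (x - y) / min(x, y), which is consistent
# with how the 0.35 threshold is applied, but the real implementation may differ.
def _relative_difference_by_min(x, y):
    return (x - y) / min(x, y)

# 900 rows today vs an average of 1500 rows/day in the reference window: |-0.67| > 0.35,
# so this would be reported as a rapid change in the number of rows.
print(abs(_relative_difference_by_min(900, 1500)) > 0.35)  # True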
def check_positive_negative_spikes(self, source_df, api_frames, geo, sig, report):
"""
Adapt Dan's corrections package to Python (only consider spikes).
See https://github.com/cmu-delphi/covidcast-forecast/tree/dev/corrections/data_corrections
Statistics for a right shifted rolling window and a centered rolling window are used
to determine outliers for both positive and negative spikes.
As it is now, ststat will always be NaN for source frames.
Arguments:
- source_df: pandas dataframe of CSV source data
- api_frames: pandas dataframe of reference data, either from the
COVIDcast API or semirecent data
- geo: str; geo type name (county, msa, hrr, state) as in the CSV name
- sig: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
"""
report.increment_total_checks()
# Combine all possible frames so that the rolling window calculations make sense.
source_frame_start = source_df["time_value"].min()
# This variable is interpolated into the call to `add_raised_error()`
# below but pylint doesn't recognize that.
# pylint: disable=unused-variable
source_frame_end = source_df["time_value"].max()
# pylint: enable=unused-variable
all_frames = pd.concat([api_frames, source_df]). \
drop_duplicates(subset=["geo_id", "time_value"], keep='last'). \
sort_values(by=['time_value']).reset_index(drop=True)
# Tuned Variables from Dan's Code for flagging outliers. Size_cut is a
# check on the minimum value reported, sig_cut is a check
# on the ftstat or ststat reported (t-statistics) and sig_consec
# is a lower check for determining outliers that are next to each other.
size_cut, sig_cut, sig_consec = 5, 3, 2.25
# Functions mapped to rows to determine outliers based on fstat and ststat values
def outlier_flag(frame):
if (abs(frame["val"]) > size_cut) and not (pd.isna(frame["ststat"])) \
and (frame["ststat"] > sig_cut):
return True
if (abs(frame["val"]) > size_cut) and (pd.isna(frame["ststat"])) and \
not (pd.isna(frame["ftstat"])) and (frame["ftstat"] > sig_cut):
return True
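# --- Hedged illustration (standalone sketch, not part of the validator class) ----------
# A rough sketch of the rolling statistics the spike check is built on: a trailing
# ("right-shifted") window and a centered window, each yielding a t-like statistic of the
# current value against the window mean. Window size, seed values and the exact formula
# here are illustrative assumptions, not the tuned values from the corrections package.
import pandas as pd

_vals = pd.Series([10, 11, 9, 10, 60, 10, 11, 9, 10, 10], dtype=float)  # one obvious spike
_trail_mean = _vals.rolling(7, min_periods=3).mean().shift(1)
_trail_std = _vals.rolling(7, min_periods=3).std().shift(1)
_center_mean = _vals.rolling(7, min_periods=3, center=True).mean()
_center_std = _vals.rolling(7, min_periods=3, center=True).std()
_ftstat = (_vals - _trail_mean).abs() / _trail_std      # trailing-window statistic
_ststat = (_vals - _center_mean).abs() / _center_std    # centered-window statistic
print(int(_ftstat.idxmax()), int(_ststat.idxmax()))     # both point at index 4, the spike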
# Import libraries
import pandas  # import the pandas library
import time  # import the time library
import datetime  # import the datetime module
from datetime import datetime  # import the datetime class
import os  # import the os library
from termcolor import colored  # import the termcolor library
import sqlite3  # import the sqlite3 library
os.system('CLS')  # clear the terminal
# Section: load data from the SQLite database
"""-----------------------------------------------------------------------------------------------------------------------"""
conn = sqlite3.connect('./database.db')  # connect to the database
matrixpandas = pandas.read_sql_query("SELECT * FROM productos", conn)  # load the stock data from the database
matriz = matrixpandas.values.tolist()  # convert the DataFrame to a list of rows
registros = pandas.read_sql_query("SELECT * FROM registros", conn)  # load the records data from the database
registros = registros.values.tolist()  # convert the DataFrame to a list of rows
"""-----------------------------------------------------------------------------------------------------------------------"""
# Functions section
"""-----------------------------------------------------------------------------------------------------------------------"""
# function to print the product matrix
def print_data(matriz):
os.system('CLS')
print_matriz = pandas.DataFrame(
matriz, columns=["code", "name", "type", "stock", "repos", "price", "last_update"]) # generar la matriz en formato pandas
print("Imprimiendo matriz de datos...") # mensaje de impresion
time.sleep(1) # esperar 1 segundo
print(print_matriz) # imprimir la matriz de stock
print(" ")
decition = input(
"Cuando desee regresar al menú principal ingrese cualquier tecla: ") # volver al menu principal
os.system('CLS') # limpiar la terminal
time.sleep(1) # esperar 1 segundo
# function to print the records matrix
def print_registros(registros):
print_registros = pandas.DataFrame(
registros, columns=["code", "variacion", "motivo", "timestamp"]) # generar la matriz en formato pandas
print("Imprimiendo matriz de datos...") # mensaje de impresion
time.sleep(1) # esperar 1 segundo
print(print_registros) # imprimir la matriz de registros
print(" ")
decition = input(
"Cuando desee regresar al menú principal ingrese cualquier tecla: ") # volver al menu principal
os.system('CLS') # limpiar la terminal
time.sleep(1) # esperar 1 segundo
# function to look up a product's stock
def product_stock(matriz):
os.system("CLS") # limpiar la terminal
founded = False # variable para saber si se encontro el producto
stock = (input("Ingrese el código del producto a consultar stock: ")).upper() # capturar el codigo del producto a buscar
os.system('CLS') # limpiar la terminal
for i in range(len(matriz)): # recorrer la matriz
if stock == matriz[i][0]: # si se encontró el codigo del producto en la matriz
print("El stock actual del producto ", stock, "es: ", matriz[i][3]) # imprimir el stock del producto
founded = True # cambiar la variable a True
input("Ingrese cualquier tecla cuando desee volver al menu principal: ") # volver al menu principal
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
if founded == False: # si no se encontró el codigo del producto en la matriz
print("No se encontro el codigo") # mensaje de error
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
print(colored("- 1.", "blue", attrs=["bold"]), "Volver a intentar ") # mensaje de volver a intentar
print(colored("- 2.", "blue",
attrs=["bold"]), "Volver al menú principal") # mensaje de volver al menu principal
choose = (input("Ingrese una opción: ")).upper() # capturar la opcion
if choose == "1": # si la opcion es 1
product_stock(matriz) # volver a intentar
elif choose == "2": # si la opcion es 2
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
# function to filter products by category
def product_type(matriz):
type_product = input(
"Ingrese la categoria de producto por el que desea filtrar: ") # capturar el tipo de producto para filtrar
a = len(matriz) # obtener la longitud de la matriz
lista = list() # crear una lista
for i in range(a): # recorrer la matriz
if (matriz[i][2]).upper() == (type_product).upper(): # si el tipo de producto es igual al tipo de producto capturado
lista.append(matriz[i]) # agregar el producto a la lista
if len(lista) != 0:
c = pandas.DataFrame(
lista, columns=["code", "name", "type", "stock", "repos", "price", "last_update"]) # generar la matriz en formato pandas
os.system('CLS') # limpiar la terminal
print(c) # imprimir la matriz de productos
print(" ")
decition = input(
"Cuando desee regresar al menú principal ingrese cualquier tecla: ") # volver al menu principal
os.system('CLS') # limpiar la terminal
time.sleep(1) # esperar 1 segundo
else:
print("No se encontraron productos con ese tipo") # mensaje de error
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
print(colored("- 1.", "blue", attrs=["bold"]), "Volver a intentar ") # mensaje de volver a intentar
print(colored("- 2.", "blue",
attrs=["bold"]), "Volver al menú principal") # mensaje de volver al menu principal
choose = (input("Ingrese una opción: ")).upper() # capturar la opcion
if choose == "1": # si la opcion es 1
product_type(matriz) # volver a intentar
elif choose == "2": # si la opcion es 2
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
# function to get the current date and time
def get_current_time():
time_update = datetime.now() # obtener la fecha y hora actual
now = time_update.strftime("%d/%m/%Y %H:%M:%S") # formatear la fecha y hora actual
return now # retornar fecha
# function to alert when a product needs restocking
def alert(matriz):
time.sleep(0.2) # esperar 0.2 segundos
os.system("CLS") # limpiar la terminal
to_repos = list() # crear una lista para los productos a reponer
codes_to_repos = list() # crear una lista para los codigos de los productos a reponer
for i in range(len(matriz)): # recorrer la matriz
if int(matriz[i][3]) <= int(matriz[i][4]): # si el stock es menor o igual al reposicion
to_repos.append(matriz[i]) # agregar el producto a la lista
codes_to_repos.append(matriz[i][0]) # agregar el codigo del producto a la lista
to_repos = pandas.DataFrame(to_repos, columns=["code", "name", "type", "stock", "repos", "price", "last_update"]) # generar la matriz en formato pandas
if len(codes_to_repos) > 0: # si hay productos a reponer
print("Los codigos a reponer son: ") # mensaje de los codigos a reponer
for i in codes_to_repos: # recorrer la lista de codigos a reponer
print(i, end=" ") # imprimir los codigos a reponer
print("")
print("-----------------------------")
print(" ")
print(to_repos) # imprimir la matriz de productos a reponer
print(" ")
a = input("Ingrese una tecla cuando desee volver al menu principal: ") # volver al menu principal
os.system('CLS') # limpiar la terminal
else:
print("No hay ningun codigo a reponer por el momento.") # mensaje de error
os.system('CLS') # limpiar la terminal
# function to add a new product
def add_new_product(matriz):
new_product = list() # crear una lista para almacenar los datos del nuevo producto
code = input("Ingresa el codigo del producto que desea agregar: ") # capturar el codigo del producto
name = input("Ingresa el nombre del producto que va a agregar: ") # capturar el nombre del producto
type_product = input("Ingresa la categoria del producto: ") # capturar el tipo de producto
stock = int(input("Ingresa el stock inicial del producto, puede ser 0: ")) # capturar el stock inicial del producto
reposition = int(input("Punto de reposicion del producto: ")) # capturar el punto de reposicion del producto
price = input("Ingresa el precio del producto: ") # capturar el precio del producto
new_product.append(code.upper()) # agregar el codigo al nuevo producto
new_product.append(name) # agregar el nombre al nuevo producto
new_product.append(type_product) # agregar el tipo de producto al nuevo producto
new_product.append(stock) # agregar el stock al nuevo producto
new_product.append(reposition) # agregar el punto de reposicion al nuevo producto
new_product.append(price) # agregar el precio al nuevo producto
new_product.append(get_current_time()) # agregar la fecha y hora actual al nuevo producto
matriz.append(new_product) # agregar el nuevo producto a la matriz
print("El producto " + code.upper() + " fue agregado") # mensaje de confirmacion
time.sleep(2) # esperar 2 segundos
os.system('CLS') # limpiar la terminal
df = pandas.DataFrame(matriz) # generar la matriz en formato pandas
df.to_sql('productos', conn, if_exists='replace', index=False) # almacenar la matriz de stock en la base de datos
ajuste = [code.upper(), "Se añadió un producto",
"Producto agregado", get_current_time()] # crear una lista para almacenar los datos del ajuste
registros.append(ajuste) # agregar el ajuste a la matriz de registros
df = pandas.DataFrame(registros) # generar la matriz en formato pandas
df.to_sql('registros', conn, if_exists='replace', index=False) # almacenar la matriz de registros en la base de datos
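# --- Hedged illustration (standalone sketch, not part of the inventory program) --------
# Minimal sketch of the persistence pattern used above: a list of rows is wrapped in a
# DataFrame, written with to_sql(if_exists='replace'), and read back with read_sql_query.
# An in-memory SQLite database is used here so the example does not touch database.db;
# the table contents are made up.
import sqlite3
import pandas

_conn = sqlite3.connect(":memory:")
_rows = [["A1", "widget", "tools", 10, 3, "5.50", "01/01/2024 10:00:00"]]
_cols = ["code", "name", "type", "stock", "repos", "price", "last_update"]
pandas.DataFrame(_rows, columns=_cols).to_sql("productos", _conn, if_exists="replace", index=False)
_back = pandas.read_sql_query("SELECT * FROM productos", _conn)
print(_back.values.tolist())  # [['A1', 'widget', 'tools', 10, 3, '5.50', '01/01/2024 10:00:00']]
_conn.close()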
# function to delete a product
def delete_product(matriz):
long = len(matriz) # obtener la longitud de la matriz
eliminated = False # variable para saber si se elimino un producto
code_eliminate = input(
"Ingresa el codigo del producto que quieres eliminar: ") # capturar el codigo del producto a eliminar
for i in range(long): # recorrer la matriz
try:
pos = matriz[i][0].index(code_eliminate) # obtener la posicion del codigo capturado
name1 = matriz[i][1] # obtener el nombre del producto
print("El producto ", name1, " fue encontrado, eliminando...") # mensaje de código encontrado
matriz.pop(i) # eliminar el producto de la matriz
time.sleep(1) # esperar 1 segundo
print("El producto fue eliminado") # mensaje de confirmacion
time.sleep(1.5) # esperar 1.5 segundos
os.system('CLS') # limpiar la terminal
eliminated = True # cambiar la variable a True
except:
continue
if eliminated == False: # si no se eliminó ningun producto
print("El codigo no es correcto") # mensaje de error
df = pandas.DataFrame(matriz) # generar la matriz en formato pandas
df.to_sql('productos', conn, if_exists='replace', index=False) # almacenar la matriz de stock en la base de datos
ajuste = "Se borro el producto"
motivo = "Producto eliminado"
ajuste = [code_eliminate, ajuste, motivo, get_current_time()] # crear una lista para almacenar los datos del ajuste
registros.append(ajuste) # agregar el ajuste a la matriz de registros
df = pandas.DataFrame(registros) # generar la matriz de registros en formato pandas
df.to_sql('registros', conn, if_exists='replace', index=False) # almacenar la matriz de registros en la base de datos
# function to modify a product's stock
def modificate_stock(matriz, code_modified):
time.sleep(0.5) # esperar 0.5 segundos
long = len(matriz) # obtener la longitud de la matriz
os.system("CLS") # limpiar la terminal
os.system("CLS") # limpiar la terminal
code_founded = False # variable para saber si se encontro el codigo
for i in range(long): # recorrer la matriz
try:
pos = matriz[i][0].index(code_modified) # obtener la posicion del codigo capturado
pos_change = i # obtener la posicion del producto a modificar
code_founded = True # cambiar la variable a True
print(f"Se encontro el producto {matriz[pos_change][1]}...") # mensaje de confirmacion de encontrado
time.sleep(2) # esperar 2 segundos
os.system("CLS") # limpiar la terminal
except:
continue
print(colored("- 1.", "blue", attrs=["bold"]), "Aumentar stock") # mensaje de opcion 1
print(colored("- 2.", "blue", attrs=["bold"]), "Disminuir stock") # mensaje de opcion 2
print(colored("- 3.", "blue", attrs=["bold"]), # mensaje de opcion 3
"Ajuste por perdida de stock")
egressingress = (input("Ingrese una opción: ")).upper() # capturar la opcion del usuario
os.system("CLS") # limpiar la terminal
if egressingress == "1" and code_founded == True or egressingress == "AUMENTAR" and code_founded == True: # si la opcion es 1 y el codigo fue encontrado
actual_stock = int(matriz[pos_change][3]) # obtener el stock actual del producto
time.sleep(1) # esperar 1 segundo
print(f"El stock actual de {code_modified} es: ", actual_stock) # mensaje de stock actual
increase = int(
input(f"Cuanto stock desea agregar al stock de {code_modified}: ")) # capturar el stock a aumentar
suma = actual_stock + increase # sumar el stock actual mas el stock a aumentar
suma = str(suma) # convertir el stock a string
matriz[pos_change][3] = suma # cambiar el stock del producto
matriz[pos_change][6] = get_current_time() # cambiar la fecha y hora de modificacion del producto
df = pandas.DataFrame(matriz) # generar la matriz en formato pandas
df.to_sql('productos', conn, if_exists='replace', index=False) # almacenar la matriz de stock en la base de datos
ajuste = "+" + str(increase)
motivo = "Ingreso de stock"
ajuste = [code_modified, ajuste, motivo, get_current_time()] # crear una lista para almacenar los datos del ajuste
registros.append(ajuste) # agregar el ajuste a la matriz de registros
df = pandas.DataFrame(registros) # generar la matriz de registros en formato pandas
df.to_sql('registros', conn, if_exists='replace', index=False) # almacenar la matriz de registros en la base de datos
time.sleep(2) # esperar 2 segundos
print(
f"El stock de {code_modified} ha sido modificado, ahora es: {matriz[pos_change][3]}") # mensaje de confirmacion de modificacion
time.sleep(2) # esperar 2 segundos
os.system("CLS") # limpiar la terminal
elif egressingress == "2" and code_founded == True or egressingress == "DISMINUIR" and code_founded == True: # si la opcion es 2 y el codigo fue encontrado
actual_stock = int(matriz[pos_change][3]) # obtener el stock actual del producto
print(
f"El stock actual de {code_modified} producto es: ", actual_stock) # mensaje de stock actual
time.sleep(1) # esperar 1 segundo
decrease = int(
input(f"Cuanto stock desea restar al stock de {code_modified}: ")) # capturar el stock a disminuir
resta = actual_stock - decrease # restar el stock actual menos el stock a disminuir
resta = str(resta) # convertir el stock a string
matriz[pos_change][3] = resta # cambiar el stock del producto
matriz[pos_change][6] = get_current_time() # cambiar la fecha de modificacion
print(
f"El stock de {code_modified} ha sido modificado, ahora es: {matriz[pos_change][3]}") # mensaje de confirmacion de modificacion
time.sleep(2) # esperar 2 segundos
df = pandas.DataFrame(matriz) # generar la matriz en formato pandas
df.to_sql('productos', conn, if_exists='replace', index=False) # almacenar la matriz de stock en la base de datos
ajuste = "-" + str(decrease)
motivo = "Egreso de stock"
ajuste = [code_modified, ajuste, motivo, get_current_time()] # crear una lista para almacenar los datos del ajuste
registros.append(ajuste) # agregar el ajuste a la matriz de registros
df = pandas.DataFrame(registros) # generar la matriz de registros en formato pandas
df.to_sql('registros', conn, if_exists='replace', index=False) # almacenar la matriz de registros en la base de datos
time.sleep(2) # esperar 2 segundos
os.system("CLS") # limpiar la terminal
elif egressingress == "3" and code_founded == True: # si la opcion es 3 y el codigo fue encontrado
actual_stock = int(matriz[pos_change][3]) # obtener el stock actual del producto
print(
f"El stock actual de {code_modified} producto es: ", actual_stock) # mensaje de stock actual
time.sleep(1) # esperar 1 segundo
ajustar = int(input(f"Cuanto stock se extravio de {code_modified}: ")) # capturar el stock a ajustar
motivo = input("Motivo del ajuste: ") # capturar el motivo del ajuste
os.system("CLS") # limpiar la terminal
print("Vamos a modificar el stock restando lo que se perdio, y lo que tiene que volver a enviar al cliente. ¿Es usted conciente?") # mensaje de confirmacion
print(colored("- 1.", "blue", attrs=["bold"]), "Si") # opcion si
print(colored("- 2.", "blue", attrs=["bold"]), "No") # opcion no
choose = (input("Ingrese una opción: ")).upper() # capturar la opcion
if choose == "1": # si la opcion es 1
mod = actual_stock - (ajustar+ajustar) # modificar el stock
mod = str(mod) # convertir el stock a string
ajuste = "-"+str(ajustar+ajustar) # crear string del ajuste
matriz[pos_change][3] = mod # cambiar el stock del producto
os.system("CLS") # limpiar la terminal
ajuste = [code_modified, ajuste, motivo, get_current_time()] # crear una lista para almacenar los datos del ajuste
registros.append(ajuste) # agregar el ajuste a la matriz de registros
print(
f"Ahora el stock de {code_modified} es: ", (matriz[pos_change][3])) # mensaje de confirmacion de modificacion
print(f"Ajuste de {code_modified} realizado con exito") # mensaje de confirmacion de ajuste
df = pandas.DataFrame(registros)  # build the records matrix as a pandas DataFrame
df.to_sql('registros', conn, if_exists='replace', index=False)  # store the records matrix in the database
from ..pyhrp.tools.distancematrices import CorrDistance, PortfolioDistance, LTDCDistance
from math import fabs, sqrt, log
import pandas as pd
import numpy as np
def test_CorrDistance_get_distance_matrix():
d = {'A': [0.5, 1, 0], 'B': [0, -1, 0.5], 'C':[-0.5, 1, 0]}
df = pd.DataFrame(data=d)
corr = df.corr()
zoc = CorrDistance(corr)
zoc_matrix = zoc.get_distance_matrix()
assert fabs(zoc_matrix.iloc[0,0] - 0) < 1e-5
assert fabs(zoc_matrix.iloc[1,0] - 0.995485057) < 1e-5
assert fabs(zoc_matrix.iloc[2,0] - 0.415539408) < 1e-5
assert fabs(zoc_matrix.iloc[2,1] - 0.944911106) < 1e-5
def test_PortfolioDistance_get_distance_matrix():
d = {'A': [0.5, 1, 0], 'B': [0, -1, 0.5], 'C':[-0.5, 1, 0]}
df = pd.DataFrame(data=d)
corr = df.corr()
portd = PortfolioDistance(corr)
portd_matrix = portd.get_distance_matrix()
assert fabs(portd_matrix.iloc[0,0] - 0) < 1e-5
assert fabs(portd_matrix.iloc[0,1] - sqrt(((0 - 0.995485057)**2 + (0.995485057 - 0)**2 + (0.415539408 - 0.944911106)**2))) < 1e-5
def test_ZOCDistance_get_compressed_distance_matrix():
d = {'A': [0.5, 1, 0], 'B': [0, -1, 0.5], 'C':[-0.5, 1, 0]}
df = pd.DataFrame(data=d)
corr = df.corr()
zoc = CorrDistance(corr)
zoc_matrix = zoc.get_distance_matrix()
zoc_matrix_compressed = zoc.get_compressed_distance_matrix()
m = zoc_matrix.shape[0]
for i in range(0, m):
for j in range(i+1, m):
assert fabs(zoc_matrix.iloc[i,j] - zoc_matrix_compressed[int(m*i + j - (((i+2)*(i+1))/2))]) < 1e-5
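# --- Hedged illustration (standalone sketch, not part of the test suite) ---------------
# The index expression m*i + j - (i+2)*(i+1)/2 used above maps the (i, j) entry of an
# m x m distance matrix (i < j) to its position in the condensed upper-triangular vector,
# i.e. the same layout scipy.spatial.distance.squareform uses. scipy is assumed to be
# available here; a quick check on a 4 x 4 example:
import numpy as np
from scipy.spatial.distance import squareform

_m = 4
_full = np.array([[0, 1, 2, 3],
                  [1, 0, 4, 5],
                  [2, 4, 0, 6],
                  [3, 5, 6, 0]], dtype=float)
_condensed = squareform(_full)
for _i in range(_m):
    for _j in range(_i + 1, _m):
        _k = int(_m * _i + _j - ((_i + 2) * (_i + 1)) / 2)
        assert _condensed[_k] == _full[_i, _j]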
def test_PORTDistance_get_compressed_distance_matrix():
d = {'A': [0.5, 1, 0], 'B': [0, -1, 0.5], 'C':[-0.5, 1, 0]}
df = pd.DataFrame(data=d)
'''
pyjade
A program to export, curate, and transform data from the MySQL database used by the Jane Addams Digital Edition.
'''
import os
import re
import sys
import json
import string
import datetime
import mysql.connector
from diskcache import Cache
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from tqdm import tqdm
from safeprint import print
'''
Options
'''
try: # Options file setup credit <NAME>
with open(os.path.join('options.json')) as env_file:
ENV = json.loads(env_file.read())
except:
print('"Options.json" not found; please add "options.json" to the current directory.')
'''
SQL Connection
'''
DB = mysql.connector.connect(
host=ENV['SQL']['HOST'],
user=ENV['SQL']['USER'],
passwd=ENV['SQL']['PASSWORD'],
database=ENV['SQL']['DATABASE']
)
CUR = DB.cursor(buffered=True)
'''
Setup
'''
BEGIN = datetime.datetime.now()
TS = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
ITEM_ELEMENTS = ENV['ELEMENT_DICTIONARY']['DCTERMS_IN_USE']
ITEM_ELEMENTS.update(ENV['ELEMENT_DICTIONARY']['DESC_JADE_ELEMENTS'])
TYPES = ENV['ELEMENT_DICTIONARY']['TYPES']
OUT_DIR = 'outputs/'
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
DATASET_OPTIONS = ENV['DATASET_OPTIONS']
CRUMBS = DATASET_OPTIONS['EXPORT_SEPARATE_SQL_CRUMBS']
PROP_SET_LIST = DATASET_OPTIONS['PROPERTIES_TO_INCLUDE_FOR_EACH_TYPE']
INCLUDE_PROPS = DATASET_OPTIONS['PROPERTIES_TO_INCLUDE_FOR_EACH_TYPE']
class Dataset():
def __init__(self):
'''
Start building the dataset objects by pulling IDs and types from omek_items
'''
statement = '''
SELECT omek_items.id as item_id, omek_item_types.`name` as 'jade_type', collection_id as 'jade_collection' FROM omek_items
JOIN omek_item_types on omek_items.item_type_id = omek_item_types.id
WHERE public = 1
ORDER BY item_id;
'''
self.omek_items = pd.read_sql(statement, DB)
import pandas as pd
from assistants.compliance.util import fields
from assistants.compliance.util.input_data_utility import completed_to_due_vector
dt_new_mogl = pd.Timestamp(2020, 9, 15) # September 2020 MOGL changes
def fix_some_dates(data: pd.DataFrame) -> pd.DataFrame:
"""Fix Some Dates.
Make sure if we have old M1 / M1EX then we also have Safety and Safeguarding
Make sure M1, M1EX, Trustee Intro, GDPR, First Aid, Safety & Safeguarding are applied as appropriate
We use pandas (vector) operations - slower but easier to understand!
"""
m1_m1ex_min = "min_mod_1"
data[m1_m1ex_min] = data[[fields.MOD_01, fields.MOD_01_EX]].min(axis=1)
g = data.groupby(fields.MEMBERSHIP_NUMBER)
# Given M01Ex validates M01, check that both are blank
m_01_and_m01_ex_unset = data[fields.MOD_01].isna() & data[fields.MOD_01_EX].isna()
# Where a given role has a blank for M01 or M01Ex and that member has a valid value for that
# training type, fill it in
# rolling_exclude is a crude attempt at an elif/switch chain in boolean logic
rolling_exclude = pd.Series(False, index=data.index)
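# --- Hedged illustration (standalone sketch, not part of the compliance module) --------
# A minimal sketch, on made-up data, of the "fill a role's blank training date from the
# member's other roles" idea described above: group by membership number and broadcast
# the earliest known completion date onto rows where it is missing. The column names are
# simplified stand-ins for the constants in the fields module.
import pandas as pd

_df = pd.DataFrame({
    "membership_number": [1, 1, 2],
    "mod_01": [pd.Timestamp("2019-03-01"), pd.NaT, pd.NaT],
})
_df["mod_01"] = _df["mod_01"].fillna(
    _df.groupby("membership_number")["mod_01"].transform("min")
)
print(_df["mod_01"].isna().tolist())  # [False, False, True] - member 2 has no date anywhere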
#!/usr/bin/env python
# -*- coding: utf-8; -*-
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
from __future__ import print_function, absolute_import
import os
import re
import warnings
import oci
import datetime
import pandas as pd
from IPython.core.display import display
from fsspec.utils import infer_storage_options
import inspect
import fsspec
from ads.common import utils
from ads.common.utils import is_same_class
from ads.dataset import logger
from ads.dataset.classification_dataset import (
BinaryClassificationDataset,
MultiClassClassificationDataset,
BinaryTextClassificationDataset,
MultiClassTextClassificationDataset,
)
from ads.dataset.dataset import ADSDataset
from ads.dataset.forecasting_dataset import ForecastingDataset
from ads.dataset.helper import (
get_feature_type,
is_text_data,
generate_sample,
DatasetDefaults,
ElaboratedPath,
DatasetLoadException,
)
from ads.dataset.regression_dataset import RegressionDataset
from ads.type_discovery.type_discovery_driver import TypeDiscoveryDriver
from ads.type_discovery.typed_feature import (
ContinuousTypedFeature,
DateTimeTypedFeature,
CategoricalTypedFeature,
OrdinalTypedFeature,
GISTypedFeature,
DocumentTypedFeature,
)
from ads.type_discovery.typed_feature import TypedFeature
from typing import Callable, Tuple
from ocifs import OCIFileSystem
from ads.common.decorator.runtime_dependency import runtime_dependency
default_snapshots_dir = None
default_storage_options = None
mindate = datetime.date(datetime.MINYEAR, 1, 1)
class DatasetFactory:
@staticmethod
def open(
source,
target=None,
format="infer",
reader_fn: Callable = None,
name: str = None,
description="",
npartitions: int = None,
type_discovery=True,
html_table_index=None,
column_names="infer",
sample_max_rows=10000,
positive_class=None,
transformer_pipeline=None,
types={},
**kwargs,
):
"""
Returns an object of ADSDataset or ADSDatasetWithTarget read from the given path
Parameters
----------
source: Union[str, pandas.DataFrame, h2o.DataFrame, pyspark.sql.dataframe.DataFrame]
If str, URI for the dataset. The dataset could be read from local or network file system, hdfs, s3, gcs and optionally pyspark in pyspark
conda env
target: str, optional
Name of the target in dataset.
If set an ADSDatasetWithTarget object is returned, otherwise an ADSDataset object is returned which can be
used to understand the dataset through visualizations
format: str, default: infer
Format of the dataset.
Supported formats: CSV, TSV, Parquet, libsvm, JSON, XLS/XLSX (Excel), HDF5, SQL, XML,
Apache server log files (clf, log), ARFF.
By default, the format would be inferred from the ending of the dataset file path.
reader_fn: Callable, default: None
The user may pass in their own custom reader function.
It must accept `(path, **kwarg)` and return a pandas DataFrame
name: str, optional default: ""
description: str, optional default: ""
Text describing the dataset
npartitions: int, deprecated
Number of partitions to split the data
By default this is set to the max number of cores supported by the backend compute accelerator
type_discovery: bool, default: True
If false, the data types of the dataframe are used as such.
By default, the dataframe columns are associated with the best suited data types. Associating the features
with the discovered datatypes would impact visualizations and model prediction.
html_table_index: int, optional
The index of the dataframe table in html content. This is used when the format of dataset is html
column_names: 'infer', list of str or None, default: 'infer'
Supported only for CSV and TSV.
List of column names to use.
By default, column names are inferred from the first line of the file.
If set to None, column names would be auto-generated instead of inferring from file.
If the file already contains a column header, specify header=0 to ignore the existing column names.
sample_max_rows: int, default: 10000, use -1 auto calculate sample size, use 0 (zero) for no sampling
Sample size of the dataframe to use for visualization and optimization.
positive_class: Any, optional
Label in target for binary classification problems which should be identified as positive for modeling.
By default, the first unique value is considered as the positive label.
types: dict, optional
Dictionary of <feature_name> : <data_type> to override the data type of features.
transformer_pipeline: datasets.pipeline.TransformerPipeline, optional
A pipeline of transformations done outside the sdk and need to be applied at the time of scoring
storage_options: dict, default: varies by source type
Parameters passed on to the backend filesystem class.
sep: str
Delimiting character for parsing the input file.
kwargs: additional keyword arguments that would be passed to underlying dataframe read API
based on the format of the dataset
Returns
-------
dataset : An instance of ADSDataset
(or)
dataset_with_target : An instance of ADSDatasetWithTarget
Examples
--------
>>> ds = DatasetFactory.open("/path/to/data.data", format='csv', delimiter=" ",
... na_values="n/a", skipinitialspace=True)
>>> ds = DatasetFactory.open("/path/to/data.csv", target="col_1", prefix="col_",
... skiprows=1, encoding="ISO-8859-1")
>>> ds = DatasetFactory.open("oci://bucket@namespace/path/to/data.tsv",
... column_names=["col1", "col2", "col3"], header=0)
>>> ds = DatasetFactory.open("oci://bucket@namespace/path/to/data.csv",
... storage_options={"config": "~/.oci/config",
... "profile": "USER_2"}, delimiter = ';')
>>> ds = DatasetFactory.open("/path/to/data.parquet", engine='pyarrow',
... types={"col1": "ordinal",
... "col2": "categorical",
... "col3" : "continuous",
... "col4" : "float64"})
>>> ds = DatasetFactory.open(df, target="class", sample_max_rows=5000,
... positive_class="yes")
>>> ds = DatasetFactory.open("s3://path/to/data.json.gz", format="json",
... compression="gzip", orient="records")
"""
if npartitions:
warnings.warn(
"Variable `npartitions` is deprecated and will not be used",
DeprecationWarning,
stacklevel=2,
)
if (
"storage_options" not in kwargs
and type(source) is str
and len(source) > 6
and source[:6] == "oci://"
):
kwargs["storage_options"] = {"config": {}}
if isinstance(source, str) or isinstance(source, list):
progress = utils.get_progress_bar(4)
progress.update("Opening data")
path = ElaboratedPath(source, format=format, **kwargs)
reader_fn = (
get_format_reader(path=path, **kwargs)
if reader_fn is None
else reader_fn
)
df = load_dataset(path=path, reader_fn=reader_fn, **kwargs)
name = path.name
elif isinstance(source, pd.DataFrame):
progress = utils.get_progress_bar(4)
progress.update("Partitioning data")
df = source
name = "User Provided DataFrame" if name is None else name
else:
raise TypeError(
f"The Source type: {type(source)} is not supported for DatasetFactory."
)
shape = df.shape
return DatasetFactory._build_dataset(
df=df,
shape=shape,
target=target,
sample_max_rows=sample_max_rows,
type_discovery=type_discovery,
types=types,
positive_class=positive_class,
name=name,
transformer_pipeline=transformer_pipeline,
description=description,
progress=progress,
**utils.inject_and_copy_kwargs(
kwargs,
**{"html_table_index": html_table_index, "column_names": column_names},
),
)
@staticmethod
def open_to_pandas(
source: str, format: str = None, reader_fn: Callable = None, **kwargs
) -> pd.DataFrame:
path = ElaboratedPath(source, format=format, **kwargs)
reader_fn = (
get_format_reader(path=path, **kwargs) if reader_fn is None else reader_fn
)
df = load_dataset(path=path, reader_fn=reader_fn, **kwargs)
return df
@staticmethod
def from_dataframe(df, target: str = None, **kwargs):
"""
Returns an object of ADSDatasetWithTarget or ADSDataset given a pandas.DataFrame
Parameters
----------
df: pandas.DataFrame
target: str
kwargs: dict
See DatasetFactory.open() for supported kwargs
Returns
-------
dataset: an object of ADSDataset target is not specified, otherwise an object of ADSDatasetWithTarget tagged
according to the type of target
Examples
--------
>>> df = pd.DataFrame(data)
>>> ds = from_dataframe(df)
"""
return DatasetFactory.open(df, target=target, **kwargs)
@staticmethod
@runtime_dependency(
module="ipywidgets",
object="HTML",
is_for_notebook_only=True,
install_from="oracle-ads[notebook]",
)
def list_snapshots(snapshot_dir=None, name="", storage_options=None, **kwargs):
"""
Displays the URIs for dataset snapshots under the given directory path.
Parameters
----------
snapshot_dir: str
Return all dataset snapshots created using ADSDataset.snapshot() within this directory.
The path can contain protocols such as oci, s3.
name: str, optional
The list of snapshots in the directory gets filtered by the name. Accepts glob expressions.
default = `"ads_"`
storage_options: dict
Parameters passed on to the backend filesystem class.
Example
--------
>>> DatasetFactory.list_snapshots(snapshot_dir="oci://my_bucket/snapshots_dir",
... name="ads_iris_")
Returns a list of all snapshots (recursively) saved to obj storage bucket `"my_bucket"` with prefix
`"/snapshots_dir/ads_iris_**"` sorted by time created.
"""
if snapshot_dir is None:
snapshot_dir = default_snapshots_dir
if snapshot_dir is None:
raise ValueError(
"Specify snapshot_dir or use DatasetFactory.set_default_storage() to set default \
storage options"
)
else:
logger.info("Using default snapshots dir %s" % snapshot_dir)
if storage_options is None:
if default_storage_options is not None:
storage_options = default_storage_options
logger.info("Using default storage options")
else:
storage_options = dict()
assert isinstance(storage_options, dict), (
"The storage options parameter must be a dictionary. You can set "
"this gloabally by calling DatasetFactory.set_default_storage("
"storage_options={'config': 'location'}). "
)
url_options = infer_storage_options(snapshot_dir)
protocol = url_options.pop("protocol", None)
fs = OCIFileSystem(config=storage_options.get("config", None))
kwargs.update({"refresh": True})
obj_list = [
(k, v.get("timeCreated", mindate).strftime("%Y-%m-%d %H:%M:%S"))
for k, v in fs.glob(
os.path.join(snapshot_dir, name + "**"), detail=True, **kwargs
).items()
if v["type"] == "file"
]
files = []
for file, file_time in obj_list:
if protocol in ["oci"]:
r1 = re.compile(r"/part\.[0-9]{1,6}\.parquet$")
parquet_part = r1.search(file)
if parquet_part is not None:
parquet_filename = file[: parquet_part.start()]
elif file.endswith("/_common_metadata"):
parquet_filename = file[: -len("/_common_metadata")]
elif file.endswith("/_metadata"):
parquet_filename = file[: -len("/_metadata")]
else:
parquet_filename = file
else:
parquet_filename = file
parent_path = "%s://" % protocol
files.append((parent_path + parquet_filename, file_time))
files.sort(key=lambda x: x[1] or mindate, reverse=True)
list_df = pd.DataFrame(files, columns=["Name", "Created Time"])
list_df = list_df.drop_duplicates(subset=["Name"]).reset_index()
if len(list_df) == 0:
print(f"No snapshots found at: {os.path.join(snapshot_dir, name)}")
# display in HTML format if sdk is run in notebook mode
if utils.is_notebook():
display(
HTML(
list_df.style.set_table_attributes("class=table")
.hide_index()
.render()
)
)
return list_df
@staticmethod
def download(remote_path, local_path, storage=None, overwrite=False):
"""
Download a remote file or directory to local storage.
Parameters
---------
remote_path: str
Supports protocols like oci, s3, also supports glob expressions
local_path: str
Supports glob expressions
storage: dict
Parameters passed on to the backend remote filesystem class.
overwrite: bool, default False
If True, the method will overwrite any existing files in the local_path
Examples
---------
>>> DatasetFactory.download("oci://Bucket/prefix/to/data/*.csv",
... "/home/datascience/data/")
"""
if storage is None:
if default_storage_options is not None:
storage = default_storage_options
logger.info("Using default storage options")
else:
storage = dict()
remote_files = fsspec.open_files(
remote_path, mode="rb", name_function=lambda i: "", **storage
)
if len(remote_files) < 1:
raise FileNotFoundError(remote_path)
display_error, error_msg = DatasetFactory._download_files(
remote_files=remote_files, local_path=local_path, overwrite=overwrite
)
if display_error:
logger.error(error_msg)
else:
logger.info(f"Download {remote_path} to {local_path}.")
@staticmethod
def _download_files(remote_files, local_path, overwrite=False):
display_error, error_msg = False, ""
for remote_file in remote_files:
bucket_idx = remote_file.path.find("/")
suffix = remote_file.path[bucket_idx + 1 :]
try:
with remote_file as f1:
local_filepath = (
os.path.join(local_path, suffix) if suffix else local_path
)
if os.path.exists(local_filepath) and not overwrite:
raise FileExistsError(
f"Trying to overwrite files in {local_filepath}. If you'd like to "
f"overwrite these files, set force_overwrite to True."
)
os.makedirs(os.path.dirname(local_filepath), exist_ok=True)
with open(local_filepath, "wb") as f2:
f2.write(f1.read())
except oci.exceptions.ServiceError as e:
raise FileNotFoundError(f"Unable to open file: {remote_file.path}")
return display_error, error_msg
@staticmethod
def upload(local_file_or_dir, remote_file_or_dir, storage_options=None):
"""
Upload local file or directory to remote storage
Parameters
---------
local_file_or_dir: str
Supports glob expressions
remote_file_or_dir: str
Supports protocols like oci, s3, also supports glob expressions
storage_options: dict
Parameters passed on to the backend remote filesystem class.
"""
if not os.path.exists(local_file_or_dir):
raise ValueError("File/Directory does not exist: %s" % local_file_or_dir)
if storage_options is None and default_storage_options is not None:
storage_options = default_storage_options
logger.info("Using default storage options")
if os.path.isdir(local_file_or_dir):
for subdir, dirs, files in os.walk(local_file_or_dir):
for file in files:
if os.path.abspath(subdir) == os.path.abspath(local_file_or_dir):
path = file
else:
path = os.path.join(
os.path.abspath(subdir).split("/", 2)[2], file
)
DatasetFactory._upload_file(
os.path.join(subdir, file),
os.path.join(remote_file_or_dir, path),
storage_options=storage_options,
)
else:
DatasetFactory._upload_file(
local_file_or_dir, remote_file_or_dir, storage_options=storage_options
)
@staticmethod
def set_default_storage(snapshots_dir=None, storage_options=None):
"""
Set default storage directory and options.
Both snapshots_dir and storage_options can be overridden at the API scope.
Parameters
----------
snapshots_dir: str
Path for the snapshots directory. Can contain protocols such as oci, s3
storage_options: dict, optional
Parameters passed on to the backend filesystem class.
"""
global default_snapshots_dir
default_snapshots_dir = snapshots_dir
global default_storage_options
if storage_options is not None:
assert isinstance(storage_options, dict), (
f"The storage options parameter must be a dictionary. Instead "
f"we got the type: {type(storage_options)} "
)
default_storage_options = storage_options
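# --- Hedged usage sketch (comments only, not part of the class) ------------------------
# Typical use of the method above: set the defaults once so later open()/snapshot() calls
# can omit them. The bucket, namespace and profile names below are placeholders.
#   DatasetFactory.set_default_storage(
#       snapshots_dir="oci://<bucket>@<namespace>/snapshots/",
#       storage_options={"config": "~/.oci/config", "profile": "DEFAULT"},
#   )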
@classmethod
def _upload_file(cls, local_file, remote_file, storage_options=None):
kwargs = {}
if storage_options is not None:
kwargs = {"storage_options": storage_options}
remote_file_handler = fsspec.open_files(
remote_file + "*", mode="wb", name_function=lambda i: "", **kwargs
)[0]
with remote_file_handler as f1:
with open(local_file, "rb") as f2:
for line in f2:
f1.write(line)
print("Uploaded %s to %s" % (local_file, remote_file))
@classmethod
def _build_dataset(
cls,
df: pd.DataFrame,
shape: Tuple[int, int],
target: str = None,
progress=None,
**kwargs,
):
n = shape[0]
if progress:
progress.update("Generating data sample")
sampled_df = generate_sample(
df,
n,
DatasetDefaults.sampling_confidence_level,
DatasetDefaults.sampling_confidence_interval,
**kwargs,
)
if target is None:
if progress:
progress.update("Building the dataset with no target.")
result = ADSDataset(df=df, sampled_df=sampled_df, shape=shape, **kwargs)
if progress:
progress.update("Done")
logger.info(
"Use `set_target()` to type the dataset for a particular learning task."
)
return result
if progress:
progress.update("Building dataset")
discover_target_type = kwargs["type_discovery"]
if target in kwargs["types"]:
sampled_df[target] = sampled_df[target].astype(kwargs["types"][target])
discover_target_type = False
# if type discovery is turned off, infer type from pandas dtype
target_type = DatasetFactory.infer_target_type(
target, sampled_df[target], discover_target_type
)
result = DatasetFactory._get_dataset(
df=df,
sampled_df=sampled_df,
target=target,
target_type=target_type,
shape=shape,
**kwargs,
)
if progress:
progress.update("Done")
logger.info(
"Use `suggest_recommendations()` to view and apply recommendations for dataset optimization."
)
return result
@classmethod
def infer_target_type(cls, target, target_series, discover_target_type=True):
# if type discovery is turned off, infer type from pandas dtype
if discover_target_type:
target_type = TypeDiscoveryDriver().discover(
target, target_series, is_target=True
)
else:
target_type = get_feature_type(target, target_series)
return target_type
@classmethod
def _get_dataset(
cls,
df: pd.DataFrame,
sampled_df: pd.DataFrame,
target: str,
target_type: TypedFeature,
shape: Tuple[int, int],
positive_class=None,
**init_kwargs,
):
if len(df[target].dropna()) == 0:
logger.warning(
"It is not recommended to use an empty column as the target variable."
)
raise ValueError(
f"We do not support using empty columns as the chosen target"
)
if is_same_class(target_type, ContinuousTypedFeature):
return RegressionDataset(
df=df,
sampled_df=sampled_df,
target=target,
target_type=target_type,
shape=shape,
**init_kwargs,
)
elif is_same_class(
target_type, DateTimeTypedFeature
) or df.index.dtype.name.startswith("datetime"):
return ForecastingDataset(
df=df,
sampled_df=sampled_df,
target=target,
target_type=target_type,
shape=shape,
**init_kwargs,
)
# Adding ordinal typed feature, but ultimately we should rethink how we want to model this type
elif is_same_class(target_type, CategoricalTypedFeature) or is_same_class(
target_type, OrdinalTypedFeature
):
if target_type.meta_data["internal"]["unique"] == 2:
if is_text_data(sampled_df, target):
return BinaryTextClassificationDataset(
df=df,
sampled_df=sampled_df,
target=target,
shape=shape,
target_type=target_type,
positive_class=positive_class,
**init_kwargs,
)
return BinaryClassificationDataset(
df=df,
sampled_df=sampled_df,
target=target,
shape=shape,
target_type=target_type,
positive_class=positive_class,
**init_kwargs,
)
else:
if is_text_data(sampled_df, target):
return MultiClassTextClassificationDataset(
df=df,
sampled_df=sampled_df,
target=target,
target_type=target_type,
shape=shape,
**init_kwargs,
)
return MultiClassClassificationDataset(
df=df,
sampled_df=sampled_df,
target=target,
target_type=target_type,
shape=shape,
**init_kwargs,
)
elif (
is_same_class(target, DocumentTypedFeature)
or "text" in target_type["type"]
or "text" in target
):
raise ValueError(
f"The column {target} cannot be used as the target column."
)
elif (
is_same_class(target_type, GISTypedFeature)
or "coord" in target_type["type"]
or "coord" in target
):
raise ValueError(
f"The column {target} cannot be used as the target column."
)
# This is to catch constant columns that are boolean. Added as a fix for pd.isnull(), and datasets with a
# binary target, but only data on one instance
elif target_type["low_level_type"] == "bool":
return BinaryClassificationDataset(
df=df,
sampled_df=sampled_df,
target=target,
shape=shape,
target_type=target_type,
positive_class=positive_class,
**init_kwargs,
)
raise ValueError(
f"Unable to identify problem type. Specify the data type of {target} using 'types'. "
f"For example, types = {{{target}: 'category'}}"
)
class CustomFormatReaders:
@staticmethod
def read_tsv(path: str, **kwargs) -> pd.DataFrame:
return pd.read_csv(
path, **utils.inject_and_copy_kwargs(kwargs, **{"sep": "\t"})
)
@staticmethod
def read_json(path: str, **kwargs) -> pd.DataFrame:
try:
return pd.read_json(path, **kwargs)
except ValueError as e:
return pd.read_json(
path, **utils.inject_and_copy_kwargs(kwargs, **{"lines": True})
)
@staticmethod
def read_libsvm(path: str, **kwargs) -> pd.DataFrame:
from sklearn.datasets import load_svmlight_file
from joblib import Memory
mem = Memory("./mycache")
@mem.cache
def get_data(path):
X, y = load_svmlight_file(path)
df = pd.DataFrame(X.todense())
df["target"] = y
return df
return get_data(path)
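# --- Hedged illustration (standalone sketch, not part of the class) --------------------
# read_libsvm above wraps sklearn's load_svmlight_file and memoizes the parse on disk via
# joblib.Memory("./mycache"). The round trip below writes a tiny svmlight file to the
# working directory and loads it back the same way; the file name is arbitrary.
import numpy as np
import pandas as pd
from sklearn.datasets import dump_svmlight_file, load_svmlight_file

_X = np.array([[1.0, 0.0, 2.0], [0.0, 3.0, 0.0]])
_y = np.array([0, 1])
dump_svmlight_file(_X, _y, "tiny.libsvm")
_X2, _y2 = load_svmlight_file("tiny.libsvm")
_df = pd.DataFrame(_X2.todense())
_df["target"] = _y2
print(_df.shape)  # (2, 4): three feature columns plus the target column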
@staticmethod
@runtime_dependency(
module="pandavro", object="read_avro", install_from="oracle-ads[data]"
)
def read_avro(path: str, **kwargs) -> pd.DataFrame:
return read_avro(path, **kwargs)
DEFAULT_SQL_CHUNKSIZE = 12007
DEFAULT_SQL_ARRAYSIZE = 50000
DEFAULT_SQL_MIL = 128
DEFAULT_SQL_CTU = False
@classmethod
def read_sql(cls, path: str, table: str = None, **kwargs) -> pd.DataFrame:
"""
:param path: str
This is the connection URL that gets passed to sqlalchemy's create_engine method
:param table: str
This is either the name of a table to select * from or a sql query to be run
:param kwargs:
:return: pd.DataFrame
"""
if table is None:
raise ValueError(
"In order to read from a database you need to specify the table using the `table` "
"argument."
)
# check if it's oracle dialect
if str(path).lower().startswith("oracle"):
kwargs = utils.inject_and_copy_kwargs(
kwargs,
**{
"arraysize": cls.DEFAULT_SQL_ARRAYSIZE,
"max_identifier_length": cls.DEFAULT_SQL_MIL,
"coerce_to_unicode": cls.DEFAULT_SQL_CTU,
},
)
engine = utils.get_sqlalchemy_engine(path, **kwargs)
table_name = table.strip()
with engine.connect() as connection:
# if it's a query expression:
if table_name.lower().startswith("select"):
sql_query = table_name
else:
sql_query = f"select * from {table_name}"
chunks = pd.read_sql_query(
sql_query,
con=connection,
**_validate_kwargs(
pd.read_sql_query,
utils.inject_and_copy_kwargs(
kwargs, **{"chunksize": cls.DEFAULT_SQL_CHUNKSIZE}
),
),
)
df = pd.DataFrame()
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import pandas as pd  # needed for pd.to_numeric below
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function: batsman4s
# This function plots the number of 4s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman4s(file, name="A Hookshot"):
'''
Plot the numbers of 4s against the runs scored by batsman
Description
This function plots the number of 4s against the total runs scored by batsman. A 2nd order polynomial regression curve is also plotted. The predicted number of 4s for 50 runs and 100 runs scored is also plotted
Usage
batsman4s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsman6s
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
tendulkar = getPlayerData(35320, dir="../", file="tendulkar.csv", type="batting", homeOrAway=[1,2], result=[1,2,4])
'''
# Clean the batsman file and create a complete data frame
df = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Get numnber of 4s and runs scored
x4s = pd.to_numeric(df['4s'])
runs = pd.to_numeric(df['Runs'])
atitle = name + "-" + "Runs scored vs No of 4s"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(runs, x4s, alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('4s')
plt.title(atitle)
# Create a polynomial of degree 2
poly = PolynomialFeatures(degree=2)
runsPoly = poly.fit_transform(runs.values.reshape(-1,1))
linreg = LinearRegression().fit(runsPoly,x4s)
plt.plot(runs,linreg.predict(runsPoly),'-r')
# Predict the number of 4s for 50 runs
b=poly.fit_transform(np.array([[50]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=50, color='b', linestyle=':')
# Predict the number of 4s for 100 runs
b=poly.fit_transform(np.array([[100]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=100, color='b', linestyle=':')
plt.text(180, 0.5,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
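# --- Hedged illustration (standalone sketch, synthetic data) ---------------------------
# The same fit-and-predict pattern batsman4s uses: a degree-2 polynomial feature expansion,
# a linear fit of 4s against runs, and point predictions at 50 and 100 runs. The data here
# is randomly generated, not a real batting record.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

_rng = np.random.default_rng(0)
_runs = _rng.integers(0, 200, size=80).reshape(-1, 1).astype(float)
_fours = (_runs / 12 + _rng.normal(0, 1.5, size=_runs.shape)).clip(min=0)
_poly = PolynomialFeatures(degree=2)
_model = LinearRegression().fit(_poly.fit_transform(_runs), _fours)
for _score in (50, 100):
    _pred = _model.predict(_poly.transform(np.array([[_score]])))
    print(f"predicted 4s at {_score} runs: {_pred.item():.1f}")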
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the number of 6s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman6s(file, name="A Hookshot") :
'''
Description
Compute and plot the number of 6s in the total runs scored by batsman
Usage
batsman6s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
'''
x6s = []
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
df = clean (file)
# Remove all rows where 6s are 0
a= df['6s'] !=0
b= df[a]
x6s=b['6s'].astype(int)
runs=pd.to_numeric(b['Runs'])
# Plot the 6s as a boxplot
atitle =name + "-" + "Runs scored vs No of 6s"
df1=pd.concat([runs,x6s],axis=1)
fig = sns.boxplot(x="6s", y="Runs", data=df1)
plt.title(atitle)
plt.text(2.2, 10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsGround
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsGround(file, name="A Latecut"):
'''
Description
This function computed the Average Runs scored on different pitches and also indicates the number of innings played at these venues
Usage
batsmanAvgRunsGround(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
##tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Ground']].groupby('Ground').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs at Ground"
plt.xticks(rotation='vertical')
plt.axhline(y=50, color='b', linestyle=':')
plt.axhline(y=100, color='r', linestyle=':')
ax=sns.barplot(x='Ground', y="Runs_mean", data=df1)
plt.title(atitle)
plt.text(30, 180,'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
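# --- Hedged illustration (standalone sketch, synthetic data) ---------------------------
# The groupby/agg step above produces a MultiIndex on the columns; joining the two levels
# with '_' gives flat names such as Runs_mean before plotting. A tiny reproduction:
import pandas as pd

_scores = pd.DataFrame({"Ground": ["Eden Gardens", "Eden Gardens", "Lord's"],
                        "Runs": [120, 30, 75]})
_agg = _scores[["Runs", "Ground"]].groupby("Ground").agg(["sum", "mean", "count"])
_agg.columns = ["_".join(col).strip() for col in _agg.columns.values]
print(_agg.reset_index())
# Expected columns: Ground, Runs_sum, Runs_mean, Runs_count
# Eden Gardens -> 150, 75.0, 2 and Lord's -> 75, 75.0, 1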
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsOpposition
# This function plots the average runs scored by batsman versus the opposition. The xlabels indicate
# the Opposition and the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsOpposition(file, name="A Latecut"):
'''
This function computes and plots the Average runs against different opposition played by batsman
Description
This function computes the mean runs scored by batsman against different opposition
Usage
batsmanAvgRunsOpposition(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanAvgRunsGround
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Opposition']].groupby('Opposition').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs vs Opposition"
plt.xticks(rotation='vertical')
ax=sns.barplot(x='Opposition', y="Runs_mean", data=df1)
plt.axhline(y=50, color='b', linestyle=':')
plt.title(atitle)
plt.text(5, 50, 'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanContributionWonLost
# This plots the batsman's contribution to won and lost matches
#
###########################################################################################
def batsmanContributionWonLost(file,name="A Hitter"):
'''
Display the batsman's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the batsman in matches that were won and lost as box plots
Usage
batsmanContributionWonLost(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarsp = getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanContributionWonLost(tendulkarsp,"<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create a column based on result
won = playersp[playersp['result'] == 1].copy()
lost = playersp[(playersp['result']==2) | (playersp['result']==4)].copy()
# .copy() avoids pandas' SettingWithCopyWarning when the status column is added below
won['status']="won"
lost['status']="lost"
# Stack dataframes
df= pd.concat([won,lost])
df['Runs']= pd.to_numeric(df['Runs'])
ax = sns.boxplot(x='status',y='Runs',data=df)
atitle = name + " - Runs in games won/lost-drawn"
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
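# A small sketch of the label-and-stack pattern used above (toy numbers only):
#   won  = pd.DataFrame({'Runs': [120, 45]}); won['status'] = "won"
#   lost = pd.DataFrame({'Runs': [12, 30]}); lost['status'] = "lost"
#   both = pd.concat([won, lost])
#   sns.boxplot(x='status', y='Runs', data=both)   # one box per status value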
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeAverageRuns
# This function computes and plots the cumulative average runs by a batsman
#
###########################################################################################
def batsmanCumulativeAverageRuns(file,name="A Leg Glance"):
'''
Batsman's cumulative average runs
Description
This function computes and plots the cumulative average runs of a batsman
Usage
batsmanCumulativeAverageRuns(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeStrikeRate bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
# retrieve the file path of a data file installed with cricketr
batsmanCumulativeAverageRuns(pathToFile, "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
# Compute cumulative average
cumAvg = runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)
atitle = name + "- Cumulative Average vs No of innings"
plt.plot(cumAvg)
plt.xlabel('Innings')
plt.ylabel('Cumulative average')
plt.title(atitle)
plt.text(200,20,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
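# For NaN-free data the running average above (cumsum divided by 1..n) matches
# pandas' built-in expanding mean, e.g.:
#   runs = pd.Series([10, 30, 50])
#   runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)   # 10, 20, 30
#   runs.expanding().mean()                                          # same values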
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeStrikeRate
# This function computes and plots the cumulative average strike rate of a batsman
#
###########################################################################################
def batsmanCumulativeStrikeRate(file,name="A Leg Glance"):
'''
Batsman's cumulative average strike rate
Description
This function computes and plots the cumulative average strike rate of a batsman
Usage
batsmanCumulativeStrikeRate(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
batsmanCumulativeStrikeRate(pathToFile, "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
strikeRate=pd.to_numeric(batsman['SR'])
# Compute cumulative strike rate
cumStrikeRate = strikeRate.cumsum()/pd.Series(np.arange(1, len(strikeRate)+1), strikeRate.index)
atitle = name + "- Cumulative Strike rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Strike Rate')
plt.title(atitle)
plt.plot(cumStrikeRate)
plt.text(200,60,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanDismissals
# This function plots the batsman dismissals
#
###########################################################################################
def batsmanDismissals(file, name="A Squarecut"):
'''
Display a 3D Pie Chart of the dismissals of the batsman
Description
Display the dismissals of the batsman (caught, bowled, hit wicket etc) as percentages
Usage
batsmanDismissals(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
batsmanDismissals(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
d = batsman['Dismissal']
# Convert to data frame
df = pd.DataFrame(d)
df1=df['Dismissal'].groupby(df['Dismissal']).count()
df2 = | pd.DataFrame(df1) | pandas.DataFrame |
# import libraries
import pandas as pd
# load a file from disk into memory
data = pd.read_csv('data/kc_house_data.csv')
# show the first rows on screen
# print(data.head())
# converts the column from object (string) -> date
data['date'] = | pd.to_datetime(data['date']) | pandas.to_datetime |
import sqlite3
import pandas as pd
import pandas.io.sql as psql
import ast
import hashlib
import sys
import random
from sqlalchemy import create_engine
import sqlalchemy.types as dtype
import requests
# message: |channel|user|text| <= ts
# coin : val <= username
slack_columns = ["ts", "text", "user", "channel"]
slack_types = dict(zip(slack_columns, [
lambda x: int(x.replace(".", "")), str, str, str
]))
slack_columns_types = dict(zip(slack_columns, [
dtype.INT(), dtype.TEXT(), dtype.NVARCHAR(length=9), dtype.NVARCHAR(length=9)
]))
slack_db_name = "all.db"
gacha_required_jewel = 100
def parse_dict(dic, columns=slack_columns, types=slack_types):
res = []
for column in columns:
if column not in dic:
return None
res.append(types[column](dic[column]))
return res
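# A quick sketch of what parse_dict returns for a typical Slack event dict
# (field values below are made up):
#   event = {"ts": "1514764800.000200", "text": "hi", "user": "U123", "channel": "C456"}
#   parse_dict(event)          # -> [1514764800000200, 'hi', 'U123', 'C456']
#   parse_dict({"ts": "1.0"})  # -> None, because required fields are missing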
def log2dataframe(filename):
def is_prettyprint(l):
start = l.startswith(" ") or l.startswith("{")
end = l.endswith(",\n") or l.endswith("'\n") or l.endswith("\"\n")
return start and end
datas = []
with open(filename) as f:
chunkline = ""
for line in f.readlines():
if is_prettyprint(line):
chunkline += line
continue
if chunkline:
if line.startswith("{"):
chunkline = ""
line = chunkline + line
chunkline = ""
else:
if not line.startswith("{"):
continue
if not line.endswith("}\n") and not line.endswith("}"):
continue
try:
data = parse_dict(ast.literal_eval(line), slack_columns)
if data:
datas.append(data)
except Exception as e:
print("############ERROR#############")
print(e)
print(line)
return datas
def add_dataframe(conn, df, tablename="message"):
if conn.has_table(tablename):
# check whether this ts already exists
ts = str(df["ts"][0])
a = conn.execute("select count(*) from %s where ts=?" % tablename, ts)
if a.fetchone()[0]:
return
psql.to_sql(df, "message", conn, index=False,
if_exists="append", dtype=slack_columns_types)
def log_slack(info):
try:
conn = create_engine("sqlite:///" + slack_db_name)
data = parse_dict(info, slack_columns, slack_types)
if not data:
return
data = [data]
df = | pd.DataFrame(data, columns=slack_columns, index=None) | pandas.DataFrame |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.utils import resample
from sklearn.metrics import accuracy_score, log_loss,roc_auc_score
from sklearn.metrics import precision_recall_fscore_support
from sklearn.ensemble import RandomForestClassifier
import pickle
from training import dataProcessAndTraining
model = dataProcessAndTraining(dataFile="export_dataframeTrainingRandom260419.csv",round_or_True="TrueCount")
data = model.dataProcess()
normalizerTrueCount = data["normalizer"]
model = dataProcessAndTraining(dataFile="export_dataframeTrainingRandom260419.csv",round_or_True="RoundCount")
data = model.dataProcess()
normalizerRoundCount = data["normalizer"]
thorpDF = | pd.read_csv('thorp.csv') | pandas.read_csv |
import pandas as pd
import os
def prepare_legends(mean_models, models, interpretability_name):
bars = []
y_pos = []
index_bars = 0
for nb, i in enumerate(mean_models):
if nb % len(models) == int(len(models)/2):
bars.append(interpretability_name[index_bars])
index_bars += 1
else:
bars.append('')
if nb < len(mean_models)/len(interpretability_name):
y_pos.append(nb)
elif nb < 2*len(mean_models)/len(interpretability_name):
y_pos.append(nb+1)
elif nb < 3*len(mean_models)/len(interpretability_name):
y_pos.append(nb+2)
elif nb < 4*len(mean_models)/len(interpretability_name):
y_pos.append(nb+3)
elif nb < 5*len(mean_models)/len(interpretability_name):
y_pos.append(nb+4)
else:
y_pos.append(nb+5)
colors = ['black', 'red', 'green', 'blue', 'yellow', 'grey', 'purple', 'cyan', 'gold', 'brown']
color= []
for nb, model in enumerate(models):
color.append(colors[nb])
return color, bars, y_pos
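# A small sketch of the shapes prepare_legends returns (model/method names invented):
#   models = ['RF', 'MLP']; methods = ['LIME', 'Anchors', 'DT']
#   color, bars, y_pos = prepare_legends(list(range(6)), models, methods)
#   # color -> ['black', 'red']                        one colour per model
#   # bars  -> ['', 'LIME', '', 'Anchors', '', 'DT']   label at the middle of each group
#   # y_pos -> [0, 1, 3, 4, 6, 7]                      a gap between method groups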
class store_experimental_informations(object):
"""
Class to store the experimental results of precision, coverage and F1 score for graph representation
"""
def __init__(self, len_models, len_interpretability_name, columns_name_file1, nb_models, columns_name_file2=None, columns_name_file3=None, columns_multimodal=None):
"""
Initialize all the variable that will be used to store experimental results
Args: len_models: Number of black box models that we are explaining during experiments
len_interpretability_name: Number of explanation methods used to explain each model
interpretability_name: List of the name of the explanation methods used to explain each model
"""
columns_name_file2 = columns_name_file1 if columns_name_file2 is None else columns_name_file2
columns_name_file3 = columns_name_file1 if columns_name_file3 is None else columns_name_file3
columns_name_file4 = columns_name_file1
self.multimodal_columns = ["LS", "LSe log", "LSe lin", "Anchors", 'APE SI', 'APE CF', 'APE FOLD', 'APE FULL', 'APE FULL pvalue', "DT", "Multimodal",
"radius", "fr pvalue", "cf pvalue", "separability", "fr fold",
"cf fold", "SI bon", "CF bon", "fold bon", "ape bon", "ape pvalue bon", "bb"] if columns_multimodal == None else columns_multimodal
self.columns_name_file1 = columns_name_file1
self.columns_name_file2 = columns_name_file2
self.columns_name_file3 = columns_name_file3
self.columns_name_file4 = columns_name_file4
self.len_interpretability_name, self.len_models, = len_interpretability_name, len_models
self.nb_models = nb_models - 1
self.pd_all_models_results1 = pd.DataFrame(columns=columns_name_file1)
self.pd_all_models_results2 = pd.DataFrame(columns=columns_name_file2)
self.pd_all_models_results3 = pd.DataFrame(columns=columns_name_file3)
self.pd_all_models_results4 = pd.DataFrame(columns=columns_name_file4)
self.pd_all_models_multimodal = pd.DataFrame(columns=self.multimodal_columns)
def initialize_per_models(self, filename):
self.filename = filename
os.makedirs(os.path.dirname(self.filename), exist_ok=True)
self.pd_results1 = pd.DataFrame(columns=self.columns_name_file1)
self.pd_results2 = pd.DataFrame(columns=self.columns_name_file2)
self.pd_results3 = | pd.DataFrame(columns=self.columns_name_file3) | pandas.DataFrame |
"""
Data: Temperature and Salinity time series from SIO Scripps Pier
Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m)
Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m)
- Timestamp included beginning in 1990
"""
# imports
import sys,os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from scipy import signal
# read in temp and sal files
sal_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27)
temp_data = | pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_hydrofunctions
----------------------------------
Tests for `hydrofunctions` module.
"""
from __future__ import (
absolute_import,
print_function,
division,
unicode_literals,
)
from unittest import mock
import unittest
import warnings
from pandas.testing import assert_frame_equal
import pandas as pd
import numpy as np
import pyarrow as pa
import json
import hydrofunctions as hf
from .fixtures import (
fakeResponse,
daily_dupe,
daily_dupe_altered,
tzfail,
JSON15min2day,
two_sites_two_params_iv,
nothing_avail,
mult_flags,
diff_freq,
startDST,
endDST,
recent_only,
)
class TestHydrofunctionsParsing(unittest.TestCase):
"""Test the parsing of hf.extract_nwis_df()
test the following:
Can it handle multiple qualifier flags?
how does it encode mult params & mult sites?
Does it raise HydroNoDataError if nothing returned?
"""
def test_hf_extract_nwis_df_accepts_response_obj(self):
fake_response = fakeResponse()
actual_df, actual_dict = hf.extract_nwis_df(fake_response, interpolate=False)
self.assertIsInstance(
actual_df, pd.core.frame.DataFrame, msg="Did not return a df"
)
self.assertIsInstance(actual_dict, dict, msg="Did not return a dict.")
def test_hf_extract_nwis_df_parse_multiple_flags(self):
actual_df, actual_dict = hf.extract_nwis_df(mult_flags, interpolate=False)
self.assertIsInstance(
actual_df, pd.core.frame.DataFrame, msg="Did not return a df"
)
self.assertIsInstance(actual_dict, dict, msg="Did not return a dict.")
def test_hf_extract_nwis_df_parse_two_sites_two_params_iv_returns_df_and_dict(self):
actual_df, actual_dict = hf.extract_nwis_df(
two_sites_two_params_iv, interpolate=False
)
self.assertIs(
type(actual_df), pd.core.frame.DataFrame, msg="Did not return a df"
)
self.assertIs(type(actual_dict), dict, msg="Did not return a dict.")
# TODO: test that data is organized correctly
def test_hf_extract_nwis_df_parse_two_sites_two_params_iv_return_df(self):
actual_df, actual_dict = hf.extract_nwis_df(
two_sites_two_params_iv, interpolate=False
)
actual_len, actual_width = actual_df.shape
self.assertIs(
type(actual_df), pd.core.frame.DataFrame, msg="Did not return a df"
)
self.assertEqual(actual_len, 93, "Wrong length for dataframe")
self.assertEqual(actual_width, 8, "Wrong width for dataframe")
expected_columns = [
"USGS:01541000:00060:00000",
"USGS:01541000:00060:00000_qualifiers",
"USGS:01541000:00065:00000",
"USGS:01541000:00065:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541200:00065:00000_qualifiers",
]
actual_columns = actual_df.columns.values
self.assertCountEqual(
actual_columns, expected_columns, "column names don't match expected"
)
self.assertTrue(actual_df.index.is_unique, "index has repeated values.")
self.assertTrue(actual_df.index.is_monotonic, "index is not monotonic.")
def test_hf_extract_nwis_df_parse_JSON15min2day_return_df(self):
actual_df, actual_dict = hf.extract_nwis_df(JSON15min2day, interpolate=False)
actual_len, actual_width = actual_df.shape
self.assertIs(
type(actual_df), pd.core.frame.DataFrame, msg="Did not return a df"
)
self.assertEqual(actual_len, 192, "Wrong length for dataframe")
self.assertEqual(actual_width, 2, "Wrong width for dataframe")
expected_columns = [
"USGS:03213700:00060:00000",
"USGS:03213700:00060:00000_qualifiers",
]
actual_columns = actual_df.columns.values
self.assertCountEqual(
actual_columns, expected_columns, "column names don't match expected"
)
self.assertTrue(actual_df.index.is_unique, "index has repeated values.")
self.assertTrue(actual_df.index.is_monotonic, "index is not monotonic.")
def test_hf_extract_nwis_df_parse_mult_flags_return_df(self):
actual_df, actual_dict = hf.extract_nwis_df(mult_flags, interpolate=False)
actual_len, actual_width = actual_df.shape
self.assertIs(
type(actual_df), pd.core.frame.DataFrame, msg="Did not return a df"
)
self.assertEqual(actual_len, 480, "Wrong length for dataframe")
self.assertEqual(actual_width, 2, "Wrong width for dataframe")
expected_columns = [
"USGS:01542500:00060:00000",
"USGS:01542500:00060:00000_qualifiers",
]
actual_columns = actual_df.columns.values
self.assertCountEqual(
actual_columns, expected_columns, "column names don't match expected"
)
self.assertTrue(actual_df.index.is_unique, "index has repeated values.")
self.assertTrue(actual_df.index.is_monotonic, "index is not monotonic.")
def test_hf_extract_nwis_raises_exception_when_df_is_empty(self):
empty_response = {"value": {"timeSeries": []}}
with self.assertRaises(hf.HydroNoDataError):
hf.extract_nwis_df(empty_response, interpolate=False)
def test_hf_extract_nwis_raises_exception_when_df_is_empty_nothing_avail(self):
with self.assertRaises(hf.HydroNoDataError):
hf.extract_nwis_df(nothing_avail, interpolate=False)
@unittest.skip(
"assertWarns errors on Linux. See https://bugs.python.org/issue29620"
)
def test_hf_extract_nwis_warns_when_diff_series_have_diff_freq(self):
with self.assertWarns(hf.HydroUserWarning):
hf.extract_nwis_df(diff_freq, interpolate=False)
def test_hf_extract_nwis_accepts_no_startdate_no_period_no_interpolate(self):
actual_df, actual_dict = hf.extract_nwis_df(recent_only, interpolate=False)
expected_shape = (
2,
4,
) # only the most recent data for two parameters, plus qualifiers = 4 columns; 2 rows: different dates.
self.assertEqual(
actual_df.shape,
expected_shape,
"The dataframe should have four columns and two rows.",
)
def test_hf_extract_nwis_accepts_no_startdate_no_period_interpolate(self):
actual_df, actual_dict = hf.extract_nwis_df(recent_only, interpolate=True)
expected_shape = (
2,
4,
) # only the most recent data for two parameters, plus qualifiers = 4 columns; 2 rows: different dates.
self.assertEqual(
actual_df.shape,
expected_shape,
"The dataframe should have four columns and two rows.",
)
def test_hf_extract_nwis_returns_comma_separated_qualifiers_1(self):
actual_df, actual_dict = hf.extract_nwis_df(mult_flags, interpolate=False)
actual_flags_1 = actual_df.loc[
"2019-01-24T10:30:00.000-05:00", "USGS:01542500:00060:00000_qualifiers"
]
expected_flags_1 = "P,e"
self.assertEqual(
actual_flags_1,
expected_flags_1,
"The data qualifier flags were not parsed correctly.",
)
def test_hf_extract_nwis_returns_comma_separated_qualifiers_2(self):
actual_df, actual_dict = hf.extract_nwis_df(mult_flags, interpolate=False)
actual_flags_2 = actual_df.loc[
"2019-01-28T16:00:00.000-05:00", "USGS:01542500:00060:00000_qualifiers"
]
expected_flags_2 = "P,Ice"
self.assertEqual(
actual_flags_2,
expected_flags_2,
"The data qualifier flags were not parsed correctly.",
)
def test_hf_extract_nwis_replaces_NWIS_noDataValue_with_npNan(self):
actual_df, actual_dict = hf.extract_nwis_df(mult_flags, interpolate=False)
actual_nodata = actual_df.loc[
"2019-01-28T16:00:00.000-05:00", "USGS:01542500:00060:00000"
]
self.assertTrue(
np.isnan(actual_nodata),
"The NWIS no data value was not replaced with np.nan. ",
)
def test_hf_extract_nwis_adds_missing_tags(self):
actual_df, actual_dict = hf.extract_nwis_df(mult_flags, interpolate=False)
actual_missing = actual_df.loc[
"2019-01-24 17:00:00-05:00", "USGS:01542500:00060:00000_qualifiers"
]
self.assertEqual(
actual_missing,
"hf.missing",
"Missing records should be given 'hf.missing' _qualifier tags.",
)
def test_hf_extract_nwis_adds_upsample_tags(self):
actual_df, actual_dict = hf.extract_nwis_df(diff_freq, interpolate=False)
actual_upsample = actual_df.loc[
"2018-06-01 00:15:00-04:00", "USGS:01570500:00060:00000_qualifiers"
]
self.assertEqual(
actual_upsample,
"hf.upsampled",
"New records created by upsampling should be given 'hf.upsample' _qualifier tags.",
)
def test_hf_extract_nwis_interpolates(self):
actual_df, actual_dict = hf.extract_nwis_df(diff_freq, interpolate=True)
actual_upsample_interpolate = actual_df.loc[
"2018-06-01 00:15:00-04:00", "USGS:01570500:00060:00000"
]
self.assertEqual(
actual_upsample_interpolate,
42200.0,
"New records created by upsampling should have NaNs replaced with interpolated values.",
)
@unittest.skip("This feature is not implemented yet.")
def test_hf_extract_nwis_interpolates_and_adds_tags(self):
# Ideally, every data value that was interpolated should have a tag
# added to the qualifiers that says it was interpolated.
actual_df, actual_dict = hf.extract_nwis_df(diff_freq, interpolate=True)
actual_upsample_interpolate_flag = actual_df.loc[
"2018-06-01 00:15:00-04:00", "USGS:01570500:00060:00000_qualifiers"
]
expected_flag = "hf.interpolated"
self.assertEqual(
actual_upsample_interpolate_flag,
expected_flag,
"Interpolated values should be marked with a flag.",
)
def test_hf_extract_nwis_corrects_for_start_of_DST(self):
actual_df, actual_dict = hf.extract_nwis_df(startDST, interpolate=False)
actual_len, width = actual_df.shape
expected = 284
self.assertEqual(
actual_len,
expected,
"Three days including the start of DST should have 3 * 24 * 4 = 288 observations, minus 4 = 284",
)
def test_hf_extract_nwis_corrects_for_end_of_DST(self):
actual_df, actual_dict = hf.extract_nwis_df(endDST, interpolate=False)
actual_len, width = actual_df.shape
expected = 292
self.assertEqual(
actual_len,
expected,
"Three days including the end of DST should have 3 * 24 * 4 = 288 observations, plus 4 = 292",
)
def test_hf_extract_nwis_can_find_tz_in_tzfail(self):
actualDF = hf.extract_nwis_df(tzfail, interpolate=False)
def test_hf_extract_nwis_can_deal_with_duplicated_records_as_input(self):
actualDF = hf.extract_nwis_df(daily_dupe, interpolate=False)
def test_hf_extract_nwis_can_deal_with_duplicated_records_that_have_been_altered_as_input(
self,
):
# What happens if a scientist replaces an empty record with new
# estimated data, and forgets to discard the old data?
actualDF = hf.extract_nwis_df(daily_dupe_altered, interpolate=False)
def test_hf_get_nwis_property(self):
sites = None
bBox = (-105.430, 39.655, -104, 39.863)
# TODO: test should be the json for a multiple site request.
names = hf.get_nwis_property(JSON15min2day, key="name")
self.assertIs(type(names), list, msg="Did not return a list")
class TestHydrofunctions(unittest.TestCase):
@mock.patch("requests.get")
def test_hf_get_nwis_calls_correct_url(self, mock_get):
"""
Thanks to
http://engineroom.trackmaven.com/blog/making-a-mockery-of-python/
"""
site = "A"
service = "iv"
start = "C"
end = "D"
expected_url = "https://waterservices.usgs.gov/nwis/" + service + "/?"
expected_headers = {"Accept-encoding": "gzip", "max-age": "120"}
expected_params = {
"format": "json,1.1",
"sites": "A",
"stateCd": None,
"countyCd": None,
"bBox": None,
"parameterCd": None,
"period": None,
"startDT": "C",
"endDT": "D",
}
expected = fakeResponse()
expected.status_code = 200
expected.reason = "any text"
mock_get.return_value = expected
actual = hf.get_nwis(site, service, start, end)
mock_get.assert_called_once_with(
expected_url, params=expected_params, headers=expected_headers
)
self.assertEqual(actual, expected)
@mock.patch("requests.get")
def test_hf_get_nwis_calls_correct_url_multiple_sites(self, mock_get):
site = ["site1", "site2"]
parsed_site = hf.check_parameter_string(site, "site")
service = "iv"
start = "C"
end = "D"
expected_url = "https://waterservices.usgs.gov/nwis/" + service + "/?"
expected_headers = {"max-age": "120", "Accept-encoding": "gzip"}
expected_params = {
"format": "json,1.1",
"sites": parsed_site,
"stateCd": None,
"countyCd": None,
"bBox": None,
"parameterCd": None,
"period": None,
"startDT": "C",
"endDT": "D",
}
expected = fakeResponse()
expected.status_code = 200
expected.reason = "any text"
mock_get.return_value = expected
actual = hf.get_nwis(site, service, start, end)
mock_get.assert_called_once_with(
expected_url, params=expected_params, headers=expected_headers
)
self.assertEqual(actual, expected)
@mock.patch("requests.get")
def test_hf_get_nwis_service_defaults_dv(self, mock_get):
site = "01541200"
expected_service = "dv"
expected_url = "https://waterservices.usgs.gov/nwis/" + expected_service + "/?"
expected_headers = {"max-age": "120", "Accept-encoding": "gzip"}
expected_params = {
"format": "json,1.1",
"sites": site,
"stateCd": None,
"countyCd": None,
"bBox": None,
"parameterCd": None,
"period": None,
"startDT": None,
"endDT": None,
}
expected = fakeResponse()
expected.status_code = 200
expected.reason = "any text"
mock_get.return_value = expected
actual = hf.get_nwis(site)
mock_get.assert_called_once_with(
expected_url, params=expected_params, headers=expected_headers
)
self.assertEqual(actual, expected)
@mock.patch("requests.get")
def test_hf_get_nwis_converts_parameterCd_all_to_None(self, mock_get):
site = "01541200"
service = "iv"
parameterCd = "all"
expected_parameterCd = None
expected_url = "https://waterservices.usgs.gov/nwis/" + service + "/?"
expected_headers = {"max-age": "120", "Accept-encoding": "gzip"}
expected_params = {
"format": "json,1.1",
"sites": site,
"stateCd": None,
"countyCd": None,
"bBox": None,
"parameterCd": None,
"period": None,
"startDT": None,
"endDT": None,
}
expected = fakeResponse()
expected.status_code = 200
expected.reason = "any text"
mock_get.return_value = expected
actual = hf.get_nwis(site, service, parameterCd=parameterCd)
mock_get.assert_called_once_with(
expected_url, params=expected_params, headers=expected_headers
)
self.assertEqual(actual, expected)
def test_hf_get_nwis_raises_ValueError_too_many_locations(self):
with self.assertRaises(ValueError):
hf.get_nwis("01541000", stateCd="MD")
def test_hf_get_nwis_raises_ValueError_start_and_period(self):
with self.assertRaises(ValueError):
hf.get_nwis("01541000", start_date="2014-01-01", period="P1D")
def test_hf_nwis_custom_status_codes_returns_None_for_200(self):
fake = fakeResponse()
fake.status_code = 200
fake.reason = "any text"
fake.url = "any text"
self.assertIsNone(hf.nwis_custom_status_codes(fake))
@unittest.skip(
"assertWarns errors on Linux. See https://bugs.python.org/issue29620"
)
def test_hf_nwis_custom_status_codes_raises_warning_for_non200(self):
expected_status_code = 400
bad_response = fakeResponse(code=expected_status_code)
with self.assertWarns(SyntaxWarning) as cm:
hf.nwis_custom_status_codes(bad_response)
def test_hf_nwis_custom_status_codes_returns_status_for_non200(self):
expected_status_code = 400
bad_response = fakeResponse(code=expected_status_code)
actual = hf.nwis_custom_status_codes(bad_response)
self.assertEqual(actual, expected_status_code)
def test_hf_calc_freq_returns_Timedelta_and_60min(self):
test_index = pd.date_range("2014-12-29", "2015-01-03", freq="60T")
actual = hf.calc_freq(test_index)
self.assertIsInstance(
actual, pd.Timedelta, "Calc_freq() should return pd.Timedelta."
)
expected = pd.Timedelta("60 minutes")
self.assertEqual(
actual, expected, "Calc_freq() should have converted 60T to 60 minutes."
)
def test_hf_calc_freq_accepts_Day(self):
test_index = pd.date_range("2014-12-29", periods=3)
actual = hf.calc_freq(test_index)
self.assertIsInstance(
actual, pd.Timedelta, "Calc_freq() should return pd.Timedelta."
)
expected = pd.Timedelta("1 day")
self.assertEqual(
actual, expected, "Calc_freq() should have found a 1 day frequency."
)
def test_hf_calc_freq_accepts_hour(self):
test_index = pd.date_range("2014-12-29", freq="1H", periods=30)
actual = hf.calc_freq(test_index)
self.assertIsInstance(
actual, pd.Timedelta, "Calc_freq() should return pd.Timedelta."
)
expected = pd.Timedelta("1 hour")
self.assertEqual(
actual, expected, "Calc_freq() should have found a 1 hour frequency."
)
def test_hf_calc_freq_accepts_1Day_1hour(self):
test_index = pd.date_range("2014-12-29", freq="1D1H2T", periods=30)
actual = hf.calc_freq(test_index)
self.assertIsInstance(
actual, pd.Timedelta, "Calc_freq() should return pd.Timedelta."
)
expected = | pd.Timedelta("1 day 1 hour 2 minutes") | pandas.Timedelta |
import pandas as pd
import numpy as np
from preprocess import process_examples, get_requests_from_logs
from scipy.spatial import distance
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def _process(data, max_attributes, restructure):
processed = process_examples(data, max_attributes, restructure)
return list(map(lambda p: p[0] | p[1], processed))
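# Note: p[0] | p[1] is the dict-union operator introduced in Python 3.9; on older
# interpreters the same merge can be written as {**p[0], **p[1]}, e.g.:
#   {"a": 1} | {"b": 2}         # -> {'a': 1, 'b': 2}
#   {**{"a": 1}, **{"b": 2}}    # equivalent spelling for Python < 3.9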
def _preprocess(requests, prev_requests, max_attributes, restructure):
requests = _process(requests, max_attributes, restructure)
prev_requests = _process(prev_requests, max_attributes, restructure)
processed = pd.DataFrame(requests + prev_requests).astype(str)
one_hot = | pd.get_dummies(processed) | pandas.get_dummies |
import pandas as pd
import sys,os,io,re
import numpy as np
path=sys.argv[1]
outName=sys.argv[2]
thresh=int(sys.argv[3])
anno_file=sys.argv[4]
anno_table= | pd.read_csv(anno_file) | pandas.read_csv |
#! /usr/bin/env python
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from bson.objectid import ObjectId
from sentiment_analysis.tweet_preprocessing import preprocess_tweets
from sentiment_analysis.data_helpers import batch_iter
from config import config
from utils.utils import get_posts
connection = config.initdb()
db = connection.scope_db
def sentimetal_analysis(candidate, sns_type):
print(
"##########################Sentimental Analysis {0} {1} ##########################".format(candidate, sns_type))
posts = []
sns_relation = db.sns_relation.find_one({'user_id': candidate}, {'user_list'})
if not sns_relation:
return
if sns_type == 'twitter':
tweets = get_posts(candidate, sns_type)
for tweet in tweets:
if tweet['tweet'] != '':
posts.append({'text': tweet['tweet'], 'type': 'twitter', 'id': tweet['_id']})
elif sns_type == 'facebook':
fposts = get_posts(candidate, sns_type)
for post in fposts:
if post['status_message'] != '':
posts.append({'text': post['status_message'], 'type': 'facebook', 'id': post['_id']})
if len(posts) == 0:
return
if sns_type == 'facebook':
db.fb_posts.update_many({'page_id': {"$in": sns_relation['user_list']}},
{'$unset': {'has_sentiment': None, 'sentiment_value': None}})
elif sns_type == 'twitter':
db.twitter_tweets.update_many({'user': {"$in": sns_relation['user_list']}},
{'$unset': {'has_sentiment': None, 'sentiment_value': None}})
print("Post Count {0}".format(len(posts)))
BATCH_SIZE = 64
CHECKPOINT_DIR = config.project_path + "sentiment_analysis/runs/" + config.sentiment_checkpoint
ALLOW_SOFT_PLACEMENT = True
LOG_DEVICE_PLACEMENT = False
pdPosts = | pd.DataFrame(posts) | pandas.DataFrame |
import pandas as pd
import numpy as np
import math
from datetime import datetime
from dateutil import parser
import csv
import urllib2
import sys
import pytz
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=NSE:RELIANCE&interval=1min&datatype=csv&outputsize=full&apikey=<KEY>'
response = urllib2.urlopen(url)
#cr = csv.reader(response)
dfP = pd.read_csv(response)
old_timezone = pytz.timezone("US/Eastern")
new_timezone = pytz.timezone("Asia/Kolkata")
dfP = dfP[0:400]
dfP = dfP.iloc[::-1]
for i in range(0,len(dfP)):
dfP['timestamp'][i] = str(old_timezone.localize(pd.to_datetime(dfP['timestamp'][i])).astimezone(new_timezone))
#print(old_timezone.localize(dfP[i].timestamp).astimezone(new_timezone))
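# The row-by-row conversion above could also be written as one vectorised expression
# (a sketch; it assumes the timestamps are naive strings exactly as read above):
#dfP['timestamp'] = (pd.to_datetime(dfP['timestamp'])
#                    .dt.tz_localize(old_timezone)
#                    .dt.tz_convert(new_timezone)
#                    .astype(str))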
#print(dfP[dfP.timestamp >= '2020-03-20'])
#sys.exit("Done Executing")
#df = pd.read_csv('nifty-12-19.csv')
#df.loc[df['date'] == '20190101']
#print(dfP.tail())
#dfP = pd.read_csv('2020-JAN-NIFTY.csv')
df = | pd.read_hdf('StockMarketData_2020-02-14.h5', key='/RELIANCE__EQ__NSE__NSE__MINUTE') | pandas.read_hdf |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 26 14:26:51 2021
@author: michaeltown
"""
## beginning of module 1 MVP data analysis
import numpy as np
import pandas as pd
import os as os
import datetime as dt
import matplotlib.pyplot as plt
import seaborn as sns
## revised EDA project to look for patterns in MTA ridership due to COVID19 restrictions
## filter functions
# from year to year each SCP jumps a lot
def filterLargeDiff(x):
if (x < 2500) & (x > 0):
return x;
else:
return np.nan;
def weekdayfilter(x):
return x.DOW < 5;
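# These helpers are meant to be applied element-/row-wise; a sketch (the frame and
# column names 'counts', 'diff' and 'DOW' are assumptions, not defined above):
#   counts['diff'] = counts['diff'].apply(filterLargeDiff)   # drop implausible jumps
#   weekdays = counts[counts.apply(weekdayfilter, axis=1)]   # keep Mon-Fri rows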
# date limits
lowerDate = pd.to_datetime('2020-11-10');
upperDate = pd.to_datetime('2020-11-30');
# mta data load
dataFileLoc = '/home/michaeltown/work/metis/modules/exploratoryDataAnalysis/data/mtaData202002-202110.csv';
mtaData = pd.read_csv(dataFileLoc);
# covid19 data load
dataFileLoc1 = '/home/michaeltown/work/metis/modules/exploratoryDataAnalysis/data/timeLineOfCOVID19_NYC_schools.csv';
covid19Data = | pd.read_csv(dataFileLoc1) | pandas.read_csv |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
| pd.Timestamp('2011-01-04') | pandas.Timestamp |
import altair as alt
import pandas as pd
import streamlit as st
import coin_metrics
# Get data from coin metrics API
@st.cache
def get_data(asset_id, metrics, asset_name):
rates = coin_metrics.get_reference_rates_pandas(asset_id, metrics)
df = | pd.DataFrame(data=rates) | pandas.DataFrame |
# load in libraries
from bs4 import BeautifulSoup
import pandas as pd
import time
from selenium import webdriver
# %% set up selenium
from selenium import webdriver
driver = webdriver.Firefox()
url1 = 'https://freida.ama-assn.org/search/list?spec=43236&page=1'
driver.get(url1)
# %% define standard parse function
def standard_parse(dict_,lis_):
for val1,val2 in zip(lis_[::2],lis_[1::2]):
if val1 in dict_.keys():
dict_[val1] = [dict_[val1][0] + "_" + val2]
else:
dict_[val1] = [val2]
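# A quick sketch of how standard_parse folds a flat label/value list into a dict
# (labels and values below are made up):
#   d = {}
#   standard_parse(d, ['Required length', '4', 'Accepting applications', 'Yes'])
#   # d -> {'Required length': ['4'], 'Accepting applications': ['Yes']}
#   standard_parse(d, ['Required length', '3'])
#   # d -> {'Required length': ['4_3'], ...}   repeated keys are joined with '_'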
# %% initialize dataframes
names = pd.read_csv('names.csv')
names = names.tail(names.shape[0] - 530 - 860 - 444 - 520 - 255 - 159 - 540 -550 - 290 - 920 - 95)
details = pd.DataFrame({})
# %% parse programs
for index, row in names.iterrows():
# print(row[1])
# load page
driver.get(row[2])
time.sleep(3)
soup = BeautifulSoup(driver.page_source,'html.parser')
# initialize itemdict
itemdict = {}
itemdict['Link'] = row[2]
# perform base parse
table_first = [val.text.strip() for val in soup.find_all('td')]
del table_first[-4]
standard_parse(itemdict,table_first)
# get contact info. If not exist, page is blank, move on
contacts = [list(a.stripped_strings) for a in soup.find_all('small',{'class':['contact-info__contacts__details']})]
try:
itemdict['Program director'] = '\n'.join(contacts[0])
except:
itemdict['Program director'] = None
try:
itemdict['Person to contact for more information about the program'] = '\n'.join(contacts[1])
except:
itemdict['Person to contact for more information about the program'] = None
# perform ID and location parse
special = [a.get_text(separator = ", ") for a in soup.find_all('small',{'class':"ng-star-inserted"})]
try:
itemdict['ID'] = special[0][4:]
except:
itemdict['ID'] = None
try:
itemdict['Location'] = special[1]
except:
itemdict['Location'] = None
try:
itemdict['Sponsor'] = special[2]
except:
itemdict['Sponsor'] = None
for i,val in enumerate(special[3:-1]):
try:
itemdict['Participant '+ str(i)] = val
except:
itemdict['Participant '+ str(i)] = None
# introduction
try:
itemdict['Intro'] = [a.text.strip() for a in soup.find_all('div',{'class':['special_features ng-star-inserted']})][0]
except:
itemdict['Intro'] = None
# go to second tab
#get program length to adjust parse
try:
length = int(itemdict['Required length'][0])
except:
length = 4
try:
driver.find_element_by_xpath("//div[@data-test='program-sub-nav__item'][position()=2]").click()
except:
# adjust dataframe
df_item = pd.DataFrame.from_dict(itemdict)
details = details.append(df_item)
continue
time.sleep(0.25)
soup = BeautifulSoup(driver.page_source,'html.parser')
table_second = [val.text.strip() for val in soup.find_all('td')]
end_second = list(reversed(table_second)).index('Full-time paid') + 1
table_third = table_second[-end_second:]
del table_second[-end_second:]
table_fourth = table_third[9:]
del table_third[9:]
try:
end_fourth = list(reversed(table_fourth)).index('% Male') -1
except:
try:
end_fourth = list(reversed(table_fourth)).index('1') + 1
except:
end_fourth = len(table_fourth)
table_fifth = table_fourth[-end_fourth:]
del table_fourth[-end_fourth:]
del table_fifth[-1]
standard_parse(itemdict,table_second)
for i,val in enumerate(table_third[::3]):
try:
itemdict[val.strip()+'_Physician'] = [table_third[3*i+1]]
except:
itemdict[val.strip()+'_Physician'] = None
try:
itemdict[val.strip()+'_Non-physician'] = [table_third[3*i+2]]
except:
itemdict[val.strip()+'_Non-physician'] = None
standard_parse(itemdict,table_fourth)
for i,val in enumerate(table_fifth[::3]):
try:
itemdict['Year most taxing schedule And frequency per year_Year ' + val] = [table_fifth[3*i+1]]
except:
itemdict['Year most taxing schedule And frequency per year_Year ' + val] = None
try:
itemdict['Beeper or home call (Weeks/Year)_Year ' + val] = [table_fifth[3*i+2]]
except:
itemdict['Beeper or home call (Weeks/Year)_Year ' + val] = None
#move to third tab
try:
driver.find_element_by_xpath("//div[@data-test='program-sub-nav__item'][position()=3]").click()
except:
# adjust dataframe
df_item = | pd.DataFrame.from_dict(itemdict) | pandas.DataFrame.from_dict |
from enum import Enum
import sys
import os
import re
from typing import Any, Callable, Tuple
from pandas.core.frame import DataFrame
from tqdm import tqdm
import yaml
from icecream import ic
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from argparse import ArgumentParser
from configparser import ConfigParser
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from utils.colored_print import ColoredPrint
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import Binarizer
import warnings
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
log = ColoredPrint()
def log_call(func: Callable) -> Callable:
def wrapper(*args, **kwargs):
name: str = args[0].name if type(args[0]) == pd.Series else type(args[0])
log.info(f'{name}\t{func.__name__}')
return func(*args, **kwargs)
return wrapper
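# Typical use of the decorator above (the function and Series below are illustrative):
#   @log_call
#   def scale(series: pd.Series) -> pd.Series:
#       return (series - series.mean()) / series.std()
#   scale(pd.Series([1.0, 2.0], name='age'))   # logs the Series name and 'scale' first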
class Result(Enum):
DataFrame = 0,
Series = 1
@log_call
def one_hot_encode(train_series: pd.Series, test_series: pd.Series) -> Tuple[pd.DataFrame, pd.Series]:
# TODO Should one hot encode train and test datasets
def __replace_non_letters_with_underscore(name: str) -> str:
return re.sub('\W', '_', name).lower()
ohe: OneHotEncoder = OneHotEncoder(handle_unknown='ignore', dtype=np.int0)
sprs: csr_matrix = ohe.fit_transform(pd.DataFrame(train_series))
fixed_series_name: str = __replace_non_letters_with_underscore(train_series.name)
columns: list[str] = [f'{fixed_series_name}_{__replace_non_letters_with_underscore(col)}' for col in ohe.categories_[0]]
train_tmp: pd.DataFrame = pd.DataFrame.sparse.from_spmatrix(sprs, columns=columns)
sprs = ohe.transform(pd.DataFrame(test_series))
test_tmp: pd.DataFrame = | pd.DataFrame.sparse.from_spmatrix(sprs, columns=columns) | pandas.DataFrame.sparse.from_spmatrix |
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras.layers import Dense, Input, Dropout
from keras.models import Sequential
from sklearn.preprocessing import normalize
"""
Created by <NAME> on 4/2/18.
Email : <EMAIL>
Website: http://ce.sharif.edu/~naghipourfar
"""
# Constants
TRAINING_PATH = './train.csv'
TEST_PATH = './test.csv'
# Hyper-Parameters
n_features = 81
n_epochs = 1000
batch_size = 256
def load_data(filepath):
return pd.read_csv(filepath)
def MLP_model():
model = Sequential()
# model.add(Input(shape=(n_features,)))
model.add(Dense(768, activation='relu', input_shape=(n_features,)))
model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
return model
def train_model(model, x_train, y_train):
model.fit(x=x_train,
y=y_train,
epochs=n_epochs,
batch_size=batch_size,
validation_split=0.2)
def predict(model, x_test):
return pd.DataFrame(model.predict(x_test))
def build_submission(x_test, y_pred):
idx = x_test.columns.get_loc('PassengerId')
import csv
with open('./submission.csv', 'w') as file:
writer = csv.writer(file)
writer.writerow(['PassengerId', 'Survived'])
for i in range(y_pred.shape[0]):
writer.writerow([x_test.iloc[i, idx], np.argmax(y_pred[i])])
def preprocess_data(data, test_data):
data = data.drop('Name', axis=1)
test_data = test_data.drop('Name', axis=1)
# 1.1. Drop Columns which Contains Missing Values in all elements
# data = data.dropna(axis=1, how='all')
# 1. Drop All Missing Data is a Fantastic Way
data, test_data = process_missing_data(data, test_data)
# 1.2. Drop Rows Containing Any Missing Values (NaN)
# data = data.dropna(axis=0, how='any')
# 2. Convert Categorical to Dummies
n_train_samples = data.shape[0]
y_train = data['Survived']
train_data = data.drop('Survived', axis=1)
all_data = train_data.append(test_data)
all_data = | pd.get_dummies(all_data) | pandas.get_dummies |
#!/usr/bin/env python
# coding: utf-8
# # ReEDS Scenarios on PV ICE Tool
# To explore different scenarios for furture installation projections of PV (or any technology), ReEDS output data can be useful in providing standard scenarios. ReEDS installation projections are used in this journal as input data to the PV ICE tool.
#
# Current sections include:
#
# <ol>
# <li> ### Reading a standard ReEDS output file and saving it in a PV ICE input format </li>
# <li>### Reading scenarios of interest and running PV ICE tool </li>
# <li>###Plotting </li>
# <li>### GeoPlotting.</li>
# </ol>
# Notes:
#
# Scenarios of Interest:
# - Ref.Mod,
# - 95-by-35.Adv, and
# - 95-by-35+Elec.Adv+DR
#
# In[1]:
import PV_ICE
import numpy as np
import pandas as pd
import os,sys
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 8)
# In[2]:
import os
from pathlib import Path
testfolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP')
print ("Your simulation will be stored in %s" % testfolder)
# In[3]:
PV_ICE.__version__
# ### Reading REEDS original file to get list of SCENARIOs, PCAs, and STATEs
# In[3]:
reedsFile = str(Path().resolve().parent.parent.parent.parent / 'December Core Scenarios ReEDS Outputs Solar Futures v3a.xlsx')
print ("Input file is stored in %s" % reedsFile)
rawdf = pd.read_excel(reedsFile,
sheet_name="new installs PV")
#index_col=[0,2,3]) #this casts scenario, PCA and State as levels
#now set year as an index in place
#rawdf.drop(columns=['State'], inplace=True)
rawdf.drop(columns=['Tech'], inplace=True)
rawdf.set_index(['Scenario','Year','PCA', 'State'], inplace=True)
# In[4]:
scenarios = list(rawdf.index.get_level_values('Scenario').unique())
PCAs = list(rawdf.index.get_level_values('PCA').unique())
STATEs = list(rawdf.index.get_level_values('State').unique())
# ### Reading GIS inputs
# In[5]:
GISfile = str(Path().resolve().parent.parent.parent.parent / 'gis_centroid_n.xlsx')
GIS = pd.read_excel(GISfile)
GIS = GIS.set_index('id')
# In[6]:
GIS.head()
# In[7]:
GIS.loc['p1'].long
# ### Create Scenarios in PV_ICE
# #### Rename difficult characters from Scenarios Names
# In[8]:
simulationname = scenarios
simulationname = [w.replace('+', '_') for w in simulationname]
simulationname
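# e.g. '95-by-35+Elec.Adv+DR' becomes '95-by-35_Elec.Adv_DR', which is friendlier for
# the scenario-named CSV files used further down.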
# #### Downselect to Solar Future scenarios of interest
#
# Scenarios of Interest:
# <li> Ref.Mod
# <li> 95-by-35.Adv
# <li> 95-by-35+Elec.Adv+DR
# In[9]:
SFscenarios = [simulationname[0], simulationname[4], simulationname[8]]
SFscenarios
# #### Create the 3 Scenarios and assign Baselines
#
# Keeping track of each scenario as its own PV ICE Object.
# In[10]:
#for ii in range (0, 1): #len(scenarios):
i = 0
r1 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(PCAs)):
filetitle = SFscenarios[i]+'_'+PCAs[jj]+'.csv'
filetitle = os.path.join(testfolder, 'PCAs', filetitle)
r1.createScenario(name=PCAs[jj], file=filetitle)
r1.scenario[PCAs[jj]].addMaterial('glass', file=r'..\baselines\SolarFutures_2021\baseline_material_glass_Reeds.csv')
r1.scenario[PCAs[jj]].addMaterial('silicon', file=r'..\baselines\SolarFutures_2021\baseline_material_silicon_Reeds.csv')
r1.scenario[PCAs[jj]].addMaterial('silver', file=r'..\baselines\SolarFutures_2021\baseline_material_silver_Reeds.csv')
r1.scenario[PCAs[jj]].addMaterial('copper', file=r'..\baselines\SolarFutures_2021\baseline_material_copper_Reeds.csv')
r1.scenario[PCAs[jj]].addMaterial('aluminum', file=r'..\baselines\SolarFutures_2021\baseline_material_aluminium_Reeds.csv')
r1.scenario[PCAs[jj]].latitude = GIS.loc[PCAs[jj]].lat
r1.scenario[PCAs[jj]].longitude = GIS.loc[PCAs[jj]].long
i = 1
r2 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(PCAs)):
filetitle = SFscenarios[i]+'_'+PCAs[jj]+'.csv'
filetitle = os.path.join(testfolder, 'PCAs', filetitle)
r2.createScenario(name=PCAs[jj], file=filetitle)
r2.scenario[PCAs[jj]].addMaterial('glass', file=r'..\baselines\SolarFutures_2021\baseline_material_glass_Reeds.csv')
r2.scenario[PCAs[jj]].addMaterial('silicon', file=r'..\baselines\SolarFutures_2021\baseline_material_silicon_Reeds.csv')
r2.scenario[PCAs[jj]].addMaterial('silver', file=r'..\baselines\SolarFutures_2021\baseline_material_silver_Reeds.csv')
r2.scenario[PCAs[jj]].addMaterial('copper', file=r'..\baselines\SolarFutures_2021\baseline_material_copper_Reeds.csv')
r2.scenario[PCAs[jj]].addMaterial('aluminum', file=r'..\baselines\SolarFutures_2021\baseline_material_aluminium_Reeds.csv')
r2.scenario[PCAs[jj]].latitude = GIS.loc[PCAs[jj]].lat
r2.scenario[PCAs[jj]].longitude = GIS.loc[PCAs[jj]].long
i = 2
r3 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(PCAs)):
filetitle = SFscenarios[i]+'_'+PCAs[jj]+'.csv'
filetitle = os.path.join(testfolder, 'PCAs', filetitle)
r3.createScenario(name=PCAs[jj], file=filetitle)
r3.scenario[PCAs[jj]].addMaterial('glass', file=r'..\baselines\SolarFutures_2021\baseline_material_glass_Reeds.csv')
r3.scenario[PCAs[jj]].addMaterial('silicon', file=r'..\baselines\SolarFutures_2021\baseline_material_silicon_Reeds.csv')
r3.scenario[PCAs[jj]].addMaterial('silver', file=r'..\baselines\SolarFutures_2021\baseline_material_silver_Reeds.csv')
r3.scenario[PCAs[jj]].addMaterial('copper', file=r'..\baselines\SolarFutures_2021\baseline_material_copper_Reeds.csv')
r3.scenario[PCAs[jj]].addMaterial('aluminum', file=r'..\baselines\SolarFutures_2021\baseline_material_aluminium_Reeds.csv')
r3.scenario[PCAs[jj]].latitude = GIS.loc[PCAs[jj]].lat
r3.scenario[PCAs[jj]].longitude = GIS.loc[PCAs[jj]].long
# In[11]:
list(r1.scenario[PCAs[0]].data.year)
# In[12]:
r1.scenario[PCAs[0]].data
# # 2 FINISH: Set characteristics of Recycling to SF values.
# In[13]:
#r1.scenario[]
# #### Calculate Mass Flow
# In[14]:
IRENA= False
PERFECTMFG = True
mats = ['glass', 'silicon','silver','copper','aluminum']
ELorRL = 'EL'
if IRENA:
if ELorRL == 'RL':
weibullInputParams = {'alpha': 5.3759, 'beta':30} # Regular-loss scenario IRENA
if ELorRL == 'EL':
weibullInputParams = {'alpha': 2.49, 'beta':30} # Regular-loss scenario IRENA
if PERFECTMFG:
for jj in range (0, len(r1.scenario.keys())):
r1.scenario[STATEs[jj]].data['mod_lifetime'] = 40
r1.scenario[STATEs[jj]].data['mod_MFG_eff'] = 100.0
r2.scenario[STATEs[jj]].data['mod_lifetime'] = 40
r2.scenario[STATEs[jj]].data['mod_MFG_eff'] = 100.0
r3.scenario[STATEs[jj]].data['mod_lifetime'] = 40
r3.scenario[STATEs[jj]].data['mod_MFG_eff'] = 100.0
for kk in range(0, len(mats)):
mat = mats[kk]
r1.scenario[STATEs[jj]].material[mat].materialdata['mat_MFG_eff'] = 100.0
r2.scenario[STATEs[jj]].material[mat].materialdata['mat_MFG_eff'] = 100.0
r3.scenario[STATEs[jj]].material[mat].materialdata['mat_MFG_eff'] = 100.0
r1.calculateMassFlow(weibullInputParams=weibullInputParams)
r2.calculateMassFlow(weibullInputParams=weibullInputParams)
r3.calculateMassFlow(weibullInputParams=weibullInputParams)
title_Method = 'Irena_'+ELorRL
else:
r1.calculateMassFlow()
r2.calculateMassFlow()
r3.calculateMassFlow()
title_Method = 'PVICE'
# In[15]:
print("PCAs:", r1.scenario.keys())
print("Module Keys:", r1.scenario[PCAs[jj]].data.keys())
print("Material Keys: ", r1.scenario[PCAs[jj]].material['glass'].materialdata.keys())
# In[16]:
"""
r1.plotScenariosComparison(keyword='Cumulative_Area_disposedby_Failure')
r1.plotMaterialComparisonAcrossScenarios(material='silicon', keyword='mat_Total_Landfilled')
r1.scenario['p1'].data.head(21)
r2.scenario['p1'].data.head(21)
r3.scenario['p1'].data.head(21)
"""
pass
# # SAVE DATA FOR BILLY: PCAs
# ### PCA vs. Cumulative Waste by 2050
#
# In[17]:
#for 3 significant numbers rounding
N = 2
# SFScenarios[kk].scenario[PCAs[zz]].data.year
#
# Index 20 --> 2030
#
# Index 30 --> 2040
#
# Index 40 --> 2050
# In[18]:
idx2030 = 20
idx2040 = 30
idx2050 = 40
print("index ", idx2030, " is year ", r1.scenario[PCAs[0]].data['year'].iloc[idx2030])
print("index ", idx2040, " is year ", r1.scenario[PCAs[0]].data['year'].iloc[idx2040])
print("index ", idx2050, " is year ", r1.scenario[PCAs[0]].data['year'].iloc[idx2050])
# #### 1 - PCA Cumulative Virgin Needs by 2050
# In[19]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminum']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = []
for ii in range (0, len(materials)):
keywordsum = []
for zz in range (0, len(PCAs)):
keywordsum.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword].sum())
materiallist.append(keywordsum)
df = pd.DataFrame (materiallist,columns=PCAs, index = materials)
df = df.T
df = df.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , df], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv(title_Method+' 1 - PCA Cumulative2050 VirginMaterialNeeds_tons.csv')
# #### 2 - PCA Cumulative EoL Only Waste by 2050
# In[20]:
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminum']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = []
for ii in range (0, len(materials)):
keywordsum = []
for zz in range (0, len(PCAs)):
keywordsum.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword].sum())
materiallist.append(keywordsum)
df = pd.DataFrame (materiallist,columns=PCAs, index = materials)
df = df.T
df = df.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , df], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv(title_Method+' 2 - PCA Cumulative2050 Waste EOL_tons.csv')
# #### 3 - PCA Yearly Virgin Needs 2030 2040 2050
# In[21]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminum']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(PCAs)):
keywordsum2030.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword][idx2030])
keywordsum2040.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword][idx2040])
keywordsum2050.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword][idx2050])
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=PCAs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv(title_Method+' 3 - PCA Yearly 2030 2040 2050 VirginMaterialNeeds_tons.csv')
# #### 4 - PCA Yearly EoL Waste 2030 2040 2050
# In[22]:
keyword='mat_Total_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminum']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
import shap
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
# from .utils import Boba_Utils as u
class Boba_Model_Diagnostics():
def __init__(self):
pass
def run_model_diagnostics(self, model, X_train, X_test, y_train, y_test, target):
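        """Run the full diagnostic suite for a fitted regression model.

        Prints train/test error statistics, then renders SHAP importance,
        residual, outlier, and predicted-vs-actual plots for `target`,
        plus estimate/error/volatility breakdowns by age.
        """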
self.get_model_stats(model, X_train, X_test, y_train, y_test, target)
self.plot_shap_imp(model,X_train)
self.plot_shap_bar(model,X_train)
self.residual_plot(model,X_test,y_test,target)
self.residual_density_plot(model,X_test,y_test,target)
self.identify_outliers(model, X_test, y_test,target)
self.residual_mean_plot(model,X_test,y_test,target)
self.residual_variance_plot(model,X_test,y_test,target)
self.PVA_plot(model,X_test,y_test,target)
self.inverse_PVA_plot(model,X_train,y_train,target)
self.estimates_by_var(model,X_train,y_train,target,'Age')
self.error_by_var(model,X_train,y_train,target,'Age')
self.volatility_by_var(model,X_train,y_train,target,'Age')
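    # Typical usage (sketch; assumes a fitted sklearn-style regressor and y frames
    # that contain the target column):
    #   diagnostics = Boba_Model_Diagnostics()
    #   diagnostics.run_model_diagnostics(model, X_train, X_test, y_train, y_test, target='y')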
def get_model_stats(self, model, X_train, X_test, y_train, y_test, target):
train_pred = model.predict(X_train)
test_pred = model.predict(X_test)
        test_RMSE = np.sqrt(mean_squared_error(y_test, test_pred))
        test_R2 = model.score(X_test,y_test)
        test_MAE = mean_absolute_error(y_test, test_pred)
        train_RMSE = np.sqrt(mean_squared_error(y_train, train_pred))
        train_R2 = model.score(X_train,y_train)
        train_MAE = mean_absolute_error(y_train, train_pred)
df = pd.DataFrame(data = {'RMSE': np.round(train_RMSE,4),
'R^2': np.round(train_R2,4),
'MAE': np.round(train_MAE,4)}, index = ['train'])
df2 = pd.DataFrame(data = {'RMSE': np.round(test_RMSE,4),
'R^2': np.round(test_R2,4),
'MAE': np.round(test_MAE,4)}, index = ['test'])
print("Model Statistics for {}".format(target))
print('-'*40)
print(df)
print('-'*40)
print(df2)
print('-'*40)
def plot_shap_imp(self,model,X_train):
shap_values = shap.TreeExplainer(model).shap_values(X_train)
shap.summary_plot(shap_values, X_train)
plt.show()
def plot_shap_bar(self,model,X_train):
shap_values = shap.TreeExplainer(model).shap_values(X_train)
shap.summary_plot(shap_values, X_train, plot_type='bar')
plt.show()
def feature_imp(self,model,X_train,target):
sns.set_style('darkgrid')
names = X_train.columns
coef_df = pd.DataFrame({"Feature": names, "Importance": model.feature_importances_},
columns=["Feature", "Importance"])
coef_df = coef_df.sort_values('Importance',ascending=False)
coef_df
fig, ax = plt.subplots()
sns.barplot(x="Importance", y="Feature", data=coef_df.head(20),
label="Importance", color="b",orient='h')
plt.title("XGB Feature Importances for {}".format(target))
plt.show()
def residual_plot(self,model, X_test, y_test,target):
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
fig, ax = plt.subplots()
ax.scatter(pred, residuals)
ax.plot([pred.min(), pred.max()], [0, 0], 'k--', lw=4)
ax.set_xlabel('Predicted')
ax.set_ylabel('Residuals')
plt.title("Residual Plot for {}".format(target))
plt.show()
def residual_density_plot(self,model, X_test, y_test,target):
sns.set_style('darkgrid')
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
sns.distplot(residuals)
plt.title("Residual Density Plot for {}".format(target))
plt.show()
def residual_variance_plot(self, model, X_test, y_test,target):
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['pred'] = pred
y_temp['residuals'] = residuals
res_var = y_temp.groupby(pd.qcut(y_temp[target], 10))['residuals'].std()
res_var.index = [1,2,3,4,5,6,7,8,9,10]
res_var = res_var.reset_index()
ax = sns.lineplot(x="index", y="residuals", data=res_var)
plt.title("Residual Variance plot for {}".format(target))
plt.xlabel("Prediction Decile")
plt.ylabel("Residual Variance")
plt.show()
except:
pass
def residual_mean_plot(self, model, X_test, y_test,target):
sns.set_style('darkgrid')
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['pred'] = pred
y_temp['residuals'] = residuals
res_var = y_temp.groupby(pd.qcut(y_temp['pred'], 10))['residuals'].mean()
res_var.index = [1,2,3,4,5,6,7,8,9,10]
res_var = res_var.reset_index()
ax = sns.lineplot(x="index", y="residuals", data=res_var)
plt.title("Residual Mean plot for {}".format(target))
plt.xlabel("Prediction Decile")
plt.ylabel("Residual Mean")
plt.show()
except:
pass
def PVA_plot(self,model, X_test, y_test, target):
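        # Predicted-vs-actual (PVA) diagnostic: bin the test set into prediction
        # deciles and compare actual outcomes against predictions within each bin.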
sns.set_style('darkgrid')
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['predicted'] = pred
y_temp['residuals'] = residuals
pva = y_temp.groupby( | pd.qcut(y_temp['predicted'], 10) | pandas.qcut |
# %%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
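# Load the ancillary-service bid and plan tables plus energy prices into a dict of
# DataFrames (paths are relative to the working directory; REGUP/REGDOWN are
# presumably the regulation-up / regulation-down products).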
data = {
'bids_regdup': pd.read_csv('data/as_bids_REGUP.csv'),
'bids_regdown': pd.read_csv('data/as_bids_REGDOWN.csv'),
'plans': pd.read_csv('data/as_plan.csv'),
'energy_prices': | pd.read_csv('data/energy_price.csv') | pandas.read_csv |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
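        # 'pad'/'ffill' matches each target label to the closest preceding index
        # label, 'backfill'/'bfill' to the closest following one; -1 means no match.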
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
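        # slice_locs returns the (start, stop) integer positions used by label-based
        # slicing, with the end label included; absent labels fall back to their
        # sorted insertion point.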
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
        # add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = | Int64Index([1, 2, 5, 7, 12, 25]) | pandas.core.index.Int64Index |
from datetime import datetime, date
import sys
if sys.version_info >= (2, 7):
from nose.tools import assert_dict_equal
import xlwings as xw
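# Round-trip UDFs for the converter tests: each read_* function returns True when
# Excel passes in the expected Python value, and each write_* function returns a
# value that the calling test checks on the Excel side. The numpy- and
# pandas-specific UDFs are only defined when those imports succeed.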
try:
import numpy as np
from numpy.testing import assert_array_equal
def nparray_equal(a, b):
try:
assert_array_equal(a, b)
except AssertionError:
return False
return True
except ImportError:
np = None
try:
import pandas as pd
from pandas import DataFrame, Series
from pandas.util.testing import assert_frame_equal, assert_series_equal
def frame_equal(a, b):
try:
assert_frame_equal(a, b)
except AssertionError:
return False
return True
def series_equal(a, b):
try:
assert_series_equal(a, b)
except AssertionError:
return False
return True
except ImportError:
pd = None
def dict_equal(a, b):
try:
assert_dict_equal(a, b)
except AssertionError:
return False
return True
# Defaults
@xw.func
def read_float(x):
return x == 2.
@xw.func
def write_float():
return 2.
@xw.func
def read_string(x):
return x == 'xlwings'
@xw.func
def write_string():
return 'xlwings'
@xw.func
def read_empty(x):
return x is None
@xw.func
def read_date(x):
return x == datetime(2015, 1, 15)
@xw.func
def write_date():
return datetime(1969, 12, 31)
@xw.func
def read_datetime(x):
return x == datetime(1976, 2, 15, 13, 6, 22)
@xw.func
def write_datetime():
return datetime(1976, 2, 15, 13, 6, 23)
@xw.func
def read_horizontal_list(x):
return x == [1., 2.]
@xw.func
def write_horizontal_list():
return [1., 2.]
@xw.func
def read_vertical_list(x):
return x == [1., 2.]
@xw.func
def write_vertical_list():
return [[1.], [2.]]
@xw.func
def read_2dlist(x):
return x == [[1., 2.], [3., 4.]]
@xw.func
def write_2dlist():
return [[1., 2.], [3., 4.]]
# Keyword args on default converters
@xw.func
@xw.arg('x', ndim=1)
def read_ndim1(x):
return x == [2.]
@xw.func
@xw.arg('x', ndim=2)
def read_ndim2(x):
return x == [[2.]]
@xw.func
@xw.arg('x', transpose=True)
def read_transpose(x):
return x == [[1., 3.], [2., 4.]]
@xw.func
@xw.ret(transpose=True)
def write_transpose():
return [[1., 2.], [3., 4.]]
@xw.func
@xw.arg('x', dates=date)
def read_dates_as1(x):
return x == [[1., date(2015, 1, 13)], [date(2000, 12, 1), 4.]]
@xw.func
@xw.arg('x', dates=date)
def read_dates_as2(x):
return x == date(2005, 1, 15)
@xw.func
@xw.arg('x', dates=datetime)
def read_dates_as3(x):
return x == [[1., datetime(2015, 1, 13)], [datetime(2000, 12, 1), 4.]]
@xw.func
@xw.arg('x', empty='empty')
def read_empty_as(x):
return x == [[1., 'empty'], ['empty', 4.]]
if sys.version_info >= (2, 7):
# assert_dict_equal isn't available on nose for PY 2.6
# Dicts
@xw.func
@xw.arg('x', dict)
def read_dict(x):
return dict_equal(x, {'a': 1., 'b': 'c'})
@xw.func
@xw.arg('x', dict, transpose=True)
def read_dict_transpose(x):
return dict_equal(x, {1.0: 'c', 'a': 'b'})
@xw.func
def write_dict():
return {'a': 1., 'b': 'c'}
# Numpy Array
if np:
@xw.func
@xw.arg('x', np.array)
def read_scalar_nparray(x):
return nparray_equal(x, np.array(1.))
@xw.func
@xw.arg('x', np.array)
def read_empty_nparray(x):
return nparray_equal(x, np.array(np.nan))
@xw.func
@xw.arg('x', np.array)
def read_horizontal_nparray(x):
return nparray_equal(x, np.array([1., 2.]))
@xw.func
@xw.arg('x', np.array)
def read_vertical_nparray(x):
return nparray_equal(x, np.array([1., 2.]))
@xw.func
@xw.arg('x', np.array)
def read_date_nparray(x):
return nparray_equal(x, np.array(datetime(2000, 12, 20)))
# Keyword args on Numpy arrays
@xw.func
@xw.arg('x', np.array, ndim=1)
def read_ndim1_nparray(x):
return nparray_equal(x, np.array([2.]))
@xw.func
@xw.arg('x', np.array, ndim=2)
def read_ndim2_nparray(x):
return nparray_equal(x, np.array([[2.]]))
@xw.func
@xw.arg('x', np.array, transpose=True)
def read_transpose_nparray(x):
return nparray_equal(x, np.array([[1., 3.], [2., 4.]]))
@xw.func
@xw.ret(transpose=True)
def write_transpose_nparray():
return np.array([[1., 2.], [3., 4.]])
@xw.func
@xw.arg('x', np.array, dates=date)
def read_dates_as_nparray(x):
return nparray_equal(x, np.array(date(2000, 12, 20)))
@xw.func
@xw.arg('x', np.array, empty='empty')
def read_empty_as_nparray(x):
return nparray_equal(x, np.array('empty'))
@xw.func
def write_np_scalar():
return np.float64(2)
# Pandas Series
if pd:
@xw.func
@xw.arg('x', pd.Series, header=False, index=False)
def read_series_noheader_noindex(x):
return series_equal(x, pd.Series([1., 2.]))
@xw.func
@xw.arg('x', pd.Series, header=False, index=True)
def read_series_noheader_index(x):
return series_equal(x, pd.Series([1., 2.], index=[10., 20.]))
@xw.func
@xw.arg('x', pd.Series, header=True, index=False)
def read_series_header_noindex(x):
return series_equal(x, pd.Series([1., 2.], name='name'))
@xw.func
@xw.arg('x', pd.Series, header=True, index=True)
def read_series_header_named_index(x):
return series_equal(x, pd.Series([1., 2.], name='name', index=pd.Index([10., 20.], name='ix')))
@xw.func
@xw.arg('x', pd.Series, header=True, index=True)
def read_series_header_nameless_index(x):
return series_equal(x, pd.Series([1., 2.], name='name', index=[10., 20.]))
@xw.func
@xw.arg('x', pd.Series, header=True, index=2)
def read_series_header_nameless_2index(x):
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return series_equal(x, pd.Series([1., 2.], name='name', index=ix))
@xw.func
@xw.arg('x', pd.Series, header=True, index=2)
def read_series_header_named_2index(x):
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]], names=['ix1', 'ix2'])
return series_equal(x, pd.Series([1., 2.], name='name', index=ix))
@xw.func
@xw.arg('x', pd.Series, header=False, index=2)
def read_series_noheader_2index(x):
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return series_equal(x, pd.Series([1., 2.], index=ix))
@xw.func
@xw.ret(pd.Series, index=False)
def write_series_noheader_noindex():
return pd.Series([1., 2.])
@xw.func
@xw.ret(pd.Series, index=True)
def write_series_noheader_index():
return pd.Series([1., 2.], index=[10., 20.])
@xw.func
@xw.ret(pd.Series, index=False)
def write_series_header_noindex():
return pd.Series([1., 2.], name='name')
@xw.func
def write_series_header_named_index():
return pd.Series([1., 2.], name='name', index=pd.Index([10., 20.], name='ix'))
@xw.func
@xw.ret(pd.Series, index=True, header=True)
def write_series_header_nameless_index():
return pd.Series([1., 2.], name='name', index=[10., 20.])
@xw.func
@xw.ret(pd.Series, header=True, index=2)
def write_series_header_nameless_2index():
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return pd.Series([1., 2.], name='name', index=ix)
@xw.func
@xw.ret(pd.Series, header=True, index=2)
def write_series_header_named_2index():
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]], names=['ix1', 'ix2'])
return pd.Series([1., 2.], name='name', index=ix)
@xw.func
@xw.ret(pd.Series, header=False, index=2)
def write_series_noheader_2index():
ix = pd.MultiIndex.from_arrays([['a', 'a'], [10., 20.]])
return pd.Series([1., 2.], index=ix)
@xw.func
@xw.arg('x', pd.Series)
def read_timeseries(x):
return series_equal(x, pd.Series([1.5, 2.5], name='ts', index=[datetime(2000, 12, 20), datetime(2000, 12, 21)]))
@xw.func
@xw.ret(pd.Series)
def write_timeseries():
return pd.Series([1.5, 2.5], name='ts', index=[datetime(2000, 12, 20), datetime(2000, 12, 21)])
@xw.func
@xw.ret(pd.Series, index=False)
def write_series_nan():
return pd.Series([1, np.nan, 3])
# Pandas DataFrame
if pd:
@xw.func
@xw.arg('x', pd.DataFrame, index=False, header=False)
def read_df_0header_0index(x):
return frame_equal(x, pd.DataFrame([[1., 2.], [3., 4.]]))
@xw.func
@xw.ret(pd.DataFrame, index=False, header=False)
def write_df_0header_0index():
return pd.DataFrame([[1., 2.], [3., 4.]])
@xw.func
@xw.arg('x', pd.DataFrame, index=False, header=True)
def read_df_1header_0index(x):
return frame_equal(x, pd.DataFrame([[1., 2.], [3., 4.]], columns=['a', 'b']))
@xw.func
@xw.ret(pd.DataFrame, index=False, header=True)
def write_df_1header_0index():
return pd.DataFrame([[1., 2.], [3., 4.]], columns=['a', 'b'])
@xw.func
@xw.arg('x', pd.DataFrame, index=True, header=False)
def read_df_0header_1index(x):
return frame_equal(x, pd.DataFrame([[1., 2.], [3., 4.]], index=[10., 20.]))
@xw.func
@xw.ret(pd.DataFrame, index=True, header=False)
def write_df_0header_1index():
return pd.DataFrame([[1., 2.], [3., 4.]], index=[10, 20])
@xw.func
@xw.arg('x', pd.DataFrame, index=2, header=False)
def read_df_0header_2index(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]]))
return frame_equal(x, df)
@xw.func
@xw.ret(pd.DataFrame, index=2, header=False)
def write_df_0header_2index():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]]))
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=1, header=1)
def read_df_1header_1namedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=['c', 'd', 'c'])
df.index.name = 'ix1'
return frame_equal(x, df)
@xw.func
def write_df_1header_1namedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=['c', 'd', 'c'])
df.index.name = 'ix1'
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=1, header=1)
def read_df_1header_1unnamedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=['c', 'd', 'c'])
return frame_equal(x, df)
@xw.func
def write_df_1header_1unnamedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=['c', 'd', 'c'])
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=False, header=2)
def read_df_2header_0index(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return frame_equal(x, df)
@xw.func
@xw.ret(pd.DataFrame, index=False, header=2)
def write_df_2header_0index():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=1, header=2)
def read_df_2header_1namedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
df.index.name = 'ix1'
return frame_equal(x, df)
@xw.func
def write_df_2header_1namedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
df.index.name = 'ix1'
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=1, header=2)
def read_df_2header_1unnamedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return frame_equal(x, df)
@xw.func
def write_df_2header_1unnamedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.]],
index=[1., 2.],
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=2, header=2)
def read_df_2header_2namedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]], names=['x1', 'x2']),
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return frame_equal(x, df)
@xw.func
def write_df_2header_2namedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]], names=['x1', 'x2']),
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return df
@xw.func
@xw.arg('x', pd.DataFrame, index=2, header=2)
def read_df_2header_2unnamedindex(x):
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]]),
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return frame_equal(x, df)
@xw.func
def write_df_2header_2unnamedindex():
df = pd.DataFrame([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
index=pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1., 2., 1.]]),
columns=pd.MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'c']]))
return df
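# Minimal local sanity check of the UDFs above (added illustration; it assumes
# xlwings' @xw.func decorator leaves the plain Python callables usable outside
# Excel, where they would normally be imported via the xlwings add-in):
if __name__ == '__main__':
    assert read_float(2.)
    assert write_2dlist() == [[1., 2.], [3., 4.]]
    assert read_vertical_list([1., 2.])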
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.data_checks import (
ClassImbalanceDataCheck,
DataCheckError,
DataCheckMessageCode,
DataCheckWarning,
)
class_imbalance_data_check_name = ClassImbalanceDataCheck.name
def test_class_imbalance_errors():
X = pd.DataFrame()
with pytest.raises(ValueError, match="threshold 0 is not within the range"):
ClassImbalanceDataCheck(threshold=0).validate(X, y=pd.Series([0, 1, 1]))
with pytest.raises(ValueError, match="threshold 0.51 is not within the range"):
ClassImbalanceDataCheck(threshold=0.51).validate(X, y=pd.Series([0, 1, 1]))
with pytest.raises(ValueError, match="threshold -0.5 is not within the range"):
ClassImbalanceDataCheck(threshold=-0.5).validate(X, y=pd.Series([0, 1, 1]))
with pytest.raises(ValueError, match="Provided number of CV folds"):
ClassImbalanceDataCheck(num_cv_folds=-1).validate(X, y=pd.Series([0, 1, 1]))
with pytest.raises(ValueError, match="Provided value min_samples"):
ClassImbalanceDataCheck(min_samples=-1).validate(X, y=pd.Series([0, 1, 1]))
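# Illustrative direct usage of the check exercised below (added sketch; the
# target values are made up, the API calls mirror the tests in this file):
#
#     y = pd.Series([0] * 5 + [1] * 95)
#     check = ClassImbalanceDataCheck(threshold=0.1, min_samples=1, num_cv_folds=0)
#     check.validate(pd.DataFrame(), y)  # -> warning dict flagging label 0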
@pytest.mark.parametrize("input_type", ["pd", "np", "ww"])
def test_class_imbalance_data_check_binary(input_type):
X = pd.DataFrame()
y = pd.Series([0, 0, 1])
y_long = pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
y_balanced = pd.Series([0, 0, 1, 1])
if input_type == "np":
X = X.to_numpy()
y = y.to_numpy()
y_long = y_long.to_numpy()
y_balanced = y_balanced.to_numpy()
elif input_type == "ww":
X.ww.init()
y = ww.init_series(y)
y_long = ww.init_series(y_long)
y_balanced = ww.init_series(y_balanced)
class_imbalance_check = ClassImbalanceDataCheck(min_samples=1, num_cv_folds=0)
assert class_imbalance_check.validate(X, y) == []
assert class_imbalance_check.validate(X, y_long) == [
DataCheckWarning(
message="The following labels fall below 10% of the target: [0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [0]},
).to_dict()
]
assert ClassImbalanceDataCheck(
threshold=0.25, min_samples=1, num_cv_folds=0
).validate(X, y_long) == [
DataCheckWarning(
message="The following labels fall below 25% of the target: [0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [0]},
).to_dict()
]
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=1)
assert class_imbalance_check.validate(X, y) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 2 instances: [1]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [1]},
).to_dict()
]
assert class_imbalance_check.validate(X, y_balanced) == []
class_imbalance_check = ClassImbalanceDataCheck()
assert class_imbalance_check.validate(X, y) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 6 instances: [0, 1]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [0, 1]},
).to_dict()
]
@pytest.mark.parametrize("input_type", ["pd", "np", "ww"])
def test_class_imbalance_data_check_multiclass(input_type):
X = pd.DataFrame()
y = pd.Series([0, 2, 1, 1])
y_imbalanced_default_threshold = pd.Series([0, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
y_imbalanced_set_threshold = pd.Series(
[0, 2, 2, 2, 2, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
)
y_imbalanced_cv = pd.Series([0, 1, 2, 2, 1, 1, 1])
y_long = pd.Series([0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4])
if input_type == "np":
X = X.to_numpy()
y = y.to_numpy()
y_imbalanced_default_threshold = y_imbalanced_default_threshold.to_numpy()
y_imbalanced_set_threshold = y_imbalanced_set_threshold.to_numpy()
y_imbalanced_cv = y_imbalanced_cv.to_numpy()
y_long = y_long.to_numpy()
elif input_type == "ww":
X.ww.init()
y = ww.init_series(y)
y_imbalanced_default_threshold = ww.init_series(y_imbalanced_default_threshold)
y_imbalanced_set_threshold = ww.init_series(y_imbalanced_set_threshold)
y_imbalanced_cv = ww.init_series(y_imbalanced_cv)
y_long = ww.init_series(y_long)
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=0)
assert class_imbalance_check.validate(X, y) == []
assert class_imbalance_check.validate(X, y_imbalanced_default_threshold) == [
DataCheckWarning(
message="The following labels fall below 10% of the target: [0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [0]},
).to_dict(),
DataCheckWarning(
message="The following labels in the target have severe class imbalance because they fall under 10% of the target and have less than 100 samples: [0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE,
details={"target_values": [0]},
).to_dict(),
]
assert ClassImbalanceDataCheck(
threshold=0.25, num_cv_folds=0, min_samples=1
).validate(X, y_imbalanced_set_threshold) == [
DataCheckWarning(
message="The following labels fall below 25% of the target: [3, 0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [3, 0]},
).to_dict()
]
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=2)
assert class_imbalance_check.validate(X, y_imbalanced_cv) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 4 instances: [0, 2]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [0, 2]},
).to_dict()
]
assert class_imbalance_check.validate(X, y_long) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 4 instances: [0, 1]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [0, 1]},
).to_dict()
]
class_imbalance_check = ClassImbalanceDataCheck()
assert class_imbalance_check.validate(X, y_long) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 6 instances: [0, 1, 2, 3]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [0, 1, 2, 3]},
).to_dict()
]
@pytest.mark.parametrize("input_type", ["pd", "np", "ww"])
def test_class_imbalance_empty_and_nan(input_type):
X = pd.DataFrame()
y_empty = pd.Series([])
y_has_nan = pd.Series([np.nan, np.nan, np.nan, np.nan, 1, 1, 1, 1, 2])
if input_type == "np":
X = X.to_numpy()
y_empty = y_empty.to_numpy()
y_has_nan = y_has_nan.to_numpy()
elif input_type == "ww":
X.ww.init()
y_empty = ww.init_series(y_empty)
y_has_nan = ww.init_series(y_has_nan)
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=0)
assert class_imbalance_check.validate(X, y_empty) == []
assert ClassImbalanceDataCheck(
threshold=0.5, min_samples=1, num_cv_folds=0
).validate(X, y_has_nan) == [
DataCheckWarning(
message="The following labels fall below 50% of the target: [2.0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [2.0]},
).to_dict()
]
assert ClassImbalanceDataCheck(threshold=0.5, num_cv_folds=0).validate(
X, y_has_nan
) == [
DataCheckWarning(
message="The following labels fall below 50% of the target: [2.0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [2.0]},
).to_dict(),
DataCheckWarning(
message="The following labels in the target have severe class imbalance because they fall under 50% of the target and have less than 100 samples: [2.0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE,
details={"target_values": [2.0]},
).to_dict(),
]
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=1)
assert class_imbalance_check.validate(X, y_empty) == []
assert ClassImbalanceDataCheck(threshold=0.5, num_cv_folds=1).validate(
X, y_has_nan
) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 2 instances: [2.0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [2.0]},
).to_dict(),
DataCheckWarning(
message="The following labels fall below 50% of the target: [2.0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [2.0]},
).to_dict(),
DataCheckWarning(
message="The following labels in the target have severe class imbalance because they fall under 50% of the target and have less than 100 samples: [2.0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE,
details={"target_values": [2.0]},
).to_dict(),
]
@pytest.mark.parametrize("input_type", ["pd", "ww"])
def test_class_imbalance_nonnumeric(input_type):
X = pd.DataFrame()
y_bools = pd.Series([True, False, False, False, False])
y_binary = pd.Series(["yes", "no", "yes", "yes", "yes"])
y_multiclass = pd.Series(
[
"red",
"green",
"red",
"red",
"blue",
"green",
"red",
"blue",
"green",
"red",
"red",
"red",
]
)
y_multiclass_imbalanced_folds = pd.Series(["No", "Maybe", "Maybe", "No", "Yes"])
y_binary_imbalanced_folds = pd.Series(["No", "Yes", "No", "Yes", "No"])
if input_type == "ww":
X.ww.init()
y_bools = ww.init_series(y_bools)
y_binary = ww.init_series(y_binary)
y_multiclass = ww.init_series(y_multiclass)
class_imbalance_check = ClassImbalanceDataCheck(
threshold=0.25, min_samples=1, num_cv_folds=0
)
assert class_imbalance_check.validate(X, y_bools) == [
DataCheckWarning(
message="The following labels fall below 25% of the target: [True]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [True]},
).to_dict()
]
assert class_imbalance_check.validate(X, y_binary) == [
DataCheckWarning(
message="The following labels fall below 25% of the target: ['no']",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": ["no"]},
).to_dict()
]
assert ClassImbalanceDataCheck(threshold=0.35, num_cv_folds=0).validate(
X, y_multiclass
) == [
DataCheckWarning(
message="The following labels fall below 35% of the target: ['green', 'blue']",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": ["green", "blue"]},
).to_dict(),
DataCheckWarning(
message="The following labels in the target have severe class imbalance because they fall under 35% of the target and have less than 100 samples: ['green', 'blue']",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE,
details={"target_values": ["green", "blue"]},
).to_dict(),
]
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=1)
assert class_imbalance_check.validate(X, y_multiclass_imbalanced_folds) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 2 instances: ['Yes']",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": ["Yes"]},
).to_dict()
]
assert class_imbalance_check.validate(X, y_multiclass) == []
class_imbalance_check = ClassImbalanceDataCheck()
assert class_imbalance_check.validate(X, y_binary_imbalanced_folds) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 6 instances: ['No', 'Yes']",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": ["No", "Yes"]},
).to_dict()
]
assert class_imbalance_check.validate(X, y_multiclass) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 6 instances: ['blue', 'green']",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": ["blue", "green"]},
).to_dict()
]
@pytest.mark.parametrize("input_type", ["pd", "ww"])
def test_class_imbalance_nonnumeric_balanced(input_type):
X = pd.DataFrame()
y_bools_balanced = pd.Series([True, True, True, False, False])
y_binary_balanced = pd.Series(["No", "Yes", "No", "Yes"])
y_multiclass_balanced = pd.Series(
["red", "green", "red", "red", "blue", "green", "red", "blue", "green", "red"]
)
if input_type == "ww":
X.ww.init()
y_bools_balanced = ww.init_series(y_bools_balanced)
y_binary_balanced = ww.init_series(y_binary_balanced)
y_multiclass_balanced = ww.init_series(y_multiclass_balanced)
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=1)
assert class_imbalance_check.validate(X, y_multiclass_balanced) == []
assert class_imbalance_check.validate(X, y_binary_balanced) == []
assert class_imbalance_check.validate(X, y_multiclass_balanced) == []
@pytest.mark.parametrize("input_type", ["pd", "ww"])
@pytest.mark.parametrize("min_samples", [1, 20, 50, 100, 500])
def test_class_imbalance_severe(min_samples, input_type):
X = pd.DataFrame()
# 0 will be < 10% of the data, but there will be 50 samples of it
y_values_binary = pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] * 50)
y_values_multiclass = pd.Series(
[0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] * 50
)
if input_type == "ww":
X.ww.init()
y_values_binary = ww.init_series(y_values_binary)
y_values_multiclass = ww.init_series(y_values_multiclass)
class_imbalance_check = ClassImbalanceDataCheck(
min_samples=min_samples, num_cv_folds=1
)
warnings = [
DataCheckWarning(
message="The following labels fall below 10% of the target: [0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [0]},
).to_dict()
]
if min_samples > 50:
warnings.append(
DataCheckWarning(
message=f"The following labels in the target have severe class imbalance because they fall under 10% of the target and have less than {min_samples} samples: [0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE,
details={"target_values": [0]},
).to_dict()
)
assert class_imbalance_check.validate(X, y_values_binary) == warnings
assert class_imbalance_check.validate(X, y_values_multiclass) == warnings
def test_class_imbalance_large_multiclass():
X = pd.DataFrame()
y_values_multiclass_large = pd.Series(
[0] * 20 + [1] * 25 + [2] * 99 + [3] * 105 + [4] * 900 + [5] * 900
)
y_multiclass_huge = pd.Series([i % 200 for i in range(100000)])
y_imbalanced_multiclass_huge = y_multiclass_huge.append(pd.Series([200] * 10))
"""
Base class for a runnable script
"""
import pandas as pd
import numpy as np
from .. import api as mhapi
import os
from ..utility import logger
class Processor:
def __init__(self, verbose=True, violate=False, independent=True):
self.verbose = verbose
self.independent = independent
self.violate = violate
self.name = 'BaseProcessor'
def run_on_file(self, file, prev_file=None, next_file=None):
self.file = file
if self.independent:
prev_file = None
next_file = None
self._extract_meta(file)
data, prev_data, next_data = self._load_file(file, prev_file=prev_file, next_file=next_file)
combined_data, data_start_indicator, data_stop_indicator = self._merge_data(data, prev_data=prev_data, next_data=next_data)
result_data = self._run_on_data(combined_data, data_start_indicator, data_stop_indicator)
result_data = self._post_process(result_data)
return result_data
def set_meta(self, meta):
self.meta = meta
def _extract_meta(self, file):
file = os.path.normpath(os.path.abspath(file))
pid = mhapi.extract_pid(file)
if not self.violate:
data_type = mhapi.extract_datatype(file)
file_type = mhapi.extract_file_type(file)
sensor_type = mhapi.extract_sensortype(file)
sid = mhapi.extract_id(file)
date = mhapi.extract_date(file)
hour = mhapi.extract_hour(file)
meta = dict(
pid=pid,
data_type=data_type,
file_type=file_type,
sensor_type=sensor_type,
sid=sid,
date=date,
hour=hour
)
else:
meta = dict(
pid=pid
)
self.meta = meta
def _load_file(self, file, prev_file=None, next_file=None):
raise NotImplementedError("Subclass must implement this method")
def _merge_data(self, data, prev_data=None, next_data=None):
raise NotImplementedError("Subclass must implement this method")
def _run_on_data(self, combined_data, data_start_indicator, data_stop_indicator):
raise NotImplementedError("Subclass must implement this method")
def _post_process(self, result_data):
return result_data
def __str__(self):
return self.name
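# A minimal sketch of a concrete Processor (hypothetical subclass added for
# illustration; it is not part of the original module). It shows which hooks a
# subclass fills in so that run_on_file() works end to end.
class ExampleCsvProcessor(Processor):
    def _load_file(self, file, prev_file=None, next_file=None):
        # Load the current file and, when provided, its neighbours.
        data = pd.read_csv(file)
        prev_data = pd.read_csv(prev_file) if prev_file else pd.DataFrame()
        next_data = pd.read_csv(next_file) if next_file else pd.DataFrame()
        return data, prev_data, next_data

    def _merge_data(self, data, prev_data=None, next_data=None):
        # Concatenate neighbouring files and remember the current file's bounds.
        combined = pd.concat([prev_data, data, next_data], ignore_index=True)
        return combined, len(prev_data), len(prev_data) + len(data)

    def _run_on_data(self, combined_data, data_start_indicator, data_stop_indicator):
        # Trivial summary restricted to the rows of the current file.
        current = combined_data.iloc[data_start_indicator:data_stop_indicator]
        return pd.DataFrame({'n_rows': [len(current)]})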
class SensorProcessor(Processor):
def __init__(self, verbose=True, violate=False, independent=True):
Processor.__init__(self, verbose=verbose, violate=violate, independent=independent)
self.name = 'SensorProcessor'
def _load_file(self, file, prev_file=None, next_file=None):
file = os.path.normpath(os.path.abspath(file))
df = mhapi.helpers.importer.import_sensor_file_mhealth(file)
if self.verbose:
logger.info("Current file: " + file)
logger.info("Previous file: " + str(prev_file))
logger.info("Next file: " + str(next_file))
if prev_file is not None and prev_file != "None":
prev_file = os.path.normpath(os.path.abspath(prev_file))
prev_df = mhapi.helpers.importer.import_sensor_file_mhealth(prev_file)
else:
prev_df = pd.DataFrame()
if next_file is not None and next_file != "None":
next_file = os.path.normpath(os.path.abspath(next_file))
next_df = mhapi.helpers.importer.import_sensor_file_mhealth(next_file)
else:
next_df = pd.DataFrame()
return df, prev_df, next_df
def _merge_data(self, data, prev_data=None, next_data=None):
if data.empty:
return pd.DataFrame()
"""--------------------------------------------------------------------------------------------------------------------
Copyright 2021 Market Maker Lite, LLC (MML)
Licensed under the Apache License, Version 2.0
THIS CODE IS PROVIDED AS IS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND
This file is part of the MML Open Source Library (www.github.com/MarketMakerLite)
--------------------------------------------------------------------------------------------------------------------"""
from tda import auth
import pandas as pd
import datetime
from datetime import time, timezone
import time
import pandas_market_calendars as mcal
from sqlalchemy import create_engine, inspect
from sqlalchemy.pool import QueuePool
import os
import config
import traceback
def dt_now():
"""Get current datetime"""
dt_now = datetime.datetime.now(tz=timezone.utc).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
return dt_now
def opencheck():
"""Check if markets are currently open using pandas_market_calendars"""
trading_day = mcal.get_calendar('NYSE').schedule(start_date=datetime.date.today(), end_date=datetime.date.today())
try:
open_time = trading_day.iloc[0][0]
close_time = trading_day.iloc[0][1]
if open_time < datetime.datetime.now(tz=timezone.utc) < close_time:
market_open = True
else:
market_open = False
except Exception:
market_open = False
return market_open
def logins():
"""Login and Connect to Database"""
# Login to TD-Ameritrade
token_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ameritrade-credentials.json')
api_key = config.api_key
redirect_uri = config.redirect_uri
try:
c = auth.client_from_token_file(token_path, api_key)
except FileNotFoundError:
from selenium import webdriver
with webdriver.Chrome() as driver:
c = auth.client_from_login_flow(
driver, api_key, redirect_uri, token_path)
# Connect to Database
engine = create_engine(config.psql, poolclass=QueuePool, pool_size=1, max_overflow=20, pool_recycle=3600,
pool_pre_ping=True, isolation_level='AUTOCOMMIT')
return c, engine
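# Hedged usage sketch of the helpers above (illustration only; it uses nothing
# beyond the functions defined in this module):
#
#     if opencheck():              # only run during NYSE trading hours
#         c, engine = logins()     # TD Ameritrade client + SQLAlchemy engine
#         print(dt_now(), "connected")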
def getsymbols():
"""Get a list of symbols to use"""
"""Example 1: Read from Database"""
# engine = create_engine(config.psql)
# symbol_df = pd.read_sql_query('select ticker, market_cap from companies where market_cap >= 900000000', con=engine)
# symbol_df = symbol_df.sort_values("market_cap", ascending=False)
# symbols = symbol_df['ticker'].tolist()
"""Example 2: Get S&P500 symbols from wikipedia"""
symbol_df = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
symbol_df = pd.DataFrame(symbol_df[0:][0])
#!/usr/bin/env python
# coding: utf-8
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from scipy.stats import entropy
import pickle
import os
import json
from flask import Flask
from flask import request
from jinja2 import Template
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline, FeatureUnion
import re
from sklearn.base import TransformerMixin
from sklearn.pipeline import Pipeline
from nltk import pos_tag, word_tokenize
import unicodedata
from sklearn.preprocessing import MultiLabelBinarizer
def get_nth_token(text, n):
toks = re.findall('[\w+\(\),:;\[\]]+', text)
if len(toks) > n:
return toks[n]
else:
return ''
def cleanup_string(text):
toret = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('ascii')
toret = toret.strip()
toret = re.sub('[\r\n\t]+', ' ', toret)
# toks = re.findall('[\w+\(\),:;\[\]]+', toret)
# toret = ' '.join(toks)
toret = re.sub('[^\w+\(\),:;\[\]\-.|& \t\n/]+', ' ', toret)
return toret
def get_pos_string(text, text_len=100):
if len(text) < text_len:
tags = pos_tag(word_tokenize(text))
tags = [t[1] for t in tags]
return ' '.join(tags)
else:
return ''
def is_alpha_and_numeric(string):
toret = ''
if string.isdigit():
toret = 'DIGIT'
elif string.isalpha():
if string.isupper():
toret = 'ALPHA_UPPER'
elif string.islower():
toret = 'ALPHA_LOWER'
else:
toret = 'ALPHA'
elif len(string) > 0:
toks = [string[0], string[-1]]
alphanum = 0
for tok in toks:
if tok.isdigit():
alphanum += 1
elif tok.isalpha():
alphanum -= 1
if alphanum == 0:
toret = 'ALPHA_NUM'
else:
toret = 'EMPTY'
return toret
class ColumnsSelector(TransformerMixin):
def __init__(self, cols):
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.cols]
class DropColumns(TransformerMixin):
def __init__(self, cols):
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.drop(columns=self.cols)
return X
class ToDense(TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
return X.todense()
class DefaultTextFeaturizer(TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
data = pd.DataFrame(data={'text': X})
data.text = data.text.apply(lambda x: cleanup_string(x))
data["pos_string"] = data.text.apply(lambda x: get_pos_string(x))
data['text_feature_text_length'] = data['text'].apply(lambda x: len(x))
data['text_feature_capitals'] = data['text'].apply(lambda comment: sum(1 for c in comment if c.isupper()))
data['text_feature_digits'] = data['text'].apply(lambda comment: sum(1 for c in comment if c.isdigit()))
data['text_feature_caps_vs_length'] = data.apply(
lambda row: row['text_feature_capitals'] / (row['text_feature_text_length'] + 0.001), axis=1)
data['text_feature_num_symbols'] = data['text'].apply(lambda comment: len(re.findall('\W', comment)))
data['text_feature_num_words'] = data['text'].apply(lambda comment: len(comment.split()))
data['text_feature_num_unique_words'] = data['text'].apply(lambda comment: len(set(w for w in comment.split())))
data['text_feature_words_vs_unique'] = data['text_feature_num_unique_words'] / (
data['text_feature_num_words'] + 0.001)
data['text_feature_first_token'] = data['text'].apply(lambda x: is_alpha_and_numeric(get_nth_token(x, 0)))
data['text_feature_second_token'] = data['text'].apply(lambda x: is_alpha_and_numeric(get_nth_token(x, 1)))
data['text_feature_third_token'] = data['text'].apply(lambda x: is_alpha_and_numeric(get_nth_token(x, 2)))
data['text_feature_title_word_count'] = data['text'].apply(lambda x: sum(1 for c in x.split() if c.istitle()))
data['text_feature_title_word_total_word_ratio'] = data['text_feature_title_word_count'] / (
data['text_feature_num_words'] + 0.001)
data['text_feature_numeric_tokens'] = data['text'].apply(lambda x: sum(1 for c in x.split() if c.isdigit()))
data['text_feature_capital_tokens'] = data['text'].apply(lambda x: sum(1 for c in x.split() if c.isupper()))
return data.drop(columns=['text'])
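# Quick illustrative check of the featurizer above (added example; the input
# strings are made up, and the POS column additionally needs the NLTK taggers):
#
#     feats = DefaultTextFeaturizer().fit_transform(["Hello World 123", "foo, bar"])
#     feats[["text_feature_num_words", "text_feature_capital_tokens"]]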
class MultiLabelEncoder(TransformerMixin):
def __init__(self, inplace=False):
self.inplace = inplace
def fit(self, X, y=None):
self.encoder = {}
self.cols = [c for c in X.columns if X[c].dtype.name == 'object']
for col in self.cols:
col_enc = {}
count = 1
unique = list(X[col].unique())
for u in unique:
col_enc[u] = count
count += 1
self.encoder[col] = col_enc
return self
def transform(self, X):
if self.inplace:
temp = X
else:
temp = X.copy()
for col in self.cols:
temp[col] = temp[col].apply(lambda x: self.encoder[col].get(x, 0))
return temp
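# Small illustrative check of MultiLabelEncoder (added example; the data is made
# up): object columns are mapped to integer codes learned in fit(), and unseen
# values fall back to 0 in transform().
#
#     demo = pd.DataFrame({"color": ["red", "blue", "red"], "n": [1, 2, 3]})
#     enc = MultiLabelEncoder().fit(demo)
#     enc.transform(pd.DataFrame({"color": ["blue", "green"], "n": [4, 5]}))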
import pprint
class BaseTextClassifier:
"""
A utility class for Text Classification
"""
def __init__(self,
all_data,
text_col='text',
label_col='class',
multilabel=False,
feature_transformer=None,
display_function=None,
data_directory=''):
"""
Initialize with a DataFrame(['text']) and/or DataFrame(['text', 'class'])
:param unlabelled: DataFrame(['text'])
:param labelled: DataFrame(['text', 'class'])
"""
if type(all_data).__name__ == 'list':
self.all_data = pd.DataFrame(data={
'text': all_data
})
else:
self.all_data = all_data
self.label_col = label_col
self.text_col = text_col
self.multilabel = multilabel
if self.label_col not in self.all_data:
self.all_data[self.label_col] = np.nan
# extra feature functions
if feature_transformer == 'default':
self.feature_transformer = DefaultTextFeaturizer()
elif feature_transformer is not None:
self.feature_transformer = feature_transformer
else:
self.feature_transformer = None
# self._refresh_text_feature_data()
self.model = None
self.display_function = display_function
self.data_directory = os.path.join(data_directory, 'Text_Classification_Data')
os.makedirs(self.data_directory, exist_ok=True)
# def _refresh_text_feature_data(self):
#
# feature_data = self.feature_transformer.fit_transform(self.all_data['text'])
# self.feature_columns = list(feature_data.columns)
# for col in feature_data.columns:
# self.all_data[col] = feature_data[col]
def default_display_function(self, example):
data = {
'text': example[self.text_col],
}
dump = json.dumps(data, indent=2)
return dump
def generate_view(self, example):
if self.display_function:
return self.display_function(example)
else:
return self.default_display_function(example)
def get_new_random_example(self):
"""
Returns a random example to be tagged. Used to bootstrap the model.
:return:
"""
unl = self.all_data[self.all_data[self.label_col].isna()].index
current_example_index = np.random.choice(unl)
current_example = self.all_data.loc[current_example_index]
toret = {
'example_index': int(current_example_index),
'view': self.generate_view(current_example)
}
if self.model:
preds = self.model.predict(self.all_data.loc[current_example_index: current_example_index+1])
if self.multilabel:
preds = list(self.multilabel_binarizer.inverse_transform(preds)[0])
toret['predictions'] = preds
else:
preds = [preds[0]]
toret['predictions'] = preds
return toret
def query_new_example(self, mode='entropy'):
"""
Returns a new example based on the chosen active learning strategy.
:param mode: Active Learning Strategy
- entropy (Default): query the unlabelled example with the highest predictive entropy
:return:
"""
unlab = self.all_data[self.all_data[self.label_col].isna()]
if len(unlab) > 1000:
unlab = unlab.sample(1000)
if mode == 'entropy':
unlabelled_idx = unlab.index
probs = self.model.predict_proba(unlab)
if self.multilabel:
ent = np.array([entropy(p.T) for p in probs])
mean_proba = ent.mean(axis=0)
proba_idx = np.argmax(mean_proba)
else:
ent = entropy(probs.T)
proba_idx = np.argmax(ent)
actual_idx = unlabelled_idx[proba_idx]
current_example_index = actual_idx
current_example = self.all_data.loc[current_example_index]
toret = {
'example_index': int(current_example_index),
'view': self.generate_view(current_example)
}
if self.model:
preds = self.model.predict(self.all_data.loc[current_example_index: current_example_index+1])
if self.multilabel:
preds = list(self.multilabel_binarizer.inverse_transform(preds)[0])
toret['predictions'] = preds
else:
preds = [preds[0]]
toret['predictions'] = preds
return toret
def update_model(self):
"""
Updates the model with the currently labelled dataset
:return:
"""
lab = self.all_data[self.all_data[self.label_col].notna()]
if len(lab) == 0:
return None
if self.model is None:
if self.feature_transformer is None:
self.model = Pipeline([
('vect', make_pipeline(ColumnsSelector(self.text_col), CountVectorizer(ngram_range=(1, 2)))),
('clf', RandomForestClassifier())
])
else:
self.model = Pipeline([
('fu', FeatureUnion([
('text_vectorizer',
make_pipeline(ColumnsSelector(self.text_col), CountVectorizer(ngram_range=(1, 2)), ToDense())),
('text_featurizer', make_pipeline(ColumnsSelector(self.feature_columns), MultiLabelEncoder()))
])),
('clf', RandomForestClassifier())
])
if self.multilabel:
self.multilabel_binarizer = MultiLabelBinarizer()
labels = self.multilabel_binarizer.fit_transform(lab[self.label_col].apply(lambda x: x.split('; ')))
else:
labels = lab[self.label_col]
self.model.fit(lab, labels)
def save_example(self, example_index, data):
"""
Saves the current example with the user tagged data
:param data: User tagged data. [list of tags]
:return:
"""
print(data)
self.all_data.loc[example_index, self.label_col] = '; '.join(data)
def save_data(self, filepath=None):
"""
Saves the labelled data to a file
:param filepath: file to save the labelled data to (CSV format).
:return:
"""
if filepath is None:
filepath = os.path.join(self.data_directory, 'text_classification_data.csv')
self.all_data[[self.text_col, self.label_col]].dropna().to_csv(filepath, index=False)
def load_data(self, filepath=None):
"""
Loads labelled data from file.
:param filepath: file containing pickeled labelled dataset
:return:
"""
if filepath is None:
filepath = os.path.join(self.data_directory, 'text_classification_data.csv')
self.labelled = pd.read_csv(filepath)
self.all_data = pd.concat([self.all_data, self.labelled])
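# Hedged sketch of the intended labelling loop (added illustration; only the
# public methods defined above are used, the texts and tags are made up):
#
#     clf = BaseTextClassifier(["first text", "second text", "third text"])
#     ex = clf.get_new_random_example()
#     clf.save_example(ex["example_index"], ["spam"])
#     clf.update_model()
#     ex = clf.query_new_example(mode="entropy")  # active-learning query
#     clf.save_data()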
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
# These numbers fall right inside the int64-uint64 range,
# so they should be parsed as string.
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
# No numerical dtype can hold both negative and uint64 values,
# so they should be cast as string.
data = '-1\n' + str(2**63)
expected = DataFrame([str(-1), str(2**63)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
data = str(2**63) + '\n-1'
expected = DataFrame([str(2**63), str(-1)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = next(iter(self.read_csv(StringIO('foo,bar\n'),
chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(result), expected,
check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assert_raises_regex(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check to see that the response of
# parser when faced with no provided columns
# throws the correct error, with or without usecols
errmsg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv(StringIO(''), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
expected = DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# gh-8661, gh-8679: this should ignore six lines including
# lines with trailing whitespace and blank lines
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# gh-8983: test skipping set of rows after a row with trailing spaces
expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
        tm.assert_frame_equal(df, expected)
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 13:37:10 2019
@author:Imarticus Machine Learning Team
"""
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
pd.options.mode.chained_assignment = None # default='warn'
order_products_test_df=pd.read_csv("order_products_test.csv")
order_products_train_df=pd.read_csv("order_products_train.csv")
order_products_train_df=order_products_train_df.loc[order_products_train_df['order_id']<=2110720]
order_products_prior_df = pd.read_csv("order_products_prior.csv")
#! /usr/bin/env python
##! /usr/bin/arch -x86_64 /usr/bin/env python
from logging import error
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
import datetime as dt
from plotly.subplots import make_subplots
from dash.dependencies import Input, Output, State
from pprint import pprint
import waitress
import json
import re
import argparse
import os
import zlib
import math
import textwrap
from ordered_set import OrderedSet
import natsort
from zipfile import ZipFile
from bs4 import BeautifulSoup # you also need to install "lxml" for the XML parser
from tabulate import tabulate
from collections import OrderedDict
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
print("############################################")
print("############################################")
print("############################################")
print("############################################")
debug=False
def DebugMsg(msg1,msg2=None,printmsg=True):
if debug and printmsg:
print(dt.datetime.now().strftime("%c"),end=" " )
print(msg1,end=" " )
if msg2 is not None:
print(msg2)
print("")
def DebugMsg2(msg1,msg2=None,printmsg=True):
DebugMsg(msg1,msg2,printmsg)
def DebugMsg3(msg1,msg2=None,printmsg=True):
DebugMsg(msg1,msg2,printmsg)
def Info(msg1,msg2=None,printmsg=True):
DebugMsg(msg1,msg2,printmsg)
def get_xlsx_sheet_names(xlsx_file,return_As_dropdown_options=False):
with ZipFile(xlsx_file) as zipped_file:
summary = zipped_file.open(r'xl/workbook.xml').read()
soup = BeautifulSoup(summary, "html.parser")
sheets = [sheet.get("name") for sheet in soup.find_all("sheet")]
if return_As_dropdown_options:
doptions=[]
for sheet in sheets:
doptions.append({"label": sheet, "value": sheet})
return doptions
else:
return sheets
# assume you have a "long-form" data frame
# see https://plotly.com/python/px-arguments/ for more options
class Dashboard:
def __init__(self, datafile,isxlsx=False,sheetname=None,skiprows=0,replace_with_nan=None, DashboardMode=False):
self.RecentFilesListPath="./recent"
self.DashboardMode=DashboardMode
self.ComparisonFunctionalityPlaceholder()
df_index=self.default_df_index
# self.setDataFile(datafile,isxlsx,sheetname,skiprows,replace_with_nan,df_index)
self.createDashboard(df_index,self.DashboardMode)
self.app = dash.Dash(external_scripts=["./dashboard.css"])
# self.app = dash.Dash()
self.app.layout = html.Div(self.layout())
def reset_df_index(self,idx):
self.df[idx]=None
self.filtered_df[idx]=None
self.plot_df[idx]=None
self.DataFile[idx]=None
def ComparisonFunctionalityPlaceholder(self):
self.df_indexes=["1","2"]
self.current_df_index="1"
self.default_df_index="1"
self.df=dict()
self.filtered_df=dict()
self.plot_df=dict()
self.DataFile=dict()
for idx in self.df_indexes:
self.df[idx]=None
self.filtered_df[idx]=None
self.plot_df[idx]=None
self.DataFile[idx]=None
def createDashboard(self, df_index, DashboardMode=False):
self.init_constants()
# self.df=None
self.reset=False
self.newXAxisColName = "#"
self.DatatoDownload = None
self.ControlMode=not DashboardMode
self.GlobalParams={}
self.GlobalParams['available_legends']=OrderedSet()
self.GlobalParams['SecAxisTitles']=OrderedSet()
self.GlobalParams['PrimaryAxisTitles']=OrderedSet()
self.GlobalParams['LegendTitle']="Legend"
self.GlobalParams['Datatable_columns']=[]
self.GlobalParams['columns_updated']=False
self.GlobalParams['PreAggregatedData']=True
tmp=None
if self.DataFile[df_index] is not None :
tmp=self.loadMetadata(df_index,"LastGraph")
if tmp is not None:
self.GraphParams = tmp
self.update_aggregate()
else:
self.initialize_GraphParams()
self.update_aggregate()
self.groups = [[json.dumps(self.GraphParams)]]
self.DF_read_copy = dict()
self.readFileInitDash(df_index)
self.updateGraphList(df_index)
self.filtered_df[df_index] = self.df[df_index].copy()
self.plot_df[df_index]=self.filtered_df[df_index]
self.table_df=self.filtered_df[df_index]
self.initialize_figs()
#self.update_graph()
def setDataFile(self,datafile,isxlsx,sheetname,skiprows,replace_with_nan,df_index):
if datafile is not None:
datafile1=os.path.abspath(datafile)
self.DataFile[df_index] = {'Path': datafile1,
'isXlsx':isxlsx,
'Sheet': sheetname,
'SkipRows': skiprows,
'ReplaceWithNan' : replace_with_nan,
'LastModified' : 0 ,
'MetadataFile' : datafile + ".dashjsondata" ,
}
self.update_df(self.DataFile[df_index],df_index)
self.updateRecentFiles(df_index)
else:
self.DataFile[df_index]=None
self.reset_df_index(df_index)
self.updateRecentFiles(df_index)
def initialize_GraphParams(self):
self.GraphParams["GraphId"] = ""
self.GraphParams["Name"] = ""
self.GraphParams["Xaxis"] = []
self.GraphParams["GraphType"] = "Scatter"
self.GraphParams["Primary_Yaxis"] = []
self.GraphParams["Primary_Legends"] = []
self.GraphParams["Aggregate_Func"] = []
self.GraphParams["Secondary_Legends"] = []
self.GraphParams["Aggregate"] = []
self.GraphParams["Scatter_Labels"] = []
self.GraphParams["SortBy"] = []
self.GraphParams["Filters"] = ""
self.GraphParams["FilterAgregatedData"] = ""
self.GraphParams["SortAgregatedData"] = ""
self.GraphParams["PreviousOperations"] = []
self.GraphParams["ShowPreAggregatedData"] = []
def loadMetadata(self,df_index,header=None):
jsondata=None
if self.DataFile[df_index] is not None and os.path.exists(self.DataFile[df_index]['MetadataFile']):
with open(self.DataFile[df_index]['MetadataFile']) as json_file:
jsondata=json.load(json_file)
if jsondata is not None and header is not None:
if header in jsondata:
jsondata=jsondata[header]
else:
jsondata=None
return jsondata
def updateMetadata(self,header,data,df_index):
jsondata=self.loadMetadata(df_index)
if jsondata is None:
jsondata=dict()
jsondata[header]=data
with open(self.DataFile[df_index]['MetadataFile'], "w") as outfile:
json.dump(jsondata,outfile)
def updateGraphList(self,df_index):
if self.DataFile[df_index] is not None:
self.SavedGraphList= self.getGraphList(df_index,'SavedGraphs')
self.HistoricalGraphList= self.getGraphList(df_index,'HistoricalGraphs')
else:
self.SavedGraphList= dict()
self.HistoricalGraphList= dict()
def getGraphList(self,df_index,type):
# type can be SavedGraphs/HistoricalGraphs
x=self.loadMetadata(df_index,type)
if x is None:
return dict()
else:
return x
def set_Graphid(self):
x=self.GraphParams.copy()
x['GraphId']=""
x['Name']=""
self.GraphParams['GraphId']=zlib.adler32(bytes(json.dumps(x),'UTF-8'))
return id
def update_dtypes(self,df1):
for col in self.dtypes:
if col in df1.columns:
if self.dtypes[col] == 'datetime':
df1[col]=pd.to_datetime(df1[col])
else:
df1[col]=df1[col].astype(self.dtypes[col])
return df1
def get_dypes(self,cols):
if cols is None:
dtypes=self.df[self.default_df_index].dtypes.to_frame('dtypes')['dtypes'].astype(str).to_dict()
else:
dtypes=self.df[self.default_df_index][cols].dtypes.to_frame('dtypes')['dtypes'].astype(str).to_dict()
return dtypes
def update_dtype(self,cols,dtype,custom_datetime_fmt):
update_done=False
for col in cols:
for idx in self.df_indexes:
if self.df[idx] is not None:
if dtype == 'datetime_custom_format':
self.df[idx][col]=pd.to_datetime(self.df[idx][col],format=custom_datetime_fmt,errors='coerce')
else:
self.df[idx][col]=self.df[idx][col].astype(self.AvailableDataTypes[dtype])
update_done=True
if update_done:
dtypes=self.df[self.default_df_index].dtypes.to_frame('dtypes').reset_index().set_index('index')['dtypes'].astype(str).to_dict()
self.updateMetadata("ColumnsDataTypes",dtypes,self.default_df_index)
def init_constants(self):
self.dtypes= {
'MasterJobId' : str ,
'jobid' : str ,
'jobindex' : float ,
'token' : str ,
'cluster' : str ,
'mem_bucketed' : float ,
'step' : str ,
'submit_time' : 'datetime' ,
'mem_before_bucket' : str ,
'lineno' : float ,
'mem_selection_reason' : str ,
'status' : str ,
'completion_time' : 'datetime' ,
'EosFlowVersion' : str ,
'PegasusVersion' : str ,
'Sandboxpath' : str ,
'RepeatabilityMode' : bool ,
'MasterStartTime' : 'datetime' ,
'LastRecordedTime' : 'datetime' ,
'status_bjobs' : str ,
'start_time' : 'datetime',
'CR_ProjectID' : str ,
'CR_TaskID' : str ,
'CR_JobId' : str ,
'CPU_Architecture' : str ,
'Grafana_Tag' : str ,
'Project_Task_Tag' : str ,
'CRRunningStartTime' : 'datetime',
'new_status' : str ,
'PATH' : str ,
'CORNERVT' : str ,
'PACKAGE' : str ,
'INSTANCE' : str ,
'ARC' : str ,
'VT' : str ,
'CORNER' : str ,
'EXTRACTION' : str ,
'SIM_CFG' : str ,
'TOKEN_esti' : str ,
'MEM_REQ_SIZE_esti' : float,
'MAX_MEM_esti' : float,
'PATH_esti' : str ,
'delta_SIM_vs_esti' : float,
'%age_SIM_vs_ESTI' : float,
'eosFlow' : str ,
'userid' : str ,
'AetherShutdown' : bool,
'DatabaseLocked' : bool,
'MasterStatus' : str ,
'MEM_REQ_TYPE' : str ,
'MEM_REQ_SIZE' : float,
'CPU_CNT' : float,
'CPU_TIME' : float,
'MEM_USAGE' : float,
'HOST_ID' : str ,
'SUBMIT_TIME' : 'datetime',
'START_TIME' : 'datetime',
'END_TIME' : 'datetime',
'RESUBMIT_COUNT' : float ,
'MAX_CPU' : float ,
'MAX_MEM' : float,
'EXIT_INFO' : float,
'STATUS' : str ,
'RunTime' : float,
'TurnAroundTime' : float,
'RunTimeBin(Hrs)' : str
}
self.GraphParams = dict()
self.operators = [
["ge ", ">="],
["le ", "<="],
["lt ", "<"],
["gt ", ">"],
["ne ", "!="],
["eq ", "="],
["contains "],
["not_contains "],
["isin "],
["notin "],
["datestartswith "],
]
self.GraphTypeMap = {
"Bar": go.Bar,
"BarH": go.Bar,
"BarStacked": go.Bar,
"BarStackedH": go.Bar,
"Line": go.Scattergl,
"Area": go.Scatter,
"Scatter": go.Scattergl,
"Pie": go.Pie,
"Histogram": go.Histogram,
}
self.GraphModeMap = {
"Bar": "",
"BarH": "",
"BarStacked": "",
"BarStackedH": "",
"Pie": "",
"Histogram": "",
"Line": "lines",
"Area": "lines",
"Scatter": "markers",
}
self.aggregateFuncs = [
'mean',
'sum',
'count' ,
'std' ,
'var',
'sem',
'first',
'last',
'min',
'max'
]
self.NumericaggregateFuncs = [
'mean',
'sum',
'std' ,
'var',
'sem',
]
self.GraphParamsOrder2 = [
"Xaxis",
"GraphType",
"Primary_Yaxis",
"Primary_Legends",
"Aggregate_Func"
]
self.AvailableDataTypes= {
'string':str,
'int' : int,
'float': float,
'datetime' : 'datetime64[ns]',
'datetime_custom_format' : 'datetime64[ns]',
'boolean': bool
}
self.separatorMap={
"<tab>": "\t",
"<space>" : " ",
",<comma>": ",",
";<semi-colon>": ";",
":<colon>": ":",
"#<hash>": "#",
}
self.GraphParamsOrder = self.GraphParamsOrder2 + [ "Secondary_Legends"]
def read_file_in_df(self, FileInfo):
dtypes=self.loadMetadata(self.default_df_index,'ColumnsDataTypes')
mtime = os.path.getmtime(FileInfo['Path'])
if mtime > FileInfo['LastModified']:
Info("Reading file " + str(FileInfo['Path']) + " skiprows=" + str(FileInfo['SkipRows']) )
FileInfo['LastModified'] = mtime
if FileInfo['isXlsx']:
if FileInfo['Sheet']==None:
raise ValueError("SheetName is not defined")
df=pd.read_excel(FileInfo['Path'],sheet_name=FileInfo['Sheet'],skiprows=FileInfo['SkipRows'],dtype=dtypes)
df.columns = df.columns.astype(str)
DebugMsg3("DF head=", df.head())
else:
DebugMsg3("Reading File123")
sep= FileInfo['Sheet']
if FileInfo['Sheet']==None:
raise ValueError("Separator is not defined")
df=pd.read_csv(FileInfo['Path'], sep=self.separatorMap[sep],skiprows=FileInfo['SkipRows'],dtype=dtypes)
df.columns = df.columns.astype(str)
replace_dict=dict()
if FileInfo['ReplaceWithNan'] is not None:
for nan_value in FileInfo['ReplaceWithNan'].split(","):
replace_dict[nan_value]=np.nan
df = df.replace(replace_dict)
df = df.convert_dtypes(convert_integer=False,convert_floating=False,convert_string=False)
df = df.replace({pd.NA: np.nan})
self.DF_read_copy[FileInfo['Path']] = self.update_dtypes(df)
else:
Info("File not changed")
return self.DF_read_copy[FileInfo['Path']].copy()
def getDataFileName(self,datafile):
name= (datafile['Path'] + "#"
+ str(datafile['isXlsx']) + "#"
+ str(datafile['Sheet']) + "#"
+ str(datafile['SkipRows']) + "#"
+ str(datafile['ReplaceWithNan']) + "#"
)
return name
def update_df(self,Datafile,df_index):
self.df[df_index] = self.read_file_in_df(Datafile)
self.filtered_df[df_index] = self.df[df_index].copy()
self.plot_df[df_index]=self.filtered_df[df_index]
self.table_df=self.filtered_df[df_index]
def loadLastLoadedFiles(self):
filelist=dict()
if os.path.exists(self.RecentFilesListPath):
with open(self.RecentFilesListPath) as json_file:
filelist=json.load(json_file)
if "LastLoadedFile" in filelist:
for df_index in filelist["LastLoadedFile"]:
name=filelist["LastLoadedFile"][df_index]
self.DataFile[df_index]=filelist["recent"][name]
self.update_df(self.DataFile[df_index],df_index)
def updateRecentFiles(self,df_index):
filelist=dict()
if os.path.exists(self.RecentFilesListPath):
with open(self.RecentFilesListPath) as json_file:
filelist=json.load(json_file)
if "recent" not in filelist:
filelist["recent"]=dict()
if "LastLoadedFile" not in filelist:
filelist["LastLoadedFile"]=dict()
if self.DataFile[df_index] is not None:
name= self.getDataFileName(self.DataFile[df_index])
filelist["LastLoadedFile"][df_index]=name
filelist["recent"][name]=self.DataFile[df_index].copy()
filelist["recent"][name]['LastModified'] = 0
else:
del(filelist["LastLoadedFile"][df_index])
with open(self.RecentFilesListPath, "w") as outfile:
json.dump(filelist,outfile)
def readFileInitDash(self,df_index):
if self.df[df_index] is None:
if self.DataFile[df_index] is not None:
self.df[df_index] = self.read_file_in_df(self.DataFile[df_index])
else:
self.df[df_index]=pd.DataFrame()
self.figs = dict()
def get_groupid(self, group):
return "TopLevelID"
# return "-".join(group)
def hasDuplicates(self,df):
s=set()
i=0
for x in df.index:
i+=1
s.add(str(list(df.loc[x])))
if len(s) < i:
return True
return False
def extract_data(self, df , keep_cols=[]):
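        # NOTE: extract_data() prepares the dataframe that is actually plotted. It falls back to a
        # synthetic '#index' X-axis when no X-axis column is selected, then either aggregates the
        # required columns with the chosen Aggregate_Func grouped by Xaxis + Primary_Legends, or
        # uses the raw rows (rejecting duplicates for non-scatter charts). When several X-axis
        # columns are selected, a combined string column named "#col1-col2-..." is built to plot on.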
if len(self.GraphParams["Xaxis"]) ==0 or ( '#index' in self.GraphParams["Xaxis"]):
df['#index']=df.index.copy()
self.GraphParams["Xaxis"]=['#index']
DebugMsg("Test1",self.GraphParams['Xaxis'])
DebugMsg("Test1",self.GraphParams['Primary_Legends'])
filters_tmp_p = list(OrderedDict.fromkeys(self.GraphParams["Xaxis"] + self.GraphParams["Primary_Legends"]))
filters_tmp_p2=list(OrderedDict.fromkeys(filters_tmp_p + keep_cols))
DebugMsg("Test1 df columns",df.columns)
DebugMsg("Test1 filters_tmp_p2",filters_tmp_p2)
DebugMsg("Test1 filters_tmp_p",filters_tmp_p)
DebugMsg("Test1 keep_cols",keep_cols)
DebugMsg("Test1 Primary_Yaxis",self.GraphParams["Primary_Yaxis"])
DebugMsg("Test1 Scatter_Labels",self.GraphParams["Scatter_Labels"])
DebugMsg("Test1 Aggrega",self.GraphParams["Aggregate_Func"])
df1 = None
if len(self.GraphParams["Primary_Yaxis"]) > 0:
df_p = None
reqd_cols= list(OrderedDict.fromkeys(filters_tmp_p2 + self.GraphParams["Primary_Yaxis"] + self.GraphParams["Scatter_Labels"])) ## make list unique preserving order
if self.aggregate:
# for col in self.GraphParams["Primary_Legends"]:
# df[col] = df[col].astype(str).replace("nan", "#blank")
for col in (keep_cols + self.GraphParams["Scatter_Labels"] + self.GraphParams["Primary_Yaxis"]):
if col not in filters_tmp_p:
if self.GraphParams['Aggregate_Func'] in self.NumericaggregateFuncs:
df[col]=pd.to_numeric(df[col],errors='coerce')
df_p = (
df[ reqd_cols].groupby(filters_tmp_p)
.agg(self.GraphParams['Aggregate_Func'])
)
df_p=df_p.reset_index()
df_p=df_p[reqd_cols]
else:
if self.GraphParams['GraphType'] != 'Scatter' and self.hasDuplicates(df[filters_tmp_p]):
raise ValueError("Data contains duplicate values, Please use Aggregated Functions or plot a scatter chart")
df_p = df[reqd_cols]
#pass
df1 = df_p
DebugMsg("Test1 Aggrega",self.GraphParams["Aggregate_Func"])
# fig = make_subplots()
if df1 is not None:
if len(self.GraphParams["Xaxis"]) > 1:
self.newXAxisColName = "#" + "-".join(self.GraphParams["Xaxis"])
df1[self.newXAxisColName] = ""
df1 = df1.sort_values(by=self.GraphParams["Xaxis"])
for col in self.GraphParams["Xaxis"]:
df1[self.newXAxisColName] = (
df1[self.newXAxisColName] + df1[col].astype(str) + ","
)
elif len(self.GraphParams["Xaxis"])==1:
self.newXAxisColName = self.GraphParams["Xaxis"][0]
else :
self.newXAxisColName = "#index"
df1[self.newXAxisColName]=df1.index.copy()
return df1
def split_filter_part(self,filter_part):
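        # NOTE: parses one clause of a Dash DataTable filter_query, e.g. '{MAX_MEM} ge 100' or
        # '{status} contains "DONE"', into a (column_name, operator, value) triple.
        # 'isin'/'notin' values are split on commas, 'contains'/'not_contains' keep the raw
        # string, and other values are cast to float when possible.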
for operator_type in self.operators:
for operator in operator_type:
if operator in filter_part:
ret_operator=operator_type[0].strip()
name_part, value_part = filter_part.split(operator, 1)
name = name_part[name_part.find("{") + 1 : name_part.rfind("}")]
value_part = value_part.strip()
v0 = value_part[0]
str_value=False
if v0 == value_part[-1] and v0 in ("'", '"', "`"):
value = value_part[1:-1].replace("\\" + v0, v0)
str_value=True
if ret_operator == 'contains' or ret_operator == 'not_contains':
value = str(value_part)
elif ret_operator == 'isin' or ret_operator == 'notin':
value = value_part.split(",")
elif not str_value:
try:
value = float(value_part)
except ValueError:
value = value_part
# word operators need spaces after them in the filter string,
# but we don't want these later
return name, ret_operator, value
return [None] * 3
def create_eval_func(self,df,filter_expr):
retval=filter_expr
DebugMsg("Filter Expr init: " , retval)
matches= re.findall("(\{)(\S*?)(}\s+contains\s+)(\"!\s+)(\S*)(\")",retval)
for groups in matches:
if is_string_dtype(df[groups[1]]):
retval=retval.replace("".join(groups),"~df['" + groups[1] + "'].str.contains(\"" + groups[4] + "\")")
            elif is_numeric_dtype(df[groups[1]]):
from pymongo import MongoClient
import pandas as pd
from collections import Counter
# NLP libraries
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
import string
import csv
import json
# from datetime import datetime
import datetime
from collections import deque
import pymongo
"""TIME SERIES DESCRIPTIVE ANALYSIS SECTION"""
"""TIME SERIES DESCRIPTIVE ANALYSIS - RANSOMWARE HASHTAGS"""
# Function for Data Analysis and CSV file creation
def findHashtagsTimeSeriesRansomware():
print("Finding tweets with #ransomware hashtag from Database.")
print('Querying database and retrieving the data.')
# Mongo Shell query
# db.twitterQuery2.find({'entities.hashtags.text': {$regex:"ransomware",$options:"$i"}}, {'created_at': 1, '_id':0})
# creating query + projection for MongoDB
query = {'entities.hashtags.text': {'$regex':'ransomware','$options': 'i'}}
projection = {'created_at': 1, '_id': 0}
# running query
try:
cursor = twitterOutput2.find(query, projection)
# cursor = cursor.limit(2)
except Exception as e:
print("Unexpected error:", type(e), e)
# Listing dates coming from tweets for storing later the corresponding query in a CSV file
datesQuery = []
for doc in cursor:
# print(doc['created_at'])
datesQuery.append(doc['created_at'])
"""
TIME SERIES ANALYSIS PANDAS SECTION
"""
print('Starting data analysis with Pandas.')
print('Creating Time Series:')
# a list of "1" to count the hashtags
ones = [1] * len(datesQuery)
# the index of the series
idx = pd.DatetimeIndex(datesQuery)
# print('idx:')
# print(idx)
# the actual series (at series of 1s for the moment)
timeSeries01 = pd.Series(ones, index=idx)
print(timeSeries01.head())
print("Counting tweets per day - executing descriptive analysis - Re-sampling / Bucketing..")
# Resampling / bucketing
per_day = timeSeries01.resample('1D').sum().fillna(0)
print('Time Series created:')
print(per_day.head())
print('Creating data frame..')
s = pd.DataFrame(per_day)
print('Data frame:')
print(s.head())
print('Writing CSV file for time series analysis of tweets with Ransomware hashtags')
s.to_csv('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesRansomware.csv')
print('Writing Ransomware Time Series Descriptive Analysis CSV file completed!')
# function for converting CSV to JSON
def csvToJsonRansomware():
print('Starting CSV to JSON conversion.')
print('Data file processing..')
jsonTimeSeries = []
with open('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesRansomware.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
next(readCSV)
for row in readCSV:
row[0] = row[0] + ' 14:00:00.000'
datetimeObject = datetime.datetime.strptime(row[0], '%Y-%m-%d %H:%M:%S.%f')
millisec = datetimeObject.timestamp() * 1000
row[0] = millisec
row[1] = int(float(row[1]))
# print(row)
jsonTimeSeries.append(row)
# removing the head (first object) with not useful data - Data cleaning
del jsonTimeSeries[0]
# print('New file --> Time Series:')
# print(jsonTimeSeries)
print('Writing JSON file..')
with open('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesRansomware.json', 'w') as file:
json.dump(jsonTimeSeries, file, indent=4)
print('Writing Time Series Ransomware JSON file completed!')
print()
print('Next:')
"""TIME SERIES DESCRIPTIVE ANALYSIS - MALWARE HASHTAGS"""
# Function for Data Analysis and CSV file creation
def findHashtagsTimeSeriesMalware():
print("Finding tweets with #malware hashtag from Database.")
print('Querying database and retrieving the data.')
# Mongo Shell query
# db.twitterQuery2.find({'entities.hashtags.text': {$regex:"malware",$options:"$i"}}, {'created_at': 1, '_id':0})
# creating query + projection for MongoDB
query = {'entities.hashtags.text': {'$regex': 'malware', '$options': 'i'}}
projection = {'created_at': 1, '_id': 0}
# running query
try:
cursor = twitterOutput2.find(query, projection)
# cursor = cursor.limit(2)
except Exception as e:
print("Unexpected error:", type(e), e)
# Listing dates coming from tweets for storing later the corresponding query in a CSV file
datesQuery = []
for doc in cursor:
# print(doc['created_at'])
datesQuery.append(doc['created_at'])
"""
TIME SERIES ANALYSIS PANDAS SECTION
"""
print('Starting data analysis with Pandas.')
print('Creating Time Series:')
# a list of "1" to count the hashtags
ones = [1] * len(datesQuery)
# the index of the series
idx = pd.DatetimeIndex(datesQuery)
# print('idx:')
# print(idx)
# the actual series (at series of 1s for the moment)
    timeSeries01 = pd.Series(ones, index=idx)
#!/usr/bin/python
# <EMAIL>
#====================SET============================#
C_END = "\033[0m"
C_BOLD = "\033[1m"
C_RED = "\033[31m"
#==================================================#
import inspect
import sys
import os
import glob
import pandas as pd
import numpy as np
def DebugPrinter(arg):
callerframerecord = inspect.stack()[1]
frame = callerframerecord[0]
info=inspect.getframeinfo(frame)
# __FILE__ -> info.filename
# __FUCNTION__ -> info.function
# __LINE__ -> info.lineno
print(C_BOLD+C_RED+'{}'.format(arg)+C_END)
print(C_BOLD+C_RED+'{} {} {}'.format(info.filename, info.function, info.lineno)+C_END)
class makeCsvUsingParsedFile:
def __init__(self,path,log_list,streamline,tag):
self.path=path
self.tag=tag
self.log_list=log_list
self.app_data=[] # clmatmulHybridclGPU_16_1024_1024.log
self.gpuUtil=None # gpuUtil.log
self.cpuUtil=None # cpuUtilization.log
self.ffmpeg=None # ffmpeg.log
self.ffmpeg_fps=None # ffmpeg fps data
        self.framework=None # framework_result.log : profiling info that decomposer() receives from the Framework
        self.kernel=None # matmul_kernel_result.log : app profiling info such as start/end times around NDRangeKernel() in the OpenCL app
self.streamline=None # streamline_[App Title].log
self.streamline_mode=streamline
self.toDataFrame(log_list)
self.exp_start=0.0
self.exp_end=0.0
self.getExpTime()
self.save_csv={}
# /* for debug */
#print(self.framework)
#print(self.kernel)
#print(self.streamline)
#print(self.app_data)
if(len(self.app_data)!=0):
print(self.app_data)
self.app_data=self.sort_merge(self.app_data,'start')
self.stat_merge() # Modify Log files
self.get_statistics() # Get Statistics
def toDataFrame(self,log_list):
for log in log_list:
fname=os.path.basename(log)
if(fname.find('gpuUtil.log')!=-1):
self.gpuUtil=pd.read_csv(log)
elif(fname.find('ffmpeg.log')!=-1):
self.ffmpeg=pd.read_csv(log)
elif(fname.find('cpuUtil')!=-1):
self.cpuUtil=pd.read_csv(log)
elif(fname.find('ffmpeg-')!=-1):
self.ffmpeg_fps=pd.read_csv(log)
            elif(fname.find('framework_result')!=-1): # framework_result.log : profiling log info the decomposer received from the Framework
self.framework=pd.read_csv(log)
            elif(fname.find('kernel_result')!=-1): # matmul_kernel_result.log : log info from profiling the kernel
self.kernel=pd.read_csv(log)
elif(fname.find('streamline')!=-1): # Processed log file by ARM-STREAMLINE
self.streamline=pd.read_csv(log)
else:
# Other Apps : clmatmulHybridclGPU_16_16_1024_1024_1024_256_256.log
#print('\n'+C_BOLD+C_RED+fname+C_END)
self.app_data.append(pd.read_csv(log))
def getExpTime(self):
gpu_start=0.0
gpu_end=0.0
cpu_start=0.0
cpu_end=0.0
if(self.gpuUtil is not None):
self.gpuUtil=self.gpuUtil.set_index('timestamp')
self.gpuUtil=self.gpuUtil.sort_index()
gpu_start=self.gpuUtil.index[0]
gpu_end=self.gpuUtil.index[-1]
if(self.cpuUtil is not None):
self.cpuUtil=self.cpuUtil.set_index('timestamp')
self.cpuUtil=self.cpuUtil.sort_index()
cpu_start=self.cpuUtil.index[0]
cpu_end=self.cpuUtil.index[-1]
if(gpu_start<=cpu_start):
self.exp_start=cpu_start
else:
self.exp_start=gpu_start
if(cpu_end<=gpu_end):
self.exp_end=cpu_end
else:
self.exp_end=gpu_end
if(self.ffmpeg_fps is not None):
self.ffmpeg_fps=self.ffmpeg_fps.set_index('timestamp')
self.ffmpeg_fps=self.ffmpeg_fps.sort_index()
def sort_merge(self,data,standard):
for i,temp in enumerate(data):
if(i==0):
continue
data[0]=pd.concat([data[0],data[i]],join='outer',sort=True)
return data[0].sort_values(by=standard)
def stat_merge(self):
        #========= Process each log file =========================#
        # 1. Process the app data, e.g. clmatmulHybridclGPU_16_16_1024_1024_1024_256_256.log
if(len(self.app_data)!=0):
self.app_data['idx']=np.arange(len(self.app_data))
self.app_data=self.app_data.set_index('idx')
cmd=self.app_data.loc[0,'cmd']
if(cmd.lower().find('convolution')!=-1):
splited_cmd=cmd.split(' ') # convolution_simple_LG 7
if(len(splited_cmd)!=-1):
self.app_data['filter']=splited_cmd[1]
        # 2. Process kernel_result, e.g. matmul_kernel_result.log
if(self.kernel is not None):
            # compute time #
self.kernel['time']=self.kernel['end']-self.kernel['start']
            # compute KPS #
kps_list=[]
for i in range(0,len(self.kernel)):
try:
kps_list.append(1/(self.kernel.iloc[i]['end']-self.kernel.iloc[i]['start']))
except ZeroDivisionError:
kps_list.append(0)
self.kernel['kps']=kps_list
self.kernel['idx']=np.arange(len(self.kernel))
self.kernel=self.kernel.set_index('idx')
# 3. streamline_XX.log ex)streamline_matmulGEMM1_1024_100.log
if(self.streamline is not None):
self.streamline['idx']=np.arange(len(self.streamline))
self.streamline=self.streamline.set_index('idx')
            for i in range(0,len(self.streamline)): # Convert time format from %M:%S.%ms to plain seconds
if(str(self.streamline.loc[i,'Time']).find(':')!=-1):
now_time=str(self.streamline.loc[i,'Time']).split(':')
result_time=0.0
depth=len(now_time)-1
for j in range(0,len(now_time)-1):
result_time+=float(now_time[j])*60.0*depth
depth-=1
result_time+=float(now_time[-1])
result_time=round(result_time,4)
self.streamline.loc[i,'Time']=result_time
def get_statistics(self):
        #============================= Merge all information into statistics =====================================#
        # Edit here the values that are processed and written into the csv #
        # 1. get AVG KPS from self.kernel --> into a single statistics csv
if(len(self.app_data)!=0):
for i in range(0,len(self.app_data)):
avg_kps=self.kernel[(self.kernel['start']>=self.app_data.iloc[i]['start'])&(self.kernel['end']<=self.app_data.iloc[i]['end'])]['kps'].mean()
self.app_data.loc[i,'avg_kps']=avg_kps
if(str(self.app_data.loc[i,'avg_kps'])=='nan'):
self.app_data.loc[i,'avg_kps']=0
        # 2. get AVG time from self.kernel --> into a single statistics csv
if(self.kernel is not None):
kps_sum=self.kernel['kps'].sum()
self.save_csv['Avg.kps']=round(kps_sum/len(self.kernel),4)
self.save_csv['Avg.time[sec]']=round(self.kernel['time'].mean(),4)
        # 3. get the subframework size from self.framework --> statistics csv
if(self.framework is not None):
self.save_csv['work_dim']=self.framework.loc[0,'work_dim']
self.save_csv['local_x']=self.framework.loc[0,'local_x']
self.save_csv['local_y']=self.framework.loc[0,'local_y']
self.save_csv['local_z']=self.framework.loc[0,'local_z']
self.save_csv['sub_x']=self.framework.loc[0,'sub_x']
self.save_csv['sub_y']=self.framework.loc[0,'sub_y']
self.save_csv['sub_z']=self.framework.loc[0,'sub_z']
        # 4. get Avg_Gpu_Util from self.gpuUtil --> statistics csv
if(self.gpuUtil is not None):
            # check gpu util only while the kernel is running.
kernel_start=self.kernel.iloc[0]['start']
kernel_end=self.kernel.iloc[-1]['end']
gpu_util=self.gpuUtil[(self.gpuUtil.index>=kernel_start)&(self.gpuUtil.index<=kernel_end)]['util'].mean()
if(str(gpu_util)=='nan'):
self.save_csv['Avg_Gpu_Util']=0.0
else:
self.save_csv['Avg_Gpu_Util']=round(gpu_util,2)
gpu_util_max=self.gpuUtil['util'].max()
self.save_csv['Peak_Gpu_Util']=gpu_util_max
        # 5. process the desired values from self.streamline -> statistics csv
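        # NOTE: in 'ge' mode the read/write miss ratios are external bus transactions divided by
        # L2 cache lookups (x100) and the hit ratio is 100 - miss; in 'ce' mode the hit ratios
        # come directly from L2 hits / L2 lookups. Samples with fewer than 100 lookups are
        # recorded as NaN so that they do not skew the averages.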
if(self.streamline is not None):
Read_Miss_Ratio=[]
Read_Hit_Ratio=[]
Write_Miss_Ratio=[]
Write_Hit_Ratio=[]
for i in range(0,len(self.streamline)):
if(self.streamline_mode=='ge'):
if(int(self.streamline.loc[i,'Mali L2 Cache Lookups:Read lookup'])<100):
Read_Miss_Ratio.append(np.nan)
Read_Hit_Ratio.append(np.nan)
else:
try:
now_Read_Miss_Ratio=int(self.streamline.loc[i,'Mali External Bus Accesses:Read transaction'])/int(self.streamline.loc[i,'Mali L2 Cache Lookups:Read lookup'])
except ZeroDivisionError:
now_Read_Miss_Ratio=0.0
Read_Miss_Ratio.append(round(now_Read_Miss_Ratio*100.0,2))
Read_Hit_Ratio.append(round(100-Read_Miss_Ratio[-1],2))
if(int(self.streamline.loc[i,'Mali L2 Cache Lookups:Write lookup'])<100):
Write_Miss_Ratio.append(np.nan)
Write_Hit_Ratio.append(np.nan)
else:
try:
now_Write_Miss_Ratio=int(self.streamline.loc[i,'Mali External Bus Accesses:Write transaction'])/int(self.streamline.loc[i,'Mali L2 Cache Lookups:Write lookup'])
except ZeroDivisionError:
now_Write_Miss_Ratio=0.0
Write_Miss_Ratio.append(round(now_Write_Miss_Ratio*100.0,2))
Write_Hit_Ratio.append(round(100-Write_Miss_Ratio[-1],2))
elif(self.streamline_mode=='ce'):
if(int(self.streamline.loc[i,'Mali L2 Cache Reads:L2 read lookups'])<100):
Read_Miss_Ratio.append(np.nan)
Read_Hit_Ratio.append(np.nan)
else:
now_Read_Hit_Ratio=int(self.streamline.loc[i,'Mali L2 Cache Reads:L2 read hits'])/int(self.streamline.loc[i,'Mali L2 Cache Reads:L2 read lookups'])
Read_Hit_Ratio.append(round(now_Read_Hit_Ratio*100.0,2))
Read_Miss_Ratio.append(round(100-Read_Hit_Ratio[-1],2))
if(int(self.streamline.loc[i,'Mali L2 Cache Writes:L2 write lookups'])<100):
Write_Miss_Ratio.append(np.nan)
Write_Hit_Ratio.append(np.nan)
else:
now_Write_Hit_Ratio=int(self.streamline.loc[i,'Mali L2 Cache Writes:L2 write hits'])/int(self.streamline.loc[i,'Mali L2 Cache Writes:L2 write lookups'])
Write_Hit_Ratio.append(round(now_Write_Hit_Ratio*100.0,2))
Write_Miss_Ratio.append(round(100-Write_Hit_Ratio[-1],2))
self.streamline['ReadMissRatio(%)']=Read_Miss_Ratio
self.streamline['WriteMissRatio(%)']=Write_Miss_Ratio
self.streamline['ReadHitRatio(%)']=Read_Hit_Ratio
self.streamline['WriteHitRatio(%)']=Write_Hit_Ratio
AvgReadHitRatio=0.0
AvgReadHitRatioCnt=0
AvgWriteHitRatio=0.0
AvgWriteHitRatioCnt=0
for i in range(0,len(self.streamline)):
if(str(self.streamline.loc[i,'ReadHitRatio(%)'])!='nan'):
AvgReadHitRatio+=self.streamline.loc[i,'ReadHitRatio(%)']
AvgReadHitRatioCnt+=1
if(str(self.streamline.loc[i,'WriteHitRatio(%)'])!='nan'):
AvgWriteHitRatio+=self.streamline.loc[i,'WriteHitRatio(%)']
AvgWriteHitRatioCnt+=1
try:
self.save_csv['Avg.ReadHitRatio(%)']=round(AvgReadHitRatio/AvgReadHitRatioCnt,2)
except ZeroDivisionError:
self.save_csv['Avg.ReadHitRatio(%)']=0.0
try:
self.save_csv['Avg.WriteHitRatio(%)']=round(AvgWriteHitRatio/AvgWriteHitRatioCnt,2)
except ZeroDivisionError:
self.save_csv['Avg.WriteHitRatio(%)']=0.0
self.save_csv['Avg.GPU_UTIL']=self.streamline[self.streamline['GPU Vertex-Tiling-Compute:Activity']>1.0]['GPU Vertex-Tiling-Compute:Activity'].mean()
#=================================#
        #=========Common Columns============#
        self.save_csv['App']=self.tag # add here any other common column info that should be stored
#==================================#
idx=self.path.rfind('logs')
csv_path=self.path[0:idx]+'statistics'
if(os.path.exists(csv_path)==False):
os.system('mkdir -p '+csv_path)
self.save_csv=pd.DataFrame(self.save_csv,index=[0])
print(self.save_csv)
self.save_csv.to_csv(csv_path+'/'+os.path.basename(self.path)+'.csv',sep=',',na_rep=np.nan)
self.save_csv.to_csv(self.path+'/report.csv',sep=',',na_rep=np.nan)
def makeCSV(instance,tag):
makeCsvUsingParsedFile(instance.processPath,instance.doneList,instance.streamline,tag)
def combineCsv(csvObject):
if(os.path.exists(csvObject.path)==False):
DebugPrinter('[ERROR] path {} isn\'t exists')
exit(1)
if(os.path.exists(csvObject.resultCsv)):
os.system('rm '+csvObject.resultCsv)
csvList=glob.glob(csvObject.path+'/*.csv')
if(len(csvList)==0):
DebugPrinter('[Error] There aren\'t any csv files')
exit(1)
for i,nowCsv in enumerate(csvList):
if(i==0):
os.system('cp {} {}'.format(nowCsv,csvObject.resultCsv))
continue
makeResult(nowCsv,csvObject.resultCsv)
if(i==len(csvList)-1):
print(C_BOLD+C_RED+'[INFO] Making result.pkl Done!'+C_END)
def makeResult(now_csv,result):
now_csv=pd.read_csv(now_csv)
try:
now_csv=now_csv.drop(['Unnamed: 0'],axis='columns')
except:
pass
try:
now_csv=now_csv.drop(['idx'],axis='columns')
except:
pass
    result_csv= pd.read_csv(result)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 04:11:27 2017
@author: konodera
nohup python -u 501_concat.py &
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
import multiprocessing as mp
import gc
import utils
utils.start(__file__)
#==============================================================================
# def
#==============================================================================
def user_feature(df, name):
if 'train' in name:
name_ = 'trainT-0'
elif name == 'test':
name_ = 'test'
df = pd.merge(df, pd.read_pickle('../feature/{}/f101_order.p'.format(name_)),# same
on='order_id', how='left')
# timezone
df = pd.merge(df, pd.read_pickle('../input/mk/timezone.p'),
on='order_hour_of_day', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f102_user.p'.format(name)),
on='user_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f103_user.p'.format(name)),
on='user_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f104_user.p'.format(name)),
on='user_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f105_order.p'.format(name_)),# same
on='order_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f110_order.p'.format(name_)),# same
on='order_id', how='left')
gc.collect()
return df
def item_feature(df, name):
# aisle = pd.read_pickle('../input/mk/goods.p')[['product_id', 'aisle_id']]
# aisle = pd.get_dummies(aisle.rename(columns={'aisle_id':'item_aisle'}), columns=['item_aisle'])
# df = pd.merge(df, aisle, on='product_id', how='left')
organic = pd.read_pickle('../input/mk/products_feature.p')
    df = pd.merge(df, organic, on='product_id', how='left')
#!/usr/bin/env python
# coding: utf-8
# # OpenMC Program for BurnUp analysis and Benchmarking
#
# In[1]:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 7 11:40:23 2020
@author: feryantama
"""
# ## 1. Initialization
#
# Initialize package we used in this program
# In[2]:
import numpy as np
import pandas as pd
from random import seed, random
import math
import os
import openmc.deplete
import openmc
import argparse
'''
from scipy.interpolate import interp2d
import matplotlib.gridspec as gridspec
from matplotlib.colorbar import Colorbar
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from IPython.display import Image
import matplotlib.pyplot as plt
from vtk.util import numpy_support
import vtk
import matplotlib.animation as animation
'''
# This block will create a new directory for each burn step and determine which DEM timestep will be used for the simulation. All inputs are taken through argparse so the program can be run from a shell script.
# In[3]:
header=os.getcwd()
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
def boolean_string(s):
if s not in {'False', 'True'}:
raise ValueError('Not a valid boolean string')
return s == 'True'
parser=argparse.ArgumentParser(description=__doc__,
formatter_class=CustomFormatter)
##REQUIRED
parser.add_argument('stopstep', type=int,
help='steps of burn = ')
parser.add_argument('deltas', type=int,
help='how many steps of DEM for every days = ',default=200000)
parser.add_argument('initial', type=int,
help='initial steps of DEM')
parser.add_argument('batch', type=int,
help='Simulation batch including the skipped batch')
parser.add_argument('particle', type=int,
help='Particle simulated per batch')
parser.add_argument('cross_section',type=str,
help='Cross Section Data cross_sections.xml location')
parser.add_argument('--timeline', nargs="+", type=float,required=True,
help='list of {t(step-1),t(step),t(step+1)} separated by space')
##MODE
parser.add_argument('--control', type=bool,
help="Is it using control rods ?",default=False)
parser.add_argument('--burnup',type=bool,
help="run burn up mode ?",default=False)
parser.add_argument('--tally',type=bool,
help="run tally mode ?",default=False)
parser.add_argument('--post',default='Default',const='Default',
nargs='?',choices=['Post_Only','Run_Only','Default'],
help="post processing or other choices: Post_Only, Run_Only, Default")
parser.add_argument('--criticality', type=bool,
help="run criticality only", default=False)
##AUXILIARY (POWER & CONTROL PERC.)
parser.add_argument('--skipped', type=int,
help='Skipped Cycles',default=10)
parser.add_argument('--Burn_power', type=float,
help='Power in burn code in MW',default=10)
parser.add_argument('--controlstep', type=float,
help="control rods position step 0-9 ",default=0)
#args = parser.parse_args()
args=parser.parse_args(args=(['17','10000','8600000','210','5000',
'/media/feryantama/Heeiya/openmc/TEST/data/lib80x_hdf5/cross_sections.xml',
'--timeline']+
[str(x) for x in range(0,35,7)]+
[str(x) for x in range(38,78,10)]+
[str(x) for x in range(98,378,30)]+
['--burnup','True',
'--tally','True',
'--post','Post_Only']))
stp=args.stopstep
deltas=args.deltas
initial=args.initial
if args.burnup==True and args.tally==True:
if not os.path.isdir('step_'+str(stp)+'-DEPLETE'):
os.makedirs('step_'+str(stp)+'-DEPLETE',mode=0o777)
os.chdir(header+'/step_'+str(stp)+'-DEPLETE')
if args.tally==True and args.control==False and args.criticality==False and args.burnup==False:
if not os.path.isdir('step_'+str(stp)+'-TALLY'):
os.makedirs('step_'+str(stp)+'-TALLY',mode=0o777)
os.chdir(header+'/step_'+str(stp)+'-TALLY')
if args.tally==True and args.control==True:
if not os.path.isdir('step_'+str(int(args.controlstep))+'-CONTROL'):
os.makedirs('step_'+str(int(args.controlstep))+'-CONTROL',mode=0o777)
os.chdir(header+'/step_'+str(int(args.controlstep))+'-CONTROL')
if args.tally==True and args.criticality==True:
#if not os.path.isdir(header+'step_'+str(initial+(deltas*stp))+'-CRITICALITY'):
# os.makedirs('step_'+str(initial+(deltas*stp))+'-CRITICALITY',mode=0o777)
os.chdir(header+'/step_'+str(initial+(deltas*stp))+'-CRITICALITY')
#os.environ['OPENMC_CROSS_SECTIONS']='/home/feryantama/Desktop/HTR2020/data/lib80x_hdf5/cross_sections.xml'
os.environ['OPENMC_CROSS_SECTIONS']=args.cross_section
chain = openmc.deplete.Chain.from_xml(header+"/chain_casl_pwr.xml")
if args.post=='Post_Only' or args.post=='Default':
from scipy.interpolate import interp2d
import matplotlib.gridspec as gridspec
from matplotlib.colorbar import Colorbar
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from IPython.display import Image
import matplotlib.pyplot as plt
from vtk.util import numpy_support
import vtk
import matplotlib.animation as animation
# ## 2. Function or Definition
# ### - Polar Angle Calculation
# This function is used to determine $\theta$ (polar angle) of a cartesian coordinate relative to the centerline (0,0).
# In[4]:
def polar_angle(x,y):
theta=math.atan(y/x)
if x>=0 and y>=0:
theta=theta
if x<0 and y>=0:
theta=theta+math.pi
if x<0 and y<0:
theta=theta+math.pi
if x>=0 and y<0:
theta=theta+math.pi*2
return theta
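# Sanity check (illustrative, not used elsewhere in this script): with the quadrant
# corrections above,
#   polar_angle( 1,  1) ~ pi/4,   polar_angle(-1,  1) ~ 3*pi/4,
#   polar_angle(-1, -1) ~ 5*pi/4, polar_angle( 1, -1) ~ 7*pi/4,
# so the returned angle always lies in [0, 2*pi). A coordinate with x == 0 would raise a
# ZeroDivisionError; the pebble centers processed below are assumed never to sit exactly on x = 0.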
# ### - Make Pebble Dataframe from .vtk
# This function reads a .vtk dump from LIGGGHTS or LAMMPS stored in the DEM_post directory under the working directory. Since the .vtk dump file uses cartesian coordinates for the pebble centers, this function also converts the coordinates into cylindrical coordinates ($\theta, r, z$) utilizing (**def polar_angle**) mentioned above. The pebble center coordinates are stored as a dataframe.
# In[5]:
def make_pebbledf(filepath):
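    # NOTE: the legacy ASCII .vtk dump is scanned line by line for the 'POINTS', 'id' and
    # 'type' section headers; the numbers in each section are collected, the coordinates are
    # scaled by 100 (presumably m -> cm), and rho/theta are derived with polar_angle().
    # The result is returned as a DataFrame indexed by particle id.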
point_position=1e+20
endpoint_position=2e+20
id_position=1e+20
endid_position=2e+20
type_position=1e+20
endtype_position=2e+20
pointList=list()
idList=list()
typeList=list()
with open(filepath) as fp:
cnt=1
for line in fp:
for part in line.split():
if "POINTS" in part:
point_data=line.replace('POINTS ','').replace(' float\n','')
point_data=int(point_data)
point_position=cnt
endpoint_position=math.ceil(point_position+point_data/3)
elif "id" in part:
id_data=line.replace('id 1 ','').replace(' int\n','')
id_data=int(id_data)
id_position=cnt
endid_position=math.ceil(id_position+id_data/9)
elif "type" in part:
type_data=line.replace('type 1 ','').replace(' int\n','')
type_data=int(type_data)
type_position=cnt
endtype_position=math.ceil(type_position+type_data/9)
if (cnt>point_position and cnt<endpoint_position+1):
pointList.extend(line.replace(' \n','').split(' '))
elif (cnt>id_position and cnt<endid_position+1):
idList.extend(line.replace(' \n','').split(' '))
elif (cnt>type_position and cnt<endtype_position+1):
typeList.extend(line.replace(' \n','').split(' '))
cnt+=1
pointarr=np.zeros([point_data,3])
idarr=np.zeros([point_data])
typearr=np.zeros([point_data])
rho=np.zeros([point_data])
theta=np.zeros([point_data])
cnt=0
for i in range (0,point_data):
pointarr[i,0]=float(pointList[cnt*3+0])*100
pointarr[i,1]=float(pointList[cnt*3+1])*100
pointarr[i,2]=float(pointList[cnt*3+2])*100
rho[i]=math.sqrt((pointarr[i,0]**2)+(pointarr[i,1]**2))
theta[i]=(polar_angle(pointarr[i,0],pointarr[i,1]))
typearr[i]=int(typeList[i])
idarr[i]=int(idList[i])
cnt+=1
datasets=np.column_stack((pointarr,typearr,rho,theta))
Pebbledf=pd.DataFrame(data=datasets,index=idarr,columns=['x','y','z','type','rho','theta'])
return(Pebbledf)
# ### - Grouping the Pebble According to its center cylindrical coordinate
# To simplify the material tracking algorithm (*explained later*), the pebbles should be spatially grouped. This definition allows the user to group the pebbles based on their axial ($z$) and radial ($r$) position. There are **ax_size** axial segments and **rd_size** radial segments, which make up **ax_size X rd_size** groups. As the number of pebbles increases after timestep 0, each newly added pebble is grouped only by its radial position into one of **rd_size** groups.
#
# overall there are:
#
# **ax_size $\times$ rd_size + (stp $\times$ rd_size)** *(group of pebble)*
#
# where **stp** is burn up step.
# Every pebble which has been labeled or grouped in a past burn up step will retain its group even if its position has changed. Therefore, this function records the group label for each pebble number in an ASCII file.
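# For example (illustrative numbers only, not taken from this study): with **ax_size** = 5, **rd_size** = 4 and **stp** = 2, the bookkeeping above gives 5 $\times$ 4 + 2 $\times$ 4 = 28 pebble groups.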
# In[15]:
def segmentation(Pebbledf,ax_size,rd_size,header,stp):
print("========Start Segmentation========")
if stp==0:
count_list=np.zeros(len(Pebbledf))
highest=0; lowest=0
for i in range (0,len(Pebbledf)):
if Pebbledf.iloc[i][3]==1 and Pebbledf.iloc[i][2]>=highest:
highest=Pebbledf.iloc[i][2]
dlt=(highest-lowest)/5
axiallist=np.linspace(lowest+dlt,highest-dlt,ax_size-1)
axiallist=np.insert(axiallist,0,-600,axis=0)
axiallist=np.insert(axiallist,len(axiallist),600,axis=0)
radiallist=np.linspace(25,90,rd_size)
radiallist=np.insert(radiallist,0,0,axis=0)
count=1
for i in range (0,len(axiallist)-1):
for j in range (0,len(radiallist)-1):
for k in range (0,len(Pebbledf)):
if (Pebbledf.iloc[k][3]!=2 and
Pebbledf.iloc[k][2]>axiallist[i] and
Pebbledf.iloc[k][2]<=axiallist[i+1] and
Pebbledf.iloc[k][4]>radiallist[j] and
Pebbledf.iloc[k][4]<=radiallist[j+1]):
count_list[k]=count
count+=1
labdf=pd.DataFrame(data=count_list,columns=['label'],index=Pebbledf.index)
labdf.to_csv('Prop.Label',index=True,header=True, sep='\t')
print("========Finish Segmentation=======")
Pebbledf=pd.concat([Pebbledf,labdf],axis=1)
if stp!=0:
labdf=pd.read_csv(header+'/step_'+str(stp-1)+'-DEPLETE'+'/Prop.Label',sep='\t',index_col='Unnamed: 0' )
print("=========Skip Segmentation========")
Pebbledf=pd.concat([Pebbledf,labdf],axis=1)
Pebbledf=Pebbledf.replace(np.nan, 'c', regex=True)
count_list=list(Pebbledf.iloc[:,6])
for i in range (0,len(Pebbledf)):
if Pebbledf.iloc[i][6]=='c':
if Pebbledf.iloc[i][3]==2:
count_list[i]=0
if Pebbledf.iloc[i][3]==1:
radiallist=np.linspace(25,90,rd_size)
radiallist=np.insert(radiallist,0,0,axis=0)
for j in range(0,len(radiallist)-1):
if (Pebbledf.iloc[i][4]>radiallist[j] and
Pebbledf.iloc[i][4]<=radiallist[j+1]):
count_list[i]=max(labdf['label'])+j+1
        labdf_new=pd.DataFrame(data=count_list,columns=['label'],index=Pebbledf.index)
#!/usr/bin/env python
# coding: utf-8
import click
import datetime
import locale
import numpy as np
import os
from pathlib import Path
import pandas as pd
import plotly.graph_objects as go
CSV_NAME_MAP = {
"pl_PL": {
"p2.5": "dolne 2.5% modelowań",
"p25": "dolne 25% modelowań",
"p75": "górne 25% modelowań",
"p97.5": "górne 2.5% modelowań",
"mean": "średnia z modelowań"
},
"en_GB": {
"p2.5": "2.5% percentile",
"p25": "25% percentile",
"p75": "75% percentile",
"p97.5": "97.5% percentile",
"mean": "point prediction"
},
"de_DE": {
"p2.5": "Untere 2,5% der Modellierung",
"p25": "Die unteren 25% der Modellierung",
"p75": "Top 25% der Modellierung",
"p97.5": "Top 2,5% der Modellierung",
"mean": "Durchschnitt der Modellierung"
}
}
MOVING_AVG_STR = {
"pl_PL": "7-dn. średnia zdiagnozowanych zakażeń",
"en_GB": "7 day moving average of detected cases",
"de_DE": "7 Tage gleitender Durchschnitt der erkannten Fälle"
}
MOVING_AVG_STR2 = {
"pl_PL": "7-dn. średnia<br>zdiagnozowanych zakażeń",
"en_GB": "7 day moving average<br>of detected cases",
"de_DE": "7 Tage gleitender Durchschnitt<br>der erkannten Fälle"
}
MOVING_AVG_D_STR = {
"pl_PL": "7-dn. średnia przypadków śmiertelnych",
"en_GB": "7 day moving average of death cases",
"de_DE": "7 Tage gleitender Durchschnitt der Todesfälle"
}
MOVING_AVG_D_STR2 = {
"pl_PL": "7-dn. średnia<br>przypadków śmiertelnych",
"en_GB": "7 day moving average<br>of death cases",
"de_DE": "7 Tage gleitender Durchschnitt<br>der Todesfälle"
}
NEW_CASES_STR = {
"pl_PL": "dzienne wykryte przypadki zachorowań",
"en_GB": "daily detected cases",
"de_DE": "täglich erkannte Fälle"
}
NEW_DEATHS_STR = {
"pl_PL": "dzienne przypadki śmiertelne w związku z COVID",
"en_GB": "daily deaths related to COVID",
"de_DE": "tägliche Todesfälle im Zusammenhang mit COVID"
}
COLUMNS = ["p2.5", "p25", "mean", "p75", "p97.5"]
LINE_COLORS = {
"p2.5": 'rgb(166,97,26)',
"p25": 'rgb(223,194,125)',
"p75": 'rgb(128,205,193)',
"p97.5": 'rgb(1,133,113)',
"mean": 'blue',
}
FILL_COLORS = {
"p2.5": None,
"p25": 'rgba(166,97,26,0.2)',
"mean": 'rgba(223,194,125,0.2)',
"p75": 'rgba(128,205,193,0.2)',
"p97.5": 'rgba(1,133,113,0.2)',
}
XAXIS_STR = {
"pl_PL": "Data",
"en_GB": "date",
"de_DE": "Datum"
}
PROGNOSIS_DIRS = {
"pl_PL": "_prognosis_pl",
"en_GB": "_prognosis_en",
"de_DE": "_prognosis_de"
}
PROGNOSIS_TEMPLATES_DIR = '_prognosis_templates'
YAXIS_STR = MOVING_AVG_STR
YAXIS_D_STR = MOVING_AVG_D_STR
LAYOUT_TEMPLATE = {
"xaxis": {
"title": 'Data',
},
"yaxis": {
"title": '7-dn. średnia nowych zakażeń',
"hoverformat": ".0f"
},
"legend": {
"orientation": "h",
"xanchor": "center",
"y": -.6,
"x": 0.5
},
"hovermode": "x",
"hoverlabel_namelength": -1,
}
def prepare_layout(language):
layout = LAYOUT_TEMPLATE
layout['xaxis']['title'] = XAXIS_STR[language]
layout['yaxis']['title'] = YAXIS_STR[language]
return layout
def prepare_layout_d(language):
layout = LAYOUT_TEMPLATE
layout['xaxis']['title'] = XAXIS_STR[language]
layout['yaxis']['title'] = YAXIS_D_STR[language]
return layout
def prepare_title(language):
title = MOVING_AVG_STR2[language]
return title
def prepare_title_d(language):
title = MOVING_AVG_D_STR2[language]
return title
def handle_dates(x, format='%d/%m/%y'):
newdate = pd.to_datetime(x, format=format)
return newdate
# return [d.strftime('%d %B, %Y') for d in newdate]
def apply_str_on_dates(newdate, format='%d %b %Y'):
return newdate.strftime(format=format)
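# Illustrative sketch (not part of the original script): how a single percentile trace
# could be assembled from the lookup tables above. The function name, its signature and
# the CSV column layout are assumptions; the script's own figure assembly lives in the
# cases() function further below.
def _example_percentile_trace(df, column, language):
    # `column` is one of COLUMNS, e.g. "p25"; `language` is a locale key such as "en_GB"
    return go.Scatter(
        x=df['dates'],
        y=df[column],
        name=CSV_NAME_MAP[language][column],
        line={'color': LINE_COLORS[column]},
        fill='tonexty' if FILL_COLORS[column] else None,
        fillcolor=FILL_COLORS[column],
    )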
@click.group()
def cli1():
pass
@click.group()
def cli2():
pass
@click.group()
def cli3():
pass
@click.group()
def cli4():
pass
@cli1.command('main')
@click.argument("input_csv", type=str, default='scenario.csv')
@click.argument("cloned_repo_path", required=True, type=str, default='/mnt/e/Projects/MOCOS/mocos-covid19.github.io')
def main_function(input_csv, cloned_repo_path):
fun(input_csv, cloned_repo_path)
def cases(input_csv, language):
traces = []
df=pd.read_csv(input_csv)
df = df.iloc[:30] # show only next thirty days even if you have more
print(df.iloc[:7]['dates'])
df['dates']=df['dates'].apply(handle_dates)
df['dates1']=df['dates'].apply(apply_str_on_dates)
dates_with_14_days_before = sorted(list(set(df['dates'].apply(lambda x: x - pd.Timedelta('14days')).apply(apply_str_on_dates).to_numpy()).union(set(df['dates1'].to_numpy()))))
# print(df['dates1'])
    df2 = pd.read_csv('https://raw.githubusercontent.com/KITmetricslab/covid19-forecast-hub-de/master/data-truth/MZ/truth_MZ-Incident%20Cases_Poland.csv')
"""
NAME : Molecular Arrangement and Fringe Identification Analysis from Molecular Dynamics (MAFIA-MD)
AUTHORS : <NAME>, Dr. <NAME> and Dr. <NAME>
MAFIA-MD is a post-processing utility to capture ring structures from molecular trajectory files (.xyz) generated by
reactive molecular dynamics simulation of Hydrocarbons.
"""
import copy
import time
import tkinter as tk
from tkinter import *
from tkinter import filedialog, Tk
from tkinter import messagebox
from tkinter import ttk
import matplotlib
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem.Draw import DrawingOptions, MolDrawing
# External tool to calculate chemical bond information
from external_tool import xyz2mol
matplotlib.use('TkAgg')
matplotlib.interactive(True)
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.ticker import PercentFormatter
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
import os
import networkx as nx
import numpy as np
import pandas as pd
from scipy.spatial import distance_matrix
from scipy import stats
from numpy import linalg as LA
from collections import OrderedDict
# These global variables are required for Fringe Spacing Functions
global pointlist # list of aromatic carbon indices
global surface_normal # list of surface_normal values of individual rings
global test_param
text_param = 0
pointlist = []
surface_normal = []
def export_fringeSpacingHist(points, file) :
a = points
hist, bins = np.histogram(a, bins=[3, 3.25, 3.5, 3.75, 4.0, 4.25, 4.5, 4.75, 5.0, 5.25, 5.5, 5.75, 6.0])
print("\nFringe Spacing Histogram \n")
print("Bins:\t", bins)
print("Hist:\t", hist)
print("\nFringe Spacing Histogram \n")
plt.rcParams.update({'font.size' : 15})
# Creating dataset
b = np.unique(np.concatenate(surface_normal, axis=0), axis=0)
# Creating plot
# fig = plt.figure(figsize=(10, 5))
try :
kde = stats.gaussian_kde(a, weights=np.ones(len(a)) / len(a))
xx = np.linspace(3, 6, 1000)
fig, ax = plt.subplots(figsize=(10, 5))
density, bins, _ = ax.hist(a, weights=np.ones(len(a)) / len(a),
bins=[3, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, 5, 5.25, 5.5, 5.75, 6],
width=0.15, align="mid")
count, _ = np.histogram(a, bins)
for x, y, num in zip(bins, density, count) :
if num != 0 :
plt.text(x + 0.05, y + 0.001, num, fontsize=10, rotation=0) # x,y,str
plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
ax2 = ax.twinx()
ax2.plot(xx, kde(xx), '--', color='red')
fig.patch.set_facecolor('white')
axisbg = '#ababab'
ax.set_title(file.split('.')[0])
ax.set_xlabel('Fringe Spacing (Å)')
ax.set_ylabel('Percentage of Fringes')
ax2.set_ylabel('Probability Density Function (PDF)\n of the Distribution', color='red')
xtick = (np.arange(3, 6.25, step=0.25))
plt.xticks(fontsize=14)
ax.set_xticks(xtick)
# saving the plot to the output folder
fig.savefig(e7.get() + '/' + e11.get() + '-Fringe_Spacing_Hist-' + file.split('.')[0] + '.png', format='png',
transparent=True)
fig.show()
# plt.close(fig) #recommended to uncomment this for running batch processing containing logs of trajectory files
except ValueError : # required because KDE estimation do not work when there is fringe only in one bin
fig, ax = plt.subplots(figsize=(10, 5))
density, bins, _ = ax.hist(a, weights=np.ones(len(a)) / len(a),
bins=[3, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, 5, 5.25, 5.5, 5.75, 6],
width=0.15, align="mid")
count, _ = np.histogram(a, bins)
for x, y, num in zip(bins, density, count) :
if num != 0 :
plt.text(x + 0.05, y + 0.001, num, fontsize=10, rotation=0) # x,y,str
plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
fig.patch.set_facecolor('white')
axisbg = '#ababab'
ax.set_title(file.split('.')[0])
ax.set_xlabel('Fringe Spacing (Å)')
ax.set_ylabel('Percentage of Fringes')
xtick = (np.arange(3, 6.25, step=0.25))
plt.xticks(fontsize=14)
ax.set_xticks(xtick)
# saving the plot to the output folder
fig.savefig(e7.get() + '/' + e11.get() + '-Fringe_Spacing_Hist-' + file.split('.')[0] + '.png', format='png',
transparent=True)
fig.show()
# plt.close(fig)
def direction_vector(a, b) :  # vector perpendicular to a and b, scaled by the sine of the angle between them
    cross_product = np.array([a[1] * b[2] - a[2] * b[1], a[2] * b[0] - a[0] * b[2], a[0] * b[1] - a[1] * b[0]])
    unit_vector = cross_product / (LA.norm(a) * LA.norm(b))
    return (unit_vector)
def cross_product(a, b) :
cross_product = a[1] * b[2] - a[2] * b[1], a[2] * b[0] - a[0] * b[2], a[0] * b[1] - a[1] * b[0]
return cross_product
def dot_product(a, b) :
    dot_product = a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
return dot_product
def vector_angle(a, b) : # angle between vector a and b
# May cause some warning sometimes: RuntimeWarning: invalid value encountered in arccos
# This is because of the internal algorithm used for calculating the norm carries some round off error
# For details: https://github.com/davidsandberg/facenet/issues/692
# Therefore, the rounded off values are clipped at the limit [-1,1]
return np.absolute(
180 * (1 / np.pi) * np.arccos(
np.clip((a[0] * b[0] + a[1] * b[1] + a[2] * b[2]) / (LA.norm(a) * LA.norm(b)), -1, 1)))
def angle(a, b) : # angle between vector a and b
a = np.absolute(
180 * (1 / np.pi) * np.arccos(
np.clip(((a[0] * b[0] + a[1] * b[1] + a[2] * b[2]) / (LA.norm(a) * LA.norm(b))), -1, 1)))
if a >= 90 :
return 180 - a
else :
return a
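# Quick illustrative check of the helpers above (not part of the analysis pipeline):
# vector_angle(np.array([0, 0, 1]), np.array([0, 0, 1])) is ~0 degrees, while
# angle(np.array([0, 0, 1]), np.array([0, 0, -1])) also returns ~0 because angle()
# folds results above 90 degrees back into the 0-90 range.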
def mean_surface_vector(n_vertices, coord) :
mini_surface_vector = []
center = np.mean(coord, axis=0)
mini_surface_vector.append(direction_vector(center - coord[n_vertices - 1], center - coord[0]))
for i in range(n_vertices - 1) :
mini_surface_vector.append(direction_vector(center - coord[i], center - coord[i + 1]))
mean_surface_vector = np.mean(mini_surface_vector, axis=0)
return mean_surface_vector
def fringe_spacing_data(planar_ring, distance) :
points = []
surface_vector = []
for rings in planar_ring :
space = []
for index in rings : # get the distance of aromatic carbons from origin
radius = distance[index]
space.append(radius)
# coord = np.asarray(space)
# points.append(np.mean(coord, axis=0))
# surface_vector.append(direction_vector(coord[2] - coord[0], coord[4] - coord[1]))
#
# pointlist.append(points)
# surface_normal.append(surface_vector)
n_vertices = np.asarray(space).shape[0]
coord = np.asarray(space)
points.append(np.mean(coord, axis=0))
surface_vector.append(mean_surface_vector(n_vertices, coord))
pointlist.append(points)
surface_normal.append(surface_vector)
def fringe_spacing(pointlist, surface_normal, file) :
points = []
surface_vector = []
fringeSpacing = []
if len(pointlist) > 0 :
points = np.unique(np.concatenate(pointlist, axis=0), axis=0)
surface_vector = np.unique(np.concatenate(surface_normal, axis=0), axis=0)
else :
print("No Aromatic Surface, hence no fringe data \n")
return False
# calculation of fringe spacing: angle between planes is less than 10º and 3<=distance<=6
for i in range(len(points)) :
for j in range(i + 1, len(points) - 1) :
if 3 <= np.absolute(LA.norm(points[j + 1] - points[i])) <= 6 :
if (vector_angle(surface_vector[j + 1], surface_vector[i]) <= 10) or (
170 <= vector_angle(surface_vector[j + 1], surface_vector[i])) :
print(np.absolute(LA.norm(points[j + 1] - points[i])), '\t',
vector_angle(surface_vector[j + 1], surface_vector[i]))
fringeSpacing.append(np.absolute(LA.norm(points[j + 1] - points[i])))
if len(fringeSpacing) > 0 :
export_fringeSpacingHist(fringeSpacing, file)
else :
print("No Fringe will be Formed \n")
return False
# The ring detection is processed by class FindRing
class FindRing :
def __init__(self, bond_distance_upper, bond_distance_lower, cluster_size, span, axis, file_name, fileOut) :
self.sanity = None
self.count = None
self.planeAllowance = None
self.file = None
self.bond_distance_upper = bond_distance_upper
self.bond_distance_lower = bond_distance_lower
self.cluster_size = cluster_size
self.span = span
self.axis = axis
self.file_name = file_name
self.fileOut = fileOut
self.sep = None
global test_param # To reject sanity check result from being considered in Fringe Spacing Calculation
test_param = None # To reject sanity check result from being considered in Fringe Spacing Calculation
# read data from the data (.xyz) file
# the code only considers one single timeStep at a single run
# the format of the input file should be as follows:
# The first row of the data file should be the number of atom
# The second row of the data file indicates the timeStep
# The code assumes that the co-ordinates start from the third row.
# Returns the input carbon coordinates in the form of panda dataframe
def get_data(self) :
with open(self.file_name) as f :
totalAtom = f.readline()
df = pd.read_csv(
filepath_or_buffer=self.file_name,
header=None,
sep=self.sep,
skiprows=2,
engine='python'
)
# getting rid of Hydrogen atom coordinates
# if the atom type is denoted by numbers, then atom type 1 is assumed to be Carbon, and 2 is considered to be Hydrogen
# Otherwise, C is Carbon, H is Hydrogen
if df.shape[1] == 4 :
df = df[~df[0].astype(str).str.startswith('2')]
df = df[~df[0].astype(str).str.startswith('h')]
df = df[~df[0].astype(str).str.startswith('H')]
df = df[df.columns[-3 :]]
TotalTOCarbon = (int(totalAtom) / df.shape[0])
CH_Ratio = (1 / (TotalTOCarbon - 1))
# print('C/H Ratio=' + str(1 / (TotalTOCarbon - 1)))
return df, CH_Ratio
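    # Illustrative .xyz layout assumed by get_data() above (values are made up):
    #   120                  <- first row: total number of atoms
    #   Atoms. Timestep: 0   <- second row: timestep line
    #   C 0.0 0.0 0.0        <- coordinates start from the third row
    #   H 1.1 0.0 0.0        <- hydrogen rows are dropped before building the dataframe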
# Get the timeStep from the input data file
# Read and Return timestep
def get_timeStep(self, file_name, string_to_search) :
line_number = 0
TimeStep = None
with open(file_name, 'r') as read_obj :
for line in read_obj :
line_number += 1
if string_to_search in line :
TimeStep = line.rstrip('\n')
if line_number == 5 :
break
return TimeStep
# Writes the results into .xyz formatted file
# Returns the aromatic carbon coordinates in panda dataframe
def write_to_xyz(self, data, frames) :
result = pd.concat(frames)
result = result.drop_duplicates()
result = result.reset_index(drop=True)
XYZ = result
add_number = pd.DataFrame({len(result)}, index=[0])
if self.get_timeStep(self.file_name, 'Timestep') == None :
            declare_TimeStep = pd.DataFrame({'Atoms. Timestep: 0'}, index=[0])
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.024011, "end_time": "2021-02-02T22:30:31.951734", "exception": false, "start_time": "2021-02-02T22:30:31.927723", "status": "completed"} tags=[]
# # QA queries on new CDR_deid COPE Survey
#
# Quality checks performed on a new CDR_deid dataset using QA queries
# + papermill={"duration": 0.709639, "end_time": "2021-02-02T22:30:32.661373", "exception": false, "start_time": "2021-02-02T22:30:31.951734", "status": "completed"} tags=[]
import urllib
import pandas as pd
pd.options.display.max_rows = 120
# + papermill={"duration": 0.023643, "end_time": "2021-02-02T22:30:31.880820", "exception": false, "start_time": "2021-02-02T22:30:31.857177", "status": "completed"} tags=["parameters"]
# Parameters
project_id = ""
com_cdr = ""
deid_cdr = ""
deid_sandbox=""
# deid_base_cdr=""
# -
# df will have a summary in the end
df = pd.DataFrame(columns = ['query', 'result'])
# + [markdown] papermill={"duration": 0.02327, "end_time": "2021-02-02T22:30:32.708257", "exception": false, "start_time": "2021-02-02T22:30:32.684987", "status": "completed"} tags=[]
# # 1 Verify that the COPE Survey Data identified to be suppressed as de-identification action in OBSERVATION table have been removed from the de-id dataset.
#
# see spread sheet COPE - All Surveys Privacy Rules for details
#
# https://docs.google.com/spreadsheets/d/1UuUVcRdlp2HkBaVdROFsM4ZX_bfffg6ZoEbqj94MlXU/edit#gid=0
#
# Related tickets [DC-892] [DC-1752]
#
# [DC-1752] Refactor analysis 1 so that it provides the observation_source_concept_id, concept_code, concept_name, vocabulary_id, row count per cope survey concept (example query below). Reword the title text to read: Verify that the COPE Survey concepts identified to be suppressed as de-identification action have been removed.
#
# [DC-1784] 1310144, 1310145, 1310148, 715725, 715724
#
# The following concepts should be suppressed
#
# 715711, 1333327, 1333326, 1333014, 1333118, 1332742,1333324 ,1333012 ,1333234,
#
# 903632,702686,715714, 715724, 715725, 715726, 1310054, 1310058, 1310066, 1310146, 1310147, 1333234, 1310065,
#
# 596884, 596885, 596886, 596887, 596888, 596889, 1310137,1333016,1310148,1310145,1310144
# +
query = f'''
SELECT observation_source_concept_id, concept_name,concept_code,vocabulary_id,observation_concept_id,COUNT(1) AS n_row_not_pass FROM
`{project_id}.{deid_cdr}.observation` ob
JOIN `{project_id}.{deid_cdr}.concept` c
ON ob.observation_source_concept_id=c.concept_id
WHERE observation_source_concept_id IN
(715711, 1333327, 1333326, 1333014, 1333118, 1332742,1333324 ,1333012 ,1333234,
903632,702686,715714, 715724, 715725, 715726, 1310054, 1310058, 1310066, 1310146, 1310147, 1333234, 1310065,
596884, 596885, 596886, 596887, 596888, 596889, 1310137,1333016,1310148,1310145,1310144)
OR observation_concept_id IN
(715711, 1333327, 1333326, 1333014, 1333118, 1332742,1333324 ,1333012 ,1333234,
903632,702686,715714, 715724, 715725, 715726, 1310054, 1310058, 1310066, 1310146, 1310147, 1333234, 1310065,
596884, 596885, 596886, 596887, 596888, 596889, 1310137,1333016,1310148,1310145,1310144)
GROUP BY 1,2,3,4,5
ORDER BY n_row_not_pass DESC
'''
df1=pd.read_gbq(query, dialect='standard')
if df1['n_row_not_pass'].sum()==0:
df = df.append({'query' : 'Query1 No COPE in deid_observation table', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query1 No COPE in deid_observation table' , 'result' : ''},
ignore_index = True)
df1
# + [markdown] papermill={"duration": 0.023633, "end_time": "2021-02-02T22:30:36.860798", "exception": false, "start_time": "2021-02-02T22:30:36.837165", "status": "completed"} tags=[]
# # 2 Verify if a survey version is provided for the COPE survey.
#
# [DC-1040]
#
# expected results: every person_id and questionnaire_response_id has a survey_version_concept_id
# original sql missed something.
#
# these should be generalized 2100000002,2100000003,2100000004
# -
query = f'''
WITH df1 as (
SELECT distinct survey_version_concept_id
FROM `{project_id}.{deid_cdr}.concept` c1
LEFT JOIN `{project_id}.{deid_cdr}.concept_relationship` cr ON cr.concept_id_2 = c1.concept_id
JOIN `{project_id}.{deid_cdr}.observation` ob on ob.observation_concept_id=c1.concept_id
LEFT JOIN `{project_id}.{deid_cdr}.observation_ext` ext USING(observation_id)
WHERE
cr.concept_id_1 IN (1333174,1333343,1333207,1333310,1332811,1332812,1332715,1332813,1333101,1332814,1332815,1332816,1332817,1332818)
AND cr.relationship_id = "PPI parent code of"
)
SELECT COUNT (*) AS n_row_not_pass FROM df1
WHERE survey_version_concept_id=0 or survey_version_concept_id IS NULL
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query2 survey version provided', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query2 survey version provided', 'result' : ''},
ignore_index = True)
df1
# +
# new cdr
query = f'''
SELECT
distinct survey_version_concept_id
FROM `{project_id}.{deid_cdr}.observation` d
JOIN `{project_id}.{deid_cdr}.observation_ext` e
ON e.observation_id = d.observation_id
'''
df1=pd.read_gbq(query, dialect='standard')
df1.style.format("{:.0f}")
# + [markdown] papermill={"duration": 0.023649, "end_time": "2021-02-02T22:30:39.115495", "exception": false, "start_time": "2021-02-02T22:30:39.091846", "status": "completed"} tags=[]
# # 3 Verify that all structured concepts related to COVID are NOT suppressed in EHR tables
#
# DC-891
#
# 756055,4100065,37311061,439676,37311060,45763724
#
# update, Remove analyses 3, 4, and 5 as suppression of COVID concepts is no longer part of RT privacy requirements,[DC-1752]
# +
query = f'''
SELECT measurement_concept_id, concept_name,concept_code,vocabulary_id,COUNT(1) AS n_row_not_pass FROM
`{project_id}.{deid_cdr}.measurement` ob
JOIN `{project_id}.{deid_cdr}.concept` c
ON ob.measurement_concept_id=c.concept_id
WHERE measurement_concept_id=756055
GROUP BY 1,2,3,4
ORDER BY n_row_not_pass DESC
'''
df1=pd.read_gbq(query, dialect='standard')
if df1['n_row_not_pass'].sum()==0:
df = df.append({'query' : 'Query3 No COPE in deid_measurement table', 'result' : ''},
ignore_index = True)
else:
df = df.append({'query' : 'Query3 No COPE in deid_measurement table' , 'result' : 'PASS'},
ignore_index = True)
df1
# + [markdown] papermill={"duration": 0.023649, "end_time": "2021-02-02T22:30:39.115495", "exception": false, "start_time": "2021-02-02T22:30:39.091846", "status": "completed"} tags=[]
# # 4 Verify that all structured concepts related to COVID are NOT suppressed in EHR condition_occurrence
#
# DC-891
#
# 756055,4100065,37311061,439676,37311060,45763724
#
# update, Remove analyses 3, 4, and 5 as suppression of COVID concepts is no longer part of RT privacy requirements,[DC-1752]
# +
query = f'''
SELECT condition_concept_id, concept_name,concept_code,vocabulary_id,COUNT(1) AS n_row_not_pass FROM
`{project_id}.{deid_cdr}.condition_occurrence` ob
JOIN `{project_id}.{deid_cdr}.concept` c
ON ob.condition_concept_id=c.concept_id
WHERE condition_concept_id IN (4100065, 37311061, 439676)
GROUP BY 1,2,3,4
ORDER BY n_row_not_pass DESC
'''
df1=pd.read_gbq(query, dialect='standard')
if df1['n_row_not_pass'].sum()==0:
df = df.append({'query' : 'Query4 COVID concepts suppression in deid_observation table', 'result' : ''},
ignore_index = True)
else:
df = df.append({'query' : 'Query4 COVID concepts suppression in deid_observation table' , 'result' : 'PASS'},
ignore_index = True)
df1
# + [markdown] papermill={"duration": 0.023649, "end_time": "2021-02-02T22:30:39.115495", "exception": false, "start_time": "2021-02-02T22:30:39.091846", "status": "completed"} tags=[]
# # 5 Verify that all structured concepts related to COVID are NOT suppressed in EHR observation
#
# DC-891
#
# 756055,4100065,37311061,439676,37311060,45763724
#
# update, Remove analyses 3, 4, and 5 as suppression of COVID concepts is no longer part of RT privacy requirements,[DC-1752]
# +
query = f'''
SELECT observation_concept_id, concept_name,concept_code,vocabulary_id,observation_source_concept_id,COUNT(1) AS n_row_not_pass FROM
`{project_id}.{deid_cdr}.observation` ob
JOIN `{project_id}.{deid_cdr}.concept` c
ON ob.observation_concept_id=c.concept_id
WHERE observation_concept_id IN (37311060, 45763724) OR observation_source_concept_id IN (37311060, 45763724)
GROUP BY 1,2,3,4,5
ORDER BY n_row_not_pass DESC
'''
df1=pd.read_gbq(query, dialect='standard')
if df1['n_row_not_pass'].sum()==0:
df = df.append({'query' : 'Query5 COVID concepts suppression in observation table', 'result' : ''},
ignore_index = True)
else:
df = df.append({'query' : 'Query5 COVID concepts suppression in observation table' , 'result' : 'PASS'},
ignore_index = True)
df1
# -
# # 6 Verify these concepts are NOT suppressed in EHR observation
#
# [DC-1747]
# these concepts 1333015, 1333023 are no longer suppressed
#
# 1332737, [DC-1665]
#
# 1333291
#
# 1332904,1333140 should be generalized to 1332737
#
# 1332843 should be generalized.
# +
query = f'''
SELECT observation_source_concept_id, concept_name,concept_code,vocabulary_id,observation_concept_id,COUNT(1) AS n_row_not_pass FROM
`{project_id}.{deid_cdr}.observation` ob
JOIN `{project_id}.{deid_cdr}.concept` c
ON ob.observation_source_concept_id=c.concept_id
WHERE observation_source_concept_id IN (1333015, 1333023, 1332737,1333291,1332904,1333140,1332843) OR observation_concept_id IN (1333015, 1333023,1332737,1333291,1332904,1333140,1332843 )
GROUP BY 1,2,3,4,5
ORDER BY n_row_not_pass DESC
'''
df1=pd.read_gbq(query, dialect='standard')
import numpy as np
import pandas as pd
def process(feats, out_overlap, out_weights, out_feats):
#read features
    overlap = pd.DataFrame(np.zeros([0, 6]))
    overlap.columns = ["drug1", "drug2", "mode", "overlap", "no_feats1", "no_feats2"]
    weights = pd.DataFrame(np.zeros([0, 5]))
import sys
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.model_selection import ParameterGrid
class EVI(BaseEstimator):
"""Class for evaluating multi-modal data integration approaches for combining unspliced, spliced, and RNA velocity gene expression modalities
Parameters
----------------------------
adata: AnnData
Annotated data object
x1key: str (default = None)
string referring to the layer of first matrix in the AnnData object. Can be X, Ms, spliced, unspliced, velocity, or None
x2key: str (default = None)
string referring to the layer of second matrix in the AnnData object. Can be X, Ms, spliced, unspliced, velocity, or None
X1: (default = None)
matrix referring to the first data type if x1key unspecified
X2: (default = None)
matrix referring to the second data type if x2key unspecified
logX1: bool (default = None)
boolean referring to whether the first data type should be log transformed. If data type is Ms or velocity, this should be False.
logX2: bool (default = None)
boolean referring to whether the second data type should be log transformed. If data type is Ms or velocity, this should be False.
labels_key: str (default = None)
string referring to the key in adata.obs of ground truth labels
labels: (default = None)
array referring to the labels for every cell
int_method: function (default = None)
function housed in the evi.tl.merge script that specifies the integration method to perform. Can be one of the following (or you may provide your own):
evi.tl.expression
evi.tl.moments
evi.tl.concat_merge
evi.tl.sum_merge
evi.tl.cellrank
evi.tl.snf
evi.tl.precise
evi.tl.precise_consensus
evi.tl.grassmann
evi.tl.integrated_diffusion
int_method_params: dictionary (default = None)
dictionary referring to the integration method hyperparameters. For more information on method-specific hyperparameters, see the evi.tl.merge script for the method of interest. Can be:
evi.tl.expression example: {'k': 10}
evi.tl.moments example: {'k': 10}
evi.tl.concat_merge example: {'k': 10}
evi.tl.sum_merge example: {'k': 10}
evi.tl.cellrank example: {'lam':0.7, 'scheme': 'correlation', 'mode':'deterministic'}
evi.tl.snf example: {'k': 10, 'mu' : 0.5, 'K': 50}
evi.tl.precise example: {'n_pvs': 30}
evi.tl.precise_consensus example: {'n_pvs': 30}
evi.tl.grassmann example: {'k': 10, 't' : 100, 'K': 50, 'lam': 1}
evi.tl.integrated_diffusion example: {'k': 10, 'n_clusters' : 5, 'K': 50}
eval_method: function (default = None)
function housed in the evi.tl.infer script that specifies the evaluation method to perform. Can be one of the following (or you may provide your own):
label propagation classification: evi.tl.lp
support vector machine classification: evi.tl.svm
trajectory inference evaluation: evi.tl.ti
eval_method_params: dictionary (default = None)
dictionary referring to the evaluation method hyperparameters. For more information on evaluation method -specific hyperparameters, see the evi.tl.infer script for the method of interest. Can be:
evi.tl.lp example: {'train_size': 0.5, 'random_state': 0, 'metrics': ['F1', 'balanced_accuracy', 'auc', 'precision', 'accuracy']}
evi.tl.svm example: {'random_state': 0, 'metrics': ['F1', 'balanced_accuracy', 'auc', 'precision', 'accuracy']}
evi.tl.ti example: {'root_cluster': root_cluster, 'n_dcs': 20, 'connectivity_cutoff':0.05, 'root_cell': 646, 'ground_trajectory': ground_trajectory} or
{'root_cluster': [root_cluster], 'n_dcs': [20], 'connectivity_cutoff':[0.05], 'root_cell':[646, 10, 389], 'ground_trajectory': [ground_trajectory]}
n_jobs: int (default = 1)
number of jobs to use in computation
Attributes
----------------------------
model.integrate()
performs integration of gene expression modalities
Returns:
W: sparse graph adjacency matrix of combined data
embed: embedding of combined data
model.evaluate_integrate()
performs integration of gene expression modalities and then evaluates method according to the evaluation criteria or task of interest
Returns:
score_df: dataframe of classification or trajectory inferences scores
Examples
----------------------------
1. Example for SVM classification using one data modality - spliced gene expression:
model = evi.tl.EVI(adata = adata, x1key = 'spliced', logX1 = True,
labels_key = 'condition_broad', int_method = evi.tl.expression,
int_method_params = {'k': 10}, eval_method = evi.tl.svm,
eval_method_params = {'random_state': 0, 'metrics': ['F1', 'balanced_accuracy', 'auc', 'precision', 'accuracy']}, n_jobs = -1)
W, embed = model.integrate()
df = model.evaluate_integrate()
2. Example for label propagation classification following spliced and unspliced integration using PRECISE:
model = evi.tl.EVI(adata = adata, x1key = 'spliced', x2key = 'unspliced', logX1 = True, logX2 = True,
labels_key = 'condition_broad', int_method = evi.tl.precise,
int_method_params = {'n_pvs': 30}, eval_method = evi.tl.lp,
eval_method_params = {'train_size': 0.5, 'random_state': 0, 'metrics': ['F1', 'balanced_accuracy', 'auc', 'precision', 'accuracy']}, n_jobs = -1)
W, embed = model.integrate()
df = model.evaluate_integrate()
3. Example for trajectory inference following integration of moments of spliced and RNA velocity data using SNF:
eval_method_params = {'root_cluster': 'LTHSC_broad', 'n_dcs': 20, 'connectivity_cutoff':0.05, 'root_cell': 646}
ground_trajectory = evi.tl.add_ground_trajectory('gt_nestorowa.h5ad') #add h5ad trajectory inference object
eval_method_params['ground_trajectory'] = ground_trajectory #append trajectory object to evaluation method dictionary
model = evi.tl.EVI(adata = adata, x1key = 'Ms', x2key = 'velocity',
logX1 = False, logX2 = False, labels_key = 'cell_types_broad_cleaned',
int_method = evi.tl.snf, int_method_params = {'k':10, 'mu':0.7, 'K': 50},
eval_method = evi.tl.ti, eval_method_params = eval_method_params, n_jobs = -1)
df = model.evaluate_integrate()
----------
"""
def __init__(
self,
adata=None,
x1key=None,
x2key=None,
X1=None,
X2=None,
logX1=None,
logX2=None,
int_method=None,
int_method_params=None,
eval_method=None,
eval_method_params=None,
labels_key=None,
labels=None,
n_jobs=1,
**int_kwargs
):
self.adata = adata
self.x1key = x1key
self.x2key = x2key
self.X1 = X1
self.X2 = X2
self.logX1 = logX1
self.logX2 = logX2
self.int_method = int_method
self.int_method_params = int_method_params
self.eval_method = eval_method
self.eval_method_params = eval_method_params
self.int_kwargs = int_method_params
self.labels_key = labels_key
self.labels = labels
self.n_jobs = n_jobs
def evaluate_integrate(self):
sys.stdout.write('integration method: {}'.format(self.int_method.__name__)+'\n')
        self.score_df = pd.DataFrame()
""" Profile a single GConv layer """
import os
import sys
import argparse
import copy
import time
import shutil
import json
import logging
logging.getLogger().setLevel(logging.DEBUG)
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from gumi import model_utils
from gumi.ops import *
from gumi.model_runner.parser import create_cli_parser
parser = create_cli_parser(prog="CLI tool for profiling GConv models.")
args = parser.parse_args()
# CUDA
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
cudnn.benchmark = True
class Profiler(object):
def __init__(self, args):
self.args = args
def get_conv_module(
self, in_channels, out_channels, kernel_size, groups=1, mode=None, **kwargs
):
""" Create convolution modules based on different configurations. """
if mode is None or mode == "conv":
return nn.Conv2d(
in_channels,
out_channels,
kernel_size,
groups=groups,
bias=False,
**kwargs
)
elif mode == "gconv":
return GroupConv2d(
in_channels,
out_channels,
kernel_size,
groups=groups,
bias=False,
**kwargs
)
elif mode == "mconv":
return MaskConv2d(
in_channels,
out_channels,
kernel_size,
groups=groups,
bias=False,
**kwargs
)
elif mode == "pwise":
return GroupPointwiseConv(
in_channels,
out_channels,
kernel_size,
groups=groups,
bias=False,
**kwargs
)
else:
raise ValueError("mode={} cannot be recognised".format(mode))
def run_layer(
self,
in_channels,
*args,
batch_size=32,
in_size=224,
use_cuda=True,
iters=10000,
**kwargs
):
""" First create module, then run it for given iterate times """
mod = self.get_conv_module(in_channels, *args, **kwargs)
x = torch.rand((batch_size, in_channels, in_size, in_size))
if use_cuda:
mod.cuda()
x = x.cuda()
logging.info(
"==> Start timing of {in_channels:} x {out_channels:} x {kernel_size:} G={groups:} in mode={mode:} ...".format(
in_channels=in_channels,
out_channels=args[0],
kernel_size=args[1],
groups=kwargs.get("groups"),
mode=kwargs.get("mode"),
)
)
if use_cuda:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
for _ in range(iters):
mod.forward(x)
end.record()
# synchronize
torch.cuda.synchronize()
elapsed = start.elapsed_time(end)
else:
start = time.time()
for _ in range(iters):
mod.forward(x)
end = time.time()
elapsed = (end - start) * 1e3
print(
"Elapsed time: {:10.2f} sec (total) {:6.2f} ms (per run) {:6.2f} FPS.".format(
elapsed * 1e-3, elapsed / iters, iters * batch_size / elapsed * 1e3
)
)
del mod
del x
return elapsed / iters
def run(self):
""" Iterate every layer given. """
# setup
scales = [
(64, 56),
(128, 28),
(256, 14),
(512, 7),
]
groups = (1, 2, 4, 8, 16, 32, 64)
modes = ("conv", "gconv", "mconv", "pwise")
# collect run-time
data = []
columns = ["Channels", "Scale", "G", "Mode", "Time"]
for channels, in_size in scales:
for g in groups:
for mode in modes:
elapsed_ms = self.run_layer(
channels,
channels,
3,
in_size=in_size,
padding=1,
groups=g,
mode=mode,
)
data.append([channels, in_size, g, mode, elapsed_ms])
        return pd.DataFrame(data, columns=columns)
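# Illustrative entry point (an assumption -- the script's original main block, if any,
# is not shown here): build the profiler from the parsed CLI args and dump the timings.
if __name__ == "__main__":
    profiler = Profiler(args)
    results = profiler.run()
    results.to_csv("gconv_profile.csv", index=False)  # output file name is an assumption
    print(results)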
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import lib
from pandas._libs.tslibs import (
NaT,
iNaT,
)
import pandas as pd
from pandas import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import pandas._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert isinstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
        td = Timedelta("-1 days, 10:11:12")
from flask import Flask, render_template, request, session, redirect, url_for
from datetime import datetime, timedelta
import pandas as pd
import sqlite3, hashlib, os, random, os, dotenv
app = Flask(__name__)
app.secret_key = "super secret key"
dotenv.load_dotenv()
MAPBOX_TOKEN = os.getenv('MAPBOX_TOKEN')
conn = sqlite3.connect('data/web.db', check_same_thread=False)
@app.route('/dashboard/')
def dashboard():
w_all = pd.read_sql("select * from w_all", conn)
    w_sales_history = pd.read_sql("select * from w_sales_history", conn)
import numpy as np
import pandas as pd
from settings.config import RECOMMENDATION_LIST_SIZE, KL_LABEL, HE_LABEL, CHI_LABEL, FAIRNESS_METRIC_LABEL, \
VARIANCE_TRADE_OFF_LABEL, \
COUNT_GENRES_TRADE_OFF_LABEL, TRADE_OFF_LABEL, evaluation_label, MACE_LABEL, FIXED_LABEL, MAP_LABEL, \
MRR_LABEL, order_label, MC_LABEL
from conversions.pandas_to_models import items_to_pandas
from evaluation.mace import ace
from evaluation.map import average_precision
from evaluation.misscalibration import mc
from evaluation.mrr import mrr
from settings.language_strings import LANGUAGE_CHI, LANGUAGE_HE, LANGUAGE_KL, LANGUAGE_COUNT_GENRES, LANGUAGE_VARIANCE
from posprocessing.greedy_algorithms import surrogate_submodular
from posprocessing.lambda_value import count_genres, variance
def personalized_trade_off(user_preference_distribution, reco_items, config, n=RECOMMENDATION_LIST_SIZE):
lmbda = 0.0
if config[TRADE_OFF_LABEL] == COUNT_GENRES_TRADE_OFF_LABEL:
lmbda = count_genres(user_preference_distribution)
else:
lmbda = variance(user_preference_distribution)
return surrogate_submodular(user_preference_distribution, reco_items, config, n, lmbda=lmbda)
def postprocessing_calibration(user_prefs_distr_df, candidates_items_mapping, test_items_ids, baseline_label):
# print('Pos processing - Start')
config = dict()
    evaluation_results_df = pd.DataFrame()
# %% load in libraries
from bs4 import BeautifulSoup
import pandas as pd
import time
from selenium import webdriver
import random
import numpy as np
# %% set up selenium
from selenium import webdriver
driver = webdriver.Firefox()
# %%
driver.get('https://www.doximity.com/residency/programs/009b631d-3390-4742-b583-820ccab9a18b-university-of-california-san-francisco-anesthesiology')
# %%
specialties_df = pd.read_csv('specialties_doximity.csv')
import re
import fnmatch
import os, sys, time
import pickle, uuid
from platform import uname
import pandas as pd
import numpy as np
import datetime
from math import sqrt
from datetime import datetime
import missingno as msno
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa import seasonal
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.stattools import adfuller, kpss
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
from scipy import signal
import pmdarima as pm
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.patches as mpatches
from pandas.plotting import lag_plot
import seaborn as sns
from pylab import rcParams
from sklearn.ensemble import RandomForestRegressor
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler, LabelEncoder, OneHotEncoder
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error
# import keras
# from keras.models import Sequential
# from keras.layers import Dense
# from keras.layers import LSTM
# from keras.layers import Dropout
# from keras.layers import *
# from keras.callbacks import EarlyStopping
from src.Config import Config
from .Model import Model
class Logger(object):
info = print
critical = print
error = print
class Train(Config):
REGRESSION_ALGORITHMS = dict(
# # Supervised Learning
        XGBR = dict(alg=XGBRegressor, args=dict(silent=1, random_state=Config.MODELLING_CONFIG["RANDOM_STATE"], objective="reg:squarederror"), scaled=False),
        LGBMR = dict(alg=LGBMRegressor, args=dict(random_state=Config.MODELLING_CONFIG["RANDOM_STATE"]), scaled=False),
        RFR = dict(alg=RandomForestRegressor, args=dict(n_estimators=100, random_state=Config.MODELLING_CONFIG["RANDOM_STATE"]), scaled=False),
        RFR_tuned = dict(alg=RandomForestRegressor, args=dict(random_state=Config.MODELLING_CONFIG["RANDOM_STATE"]), scaled=False,
param_grid={
'n_estimators': [20, 50, 100, 200, 500], #100
'max_depth':[2, 4, None, 8], #None
'min_samples_split':[0.5, 2, 4, 10], #2
'max_features':[1, 2, None, 3, 6], #auto
},
),
        XGBR_tuned = dict(alg=XGBRegressor, args=dict(silent=1, random_state=Config.MODELLING_CONFIG["RANDOM_STATE"], objective="reg:squarederror"), scaled=False,
param_grid={
'learning_rate':[0.01, 0.05, 0.1, 0.3],#, 0.5, 0.9],
'max_depth': [2, 3, 6, 10, 13], #3
'n_estimators': [20, 50, 200],#, 500], #100
#'booster': ['gbtree', 'dart'], #'gbtree'
'colsample_bytree': [0.2, 0.5, 0.8, 1.0],
'subsample': [0.2, 0.5, 0.8, 1.0],
# 'early_stopping_rounds': [200],
},
),
        LGBMR_tuned = dict(alg=LGBMRegressor, args=dict(random_state=Config.MODELLING_CONFIG["RANDOM_STATE"]), scaled=False,
param_grid={
'learning_rate':[0.01, 0.05, 0.1, 0.3, 0.9], #0.1
'max_depth':[2, 3, 6],
'n_estimators': [20, 50, 100, 200], #100
'num_leaves': [4, 8, 64], #31
# 'subsample': [0.2, 0.5, 0.8, 1.0],
# 'bagging_fraction': [0.2, 0.5, 0.8, 1.0],
# 'early_stopping_rounds': [200]
#'boosting' : ['gbdt', 'dart', 'goss'],
},
),
)
SARIMA = pm.auto_arima
FORECAST_ALGORITHMS = dict(
# # Forecasting
ARIMA = dict(alg=ARIMA, args=dict(order=(Config.MODELLING_CONFIG['ARIMA_P'], Config.MODELLING_CONFIG['ARIMA_D'], Config.MODELLING_CONFIG['ARIMA_Q']))),
SARIMA = dict(alg=SARIMA, args=dict(start_p=1, d=0, start_q=1, max_p=5, max_d=2, max_q=5, m=7,
start_P=0, D=0, start_Q=0, max_P=5, max_D=2, max_Q=5,
seasonal=True, trace=True, error_action='ignore', suppress_warnings=True, stepwise=True)),
HOLT_WINTER = dict(alg=ExponentialSmoothing, args=dict(seasonal_periods=Config.MODELLING_CONFIG["HOLT_WINTER_SEASON"], trend=Config.MODELLING_CONFIG["HOLT_WINTER_TREND"], seasonal=Config.MODELLING_CONFIG["HOLT_WINTER_SEASONAL"])),
)
def __init__(self, var, logger=Logger(), suffix=""):
self.logger = logger
self.models = {}
self.axis_limit = [1e10, 0]
self.suffix = suffix
self.meta = dict(
var = var,
stime = datetime.now(),
user = os.getenv('LOGNAME') or os.getlogin(),
sys = uname()[1],
py = '.'.join(map(str, sys.version_info[:3])),
)
@staticmethod
    def vars(types=None, wc_vars=[], qreturn_dict=False):
        """ Return list of variable names
        Acquire the right features from the dataframe to be input into the model.
        Features are selected based on the "predictive" flag in the VARS dictionary.
        Parameters
        ----------
        types : list of str
            VARS keys naming the feature groups to include; defaults to all groups
        Returns
        -------
        Features with predictive == True in Config.VARS
        """
        if types is None:
            types = [V for V in Config.VARS]
selected_vars = []
for t in types:
for d in Config.VARS[t]:
if not d.get('predictive'):
continue
if len(wc_vars) != 0:
matched_vars = fnmatch.filter(wc_vars, d['var'])
if qreturn_dict:
for v in matched_vars:
dd = d.copy()
dd['var'] = v
selected_vars.append(dd)
else:
selected_vars.extend(matched_vars)
else:
if qreturn_dict and not d in selected_vars:
selected_vars.append(d)
else:
if not d['var'] in selected_vars:
selected_vars.append(d['var'])
return selected_vars
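    # Illustrative shape of a Config.VARS entry consumed by vars() above (keys inferred
    # from the accesses in this class; the group and variable names are assumptions):
    # VARS = {
    #     "Weather": [
    #         {"var": "Temperature", "predictive": True, "impute": "interp"},
    #         {"var": "Rainfall*",   "predictive": True, "impute": "knn"},  # wildcards are matched with fnmatch
    #     ],
    # }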
def read_csv_file(self, vars, fname=None, feature_engineer=Config.MODELLING_CONFIG["FEATURE_ENGINEERING"], **args):
"""Read in csv files
Read in csv files from multiple data sources
Parameters
----------
source_type : str
Option to decide whether to read in single or multiple csv files
fname : str (default=None)
Name of csv file:
- If "source_type" = "single", key in the csv file name without extension
- If "source_type" = "multiple", do not need to key in anything, just leave it in default
Returns
-------
data : object
Dataframe
"""
self.meta['feature_engineer'] = feature_engineer
self.logger.info("Preparing data for modeling ...")
self.sources = [vars]
if self.meta['feature_engineer'] == True:
self.sources.append("Feature_Engineer")
elif self.meta['feature_engineer'] == False:
self.sources = self.sources
try:
fname = "{}.csv".format(fname)
self.data = pd.read_csv(os.path.join(Config.FILES["DATA_LOCAL"], fname))
cols = self.vars(self.sources, self.data.columns)
self.data = self.data[cols + ["Date", "District", "Prod_Sales"]]
if self.data.size == 0:
self.logger.warning("no data found in file {}".format(fname))
if self.logger == print:
exit()
except FileNotFoundError:
self.logger.critical("file {} is not found".format(fname))
if self.logger == print:
exit()
fname = os.path.join(self.FILES["DATA_LOCAL"], "{}{}.csv".format(Config.FILES["MERGED_DATA"], self.suffix))
self.data.to_csv(fname)
self.logger.info("done.")
return
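    # Illustrative end-to-end usage (argument values are assumptions, mirroring the
    # docstrings in this class; not executed here):
    # train = Train(var="Prod_Sales")
    # train.read_csv_file(vars="BIODIESEL", fname="biodiesel_daily")
    # train.run(algorithms=["LGBMR", "XGBR"], metric_eval="cv", cv_type="kf")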
def run(self, algorithms=['ARIMA'], district=None, metric_eval="test", cv_type="loo", model_type=Config.MODELLING_CONFIG["MODEL_TYPE"]):
"""Initiate the modelling
Set the arguments in running the model.
Most of the model testing can be configure in this method
Parameters
----------
        algorithms : array-like, (default=['ARIMA'])
            Models to run;
            each algorithm comes with a preset hyperparameter grid, and any tuning can be adjusted in the algorithm definitions.
district : str
District with product sales on either BIODIESEL or PRIMAX-95
metric_eval : str, optional (default='test')
To determine whether to run cross-validation on per-district model;
- If "test", no cross validation will be performed for per-district model.
- If "cv", cross validation will be performed for per-district model.
cv_type : str, optional (default='loo')
Type of cross validation method to used in modelling;
- If "loo", Leave One Out cross validation will be performed for per-district model.
- If "kf", K-Fold cross validation will be performed for per-district model.
Returns
-------
Model results with best algorithm, metrics and saved model file in pickle format
"""
self.data.reset_index(inplace=True)
self.data[["Date"]] = pd.to_datetime(self.data["Date"])
assert metric_eval in self.MODELLING_CONFIG["METRIC_EVAL_TYPE"]
assert cv_type in self.MODELLING_CONFIG["CV_FOLD_TYPE"]
self.metric_eval = metric_eval
self.cv_type = cv_type
self.meta['metric_eval'] = metric_eval
self.meta['cv_type'] = cv_type
self.meta['SPLIT_RATIO'] = self.MODELLING_CONFIG["SPLIT_RATIO"]
self.meta["model_type"] = self.MODELLING_CONFIG["MODEL_TYPE"]
if district == None:
district = self.data["District"].unique()
self.data = self.data[self.data["District"].isin(district)]
if self.meta["model_type"] == "Forecasting":
self.forecasting(self.data, algorithms)
elif self.meta["model_type"] == "Supervised":
self.regression(self.data, algorithms)
self.sort_models()
self.get_results()
self.meta['runtime'] = datetime.now() - self.meta['stime']
self.meta['algorithms'] = algorithms
self.logger.info("Training finished in {}.".format(self.meta['runtime']))
def regression(self, data, algorithms, column_name="District"):
"""Run the regression
Run the regression model on each clustering_type defined with different models.
Parameters
----------
data : str
Merged dataframe
algorithms : str
Types of models
column_name : str, optional (default='District')
Unique column to used to subset the dataframe
"""
self.logger.info("Training using regression algorithms with evaluation type on '{}':".format(self.meta['metric_eval']))
# # loop over algorithms in supervised learning
for algorithm in algorithms:
start = time.time()
self.logger.info(" Training using regression algorithm {} ...".format(algorithm))
# # loop over district
n_districts = data[column_name].nunique()
i_district = 0
for district, group_data in data.groupby(column_name):
self.logger.info(" Building model for {} {} with total {} records ({} out of {}):"\
.format(column_name, district, group_data.shape[0], i_district, n_districts))
start_district = time.time()
group_data = group_data.dropna(axis='columns', how = 'all')
if not "{}".format(self.meta["var"]) in group_data.columns:
self.logger.info(" There is no {} measurement for district : {}. Skipping...".format(self.meta["var"], district))
continue
if 'Feature_Engineer' not in self.sources:
self.sources.append('Feature_Engineer')
predictives = [col for col in group_data.columns if col in self.vars(self.sources) and col != self.meta["var"]]
vars_impute_interp = []
vars_impute_knn = []
for var in self.vars(self.sources, group_data.columns):
v = next(v for source in self.sources for v in self.vars([source], group_data, True) if v['var'] == var)
if algorithm in self.MODELLING_CONFIG["IMPUTE_ALGORITHMS"]:
if v.get("impute", '') == 'interp':
vars_impute_interp.append(var)
else:
vars_impute_knn.append(var)
else:
if v.get("impute", '') == 'interp':
vars_impute_interp.append(var)
elif v.get("impute", '') == 'knn':
vars_impute_knn.append(var)
else:
pass # no imputation
if vars_impute_interp != []:
try:
self.logger.info(" interpolation for {} ...".format(', '.join(vars_impute_interp)))
group_data.loc[:, vars_impute_interp] = group_data.loc[:, vars_impute_interp].interpolate(limit_direction='both')
except ValueError:
self.logger.info(" Not enough data point in {} for KNN imputation and interpolation ...".format(', '.join(vars_impute_knn)))
if vars_impute_knn != []:
try:
self.logger.info(" KNN imputation for {} ...".format(', '.join(vars_impute_knn)))
group_data.loc[:, vars_impute_knn] = self.knn_impute(group_data.loc[:, vars_impute_knn])
except ValueError:
self.logger.info(" Not enough data point in {} for KNN imputation and interpolation ...".format(', '.join(vars_impute_knn)))
group_data = group_data[group_data[self.meta["var"]].notnull()]
self.logger.info(" Remove observations with null value for response; new # of observations: {} (previously {})".format(group_data.shape[0], self.data.shape[0]))
k = max(len(predictives) + 1, 5)
kk = group_data.shape[0]*(Config.MODELLING_CONFIG["SPLIT_RATIO"])
if kk < k:
self.logger.info(" Skipping model for {} {}; too few points: {}; minimum {} points required." \
.format(column_name, district, group_data.shape[0], int(k / (1-Config.MODELLING_CONFIG["SPLIT_RATIO"]))+1))
continue
# # create model object, set response and independent variables (predictives)
if not district in self.models:
self.models[district] = []
model = Model(self.REGRESSION_ALGORITHMS[algorithm], district, self.meta["var"], predictives)
model.set_props(algorithm, group_data)
if self.REGRESSION_ALGORITHMS[algorithm]['scaled']:
model.regression_scalar(group_data)
else:
model.regression_tree(group_data, self.meta['metric_eval'], self.meta['cv_type'])
self.models[district].append(model)
self.logger.info(" Metrics:: {}".format(', '.join(["{}:{:.2f}".format(m, v) for m, v in model.metrics.items()])))
if hasattr(model, 'metrics_holdout'):
self.logger.info(" Holdout Metrics:: {}".format(', '.join(["{}:{:.2f}".format(m, v) for m, v in model.metrics_holdout.items()])))
self.logger.info(" {} {} trained using '{:d}' records in {:0.1f}s".format(district, column_name, group_data.shape[0], time.time()-start_district))
i_district += 1
#if i_district > 2: break
self.logger.info(" {} {}(s) trained using {} algorithm in {:0.2f}s".format(i_district, column_name, algorithm, time.time()-start))
def forecasting(self, data, algorithms, column_name="District", univariate=Config.MODELLING_CONFIG["UNIVARIATE_OPTION"], seasonal=Config.MODELLING_CONFIG["SEASONAL_OPTION"]):
"""Run the regression / forecasting / heuristics model
Run the regression model on each clustering_type defined with different models.
Parameters
----------
data : str
Merged dataframe
algorithms : str
Types of models
column_name : str, optional (default='District')
Unique column to used to subset the dataframe
"""
self.meta["univariate"] = self.MODELLING_CONFIG["UNIVARIATE_OPTION"]
self.meta["seasonal"] = self.MODELLING_CONFIG["SEASONAL_OPTION"]
self.logger.info("Training using forecasting algorithms with evaluation type on '{}':".format(self.meta['metric_eval']))
# # loop over algorithms in forecasting algorithms
for algorithm in algorithms:
start = time.time()
self.logger.info(" Training using forecasting algorithm {} ...".format(algorithm))
# # loop over district
n_districts = data[column_name].nunique()
i_district = 0
for district, group_data in data.groupby(column_name):
self.logger.info(" Building model for {} {} with total {} records ({} out of {}):"\
.format(column_name, district, group_data.shape[0], i_district, n_districts))
start_district = time.time()
group_data = group_data.dropna(axis='columns', how = 'all')
if not "{}".format(self.meta["var"]) in group_data.columns:
self.logger.info("      There is no {} measurement for {}. Skipping...".format(self.meta["var"], district))
continue
if self.meta["univariate"] == True:
predictives = [col for col in group_data.columns if col in self.meta["var"]]
elif self.meta["univariate"] == False:
predictives = [col for col in group_data.columns if col in self.vars(self.sources) and col != self.meta["var"]]
vars_impute_interp = []
vars_impute_knn = []
for var in self.vars(self.sources, group_data.columns):
v = next(v for source in self.sources for v in self.vars([source], group_data, True) if v['var'] == var)
if algorithm in self.MODELLING_CONFIG["IMPUTE_ALGORITHMS"]:
if v.get("impute", '') == 'interp':
vars_impute_interp.append(var)
else:
vars_impute_knn.append(var)
else:
if v.get("impute", '') == 'interp':
vars_impute_interp.append(var)
elif v.get("impute", '') == 'knn':
vars_impute_knn.append(var)
else:
pass # no imputation
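# At this point the variables have been routed by their configured "impute" method:
# vars_impute_interp will be filled by linear interpolation and vars_impute_knn by KNN
# imputation in the two blocks below.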
if vars_impute_interp != []:
try:
self.logger.info(" interpolation for {} ...".format(', '.join(vars_impute_interp)))
group_data.loc[:, vars_impute_interp] = group_data.loc[:, vars_impute_interp].interpolate(limit_direction='both')
except ValueError:
self.logger.info("      Not enough data points in {} for interpolation ...".format(', '.join(vars_impute_interp)))
if vars_impute_knn != []:
try:
self.logger.info(" KNN imputation for {} ...".format(', '.join(vars_impute_knn)))
group_data.loc[:, vars_impute_knn] = self.knn_impute(group_data.loc[:, vars_impute_knn])
except ValueError:
self.logger.info("      Not enough data points in {} for KNN imputation ...".format(', '.join(vars_impute_knn)))
group_data = group_data[group_data[self.meta["var"]].notnull()]
self.logger.info(" Remove observations with null value for response; new # of observations: {} (previously {})".format(group_data.shape[0], self.data.shape[0]))
k = max(len(predictives) + 1, 5)
kk = group_data.shape[0]*(Config.MODELLING_CONFIG["SPLIT_RATIO"])
if kk < k:
self.logger.info(" Skipping model for {} {}; too few points: {}; minimum {} points required." \
.format(column_name, district, group_data.shape[0], int(k / (1-Config.MODELLING_CONFIG["SPLIT_RATIO"]))+1))
continue
# # create model object, set response and independent variables (predictives)
if not district in self.models:
self.models[district] = []
model = Model(self.FORECAST_ALGORITHMS[algorithm], district, self.meta["var"], predictives)
model.set_props(algorithm, group_data)
model.forecast_model(group_data, self.meta["seasonal"])
self.models[district].append(model)
self.logger.info(" Metrics:: {}".format(', '.join(["{}:{:.2f}".format(m, v) for m, v in model.metrics.items()])))
if hasattr(model, 'metrics_holdout'):
self.logger.info(" Holdout Metrics:: {}".format(', '.join(["{}:{:.2f}".format(m, v) for m, v in model.metrics_holdout.items()])))
self.logger.info(" {} {} trained using '{:d}' records in {:0.1f}s".format(district, column_name, group_data.shape[0], time.time()-start_district))
i_district += 1
#if i_district > 2: break
self.logger.info(" {} {}(s) trained using {} algorithm in {:0.2f}s".format(i_district, column_name, algorithm, time.time()-start))
def sort_models(self):
"""Sort the models base on the selected metric
The results from model will be sorted from the metric score;
The primary metric score defined is R2 score;
You can select the threshold of metric to be displayed in chart.
"""
self.meta["METRIC_BEST"] = self.MODELLING_CONFIG["METRIC_BEST"]
self.meta["METRIC_BEST_THRESH"] = self.MODELLING_CONFIG.get("METRIC_BEST_THRESH", None)
self.logger.info("  Sorting models per district based on metric '{}'".format(self.meta["METRIC_BEST"]))
reverse = False if self.meta["METRIC_BEST"] in ["MAE", "MAPE", "RMSE", "MSE"] else True
self.best_district = []
for district in self.models:
self.models[district].sort(key=lambda x: x.metrics[self.meta["METRIC_BEST"]], reverse=reverse)
if self.meta["METRIC_BEST_THRESH"] != None:
metric_value = self.models[district][0].metrics[self.meta["METRIC_BEST"]]
if (not reverse and metric_value < self.meta["METRIC_BEST_THRESH"] ) or \
(reverse and metric_value > self.meta["METRIC_BEST_THRESH"] ):
self.best_district.append(district)
min_x = min(self.models[district][0].actual.min(), self.models[district][0].pred.min())
if min_x < self.axis_limit[0]:
self.axis_limit[0] = min_x
max_x = max(self.models[district][0].actual.max(), self.models[district][0].pred.max())
if max_x > self.axis_limit[1]:
self.axis_limit[1] = max_x
def save_models(self, fname=""):
"""Saving the trained models to pickle files
Models will be saved as pickle files with the .sa extension
Parameters
----------
fname : str (default='')
    Output file name for the model, with the .sa extension
Returns
-------
Model file in .sa format
"""
self.meta["n_models"] = len(self.models)
training = dict(
models = {w: [self.models[w][0]] for w in self.models},
meta = self.meta
)
if fname == "":
fname = os.path.join(self.FILES["DATA"], self.FILES["MODELS"], self.meta["var"] + '_' + '.' + Config.NAME["short"].lower())
if os.path.exists(fname):
os.remove(fname)
with open(fname, 'wb') as handle:
pickle.dump(training, handle, protocol=pickle.HIGHEST_PROTOCOL)
self.logger.info("Training and its models saved to file '{}'.".format(fname))
def load_models(self, path, append=False):
"""Loading the trained models from pickle files
Model with extension on .sa will be loaded as input in dashboard
Parameters
----------
path : str
Input directory where model files are stored
----------
"""
file_path = os.path.join(path, self.meta["var"] + '.' + Config.NAME["short"].lower()) if not os.path.isfile(path) else path
with open(file_path, 'rb') as handle:
training = pickle.load(handle)
if not append:
self.models = training["models"]
self.meta = training["meta"]
self.meta["n_models"] = len(self.models)
else:
if training["meta"]["var"] != self.meta["var"]:
self.logger.critical(" existing training is for response '{}', \
while the loading train is for response '{}'.".format(self.meta["var"], training["meta"]["var"]))
self.models.update(training["models"])
self.meta['runtime'] += training["meta"]['runtime']
self.meta["n_models"] += len(training["models"])
def predict(self, df_test, metric=Config.MODELLING_CONFIG["PREDICT_METRIC_CONF"]):
"""Predict sales
Use the model (.sa) files created from the different model types to predict sales.
Parameters
----------
df_test : object (default=district_test_{source}.csv & reservoir_{source}.csv)
    Merged dataframe from the input data sources
Returns
-------
df_result : object
    Dataframe of predicted sales for each cluster, with its best metric
"""
cluster_col = 'District'
df_result = pd.DataFrame({"{}".format(cluster_col): [], "DATE": [], self.meta["var"]: [], "METRIC":[]})
for district_name, district_data in df_test.groupby("{}".format(cluster_col)):
if district_name in self.models:
model_accu = [self.models[district_name][0].metrics[self.MODELLING_CONFIG["METRIC_BEST"]]]*district_data.shape[0]
preds = pd.DataFrame({
"{}".format(cluster_col): pd.Series([district_name]*district_data.shape[0]),
"DATE": pd.Series(district_data.index),
self.meta["var"]: pd.Series(self.models[district_name][0].predict(district_data)),
"METRIC": pd.Series(model_accu),
})
preds[self.meta["var"]] = preds[self.meta["var"]].round(1)
df_result = pd.concat([df_result, preds])
if metric in ['False', False]:
df_result.drop(columns=["METRIC"], inplace=True)
return df_result
def evaluate(self, actual_all, pred_all):
"""Evaluate the prediction result between actual and predicted value
Acquire the sales values from the test data (actual values).
Then, with the predicted sales values, evaluate the prediction error.
Parameters
----------
actual_all : object
Dataframe of test data with sales
pred_all : object
Dataframe of predicted sales
"""
results = []
for district, actual in actual_all.groupby("District"):
pred = pred_all[pred_all["District"]==district][self.meta["var"]]
inds = actual[self.meta["var"]].notnull()
metrics = self.models[district][0].evaluate(actual[self.meta["var"]][inds], pred[inds])
results.append((district, metrics[Config.MODELLING_CONFIG["METRIC_BEST"]]))
return pd.DataFrame.from_records(results, columns=["District", "Metric_new"])
def knn_impute(self, data, k=None):
"""KNN imputation on missing values
KNN imputation utilizes nearby columns as input parameters to impute missing cells;
however, KNN imputation is a very time-consuming method.
Parameters
----------
data : pandas.DataFrame
    Dataframe whose missing values should be imputed
k : int, optional (default=None)
    Number of neighbours used for KNN imputation; defaults to
    MODELLING_CONFIG["KNN_NEIGHBOUR"] when not supplied
Returns
-------
array : object
    Array with KNN-imputed values
"""
if k is None:
k = self.MODELLING_CONFIG["KNN_NEIGHBOUR"]
# data = data.dropna(thresh=0.7*len(data), axis=1)
encoding = LabelEncoder() if self.MODELLING_CONFIG["ENCODING_ALG"].upper() == 'ORDINAL' else OneHotEncoder()
data = data.ffill().bfill()
data = encoding.fit_transform(data.values)
data = data.dropna(axis=1, how="all", thresh=(data.shape[0])*self.MODELLING_CONFIG["IMPUTE_MISSING_PERCENT_THRES"])
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data.values)
knn_imputer = KNNImputer(n_neighbors=k)
scaled_data = knn_imputer.fit_transform(scaled_data)
scaled_data = scaler.inverse_transform(scaled_data)
scaled_data = encoding.inverse_transform(scaled_data)
return scaled_data
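# Illustrative sketch (not part of the pipeline above): the core of knn_impute is
# scikit-learn's KNNImputer; a minimal standalone usage on a purely numeric frame,
# assuming scikit-learn >= 0.22, would look like:
#     from sklearn.impute import KNNImputer
#     import pandas as pd
#     df = pd.DataFrame({"a": [1.0, None, 3.0], "b": [4.0, 5.0, None]})
#     imputed = KNNImputer(n_neighbors=2).fit_transform(df)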
def get_results(self):
"""get results for metric"""
results_list = []
for district in self.models:
# # loop over the models per district
for model in self.models[district]:
# # loop over the metrics
for m, val in model.metrics.items():
result = dict(Algorithm=model.algorithm, District=district, created=model.created,
start_time=model.start_time, end_time=model.end_time, n_records=model.n_records,
metric_name=m, metric_value=val)
results_list.append(result)
self.results = pd.DataFrame(results_list)
fname = os.path.join(Config.FILES["DATA_LOCAL"], "{}{}.csv".format("test_results", self.suffix))
self.results.to_csv(fname)
# set the best algorithm for each item
self.bests = {}
best_district = None
for metric, group_data in self.results.groupby('metric_name'):
if metric == "MAPE":
best_model = group_data.set_index("Algorithm").groupby(['District'])['metric_value'].agg('idxmin').rename("best_model")
best_accuracy = group_data.groupby(['District'])['metric_value'].min().rename("best_accuracy")
else:
best_model = group_data.set_index("Algorithm").groupby(['District'])['metric_value'].agg('idxmax').rename("best_model")
best_accuracy = group_data.groupby(['District'])['metric_value'].max().rename("best_accuracy")
self.bests[metric] = pd.concat([best_model, best_accuracy], axis = 1)
if self.best_district != []:
self.bests[metric] = self.bests[metric].loc[self.best_district]
def ADF_Stationarity_Test(self, time_series, print_results=True):
"""Augmented Dickey-Fuller (ADF) test
The Dickey-Fuller test is a type of unit root test. A unit root is a cause of non-stationarity,
and the ADF test checks whether a unit root is present.
.. note::
    A time series is stationary if a shift in time does not change its statistical properties,
    in which case no unit root exists
The null and alternate hypotheses of the Augmented Dickey-Fuller test are defined as follows:
* **Null Hypothesis**: A unit root is present
* **Alternate Hypothesis**: There is no unit root
Parameters
----------
time_series : array-like
An array of time series data to be tested on stationarity
print_results : boolean (default=True)
Option to decide whether to print the results of ADF testing
Notes
-----
Nothing is returned; the results are stored on the instance instead:
``self.p_value`` holds the P-value determined from the ADF test and
``self.is_stationary`` flags whether the time series is stationary.
A summary table of the ADF results is printed when ``print_results`` is True.
"""
adfTest = adfuller(time_series, autolag='AIC')
self.p_value = adfTest[1]
if (self.p_value < self.MODELLING_CONFIG["SIGNIFICANCE_LEVEL"]):
self.is_stationary = True
else:
self.is_stationary = False
if print_results:
dfResults = pd.Series(adfTest[0:4], index=['ADF Test Statistic','P-Value','Lags Used','Observations Used'])
## Add Critical Values
for key,value in adfTest[4].items():
dfResults['Critical Value (%s)'%key] = value
print('Augmented Dickey-Fuller Test Results:')
print(dfResults)
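# Illustrative sketch (assumes statsmodels' adfuller, as used above): with autolag set,
# adfuller returns (test statistic, p-value, lags used, n observations, critical values, icbest),
# and a p-value below the chosen significance level rejects the unit-root null:
#     from statsmodels.tsa.stattools import adfuller
#     stat, p, lags, nobs, crit, _ = adfuller(series.dropna(), autolag='AIC')
#     is_stationary = p < 0.05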
@staticmethod
def boxplot_metric(data, title):
"""Plot boxplot of models"""
fig, ax = plt.subplots(figsize=(15, 3))
g = sns.boxplot(x='metric_value', y='Algorithm', data=data, ax=ax, orient='h')
ax.set_xlabel(title)
ax.set(xlim=Config.METRIC_THRESH_PLOT.get(title, None))
for p in g.patches:
g.annotate(format(p.get_height(), '.2f'), (p.get_x() + p.get_width() / 2., p.get_height()), ha='center', va='center', xytext=(0, 10), textcoords='offset points')
patches = []
for alg, val in data.groupby("Algorithm"):
patches.append(mpatches.Patch(label="{}: {:0.2f}".format(alg, val.metric_value.median())))
plt.legend(handles=patches, title="Medians")
return fig
@staticmethod
def barplot_metric(data, title):
"""Plot barchart of models"""
n_models = data["District"].nunique()
fig, ax = plt.subplots(figsize=(15, n_models/2))
g = sns.barplot(y='District', x='metric_value', hue='Algorithm', data=data, ax=ax, edgecolor='black', orient='h')
ax.set_xlabel(title)
ax.set(xlim=Config.METRIC_THRESH_PLOT.get(title, None))
ax.set_ylabel("CLUSTER")
return fig
def get_districts_for_plt(self, num_districts=20):
""" Select num_districts for pred vs. actual plot according to the quantiles"""
num_districts = min(len(self.models), num_districts)
inds = np.linspace(0, len(self.models)-1, num=num_districts)
districts_metric = [(w, self.models[w][0].metrics[self.MODELLING_CONFIG["METRIC_BEST"]]) for w in self.models if bool(self.models[w]) == True]
reverse = False if self.MODELLING_CONFIG["METRIC_BEST"] in ["MAE", "MAPE", "RMSE", "MSE"] else True
districts_metric.sort(key=lambda tup: tup[1], reverse=reverse)
districts = np.array([w[0] for w in districts_metric])[inds.astype(int)]
return districts
def piechart_metric(self, metric):
"""Plot piechart of models"""
fig, ax = plt.subplots(figsize=(3, 3))
self.bests[metric]["best_model"].value_counts(normalize=True).plot(kind='pie', autopct='%.2f', ax=ax, title=metric, fontsize=9)
ax.set_ylabel("Top Algorithms %")
return fig
def actual_pred_plot(self, District, thesh=0.1):
"""Plot scatter lot of actual versus prediction"""
model = self.models[District][0]
data = pd.DataFrame.from_dict({'Actual': model.actual, 'Prediction': model.pred})
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
import pandas.core.ops as ops
# Basic test for the arithmetic array ops
# -----------------------------------------------------------------------------
@pytest.mark.parametrize(
"opname, exp",
[("add", [1, 3, None, None, 9]), ("mul", [0, 2, None, None, 20])],
ids=["add", "mul"],
)
def test_add_mul(dtype, opname, exp):
a = pd.array([0, 1, None, 3, 4], dtype=dtype)
b = pd.array([1, 2, 3, None, 5], dtype=dtype)
# array / array
expected = pd.array(exp, dtype=dtype)
op = getattr(operator, opname)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
op = getattr(ops, "r" + opname)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_sub(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a - b
expected = pd.array([1, 1, None, None, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_div(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a / b
expected = pd.array([np.inf, 2, None, None, 1.25], dtype="Float64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398, GH#22793
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = FloatingArray(
np.array([np.nan, np.inf, -np.inf, 1], dtype="float64"),
np.array([False, False, False, True]),
)
if negative:
expected *= -1
tm.assert_extension_array_equal(result, expected)
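# Note: FloatingArray pairs a float ndarray with a boolean mask where True marks pd.NA entries,
# so here 0/0 becomes a plain (unmasked) NaN, 1/0 and -1/0 become +/-inf, and only the original
# None stays masked as NA.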
def test_floordiv(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a // b
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
expected = pd.array([0, 2, None, None, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_mod(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a % b
expected = pd.array([0, 0, None, None, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_pow_scalar():
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a**0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a**1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a**pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a**np.nan
expected = FloatingArray(
np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64"),
np.array([False, False, False, True, False]),
)
tm.assert_extension_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0**a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1**a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA**a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan**a
expected = FloatingArray(
np.array([1, np.nan, np.nan, np.nan], dtype="float64"),
np.array([False, False, True, False]),
)
tm.assert_extension_array_equal(result, expected)
def test_pow_array():
a = pd.array([0, 0, 0, 1, 1, 1, None, None, None])
b = pd.array([0, 1, None, 0, 1, None, 0, 1, None])
result = a**b
expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na():
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = pd.array([np.nan, np.nan], dtype="Int64")
result = np.array([1.0, 2.0]) ** arr
expected = pd.array([1.0, np.nan], dtype="Float64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("other", [0, 0.5])
def test_numpy_zero_dim_ndarray(other):
arr = pd.array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
# Test generic characteristics / errors
# -----------------------------------------------------------------------------
def test_error_invalid_values(data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
# invalid scalars
msg = "|".join(
[
r"can only perform ops with numeric values",
r"IntegerArray cannot perform the operation mod",
r"unsupported operand type",
r"can only concatenate str \(not \"int\"\) to str",
"not all arguments converted during string",
"ufunc '.*' not supported for the input types, and the inputs could not",
"ufunc '.*' did not contain a loop with signature matching types",
"Addition/subtraction of integers and integer-arrays with Timestamp",
]
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
str_ser = pd.Series("foo", index=s.index)
# with pytest.raises(TypeError, match=msg):
if all_arithmetic_operators in [
"__mul__",
"__rmul__",
]: # (data[~data.isna()] >= 0).all():
res = ops(str_ser)
expected = pd.Series(["foo" * x for x in data], index=s.index)
tm.assert_series_equal(res, expected)
else:
with pytest.raises(TypeError, match=msg):
ops(str_ser)
msg = "|".join(
[
"can only perform ops with numeric values",
"cannot perform .* with this index type: DatetimeArray",
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *",
"unsupported operand type",
r"can only concatenate str \(not \"int\"\) to str",
"not all arguments converted during string",
"cannot subtract DatetimeArray from ndarray",
]
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# Various
# -----------------------------------------------------------------------------
# TODO test unsigned overflow
def test_arith_coerce_scalar(data, all_arithmetic_operators):
op = tm.get_op_from_name(all_arithmetic_operators)
s = pd.Series(data)
other = 0.01
result = op(s, other)
expected = op(s.astype(float), other)
expected = expected.astype("Float64")
# rmod results in NaN that wasn't NA in original nullable Series -> unmask it
if all_arithmetic_operators == "__rmod__":
mask = (s == 0).fillna(False).to_numpy(bool)
expected.array._mask[mask] = False
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# even if that is equal to an integer
op = tm.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
import streamlit as st
import pandas as pd
import yfinance as yf
import datetime
import os
from pathlib import Path
import requests
import hvplot.pandas
import numpy as np
import matplotlib.pyplot as plt
from MCForecastTools_2Mod import MCSimulation
import plotly.express as px
from statsmodels.tsa.arima_model import ARIMA
import pmdarima as pm
from sklearn.linear_model import LinearRegression
# I commented out lines 95-96 in the MCForecast file to avoid printing the "Running simulation number" lines
# title of the project and introduction on what to do
st.title('Dividends Reinvestment Dashboard')
st.write('Analysis of the **Power** of **Dividend Reinvestment**.')
st.write('Select from the list of stocks that pays dividends.')
st.write('You will then be able to select between three options.')
st.write('***Choose wisely***.')
# chosen stock and crypto tickers. choice of the 3 different options
tickers = ("AAPL","F","JPM","LUMN","MO","MSFT","T","XOM")
crypto = ("BTC-USD", "ETH-USD", "BNB-USD")
options = ("Keep the cash", "Same Stock", "Crypto")
# box selection for the stock to invest in
dropdown_stocks = st.selectbox('Pick your stock', tickers)
# starting date of the stock history. This is interactive and can be changed by the user
start = st.date_input('Start Date', value= pd.to_datetime('2011-01-01'))
end = st.date_input('End Date', value= pd.to_datetime('today'))
currentYear = datetime.datetime.now().year
# option to have a fixed time period for historical data
# start= pd.to_datetime('2011-01-01')
# end= pd.to_datetime('today')
# this is a cache so the page does not reload the entire data if it is not changed
@st.cache
# function to let the user choose the stock
def close_price(dropdown_stocks):
data = yf.download(dropdown_stocks, period = "today", interval= "1d")['Adj Close'][0]
price = data
return round(data,2)
tickerData = yf.Ticker(dropdown_stocks) # Get ticker data
stock_name = tickerData.info['shortName']
# this will display the chosen stock, the value of the stock, and a line chart of the price history
if len(dropdown_stocks) > 0:
df = yf.download(dropdown_stocks, start, end)['Adj Close']
st.subheader(f'Historical value of {dropdown_stocks} ({stock_name})')
st.info('The current value is ${}'.format(close_price(dropdown_stocks)))
st.line_chart(df)
# df_history = tickerData.history(start = start, end = end)
# st.dataframe(df_history)
# Showing what is the yearly dividend % for the chosen stock
st.text(f'The average yearly dividend yield of {dropdown_stocks} is:')
tickerData = yf.Ticker(dropdown_stocks) # Get ticker data
tickerDf = tickerData.history(period='1d', start=start, end=end) #get the historical prices for this ticker
# Calculate the yearly % after getting the value from yahoo finance
string_summary = tickerData.info['dividendYield']
yearly_div = (string_summary) * 100
st.info(f'{yearly_div: ,.2f}%')
# Ask the user for the desired number of shares to purchase; default is 100 shares, minimum is 10 shares
share_amount= st.number_input('How many shares do you want?',value=100, min_value=10)
st.header('You selected {} shares.'.format(share_amount))
@st.cache
# Calculate the value of the investment based on the number of shares selected
def amount(share_amount):
value = close_price(dropdown_stocks) * share_amount
price = value
return round(value,2)
def regression(stock_df, forecast_years):
stock = yf.Ticker(dropdown_stocks)
stock_df = stock.history(start = start, end = end)
stock_df["Time"] = stock_df.index
stock_df["Time"] = stock_df["Time"].dt.year
dividends = stock_df.loc[stock_df["Dividends"] > 0]
dividends = dividends.drop(columns = ["Open", "High", "Low", "Close", "Volume", "Stock Splits"])
dividends = dividends.groupby(["Time"]).sum()
dividends["Years"] = dividends.index
index_col = []
for i in range(len(dividends.index)):
index_col.append(i)
dividends["Count"] = index_col
x_amount = dividends["Count"].values.reshape(-1,1)
y_amount = dividends["Dividends"].values.reshape(-1,1)
amount_regression = LinearRegression().fit(x_amount,y_amount)
yfit_amount = amount_regression.predict(x_amount)
amount_regression.coef_ = np.squeeze(amount_regression.coef_)
amount_regression.intercept_ = np.squeeze(amount_regression.intercept_)
fig = px.scatter(dividends, y = "Dividends", x = "Years", trendline = "ols")
st.write(fig)
amount_forecast = []
forecasted_year = []
for i in range(len(dividends.index) + forecast_years):
value = (amount_regression.coef_ * (i) + amount_regression.intercept_ )
amount_forecast.append(round(value,3))
forecasted_year.append(i+dividends["Years"].min())
forecasted_data = pd.DataFrame(columns = ["Year", "Forecasted Rates"])
forecasted_data["Year"] = forecasted_year
forecasted_data["Forecasted Rates"] = amount_forecast
return forecasted_data
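# Illustrative note: the regression above fits Dividends ~ Count with scikit-learn's
# LinearRegression, so each forecasted index i simply maps to
#     amount_regression.coef_ * i + amount_regression.intercept_
# which is what the forecast loop evaluates for the extra years.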
initial_investment = (amount(share_amount))
st.info('Your initial investment is ${}'.format(amount(share_amount)))
# Showing amount of yearly dividend in $
st.text(f'Your current yearly dividend for the amount of shares you selected is $:')
# Calculate the yearly $ after getting the value from yahoo finance
string_summary2 = tickerData.info['dividendRate']
yearly_div_amount = (string_summary2) * (share_amount)
st.info(f'${yearly_div_amount}')
#Predict stock using series of Monte Carlo simulation. Only works with one stock at a time.
def mc_stock_price(years):
stock = yf.Ticker(dropdown_stocks)
stock_hist = stock.history(start = start, end = end)
stock_hist.drop(columns = ["Dividends","Stock Splits"], inplace = True)
stock_hist.rename(columns = {"Close":"close"}, inplace = True)
stock_hist = pd.concat({dropdown_stocks: stock_hist}, axis = 1)
#defining variables ahead of time in preparation for MC Simulation series
Upper_Yields = []
Lower_Yields = []
Means = []
Years = [currentYear]
iteration = []
for i in range(years+1):
iteration.append(i)
#beginning Simulation series and populating with outputs
#for x in range(number of years)
for x in range(years):
MC_looped = MCSimulation(portfolio_data = stock_hist,
num_simulation= 100,
num_trading_days= 252*x+1)
MC_summary_stats = MC_looped.summarize_cumulative_return()
Upper_Yields.append(MC_summary_stats["95% CI Upper"])
Lower_Yields.append(MC_summary_stats["95% CI Lower"])
Means.append(MC_summary_stats["mean"])
Years.append(currentYear+(x+1))
potential_upper_price = [element * stock_hist[dropdown_stocks]["close"][-1] for element in Upper_Yields]
potential_lower_price = [element * stock_hist[dropdown_stocks]["close"][-1] for element in Lower_Yields]
potential_mean_price = [element * stock_hist[dropdown_stocks]["close"][-1] for element in Means]
prices_df = pd.DataFrame(columns = ["Lower Bound Price", "Upper Bound Price", "Forecasted Average Price"])
prices_df["Lower Bound Price"] = potential_lower_price
prices_df["Forecasted Average Price"] = potential_mean_price
prices_df["Upper Bound Price"] = potential_upper_price
fig = px.line(prices_df)
fig.update_layout(
xaxis = dict(
tickmode = 'array',
tickvals = iteration,
ticktext = Years
)
)
st.write(fig)
return prices_df
def cumsum_shift(s, shift = 1, init_values = [0]):
s_cumsum = pd.Series(np.zeros(len(s)))
for i in range(shift):
s_cumsum.iloc[i] = init_values[i]
for i in range(shift,len(s)):
s_cumsum.iloc[i] = s_cumsum.iloc[i-shift] + s.iloc[i]
return s_cumsum
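# Descriptive note: cumsum_shift rebuilds a level series from increments; the first `shift`
# entries are seeded with init_values and every later entry adds s.iloc[i] to the value `shift`
# steps back. For example, cumsum_shift(pd.Series([0, 1, 1, 1]), shift=1, init_values=[10])
# yields [10, 11, 12, 13].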
def predict_crypto(crypto, forecast_period=0):
forecast_days = forecast_period * 365
btc_data = yf.download(crypto, start, end)
btc_data["Date"] = btc_data.index
btc_data["Date"] = pd.to_datetime(btc_data["Date"])
btc_data["year"] = btc_data["Date"].dt.year
years = btc_data["year"].max() - btc_data["year"].min()
btc_data.reset_index(inplace = True, drop = True)
btc_data.drop(columns = ["Open", "High", "Low", "Adj Close", "Volume"], inplace = True)
btc_rolling = btc_data["Close"].rolling(window = round(len(btc_data.index)/100)).mean().dropna()
btc_change = btc_rolling.pct_change().dropna()
btc_cum = (1 + btc_change).cumprod()
list = []
for i in range(years):
list.append(btc_cum[i*round(len(btc_cum)/years):(i+1)*round(len(btc_cum)/years)].mean())
slope = (list[-1]-list[0])/len(list)
list2 = []
for i in range(years):
list2.append((slope*i)+list[0])
upper = []
lower = []
for i in range(years):
lower.append((slope*i) + (list[0]-slope))
upper.append((slope*i) + (list[0]+slope))
counter = 0
positions = []
for i in range(1, years):
if (list[i] >= lower[i]) & (list[i] <= upper[i]):
positions.append(i*round(len(btc_cum)/years))
positions.append((i+1)*round(len(btc_cum)/years))
counter+=1
if (counter < years/2):
btc_rolling = btc_data["Close"][positions[-2]:].rolling(window = round(len(btc_data.index)/100)).mean().dropna()
if forecast_period == 0:
auto_model = pm.auto_arima(btc_rolling)
model_str = str(auto_model.summary())
model_str = model_str[model_str.find("Model:"):model_str.find("Model:")+100]
start_find = model_str.find("(") + len("(")
end_find = model_str.find(")")
substring = model_str[start_find:end_find]
arima_order = substring.split(",")
for i in range(len(arima_order)):
arima_order[i] = int(arima_order[i])
arima_order = tuple(arima_order)
train = btc_rolling[:int(0.8*len(btc_rolling))]
test = btc_rolling[int(0.8*len(btc_rolling)):]
# test_length =
model = ARIMA(train.values, order=arima_order)
model_fit = model.fit(disp=0)
# if ( float(0.2*len(btc_rolling)) < int(0.2*len(btc_rolling))):
fc, se, conf = model_fit.forecast(len(test.index), alpha=0.05) # 95% conf
# else:
# fc, se, conf = model_fit.forecast((int(0.2*len(btc_rolling))), alpha=0.05)
fc_series = pd.Series(fc, index=test.index)
lower_series = pd.Series(conf[:, 0], index=test.index)
upper_series = pd.Series(conf[:, 1], index=test.index)
plt.rcParams.update({'font.size': 40})
fig = plt.figure(figsize=(40,20), dpi=100)
ax = fig.add_subplot(1,1,1)
l1 = ax.plot(train, label = "Training")
l2 = ax.plot(test, label = "Testing")
l3 = ax.plot(fc_series, label = "Forecast")
ax.fill_between(lower_series.index, upper_series, lower_series,
color='k', alpha=.15)
ax.set_title('Forecast vs Actuals')
fig.legend(loc='upper left', fontsize=40), (l1,l2,l3)
plt.rc('grid', linestyle="-", color='black')
plt.grid(True)
st.write(fig)
else:
auto_model = pm.auto_arima(btc_rolling)
model_str = str(auto_model.summary())
model_str = model_str[model_str.find("Model:"):model_str.find("Model:")+100]
start_find = model_str.find("(") + len("(")
end_find = model_str.find(")")
substring = model_str[start_find:end_find]
arima_order = substring.split(",")
for i in range(len(arima_order)):
arima_order[i] = int(arima_order[i])
arima_order = tuple(arima_order)
train = btc_rolling[:int(0.8*len(btc_rolling))]
test = btc_rolling[int(0.8*len(btc_rolling)):]
model = ARIMA(train.values, order=arima_order)
model_fit = model.fit(disp=0)
fighting = np.arange(0, (test.index[-1] + forecast_days) - test.index[0])
empty_df = pd.DataFrame(fighting)
empty_df.index = np.arange(test.index[0], test.index[-1] + forecast_days)
if ( float(0.2*len(btc_rolling)) > int(0.2*len(btc_rolling)) ):
fc, se, conf = model_fit.forecast(len(empty_df.index), alpha=0.05) # 95% conf
else:
fc, se, conf = model_fit.forecast(len(empty_df.index), alpha=0.05)
fc_series = pd.Series(fc, index=empty_df.index)
lower_series = pd.Series(conf[:, 0], index=empty_df.index)
upper_series = pd.Series(conf[:, 1], index=empty_df.index)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 1 16:06:36 2017
@author: Gonxo
This file is merges the different grid and solar feed into a single feed. Also
it deseasonalizes the data in hour of the daya, day of the week, and month of
the year. Finally it computes ANOVA tests to check seasonal variations significancy.
NOTE: This code has been writen in a windows machine. To use it in a linux
machine change the directory address form '\\' to '/'
"""
import pandas as pd
import time
import mlfunctions as mlf
start = time.time()
# Set up the destination and secrects directory
dataDir = 'C:\\Users\\Gonxo\\ML-energy-use\\DATA_DIRECTORY'
secretsDir = 'C:\\Users\\Gonxo\\ML-energy-use\\SECRETS_DIRECTORY'
apiDic = pd.read_csv(secretsDir+'\\apiKeyDictionary.csv',sep=None, engine='python')
sourceFile = dataDir+'\\15min_noNaNs_201703081045.h5'
print('here')
store = pd.HDFStore('C:\\Users\\Gonxo\\ML-energy-use\\DATA_DIRECTORY\\aggregated_fs.h5')
print('here')
# Loading relevant data after removing NaNs adding grid power and solar power.
# House consumption = Grid power + solar power
# HC = house_consumption
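# Illustrative sketch (an assumption, not the code that follows): with both feeds indexed by
# timestamp, the merge amounts to something like
#     house_consumption = grid_power.add(solar_power, fill_value=0)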
with pd.HDFStore(sourceFile)
import os
import unittest
import random
import sys
import site # so that ai4water directory is in path
ai4_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
site.addsitedir(ai4_dir)
import scipy
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from ai4water import Model
from ai4water.preprocessing import DataHandler, SiteDistributedDataHandler
from ai4water.preprocessing.datahandler import MultiLocDataHandler
from ai4water.datasets import load_u1, arg_beach
os.environ['PYTHONHASHSEED'] = '313'
random.seed(313)
np.random.seed(313)
# todo, check last dimension of x,y
# todo test with 3d y
def _check_xy_equal_len(x, prev_y, y, lookback, num_ins, num_outs, num_examples, data_type='training'):
feat_dim = 1
if lookback > 1:
assert x.shape[1] == lookback
feat_dim = 2
assert x.shape[
feat_dim] == num_ins, f"for {data_type} x's shape is {x.shape} while num_ins of dataloader are {num_ins}"
if y is not None:
assert y.shape[1] == num_outs, f"for {data_type} y's shape is {y.shape} while num_outs of dataloader are {num_outs}"
else:
assert num_outs == 0
y = x # just for next statement to run
if prev_y is None:
prev_y = x # just for next statement to run
assert x.shape[0] == y.shape[0] == prev_y.shape[
0], f"for {data_type} xshape: {x.shape}, yshape: {y.shape}, prevyshape: {prev_y.shape}"
if num_examples:
assert x.shape[
0] == num_examples, f'for {data_type} x contains {x.shape[0]} samples while expected samples are {num_examples}'
return
def assert_xy_equal_len(x, prev_y, y, data_loader, num_examples=None, data_type='training'):
if isinstance(x, np.ndarray):
_check_xy_equal_len(x, prev_y, y, data_loader.lookback, data_loader.num_ins, data_loader.num_outs, num_examples,
data_type=data_type)
elif isinstance(x, list):
while len(y)<len(x):
y.append(None)
for idx, i in enumerate(x):
_check_xy_equal_len(i, prev_y[idx], y[idx], data_loader.lookback[idx], data_loader.num_ins[idx],
data_loader.num_outs[idx], num_examples, data_type=data_type
)
elif isinstance(x, dict):
for key, i in x.items():
_check_xy_equal_len(i, prev_y.get(key, None), y.get(key, None), data_loader.lookback[key], data_loader.num_ins[key],
data_loader.num_outs[key], num_examples, data_type=data_type
)
elif x is None: # all should be None
assert all(v is None for v in [x, prev_y, y])
else:
raise ValueError
def _check_num_examples(train_x, val_x, test_x, val_ex, test_ex, tot_obs):
val_examples = 0
if val_ex:
val_examples = val_x.shape[0]
test_examples = 0
if test_ex:
test_examples = test_x.shape[0]
xyz_samples = train_x.shape[0] + val_examples + test_examples
# todo, should be equal
assert xyz_samples == tot_obs, f"""
data_loader has {tot_obs} examples while sum of train/val/test examples are {xyz_samples}."""
def check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader):
if isinstance(train_x, np.ndarray):
_check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader.tot_obs_for_one_df())
elif isinstance(train_x, list):
for idx in range(len(train_x)):
_check_num_examples(train_x[idx], val_x[idx], test_x[idx], val_ex, test_ex,
data_loader.tot_obs_for_one_df()[idx])
return
def check_inverse_transformation(data, data_loader, y, cols, key):
if cols is None:
# not output columns, so not checking
return
# check that after inverse transformation, we get correct y.
if data_loader.source_is_df:
train_y_ = data_loader.inverse_transform(data=pd.DataFrame(y.reshape(-1, len(cols)), columns=cols), key=key)
train_y_, index = data_loader.deindexify(train_y_, key=key)
compare_individual_item(data, key, cols, train_y_, data_loader)
elif data_loader.source_is_list:
#for idx in range(data_loader.num_sources):
# y_ = y[idx].reshape(-1, len(cols[idx]))
train_y_ = data_loader.inverse_transform(data=y, key=key)
train_y_, _ = data_loader.deindexify(train_y_, key=key)
for idx, y in enumerate(train_y_):
compare_individual_item(data[idx], f'{key}_{idx}', cols[idx], y, data_loader)
elif data_loader.source_is_dict:
train_y_ = data_loader.inverse_transform(data=y, key=key)
train_y_, _ = data_loader.deindexify(train_y_, key=key)
for src_name, val in train_y_.items():
compare_individual_item(data[src_name], f'{key}_{src_name}', cols[src_name], val, data_loader)
def compare_individual_item(data, key, cols, y, data_loader):
if y is None:
return
train_index = data_loader.indexes[key]
if y.__class__.__name__ in ['DataFrame']:
y = y.values
for i, v in zip(train_index, y):
if len(cols) == 1:
if isinstance(train_index, pd.DatetimeIndex):
# if true value in data is None, y's value should also be None
if np.isnan(data[cols].loc[i]).item():
assert np.isnan(v).item()
else:
_t = round(data[cols].loc[i].item(), 0)
_p = round(v.item(), 0)
if not np.allclose(data[cols].loc[i].item(), v.item()):
print(f'true: {_t}, : pred: {_p}, index: {i}, col: {cols}')
else:
if isinstance(v, np.ndarray):
v = round(v.item(), 3)
_true = round(data[cols].loc[i], 3).item()
_p = round(v, 3)
if _true != _p:
print(f'true: {_true}, : pred: {_p}, index: {i}, col: {cols}')
else:
if isinstance(train_index, pd.DatetimeIndex):
assert abs(data[cols].loc[i].sum() - np.nansum(v)) <= 0.00001, f'{data[cols].loc[i].sum()},: {v}'
else:
assert abs(data[cols].iloc[i].sum() - v.sum()) <= 0.00001
def check_kfold_splits(data_handler):
if data_handler.source_is_df:
splits = data_handler.KFold_splits()
for (train_x, train_y), (test_x, test_y) in splits:
... # print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
return
def assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader):
if isinstance(train_y, list):
assert isinstance(val_y, list)
assert isinstance(test_y, list)
train_y = train_y[0]
val_y = val_y[0]
test_y = test_y[0]
if isinstance(train_y, dict):
train_y = list(train_y.values())[0]
assert isinstance(val_y, dict)
isinstance(test_y, dict)
val_y = list(val_y.values())[0]
test_y = list(test_y.values())[0]
if out_cols is not None:
b = train_y.reshape(-1, )
if val_y is None:
a = test_y.reshape(-1, )
else:
a = val_y.reshape(-1, )
if not len(np.intersect1d(a, b)) == 0:
raise ValueError(f'train and val have overlapping values')
if data_loader.val_data != 'same' and out_cols is not None and val_y is not None and test_y is not None:
a = test_y.reshape(-1,)
b = val_y.reshape(-1,)
assert len(np.intersect1d(a, b)) == 0, 'test and val have overlapping values'
return
def build_and_test_loader(data, config, out_cols, train_ex=None, val_ex=None, test_ex=None, save=True,
assert_uniqueness=True, check_examples=True,
true_train_y=None, true_val_y=None, true_test_y=None):
config['teacher_forcing'] = True # todo
if 'val_fraction' not in config:
config['val_fraction'] = 0.3
if 'test_fraction' not in config:
config['test_fraction'] = 0.3
data_loader = DataHandler(data=data, save=save, verbosity=0, **config)
#dl = DataLoader.from_h5('data.h5')
train_x, prev_y, train_y = data_loader.training_data(key='train')
assert_xy_equal_len(train_x, prev_y, train_y, data_loader, train_ex)
val_x, prev_y, val_y = data_loader.validation_data(key='val')
assert_xy_equal_len(val_x, prev_y, val_y, data_loader, val_ex, data_type='validation')
test_x, prev_y, test_y = data_loader.test_data(key='test')
assert_xy_equal_len(test_x, prev_y, test_y, data_loader, test_ex, data_type='test')
if check_examples:
check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader)
if isinstance(data, str):
data = data_loader.data
check_inverse_transformation(data, data_loader, train_y, out_cols, 'train')
if val_ex:
check_inverse_transformation(data, data_loader, val_y, out_cols, 'val')
if test_ex:
check_inverse_transformation(data, data_loader, test_y, out_cols, 'test')
check_kfold_splits(data_loader)
if assert_uniqueness:
assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader)
if true_train_y is not None:
assert np.allclose(train_y, true_train_y)
if true_val_y is not None:
assert np.allclose(val_y, true_val_y)
if true_test_y is not None:
assert np.allclose(test_y, true_test_y)
return data_loader
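# Descriptive note: build_and_test_loader exercises the full DataHandler contract
# (training_data, validation_data and test_data each return an (x, prev_y, y) triple) and then
# cross-checks example counts, inverse transformations, KFold splits and train/val/test
# uniqueness in one place before handing the handler back to the individual tests.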
class TestAllCases(object):
def __init__(self, input_features, output_features, lookback=3, allow_nan_labels=0, save=True):
self.input_features = input_features
self.output_features = output_features
self.lookback = lookback
self.allow_nan_labels=allow_nan_labels
self.save=save
self.run_all()
def run_all(self):
all_methods = [m for m in dir(self) if callable(getattr(self, m)) and not m.startswith('_') and m not in ['run_all']]
for m in all_methods:
getattr(self, m)()
return
def test_basic(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback}
tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49
val_examples = 22 - (self.lookback - 2) if self.lookback>1 else 22
test_examples = 30 - (self.lookback - 2) if self.lookback>1 else 30
if self.output_features == ['c']:
tty = np.arange(202, 250).reshape(-1, 1, 1)
tvy = np.arange(250, 271).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, val_examples, test_examples,
save=self.save,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
check_examples=True,
)
assert loader.source_is_df
return
def test_with_random(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random'}
tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 20, 30,
save=self.save,
)
assert loader.source_is_df
return
def test_drop_remainder(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'batch_size': 8,
'drop_remainder': True,
'train_data': 'random'}
loader = build_and_test_loader(data, config, self.output_features,
48, 16, 24,
check_examples=False,
save=self.save,
)
assert loader.source_is_df
return
def test_with_same_val_data(self):
# val_data is "same" as and train_data is make based upon fractions.
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_data': 'same'}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
tvy = np.arange(271, 300).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 29, 29,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save,
check_examples=False
)
assert loader.source_is_df
return
def test_with_same_val_data_and_random(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_data': 'same'}
tr_examples = 70 - (self.lookback - 1) if self.lookback > 1 else 70
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 30, 30,
check_examples=False,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_val_data(self):
# we dont' want to have any validation_data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_fraction': 0.0}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 0, 29,
true_train_y=tty,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_no_val_data_with_random(self):
# we dont' want to have any validation_data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_fraction': 0.0}
tr_examples = 70 - (self.lookback - 1) if self.lookback > 1 else 70
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 0, 30,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_test_data(self):
# we don't want any test_data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'test_fraction': 0.0}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
tvy = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 29, 0,
true_train_y=tty,
true_val_y=tvy,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_test_data_with_random(self):
# we don't want any test_data
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'test_fraction': 0.0,
'transformation': 'minmax'}
tr_examples = 15- (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 0,
save=self.save)
assert loader.source_is_df
return
def test_with_dt_index(self):
# we don't want any test_data
#print('testing test_with_dt_index', self.lookback)
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=20, freq='D'))
config = {'input_features': self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'test_fraction': 0.0,
'transformation': 'minmax'}
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 0,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals(self):
#print('testing test_with_intervals', self.lookback)
examples = 35
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=35, freq='D'))
import numpy as np
import pandas as pd
from lightgbm import LGBMClassifier, LGBMRegressor
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GroupKFold
from src.python.space_configs import space_lightgbm, tune_model, gp_minimize, forest_minimize
from ml_metrics import rmse
train_df = pd.read_parquet('input/processed/train_encoded_features.parquet.gzip')
train_y = np.log1p(train_df["totals.transactionRevenue"])
y_clf = (train_df['totals.transactionRevenue'] > 0).astype(np.uint8)
train_id = train_df["fullVisitorId"]
train_df = train_df.drop(["totals.transactionRevenue", 'date'], axis=1)
test_df = pd.read_parquet('input/processed/test_encoded_features.parquet.gzip')
test_id = test_df["fullVisitorId"].values.astype(str)
test_df = test_df.drop(['date'], axis=1)
# Classify non-zero revenues
folds = GroupKFold(n_splits=10)
oof_clf_preds = np.zeros(train_df.shape[0])
sub_clf_preds = np.zeros(test_df.shape[0])
excluded_features = ['fullVisitorId']
train_features = [_f for _f in train_df.columns if _f not in excluded_features]
for fold_, (trn_, val_) in enumerate(folds.split(y_clf, y_clf, groups=train_df['fullVisitorId'])):
trn_x, trn_y = train_df[train_features].iloc[trn_], y_clf.iloc[trn_]
val_x, val_y = train_df[train_features].iloc[val_], y_clf.iloc[val_]
clf = LGBMClassifier(n_jobs=-1,
n_estimators=10000,
random_state=56,
max_depth=8,
min_child_samples=40,
reg_alpha=0.4,
reg_lambda=0.1,
num_leaves=290,
learning_rate=0.01,
subsample=0.8,
colsample_bytree=0.9,
silent=True)
clf.fit(trn_x, trn_y, eval_set=[(val_x, val_y)], early_stopping_rounds=25, verbose=25)
oof_clf_preds[val_] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)[:, 1]
print(roc_auc_score(val_y, oof_clf_preds[val_]))
sub_clf_preds += clf.predict_proba(test_df[train_features],
num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
roc_auc_score(y_clf, oof_clf_preds)
# Add predictions to the data set
train_df['non_zero_proba'] = oof_clf_preds
test_df['non_zero_proba'] = sub_clf_preds
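# Descriptive note: the out-of-fold classifier probabilities are appended as a feature
# ("non_zero_proba") so the downstream regressor can condition on how likely a visit is to
# produce any revenue at all, a simple form of stacking.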
# Predict Revenues by folds.
for fold_, (trn_, val_) in enumerate(folds.split(train_y, train_y, groups=train_df['fullVisitorId'])):
if fold_ == 0:
trn_x, trn_y = train_df[train_features].iloc[trn_], train_y.iloc[trn_].fillna(0)
val_x, val_y = train_df[train_features].iloc[val_], train_y.iloc[val_].fillna(0)
print("Tuning LGBMRegressor")
space = space_lightgbm()
clf = LGBMRegressor(n_jobs=-1, random_state=56, objective='regression', verbose=-1)
model = tune_model(trn_x, trn_y, val_x, val_y, clf,
space=space, metric=rmse, n_calls=50, min_func=forest_minimize)
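# Note: hyper-parameters are tuned on the first fold only (fold_ == 0) and the resulting
# LGBMRegressor configuration is reused for every fold in the training loop below.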
oof_reg_preds = np.zeros(train_df.shape[0])
sub_reg_preds = np.zeros(test_df.shape[0])
importances = pd.DataFrame()
for fold_, (trn_, val_) in enumerate(folds.split(train_y, train_y, groups=train_df['fullVisitorId'])):
trn_x, trn_y = train_df[train_features].iloc[trn_], train_y.iloc[trn_].fillna(0)
val_x, val_y = train_df[train_features].iloc[val_], train_y.iloc[val_].fillna(0)
reg = model
reg.fit(trn_x, trn_y, eval_set=[(val_x, val_y)], early_stopping_rounds=50, verbose=50)
imp_df = pd.DataFrame()
imp_df['feature'] = train_features
imp_df['gain'] = reg.booster_.feature_importance(importance_type='gain')
imp_df['fold'] = fold_ + 1
importances = pd.concat([importances, imp_df], axis=0, sort=False)
# Import libraries
import os.path
import os
import datetime
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from gensim.models import Word2Vec
from pandas import read_csv
import re
import pandas as pd
import numpy as np
import itertools
import sys
def get_dict(file): # This function builds a dict mapping protein IDs to their sequences, or molecule IDs to their fingerprints
if file=='df_molecule.csv':
fig_dict=pd.read_csv(file)[['Molecule_ID','Fingerprint']].T.to_dict('series')
elif file=='df_protein_train.csv' or file=='df_protein_test.csv' :
pro=open(file,'r').read().upper() # convert lowercase letters in the protein sequence file to uppercase
pro_out=open(file,'w')
pro_out.write(pro)
pro_out.close()
fig_dict=pd.read_csv(file)[['PROTEIN_ID','SEQUENCE']].T.to_dict('series')
else:
print('File format error')
sys.exit()
return fig_dict
def get_new_pro(id_pro, pramate_file): # This function returns the numeric matrix obtained from digitising the protein sequences
pro_result={}
for key,valuex in id_pro.items():
value=list(valuex)[-1]
length=len(value)
pro_mol={'G':75.07,'A':89.09,'V':117.15,'L':131.17,'I':131.17,'F':165.19,'W':204.23,'Y':181.19,'D':133.10,'N':132.12,'E':147.13,'K':146.19,'Q':146.15,'M':149.21,'S':105.09,'T':119.12,'C':121.16,'P':115.13,'H':155.16,'R':174.20}
pramate_file_dict = pd.read_csv(pramate_file, index_col='aa').T.to_dict('series')
pro_n_8_maxitic=np.array([pramate_file_dict[value[0]],pramate_file_dict[value[1]]])
pro_line=np.array([pro_mol[value[0]],pro_mol[value[1]]])
for i in value[2:]:
pro_n_8_maxitic=np.row_stack((pro_n_8_maxitic,pramate_file_dict[i])) # build the n x n_properties computation matrix
pro_line= np.append(pro_line,pro_mol[i])
Lag=list(np.dot(pro_line,pro_n_8_maxitic)/float(length))
Lag=[ str(i) for i in Lag ]
pro_result[str(key)] =str(key)+','+','.join(Lag)
return pro_result
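# Descriptive note: for a sequence of length n, get_new_pro builds an n x n_properties matrix
# from the amino-acid parameter file and a length-n vector of residue molecular weights, then
# returns np.dot(weights, properties) / n, i.e. a molecular-weight-weighted sum of each
# amino-acid property scaled by the sequence length.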
def get_AC_figuer(file_fig_dict): # This function returns the numeric matrix obtained from digitising the molecular fingerprints
fig = []
for i in itertools.product('01', repeat=8):
fig.append(''.join(list(i)))
out={}
for k, vx in file_fig_dict.items():
fig_nu_dict = {}
v=''.join([ str(i) for i in list(vx)[1:] ]).replace(', ','')
s = 0
e = 8
for ii in range(len(v) - 7):
read = v[s:e]
if read in fig_nu_dict:
fig_nu_dict[read] = fig_nu_dict[read] + 1
else:
fig_nu_dict[read] = 1
s = s + 1
e = e + 1
fig_list=[]
for i in fig:
if i in fig_nu_dict:
fig_list.append(str(fig_nu_dict[i]))
else:
fig_list.append('0')
out[str(k)]=str(k)+','+','.join(fig_list)
return out
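# Descriptive note: get_AC_figuer slides an 8-character window over the 0/1 fingerprint string
# and counts how often each of the 256 possible 8-bit patterns occurs; for example the string
# "110000000" contains the windows "11000000" and "10000000" once each.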
def merge_file(new_fig,new_pro,pro_mol_id_file,out_file): # This function merges the protein sequence matrix, the fingerprint matrix and the 18 small-molecule properties
df=pd.read_csv(pro_mol_id_file)
new_pro=pd.read_csv('new_pro.list',sep='\t')
new_fig=pd.read_csv('new_fig.list',sep='\t')
nu_18=pd.read_csv('df_molecule.csv')[['Molecule_ID','cyp_3a4','cyp_2c9','cyp_2d6','ames_toxicity','fathead_minnow_toxicity','tetrahymena_pyriformis_toxicity','honey_bee','cell_permeability','logP','renal_organic_cation_transporter','CLtotal','hia','biodegradation','Vdd','p_glycoprotein_inhibition','NOAEL','solubility','bbb']]
df['Protein_ID']=df['Protein_ID'].astype(int)
result=pd.merge(new_pro,df,on='Protein_ID')
result=pd.merge(new_fig, result, on='Molecule_ID')
result=pd.merge(nu_18, result, on='Molecule_ID')
del result['Molecule_ID']
del result['Protein_ID']
result.to_csv(out_file,header=True,index=False)
def pro_mol_result(df_protein,df_molecule,df_affinity,df_out): # This function calls the other functions to generate the final analysis matrix
new_fig=pd.DataFrame([get_AC_figuer(get_dict(df_molecule))]).T[0].str.split(',', expand=True)
new_fig.columns = ['Molecule_ID'] + ['Molecule_%s'%i for i in range(256)]
new_fig.to_csv('new_fig.list',sep='\t',index=False)
new_pro=pd.DataFrame([get_new_pro(get_dict(df_protein),'aa2.csv')]).T[0].str.split(',', expand=True)
new_pro.columns = ['Protein_ID'] + ['Protein_%s'%i for i in range(14)]
new_pro.to_csv('new_pro.list',sep='\t',index=False)
merge_file(new_fig,new_pro,df_affinity,df_out)
os.remove('new_fig.list')
os.remove('new_pro.list')
"蛋白质参数14,单次最好成绩1.31"
def protein_14():
print('------------------------------------- 14 protein features; best single-run score 1.31 -----------------------------------------------------')
dataset = pd.read_csv('df_train.csv')
result = pd.read_csv('df_test.csv')
    # number of feature columns
    NUM = 288
    array = dataset.values
    X = array[:, 0:NUM] # the first NUM columns are the input features
    Y = array[:, NUM] # the column after the features is the Ki value
    validation_size = 0.2 # 80/20 split
    seed = 7
    # split into a model-fitting set and a hold-out validation set
    X_model, X_pred, Y_model, Y_pred = train_test_split(X, Y, test_size=validation_size,
                                                        random_state=seed) # X_model/Y_model for fitting, X_pred/Y_pred for validation
    print('Hold-out validation split...')
    print('model set', X_model.shape)
    print('validation set', X_pred.shape)
    # sanity-check the target values
    print(Y)
train = lgb.Dataset(X_model, label=Y_model)
valid = train.create_valid(X_pred, label=Y_pred)
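    # Note: lgb.Dataset.create_valid builds the validation Dataset against the
    # training Dataset's feature binning, so the train/valid metrics reported by
    # early stopping below are computed on comparable histograms.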
for i in range(5):
params = {
'boosting_type': 'gbdt',
'objective': 'rmse',
'metric': 'rmse',
'min_child_weight': 3,
'num_leaves': 20,
'lambda_l2': 10,
'subsample': 0.7,
'colsample_bytree': 0.7,
'learning_rate': 0.05,
'seed': 2017,
'nthread': 12,
'bagging_fraction': 0.7,
'bagging_freq': 100,
}
starttime = datetime.datetime.now()
num_round = 40000
gbm = lgb.train(params,
train,
num_round,
verbose_eval=500,
valid_sets=[train, valid],
early_stopping_rounds=200
)
endtime = datetime.datetime.now()
print((endtime - starttime))
        nowTime = datetime.datetime.now().strftime('%m%d%H%M')  # current timestamp
name = 'result/' + 'lgb_' + nowTime + '-' + str(num_round) + '.csv'
result = pd.read_csv('df_test.csv')
pred = gbm.predict(result.values[:, :NUM])
        result = pd.read_csv('dataset/df_affinity_test_toBePredicted.csv')
result['Ki'] = pred
result.to_csv(name, index=False)
print(result.head(10))
        print('Output finished...')
"词向量处理方式,单次训练结果上交最好1.29"
def wordvec_way():
    print('------------------------------------- Word-vector approach, best single submitted result 1.29 -----------------------------------------------------')
    # load data
df_protein_train = pd.read_csv('dataset/df_protein_train.csv') # 1653
df_protein_test = pd.read_csv('dataset/df_protein_test.csv') # 414
protein_concat = pd.concat([df_protein_train, df_protein_test])
df_molecule = pd.read_csv('dataset/df_molecule.csv') # 111216
df_affinity_train = pd.read_csv('dataset/df_affinity_train.csv') # 165084
df_affinity_test = pd.read_csv('dataset/df_affinity_test_toBePredicted.csv') # 41383
df_affinity_test['Ki'] = -11
data = pd.concat([df_affinity_train, df_affinity_test])
    # 1. expand the molecular Fingerprint column into 167 separate features
feat = []
for i in range(0, len(df_molecule)):
feat.append(df_molecule['Fingerprint'][i].split(','))
feat = pd.DataFrame(feat)
feat = feat.astype('int')
feat.columns = ["Fingerprint_{0}".format(i) for i in range(0, 167)]
feat["Molecule_ID"] = df_molecule['Molecule_ID']
data = data.merge(feat, on='Molecule_ID', how='left')
    # 2. add the other df_molecule features
feat = df_molecule.drop('Fingerprint', axis=1)
data = data.merge(feat, on='Molecule_ID', how='left')
    # 3. train word vectors on the protein sequences
n = 128
texts = [[word for word in re.findall(r'.{3}', document)]
for document in list(protein_concat['Sequence'])]
model = Word2Vec(texts, size=n, window=4, min_count=1, negative=3,
sg=1, sample=0.001, hs=1, workers=4)
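    # The regex `.{3}` above cuts every sequence into consecutive, non-overlapping
    # 3-mers, and skip-gram Word2Vec (sg=1) learns one 128-dimensional vector per
    # 3-mer (gensim < 4.0 API: `size`, `model.wv.vocab`, `model[word]`).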
vectors = pd.DataFrame([model[word] for word in (model.wv.vocab)])
vectors['Word'] = list(model.wv.vocab)
vectors.columns = ["vec_{0}".format(i) for i in range(0, n)] + ["Word"]
wide_vec = pd.DataFrame()
result1 = []
aa = list(protein_concat['Protein_ID'])
for i in range(len(texts)):
result2 = []
for w in range(len(texts[i])):
result2.append(aa[i])
result1.extend(result2)
wide_vec['Id'] = result1
result1 = []
for i in range(len(texts)):
result2 = []
for w in range(len(texts[i])):
result2.append(texts[i][w])
result1.extend(result2)
wide_vec['Word'] = result1
del result1, result2
wide_vec = wide_vec.merge(vectors, on='Word', how='left')
wide_vec = wide_vec.drop('Word', axis=1)
wide_vec.columns = ['Protein_ID'] + ["vec_{0}".format(i) for i in range(0, n)]
del vectors
name = ["vec_{0}".format(i) for i in range(0, n)]
feat = pd.DataFrame(wide_vec.groupby(['Protein_ID'])[name].agg('mean')).reset_index()
feat.columns = ["Protein_ID"] + ["mean_ci_{0}".format(i) for i in range(0, n)]
data = data.merge(feat, on='Protein_ID', how='left')
#################################### lgb ############################
train_feat = data[data['Ki'] > -11].fillna(0)
testt_feat = data[data['Ki'] <= -11].fillna(0)
label_x = train_feat['Ki']
label_y = testt_feat['Ki']
submission = testt_feat[['Protein_ID', 'Molecule_ID']]
len(testt_feat)
train_feat = train_feat.drop('Ki', axis=1)
testt_feat = testt_feat.drop('Ki', axis=1)
train_feat = train_feat.drop('Protein_ID', axis=1)
testt_feat = testt_feat.drop('Protein_ID', axis=1)
train_feat = train_feat.drop('Molecule_ID', axis=1)
testt_feat = testt_feat.drop('Molecule_ID', axis=1)
    # LightGBM model
train = lgb.Dataset(train_feat, label=label_x)
test = lgb.Dataset(testt_feat, label=label_y, reference=train)
for i in range(5):
params = {
'boosting_type': 'gbdt',
'objective': 'regression_l2',
'metric': 'l2',
'min_child_weight': 3,
'num_leaves': 2 ** 5,
'lambda_l2': 10,
'subsample': 0.7,
'colsample_bytree': 0.7,
'learning_rate': 0.05,
'seed': 1600,
'nthread': 12,
'bagging_fraction': 0.8,
'bagging_freq': 100,
}
num_round = 25000
gbm = lgb.train(params,
train,
num_round,
verbose_eval=200,
valid_sets=[train, test]
)
preds_sub = gbm.predict(testt_feat)
        # save results
        nowTime = datetime.datetime.now().strftime('%m%d%H%M')  # current timestamp
name = 'result/lgb_' + nowTime + '.csv'
submission['Ki'] = preds_sub
submission.to_csv(name, index=False)
"将两种模型求平均,单词最好1.25"
def result_analyze():
    print('------------------------------------- Average the two model outputs, best single score 1.25 -----------------------------------------------------')
file_dir = 'result'
for root, dirs, files in os.walk(file_dir):
        print(root)  # current directory path
        print(dirs)  # all subdirectories under the current path
        print(files)  # all non-directory files under the current path
sum = np.zeros(41383)
for i in files:
            # os.path.splitext(): split the filename from its extension
            if os.path.splitext(i)[1] == '.csv':
                print(i)
result = | pd.read_csv('result/' + i) | pandas.read_csv |
# Author: <NAME> and <NAME>
# Plant and Food Research New Zealand and UNSW Sydney
#!/usr/bin/env python3
#! module load pfr-python3/3.6.5
print('Here we go...')
import sys
import pyqrcode
import os, os.path # For PATH etc. https://docs.python.org/2/library/os.html
import sys # For handling command line args
from glob import glob # For Unix style finding pathnames matching a pattern (like regexp)
import cv2 # Image processing, OpenCV
import pyzbar.pyzbar as pyzbar # QR code processing
from pyzbar.pyzbar import ZBarSymbol
import numpy as np
from numpy import array
import datetime as dt
from datetime import datetime
import re # Regular expressions used for file type matching
import random # For making random characters
import string # For handling strings
import shutil # For file copying
from fpdf import FPDF
from PIL import Image # For image rotation
import math # For radians / degrees conversions
from input.image_manipulations import *
from input.functions import *
import skimage
from skimage import measure
import pandas as pd
from tqdm import tqdm_notebook as tqdm
import matplotlib.pyplot as plt
# GET ARGUMENTS -----------------------------------------------------------------------
args = sys.argv
print(str(args[1]))
# Set the maximum number of iterations
max_iter = 1000
if len(args) > 1:
try:
max_iter = int(args[1])
except:
print('max_iter argument not an integer.')
sys.exit()
# How often to save a CSV (number of images)
save_every = int(max_iter / 2)
if len(args) > 2:
try:
save_every = int(args[2])
except:
print('save_every argument not an integer.')
# SETUP FOLDERS -----------------------------------------------------------------------
if not os.path.exists('input'):
os.makedirs('input')
if not os.path.exists('output'):
os.makedirs('output')
run_datetime = datetime.now().strftime("%Y-%m-%d_%H-%M")
run_id = randomStringswithDigitsAndSymbols(1, 5)[0] # Necessary for simultaneous jobs
results_id = run_datetime + '_' + run_id
print(results_id)
file_output_path = os.path.join(os.sep,'output','projects','qr_metadata','run_results',('qr_eval_' + results_id))
print(file_output_path)
file_input_path = os.path.join('input','backgrounds')
if not os.path.exists(file_output_path):
os.makedirs(file_output_path)
if not os.path.exists(file_input_path):
os.makedirs(file_input_path)
# SETUP INPUT IMAGES -----
print(os.path.abspath(file_input_path))
input_files = [f for f in os.listdir(file_input_path) if
re.search(r'.*\.(jpg|png|bmp|dib|jpe|jpeg|jp2|tif|tiff)$', f)]
input_files = list(map(lambda x: os.path.join(file_input_path, x), input_files))
num_input_files = len(input_files)
if num_input_files < 1:
print("Warning: No image files in input directory: ", file_input_path)
exit(0)
# SETUP VARIABLES ------------------------------------------------------------------
start_time = dt.datetime.now()
max_image_dimension = 2000 # Maximum image dimension, if greater than this will be resized before processing (output image is also resized)
print(str(num_input_files), "background files in", file_input_path, "directory")
bg_file_list = input_files
code_centre = (200, 100) # Origin of the centre of the code (x, y) (x to the right, y down)
# ANALYSIS SETUP -------------------------------------------------------------------------------
results = pd.DataFrame()
image_count = 0
save_count = 1
max_images = int((max_iter - max_iter % len(bg_file_list)) / len(bg_file_list)) # Set maximum number of iterations per background image
# RUN ANALYSIS -------------------------------------------------------------------------------------
for i in range(max_images):
#Randomise variables
code_chars = np.random.randint(32,513)
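    # code_chars is the total payload length: (code_chars - 5) random characters
    # plus the 5-character run_id appended below, so every generated code can be
    # traced back to this run.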
code_content = randomStringswithDigitsAndSymbols(1, code_chars - 5)[0] + run_id
code_redundancy = np.random.choice(['L','M','H'])
code_pixels = np.random.randint(25,201)
rotation = np.random.randint(0,46)
perspective = np.random.uniform(0.5,1.0)
noise = np.random.uniform(0.00,0.10)
shadow = [0]
saturation = [0]
bright_value = np.random.randint(128,256)
dark_value = np.random.randint(0,256)
# blur = np.random.uniform(0.0,0.025)
compression = np.random.randint(2,101)
# Generate QR code
qr_array_binary = generate_QR_code(code_content, code_redundancy)
# Add salt and pepper noise as well as set dark and bright values
qr_array_255 = maw_add_sp_noise_and_set_dark_and_bright_values(
qr_array_binary, noise, dark_value, bright_value)
# Arbitrarily increase image size before further manipulations that may
# degrade its quality
qr_scale_factor = 30
qr_array_scaled_up = maw_scale_up_with_alpha(qr_array_255, qr_scale_factor)
# Add perspective effects
qr_warped = maw_warp_perspective(qr_array_scaled_up, perspective)
# Rotate QR code in image, handling background as alpha, so extra area can
# be pasted over a background
qr_array_rotated = maw_rotate_image(qr_warped, rotation)
# Resize the QR code to be the desired size
scale_factor = (code_pixels / len(qr_array_255)) / qr_scale_factor
qr_array_rescaled = cv2.resize(qr_array_rotated, None, fx=scale_factor,
fy=scale_factor, interpolation=cv2.INTER_AREA)
# # Blur QR code
# blur_kernel = int(qr_height * blur)
# print(qr_height)
# print(blur_kernel)
# if blur_kernel > 0:
# qr_array_blurred = cv2.blur(qr_array_rescaled,(blur_kernel,blur_kernel))
# else:
# qr_array_blurred = qr_array_rescaled
qr_code = qr_array_rescaled
# Write QR codes only
qr_code_id = ("CC-" + code_content[:5] + "_CH-" + str("{:d}".format(code_chars)) + "_CR-" + code_redundancy
+ "_CP-" + str("{:d}".format(code_pixels)) + "_CR-" + str("{:d}".format(rotation)) + "_P-" + str("{:3f}".format(perspective)) + "_N-" + str("{:3f}".format(noise))
+ "_BV-" + str("{:d}".format(bright_value)) + "_DV-" + str("{:d}".format(dark_value))# + "_BL-" + str("{:3f}".format(blur))
+ "_CO-" + str("{:d}".format(compression)))
qr_code_img = Image.fromarray(qr_code)
for bgfile in bg_file_list:
image_count += 1
# Save CSV whenever a certain number of images have been analysed
if save_count == save_every:
results.tail(save_every).to_csv(os.path.join(file_output_path,('temp_results_' + str(image_count) + '_' + results_id + ".csv")),index=False)
results = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import datetime
import yfinance as yf
from pandas_datareader import data as pdr
from flask import current_app
from stk_predictor.extensions import db
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
def get_ticker_from_yahoo(ticker, start_date, end_date):
yf.pdr_override()
try:
new_trading_df = pdr.get_data_yahoo(
ticker, start_date, end_date, interval='1d')
new_trading_df = new_trading_df.drop(
['Open', 'High', 'Low', 'Adj Close'], axis=1)
new_trading_df = new_trading_df.dropna('index')
new_trading_df = new_trading_df.reset_index()
new_trading_df.columns = ['trading_date',
'intraday_close', 'intraday_volumes']
his_trading_df = pd.read_sql('aapl', db.engine, index_col='id')
df = pd.concat([his_trading_df, new_trading_df]
).drop_duplicates('trading_date')
df = df.sort_values(by='trading_date')
df = df.reset_index(drop=True)
if len(df) > 0:
df.to_sql("aapl", db.engine, if_exists='replace', index_label='id')
return df
else:
# t = pd.read_sql('aapl', db.engine, index_col='id')
return None
except Exception as ex:
        raise RuntimeError(
            "Caught exception when retrieving data from Yahoo...", ex)
return None
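# Illustrative usage only (assumes the Flask app and db engine are initialised):
#     df = get_ticker_from_yahoo('AAPL', '2020-01-01', '2020-12-31')
# appends fresh Yahoo Finance rows to the stored `aapl` table and returns the
# merged, de-duplicated frame.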
def get_news_from_finviz(ticker):
"""Request news headline from finviz, according to
company ticker's name
Parameters
-----------
ticker: str
the stock ticker name
Return
----------
df : pd.DataFrame
return the latest 2 days news healines.
"""
current_app.logger.info("Job >> Enter Finviz news scrape step...")
base_url = 'https://finviz.com/quote.ashx?t={}'.format(ticker)
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36'
}
parsed_news = []
try:
res = requests.get(base_url, headers=headers)
if res.status_code == 200:
texts = res.text
soup = BeautifulSoup(texts)
news_tables = soup.find(id="news-table")
for x in news_tables.findAll('tr'):
text = x.a.get_text()
date_scrape = x.td.text.split()
if len(date_scrape) == 1:
time = date_scrape[0]
else:
date = date_scrape[0]
time = date_scrape[1]
parsed_news.append([date, time, text])
# filter the recent day news
df = pd.DataFrame(parsed_news, columns=['date', 'time', 'texts'])
df['date'] = pd.to_datetime(df.date).dt.date
one_day_period = (datetime.datetime.today() -
datetime.timedelta(days=1)).date()
df_sub = df[df.date >= one_day_period]
return df_sub
else:
raise RuntimeError("HTTP response Error {}".format(
res.status_code)) from None
except Exception as ex:
current_app.logger.info("Exception in scrape Finviz.", ex)
raise RuntimeError("Exception in scrape Finviz.") from ex
def prepare_trading_dataset(df):
"""Prepare the trading data set.
Time series analysis incoporate previous data for future prediction,
We need to retrieve historical data to generate features.
Parameters
-----------
df: DataFrame
the stock ticker trading data, including trading-date, close-price, volumes
window: int, default = 400
feature engineer windows size. Using at most 400 trading days to construct
features.
Return
----------
array_lstm : np.array
return the array with 3 dimensions shape -> [samples, 1, features]
"""
if len(df) == 0:
raise RuntimeError(
"Encounter Error in >>make_dataset.prepare_trading_dataset<<... \
Did not catch any news.") from None
else:
df['log_ret_1d'] = np.log(df['intraday_close'] / df['intraday_close'].shift(1))
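        # Rolling sums of daily log returns give cumulative k-day log returns:
        # a 5-day window is roughly one trading week, 260 days roughly 52 weeks.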
df['log_ret_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).sum()
df['log_ret_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).sum()
df['log_ret_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).sum()
df['log_ret_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).sum()
df['log_ret_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).sum()
df['log_ret_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).sum()
df['log_ret_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).sum()
df['log_ret_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).sum()
df['log_ret_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).sum()
df['log_ret_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).sum()
df['log_ret_32w'] = pd.Series(df['log_ret_1d']).rolling(window=160).sum()
df['log_ret_36w'] = pd.Series(df['log_ret_1d']).rolling(window=180).sum()
df['log_ret_40w'] = pd.Series(df['log_ret_1d']).rolling(window=200).sum()
df['log_ret_44w'] = pd.Series(df['log_ret_1d']).rolling(window=220).sum()
df['log_ret_48w'] = pd.Series(df['log_ret_1d']).rolling(window=240).sum()
df['log_ret_52w'] = pd.Series(df['log_ret_1d']).rolling(window=260).sum()
df['log_ret_56w'] = pd.Series(df['log_ret_1d']).rolling(window=280).sum()
df['log_ret_60w'] = pd.Series(df['log_ret_1d']).rolling(window=300).sum()
df['log_ret_64w'] = pd.Series(df['log_ret_1d']).rolling(window=320).sum()
df['log_ret_68w'] = | pd.Series(df['log_ret_1d']) | pandas.Series |
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas.compat import (
pa_version_under2p0,
pa_version_under4p0,
)
from pandas.errors import PerformanceWarning
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
isna,
)
import pandas._testing as tm
@pytest.mark.parametrize("pattern", [0, True, Series(["foo", "bar"])])
def test_startswith_endswith_non_str_patterns(pattern):
# GH3485
ser = | Series(["foo", "bar"]) | pandas.Series |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
        self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, | pd.Timestamp('2012-01-01') | pandas.Timestamp |
from typing import Any
from typing import Dict
from typing import Optional
import pandas
import pytest
from evidently.model_profile.sections.classification_performance_profile_section import \
ClassificationPerformanceProfileSection
from .helpers import calculate_section_results
from .helpers import check_profile_section_result_common_part
from .helpers import check_section_without_calculation_results
def check_classification_performance_metrics_dict(metrics: Dict[str, Any]) -> None:
assert 'accuracy' in metrics
assert 'f1' in metrics
assert 'metrics_matrix' in metrics
assert 'precision' in metrics
assert 'recall' in metrics
assert 'metrics_matrix' in metrics
metrics_matrix = metrics['metrics_matrix']
assert isinstance(metrics_matrix, dict)
assert 'accuracy' in metrics_matrix
assert 'macro avg' in metrics_matrix
assert 'weighted avg' in metrics_matrix
confusion_matrix = metrics['confusion_matrix']
assert 'labels' in confusion_matrix
assert isinstance(confusion_matrix['labels'], list)
assert 'values' in confusion_matrix
assert isinstance(confusion_matrix['values'], list)
def test_no_calculation_results() -> None:
check_section_without_calculation_results(ClassificationPerformanceProfileSection, 'classification_performance')
@pytest.mark.parametrize(
'reference_data,current_data', (
(pandas.DataFrame({'target': [1, 1, 3, 3], 'prediction': [1, 2, 1, 4]}), None),
(
pandas.DataFrame({'target': [1, 2, 3, 4], 'prediction': [1, 2, 1, 4]}),
pandas.DataFrame({'target': [1, 1, 3, 3], 'prediction': [1, 2, 1, 4]}),
),
)
)
def test_profile_section_with_calculated_results(reference_data, current_data) -> None:
section_result = calculate_section_results(ClassificationPerformanceProfileSection, reference_data, current_data)
check_profile_section_result_common_part(section_result, 'classification_performance')
result_data = section_result['data']
# check metrics structure and types, ignore concrete metrics values
assert 'metrics' in result_data
metrics = result_data['metrics']
assert 'reference' in metrics
check_classification_performance_metrics_dict(metrics['reference'])
if current_data is not None:
assert 'current' in metrics
check_classification_performance_metrics_dict(metrics['current'])
@pytest.mark.parametrize(
'reference_data, current_data',
(
(
pandas.DataFrame({'target': [1, 2, 3, 4]}),
pandas.DataFrame({'target': [1, 1, 3, 3]}),
),
(
pandas.DataFrame({'prediction': [1, 2, 3, 4]}),
pandas.DataFrame({'prediction': [1, 1, 3, 3]}),
),
(
pandas.DataFrame({'other_data': [1, 2, 3, 4]}),
pandas.DataFrame({'other_data': [1, 1, 3, 3]}),
)
)
)
def test_profile_section_with_missed_target_and_prediction_columns(
reference_data: pandas.DataFrame, current_data: pandas.DataFrame
) -> None:
section_result = calculate_section_results(ClassificationPerformanceProfileSection, reference_data, current_data)
check_profile_section_result_common_part(section_result, 'classification_performance')
result_data = section_result['data']
assert 'metrics' in result_data
assert result_data['metrics'] == {}
@pytest.mark.parametrize(
'reference_data, current_data',
(
(None, None),
(None, | pandas.DataFrame({'target': [1, 1, 3, 3], 'prediction': [1, 2, 1, 4]}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: abhijit
"""
#%% preamble
import numpy as np
import pandas as pd
from glob import glob
#%% Tidy data
filenames = glob('data/table*.csv')
filenames = sorted(filenames)
table1, table2, table3, table4a, table4b, table5 = [pd.read_csv(f) for f in filenames] # Use a list comprehension
#%%
pew = | pd.read_csv('data/pew.csv') | pandas.read_csv |
import json
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
class SEIssueRF:
def __init__(self, data):
self.data = data
def get_label_indices(self, labels):
label_set = [i for i in set(labels)]
label_set.sort()
label_idx = []
for i in labels:
label_idx.append(label_set.index(i))
return label_idx
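    # Note: this is equivalent to encoding each label as its index in the sorted
    # set of unique labels; sklearn.preprocessing.LabelEncoder would produce the
    # same mapping (mentioned only as a possible alternative).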
def predict(self, test_embeddings, test_labels, cls, round_id):
label_set = [i for i in set(test_labels)]
label_set.sort()
test_labels_idx = []
for i in test_labels:
test_labels_idx.append(label_set.index(i))
predictions = cls.predict(test_embeddings)
predictions_proba = cls.predict_proba(test_embeddings)
f1 = f1_score(predictions, test_labels_idx, average='weighted')
conf_mtrx = self.data.confusion_matrix(test_labels_idx, predictions)
roc_auc = self.data.roc_auc(test_labels_idx, predictions_proba)
metrics = {"round_id": round_id, "f1": f1, "roc_auc": roc_auc, "conf_mtrx": conf_mtrx}
print(round_id)
print('F1 Score: {:.3f}, ROC AUC: {:.3f}, Confusion Matrix: '.format(f1, roc_auc))
print('%s' % str(conf_mtrx))
return predictions, metrics
def bert_fit_predict(self, round_id):
self.data.print_id("Random Forest BERT embedding Fitting")
bert_train_data = self.data.get_info_db("bert_embedding", "train")
train_embeddings = [json.loads(emb[1]) for emb in bert_train_data]
train_labels = [emb[2] for emb in bert_train_data]
train_labels_idx = self.get_label_indices(train_labels)
cls = RandomForestClassifier(n_estimators=10)
cls.fit(train_embeddings, train_labels_idx)
# self.data.print_id("Random Forest Prediction")
bert_test_data = self.data.get_info_db("bert_embedding", "test")
test_embeddings = [json.loads(emb[1]) for emb in bert_test_data]
test_labels = [emb[2] for emb in bert_test_data]
test_id = [emb[0] for emb in bert_test_data]
predictions, metrics = self.predict(test_embeddings, test_labels, cls, round_id)
self.to_db("RF_bert_prediction", test_id, predictions)
return test_id, predictions, test_labels, metrics
def bow_fit_predict(self, round_id):
column_trans = ColumnTransformer([('corpus_bow', TfidfVectorizer(), 'corpus')], remainder='passthrough')
train_data = self.data.get_info_db("processed_corpus", "train", False)
test_data = self.data.get_info_db("processed_corpus", "test", False)
fitting_data = [emb[1] for emb in train_data]
fitting_data.extend([emb[1] for emb in test_data])
x = | pd.DataFrame({"corpus": fitting_data}) | pandas.DataFrame |
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from sys import argv
dates=("2020-04-01", "2020-04-08", "2020-04-15", "2020-04-22",
"2020-04-29" ,"2020-05-06", "2020-05-13","2020-05-20", "2020-05-27", "2020-06-03",
"2020-06-10", "2020-06-17", "2020-06-24", "2020-07-01", "2020-07-08",
"2020-07-15", "2020-07-22", "2020-07-29", "2020-08-05", "2020-08-12",
"2020-08-19", "2020-08-26", "2020-09-02", "2020-09-16", "2020-09-23",
"2020-09-30", "2020-10-07", "2020-10-14", "2020-10-21")
days_list=(
60, 67, 74, 81, 88, 95, 102, 109, 116, 123, 130,
137, 144, 151, 158, 165, 172,179,186,193,200,207,
214, #skip 221, data missing 2020-09-09
228,235, 242, 249,256,263)
df = pd.DataFrame()
for i,date in enumerate(dates):
states = ['NSW','QLD','SA','TAS','VIC','WA','ACT','NT']
n_sims = int(argv[1])
start_date = '2020-03-01'
days = days_list[i]
forecast_type = "R_L" #default None
forecast_date = date #format should be '%Y-%m-%d'
end_date = pd.to_datetime(start_date,format='%Y-%m-%d') + timedelta(days=days-1)
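    # pd.date_range(start_date, end_date) below is inclusive of both endpoints, so
    # adding (days - 1) makes the simulation window span exactly `days` calendar days.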
sims_dict={
'state': [],
'onset date':[],
}
for n in range(n_sims):
if n <2000:
sims_dict['sim'+str(n)] = []
print("forecast up to: {}".format(end_date))
date_col = [day.strftime('%Y-%m-%d') for day in pd.date_range(start_date,end_date)]
for i,state in enumerate(states):
df_results = pd.read_parquet("./results/"+state+start_date+"sim_"+forecast_type+str(n_sims)+"days_"+str(days)+".parquet",columns=date_col)
df_local = df_results.loc['total_inci_obs']
sims_dict['onset date'].extend(date_col)
sims_dict['state'].extend([state]*len(date_col))
n=0
print(state)
for index, row in df_local.iterrows():
if n==2000:
break
#if index>=2000:
# continue
#else:
if np.all(row.isna()):
continue
else:
sims_dict['sim'+str(n)].extend(row.values)
n +=1
print(n)
while n < 2000:
print("Resampling")
for index, row in df_local.iterrows():
if n==2000:
break
if np.all(row.isna()):
continue
else:
sims_dict['sim'+str(n)].extend(row.values)
n +=1
df_single = | pd.DataFrame.from_dict(sims_dict) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# coding: utf-8
# Python Scraping of Book Information
# In[1]:
get_ipython().system('pip install bs4')
# In[2]:
get_ipython().system('pip install splinter')
# In[3]:
get_ipython().system('pip install webdriver_manager')
# In[1]:
# Setup splinter
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import time
import pandas as pd
import requests
# In[ ]:
# In[42]:
# executable_path = {'executable_path': ChromeDriverManager().install()}
# browser = Browser('chrome', **executable_path, headless=False)
# url = 'http://books.toscrape.com/'
# browser.visit(url)
# for x in range(50):
# html = browser.html
# soup = BeautifulSoup(html, 'html.parser')
# articles = soup.find_all('article', class_='product_pod')
# for article in articles:
# h3 = article.find("h3")
# link = h3.find("a")
# href = link["href"]
# title = link["title"]
# print("----------")
# print(title)
# url = "http://books.toscrape.com/" + href
# browser.visit(url)
# try:
# current_page = current_page + 1
# web_page_url = f"https://books.toscrape.com/catalogue/category/books_1/page-{current_page}.html"
# browser.visit(web_page_url)
# browser.links.find_by_partial_text("next").click()
# print('It worked')
# except:
# print("Scraping Complete")
# browser.quit()
# In[57]:
# executable_path = {'executable_path': ChromeDriverManager().install()}
# browser = Browser('chrome', **executable_path, headless=False)
# pageNumber= pageNumber + 1
# url = 'http://books.toscrape.com/'
# pageUrl = f'http://books.toscrape.com/catalogue/page-{pageNumber}.html'
# browser.visit(url)
# html = browser.html
# soup = BeautifulSoup(html, 'html.parser')
# for x in range(20):
# html = browser.html
# soup = BeautifulSoup(html, 'html.parser')
# articles = soup.find_all('article', class_='product_pod')
# for article in articles:
# h3 = article.find("h3")
# link = h3.find("a")
# href = link["href"]
# title = link["title"]
# print("----------")
# print(title)
# #time.sleep(1)
# url = "http://books.toscrape.com/" + href
# browser.visit(url)
# try:
# browser.visit(pageUrl)
# browser.links.find_by_partial_text("next").click()
# except:
# print("Scraping Complete")
# browser.quit()
# In[2]:
#Working through each book and page
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
pageUrl=""
for i in range(1,3):
if(i == 1):
pageUrl = f"https://books.toscrape.com/index.html"
else:
pageUrl = f'https://books.toscrape.com/catalogue/page-{i}.html'
print(pageUrl)
browser.visit(pageUrl)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
articles = soup.find_all('article', class_='product_pod')
for article in articles:
h3 = article.find("h3")
link = h3.find("a")
href = link["href"]
title = link["title"]
print("----------")
print(title)
#time.sleep(1)
url = "http://books.toscrape.com/" + href
browser.visit(url)
browser.quit()
# In[97]:
#Proof of concept using books.toscrape.com
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
pageUrl=""
for i in range(1,2):
if(i == 1):
pageUrl = f"https://books.toscrape.com/index.html"
else:
pageUrl = f'https://books.toscrape.com/catalogue/page-{i}.html'
print(pageUrl)
browser.visit(pageUrl)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
articles = soup.find_all('article', class_='product_pod')
for article in articles:
h3 = article.find("h3")
link = h3.find("a")
href = link["href"]
title = link["title"]
print("----------")
print(title)
#time.sleep(1)
url = "http://books.toscrape.com/" + href
browser.visit(url)
res=requests.get(url)
soup = BeautifulSoup(res.content,'lxml')
table = soup.find_all('table')[0]
df = pd.read_html(str(table))[0]
print(df)
browser.quit()
# In[20]:
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
pageUrl=""
table_of_tables = []
for i in list(50,75,100):
table_on_page = []
if(i == 25):
pageUrl = f"https://books.toscrape.com/index.html"
else:
pageUrl = f'https://books.toscrape.com/catalogue/page-{i}.html'
print(pageUrl)
browser.visit(pageUrl)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
articles = soup.find_all('article', class_='product_pod')
for article in articles:
h3 = article.find("h3")
link = h3.find("a")
href = link["href"]
title = link["title"]
print("----------")
print(title)
#time.sleep(1)
url = "http://books.toscrape.com/" + href
browser.visit(url)
res=requests.get(url)
soup = BeautifulSoup(res.content,'lxml')
table = soup.find_all('table')[0]
table_on_page.append(table)
# table_of_tables.append(table_on_page)
df = pd.read_html(str(table))[0]
print(df)
browser.quit()
# In[61]:
# In[48]:
df = pd.DataFrame(table_on_page)
df.to_csv('books2scrape.csv')
# In[52]:
df_to_clean=pd.read_csv('books2scrape.csv')
# In[64]:
df_columns_cleaned = df_to_clean.drop(columns=['Unnamed: 0','0','2','4','6','8','10','12','14'])
# In[71]:
df_columns_cleaned.columns
# In[66]:
df_columns_cleaned.head()
# In[78]:
html_chars = ["<tr>","\n","</th>","<th>","<td>","</td>",
"</tr>"]
for char in html_chars:
df_columns_cleaned['1'] = df_columns_cleaned['1'].str.replace(char, ' ')
df_columns_cleaned['3'] = df_columns_cleaned['3'].str.replace(char, ' ')
df_columns_cleaned['5'] = df_columns_cleaned['5'].str.replace(char, ' ')
df_columns_cleaned['7'] = df_columns_cleaned['7'].str.replace(char, ' ')
df_columns_cleaned['9'] = df_columns_cleaned['9'].str.replace(char, ' ')
df_columns_cleaned['11'] = df_columns_cleaned['11'].str.replace(char, ' ')
df_columns_cleaned['13'] = df_columns_cleaned['13'].str.replace(char, ' ')
# In[79]:
df_columns_cleaned
# In[290]:
# executable_path = {'executable_path': ChromeDriverManager().install()}
# browser = Browser('chrome', **executable_path, headless=False)
# pageUrl=""
# table_of_tables = []
# for i in range(1):
# table_on_page = []
# pageUrl = f'ttps://www.hpb.com/books/best-sellers/784-classics?&size=350&&&'
# print(pageUrl)
# browser.visit(pageUrl)
# html = browser.html
# soup = BeautifulSoup(html, 'html.parser')
# articles = soup.find_all('article', class_='product_pod')
# for article in articles:
# time.sleep(randint(1,3))
# section = article.find("section")
# link = section.find("a")
# href = link["href"]
# print(href)
# title = link["title"]
# print("----------")
# print(title)
# time.sleep(randint(1,3))
# url = href
# browser.visit(url)
# res=requests.get(url)
# time.sleep(randint(3,5))
# soup = BeautifulSoup(res.content,'lxml')
# table = soup.find_all('table')[0]
# table_on_page.append(table)
# # table_of_tables.append(table_on_page)
# df = pd.read_html(str(table))[0]
# print(df)
# browser.quit()
# In[198]:
#https://stackoverflow.com/questions/31064981/python3-error-initial-value-must-be-str-or-none-with-stringio
import io
# In[267]:
#grab data from https://citylights.com/greek-roman/
import random
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
table_of_data = []
pageUrl=""
for i in range(1,7):
data_on_page = []
if(i == 1):
pageUrl = f"https://citylights.com/greek-roman/"
else:
pageUrl = f'https://citylights.com/greek-roman/page/{i}/'
print(pageUrl)
time.sleep(1)
browser.visit(pageUrl)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
#https://stackoverflow.com/questions/52842778/find-partial-class-names-in-spans-with-beautiful-soup
articles = soup.find_all('li', attrs={'class': lambda e: e.startswith('product type-product post') if e else False})
for article in articles:
time.sleep(1)
link = article.find('a')
href = link["href"]
print("----------")
print(href)
url = href
browser.visit(url)
        time.sleep(random.randint(1, 2))
res=requests.get(url)
soup = BeautifulSoup(res.content,'lxml')
data = soup.find_all('div', attrs={'class': 'detail-text mb-50'})[0].get_text()
data_on_page.append(data)
table_of_data.append(data_on_page)
df = pd.DataFrame(table_of_data)[0]
print(data)
browser.quit()
# In[268]:
df.to_csv('greek-roman.csv')
# In[269]:
df_greek_roman_to_clean=pd.read_csv('greek-roman.csv')
df_greek_roman_to_clean_columns = df_greek_roman_to_clean.drop(columns=['Unnamed: 0'])
# In[270]:
df_greek_roman_to_clean_columns
# In[271]:
df_greek_roman_to_clean_columns_split = df_greek_roman_to_clean_columns['0'].str.split("\n\t")
# In[272]:
df_greek_roman = df_greek_roman_to_clean_columns_split.to_list()
column_names = ['0','ISBN-10','ISBN-13','Publisher','Publish Date', 'Dimensions']
new_greek_roman_df = pd.DataFrame(df_greek_roman,columns=column_names)
# In[273]:
clean_greek_roman_df=new_greek_roman_df.drop(columns=['0','Dimensions'])
# In[274]:
clean_greek_roman_df.head()
# In[275]:
html_chars = ["<tr>","\n","</th>","<th>","<td>","</td>",
"</tr>",'\t']
for char in html_chars:
clean_greek_roman_df['ISBN-10'] = clean_greek_roman_df['ISBN-10'].str.replace(char, ' ')
clean_greek_roman_df['ISBN-13'] = clean_greek_roman_df['ISBN-13'].str.replace(char, ' ')
clean_greek_roman_df['Publisher'] = clean_greek_roman_df['Publisher'].str.replace(char, ' ')
clean_greek_roman_df['Publish Date'] = clean_greek_roman_df['Publish Date'].str.replace(char, ' ')
# In[276]:
pd.set_option("max_colwidth", 1000)
clean_greek_roman_df.head()
# In[277]:
clean_greek_roman_df.to_csv('greek-roman-clean.csv')
# In[ ]:
# In[279]:
# grab data from https://citylights.com/asian/
import random
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
table_of_data = []
pageUrl=""
for i in range(1,5):
data_on_page = []
if(i == 1):
pageUrl = f"https://citylights.com/asian/"
else:
pageUrl = f'https://citylights.com/asian/page/{i}/'
print(pageUrl)
time.sleep(1)
browser.visit(pageUrl)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
#https://stackoverflow.com/questions/52842778/find-partial-class-names-in-spans-with-beautiful-soup
articles = soup.find_all('li', attrs={'class': lambda e: e.startswith('product type-product post') if e else False})
for article in articles:
        time.sleep(random.randint(1, 2))
link = article.find('a')
href = link["href"]
print("----------")
print(href)
url = href
browser.visit(url)
time.sleep(1)
res=requests.get(url)
soup = BeautifulSoup(res.content,'lxml')
data = soup.find_all('div', attrs={'class': 'detail-text mb-50'})[0].get_text()
data_on_page.append(data)
table_of_data.append(data_on_page)
df = pd.DataFrame(data_on_page)[0]
print(data)
browser.quit()
# In[280]:
df.to_csv('asian.csv')
# In[281]:
df_asian_classics_to_clean=pd.read_csv('asian.csv')
df_asian_classics_to_clean_columns = df_asian_classics_to_clean.drop(columns=['Unnamed: 0'])
# In[282]:
df_asian_classics_to_clean_columns
# In[283]:
df_asian_classics_to_clean_columns_split = df_asian_classics_to_clean_columns['0'].str.split("\n\t")
# In[284]:
df_asian_classics = df_asian_classics_to_clean_columns_split.to_list()
column_names = ['0','ISBN-10','ISBN-13','Publisher','Publish Date', 'Dimensions']
new_asian_classics_df = pd.DataFrame(df_asian_classics,columns=column_names)
# In[285]:
clean_asian_classics_df=new_asian_classics_df.drop(columns=['0','Dimensions'])
# In[286]:
clean_asian_classics_df.head()
# In[287]:
html_chars = ["<tr>","\n","</th>","<th>","<td>","</td>",
"</tr>",'\t']
for char in html_chars:
clean_asian_classics_df['ISBN-10'] = clean_asian_classics_df['ISBN-10'].str.replace(char, ' ')
clean_asian_classics_df['ISBN-13'] = clean_asian_classics_df['ISBN-13'].str.replace(char, ' ')
clean_asian_classics_df['Publisher'] = clean_asian_classics_df['Publisher'].str.replace(char, ' ')
clean_asian_classics_df['Publish Date'] = clean_asian_classics_df['Publish Date'].str.replace(char, ' ')
# In[288]:
pd.set_option("max_colwidth", 1000)
clean_asian_classics_df.head()
# In[ ]:
# In[ ]:
# In[ ]:
# In[391]:
greek_roman_clean_for_combine_df = | pd.read_csv('greek-roman-clean.csv') | pandas.read_csv |
"""MVTecAd Dataset."""
# default packages
import dataclasses as dc
import enum
import logging
import pathlib
import shutil
import sys
import tarfile
import typing as t
import urllib.request as request
# third party packages
import pandas as pd
# my packages
import src.data.dataset as ds
import src.data.utils as ut
# logger
_logger = logging.getLogger(__name__)
class Kind(enum.Enum):
HAZELNUT = "hazelnut"
@classmethod
def value_of(cls, name: str) -> "Kind":
"""設定値の文字列から Enum 値を返す.
Raises:
ValueError: 指定した文字列が設定値にない場合
Returns:
[type]: Enum の値
"""
for e in Kind:
if e.value == name:
return e
raise ValueError(f"invalid value: {name}")
class MVTecAd(ds.Dataset):
def __init__(self, kind: Kind) -> None:
super().__init__()
archive, datadir = _get_archive_file_name(kind)
self.archive_file = self.path.joinpath(archive)
self.datadir = self.path.joinpath(datadir)
self.train_list = self.path.joinpath(f"{datadir}_train.csv")
self.valid_list = self.path.joinpath(f"{datadir}_valid.csv")
self.test_list = self.path.joinpath(f"{datadir}_test.csv")
def save_dataset(self, reprocess: bool) -> None:
if reprocess:
_logger.info("=== reporcess mode. delete existing data.")
self.archive_file.unlink()
shutil.rmtree(self.datadir)
self.train_list.unlink()
self.valid_list.unlink()
self.test_list.unlink()
self.path.mkdir(exist_ok=True)
if not self.datadir.exists():
if not self.archive_file.exists():
_logger.info("=== download zip file.")
_download(self.archive_file)
_logger.info("=== extract all.")
with tarfile.open(self.archive_file, "r") as tar:
tar.extractall(self.path)
if not self.train_list.exists() and not self.valid_list.exists():
_logger.info("=== create train and valid file list.")
filelist = sorted(
[p.relative_to(self.path) for p in self.datadir.glob("train/**/*.png")]
)
train_ratio = 0.8
train_num = int(len(filelist) * train_ratio)
if not self.train_list.exists():
train_list = pd.DataFrame({"filepath": filelist[:train_num]})
train_list.to_csv(self.train_list, index=False)
if not self.valid_list.exists():
valid_list = pd.DataFrame({"filepath": filelist[train_num:]})
valid_list.to_csv(self.valid_list, index=False)
if not self.test_list.exists():
_logger.info("=== create test file list.")
filelist = sorted(
[p.relative_to(self.path) for p in self.datadir.glob("test/**/*.png")]
)
test_list = pd.DataFrame({"filepath": filelist})
test_list.to_csv(self.test_list, index=False)
def load_dataset(self) -> None:
self.train = | pd.read_csv(self.train_list) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 14:10:58 2017
@author: tkc
"""
import os
import pandas as pd
from tkinter import filedialog
AESQUANTPARAMFILE='C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Params\\AESquantparams.csv'
class AESspectrum():
''' Single instance of AES spectra file created from row of spelist (child of AESdataset)
load file from AESdataset (pd dataframe row)
#TODO add direct file load? '''
def __init__(self, AESdataset, rowindex):
# can be opened with AESdataset parent and associated row from
# open files from directory arg
self.AESdataset=AESdataset
self.path=self.AESdataset.path # same path as AESdataset parent
# load params from spelist only (not AESlog which has images)
row=AESdataset.spelist.iloc[rowindex]
self.filename=row.Filename
self.sample=str(row.Sample)
self.numareas=int(row.Areas)
self.evbreaks=row.Evbreaks # TODO data type?
self.spectype = row.Type.lower() # multiplex or survey
self.AESdf = None # entire AES dataframe (all areas)
self.energy = None # same for all cols
self.open_csvfile()
self.aesquantparams = None
self.loadAESquantparams()
# load peaks, shifts, ampls, widths
self.smdifpeakinfo=None # dataframe w/ smdiff peak info
self.get_peaks() # get quant info from smdifpeakslog
self.integpeakinfo=None # dataframe w/ smdiff peak info
self.get_integ_peaks() # get quant info from smdifpeakslog
self.elems_integ = None #
print('Auger file', self.filename, 'loaded.')
def open_csvfile(self):
''' Read Auger spectral file '''
self.AESdf=pd.read_csv(self.filename.replace('.spe','.csv'))
self.colset=self.AESdf.columns # Counts1, Counts2, S7D71, S7D72, etc.
self.energy=self.AESdf['Energy']
print('AESfile ', self.filename,' loaded.')
def loadAESquantparams(self):
''' Loads standard values of Auger quant parameters
TODO what about dealing with local shifts '''
# Checkbutton option for local (or standard) AESquantparams in file loader?
print('AESquantparams loaded')
self.aesquantparams=pd.read_csv(AESQUANTPARAMFILE, encoding='utf-8')
def get_peaks(self):
''' Finds element quant already performed from smdifflog (within AESdataset)
needed for plots: negpeak, pospeak (both indirect calc from shift, peakwidth)
negint, posint (but usually indirectly
ideal positions for peaks already loaded in AESdataset
smdifpeakinfo contains: 0 peak, 1 negpeak energy, 2 pospeak energy, 3) negint
4) posint 5) ampl, 6) adjampl -- important stuff for graphical display
of quant results
returns dataframe for this filename
'''
mycols=['Areanumber', 'Peakenergy', 'Peakindex', 'PeakID', 'Shift',
'Negintensity', 'Posintensity', 'Pospeak', 'Amplitude', 'Peakwidth','Adjamp']
        self.smdifpeakinfo = self.AESdataset.Smdifpeakslog[
            self.AESdataset.Smdifpeakslog['Filename'] == self.filename]
        self.smdifpeakinfo = self.smdifpeakinfo[mycols]
def get_integ_peaks(self):
''' Pull existing quant results from integ log file (if present) '''
mycols=['Areanumber', 'Element', 'Integcounts', 'Backcounts',
'Significance', 'Adjcnts','Erradjcnts']
        self.integpeakinfo = self.AESdataset.Integquantlog[
            self.AESdataset.Integquantlog['Filename'] == self.filename]
        self.integpeakinfo = self.integpeakinfo[mycols]
    def savecsv(self):
        ''' Save any changes to underlying csv file '''
class AESdataset():
''' loads all dataframes with Auger parameters from current project folder '''
def __init__(self, *args, **kwargs):
self.path = filedialog.askdirectory()
# open files
self.AESlog=None
self.spelist=None
self.Smdifpeakslog=None
self.Integquantlog=None
self.Backfitlog=None
self.open_main_files() # loads above
# self.filelist=np.ndarray.tolist(self.AESlog.Filenumber.unique())
self.numfiles=len(self.AESlog)
print(str(self.numfiles),' loaded from AESdataset.')
self.peaks=None
self.peakdata=None
self.get_peakinfo() # load needed Auger peak params (Peaks and Peakdata)
def get_peakinfo(self):
        ''' Builds self.peakdata: for each elemental peak symbol found in the
        smooth-differentiated peak log, stores the parameters needed to locate
        that Auger peak in a given spe file (symbol, ideal negpeak and pospeak
        energies in eV, sensitivity k-factor and its error).
        '''
# elemental lines (incl Fe2, Fe1, etc.)
self.peaks=self.Smdifpeakslog.PeakID.unique()
self.peakdata=[]
for peak in self.peaks:
try:
# find row in AESquantparams for this element
thispeakdata=self.AESquantparams[(self.AESquantparams['element']==peak)]
thispeakdata=thispeakdata.squeeze() # series with this elements params
# return list of length numelements with 5-tuple for each containing
# 1) peak symbol, 2) ideal negpeak (eV) 3) ideal pospeak (in eV)
# 4)sensitivity kfactor.. and 5) error in kfactor
peaktuple=(peak, thispeakdata.negpeak, thispeakdata.pospeak,
thispeakdata.kfactor, thispeakdata.errkf1) # add tuple with info for this element
self.peakdata.append(peaktuple)
            except Exception:
                print('AESquantparams not found for ', peak)
print('Found', len(self.peakdata), 'quant peaks in smdifpeakslog' )
def open_main_files(self):
''' Auto loads Auger param files from working directory including
AESparalog- assorted params associated w/ each SEM-AES or TEM-AES emsa file
Backfitparamslog - ranges and parameters for AES background fits
Integquantlog - subtracted and corrected counts for chosen elements
Peakfitlog - params of gaussian fits to each element (xc, width, peakarea, Y0, rsquared)'''
if os.path.exists('Augerparamlog.csv'):
self.AESlog=pd.read_csv('Augerparamlog.csv', encoding='cp437')
self.spelist=self.AESlog[pd.notnull(self.AESlog['Areas'])]
else:
self.AESlog=pd.DataFrame()
self.spelist=pd.DataFrame()
if os.path.exists('Smdifpeakslog.csv'):
self.Smdifpeakslog=pd.read_csv('Smdifpeakslog.csv', encoding='cp437')
else:
self.Smdifpeakslog=pd.DataFrame()
if os.path.exists('Backfitlog.csv'):
self.Backfitlog= | pd.read_csv('Backfitlog.csv', encoding='cp437') | pandas.read_csv |
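# --- illustrative sketch ----------------------------------------------------
# The get_peaks()/get_integ_peaks() methods above boil down to "filter the
# project-wide log by Filename and keep a fixed column subset". A tiny,
# self-contained version of that pattern with made-up data (not taken from
# any real Auger log file):
import pandas as pd

smdif_log = pd.DataFrame({
    "Filename": ["a.spe", "a.spe", "b.spe"],
    "Areanumber": [1, 2, 1],
    "PeakID": ["Fe2", "O", "Fe2"],
    "Amplitude": [120.0, 85.0, 97.0],
})
this_file = smdif_log[smdif_log["Filename"] == "a.spe"][["Areanumber", "PeakID", "Amplitude"]]
print(this_file)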
import pandas as pd
import datetime
import numpy as np
class get_result(object):
def __init__(self, data, material, start_time):
self.data = data
self.material = material
self.start_time = start_time
        self.freq = 0.5  # unit: hours
        # "尖峰平谷电费结构" = time-of-use tariff structure (sharp-peak/peak/flat/valley bands)
        ele_struct = pd.read_excel("尖峰平谷电费结构.xlsx", index_col=[0])
self.top_time = ele_struct.loc[ele_struct['区段'] == '尖'].index.to_list()
self.peak_time = ele_struct.loc[ele_struct['区段'] == '峰'].index.to_list()
self.flat_time = ele_struct.loc[ele_struct['区段'] == '平'].index.to_list()
self.bot_time = ele_struct.loc[ele_struct['区段'] == '谷'].index.to_list()
self.top = ele_struct.loc[ele_struct['区段'] == '尖']['单价'].dropna().unique()[0]
self.peak = ele_struct.loc[ele_struct['区段'] == '峰']['单价'].dropna().unique()[0]
self.flat = ele_struct.loc[ele_struct['区段'] == '平']['单价'].dropna().unique()[0]
self.bot = ele_struct.loc[ele_struct['区段'] == '谷']['单价'].dropna().unique()[0]
    def get_price(self, data):  # derive the total electricity cost from 1-minute-resolution energy data
ygdn = [data.total_ygdn.min()] + data.resample("H").max().total_ygdn.to_list()
start = (data.index.min() - datetime.timedelta(hours=1)).strftime("%Y-%m-%d %H")
end = data.index.max().strftime("%Y-%m-%d %H")
idx = pd.date_range(start, end, freq="H")
ele = | pd.DataFrame(index=idx, columns=['ele'], data=ygdn) | pandas.DataFrame |
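# --- illustrative sketch ----------------------------------------------------
# The tariff logic above maps each hour of the day to one of four time-of-use
# bands (尖 sharp-peak / 峰 peak / 平 flat / 谷 valley), each with its own unit
# price. A self-contained toy version of "hourly kWh x band price"; the band
# assignment and prices below are assumptions, not values from the spreadsheet.
import pandas as pd

hourly_kwh = pd.Series([1.2, 0.8, 2.5, 3.0], index=[1, 9, 12, 19])  # hour of day -> kWh
band_of_hour = {1: "valley", 9: "flat", 12: "peak", 19: "sharp"}     # assumed mapping
price = {"sharp": 1.2, "peak": 1.0, "flat": 0.7, "valley": 0.3}      # assumed unit prices
cost = sum(kwh * price[band_of_hour[h]] for h, kwh in hourly_kwh.items())
print(f"total cost: {cost:.2f}")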
import re
from datetime import datetime, timedelta
import numpy as np
import pandas.compat as compat
import pandas as pd
from pandas.compat import u, StringIO
from pandas.core.base import FrozenList, FrozenNDArray, DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas import Series, Index, Int64Index, DatetimeIndex, PeriodIndex
from pandas import _np_version_under1p7
import pandas.tslib as tslib
import nose
import pandas.util.testing as tm
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container)
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
raise nose.SkipTest('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container)
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# pass whatever functions you normally would to assertRaises (after the Exception kind)
assertRaisesRegexp(TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem(): self.container[0] = 5
self.check_mutable_error(setitem)
def setslice(): self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem(): del self.container[0]
self.check_mutable_error(delitem)
def delslice(): del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert_isinstance(result, klass)
self.assertEqual(result, expected)
class TestFrozenList(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
result = (1, 2, 3) + self.container
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
def test_inplace(self):
q = r = self.container
q += [5]
self.check_result(q, self.lst + [5])
# other shouldn't be mutated
self.check_result(r, self.lst)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('put', 'itemset', 'fill')
unicode_container = FrozenNDArray([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [3, 5, 7, -2]
self.container = FrozenNDArray(self.lst)
self.klass = FrozenNDArray
def test_shallow_copying(self):
original = self.container.copy()
assert_isinstance(self.container.view(), FrozenNDArray)
self.assertFalse(isinstance(self.container.view(np.ndarray), FrozenNDArray))
self.assertIsNot(self.container.view(), self.container)
self.assert_numpy_array_equal(self.container, original)
# shallow copy should be the same too
assert_isinstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container): container[0] = 16
self.check_mutable_error(testit, self.container)
def test_values(self):
original = self.container.view(np.ndarray).copy()
n = original[0] + 15
vals = self.container.values()
self.assert_numpy_array_equal(original, vals)
self.assertIsNot(original, vals)
vals[0] = n
self.assert_numpy_array_equal(self.container, original)
self.assertEqual(vals[0], n)
class Ops(tm.TestCase):
def setUp(self):
self.int_index = tm.makeIntIndex(10)
self.float_index = tm.makeFloatIndex(10)
self.dt_index = tm.makeDateIndex(10)
self.dt_tz_index = tm.makeDateIndex(10).tz_localize(tz='US/Eastern')
self.period_index = tm.makePeriodIndex(10)
self.string_index = tm.makeStringIndex(10)
arr = np.random.randn(10)
self.int_series = Series(arr, index=self.int_index)
self.float_series = Series(arr, index=self.int_index)
self.dt_series = Series(arr, index=self.dt_index)
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index)
self.string_series = Series(arr, index=self.string_index)
types = ['int','float','dt', 'dt_tz', 'period','string']
self.objs = [ getattr(self,"{0}_{1}".format(t,f)) for t in types for f in ['index','series'] ]
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index,op),index=o.index)
else:
expected = getattr(o,op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o,op)
                # these could be series, arrays or scalars
if isinstance(result,Series) and isinstance(expected,Series):
tm.assert_series_equal(result,expected)
elif isinstance(result,Index) and isinstance(expected,Index):
tm.assert_index_equal(result,expected)
elif isinstance(result,np.ndarray) and isinstance(expected,np.ndarray):
self.assert_numpy_array_equal(result,expected)
else:
self.assertEqual(result, expected)
            # freq raises AttributeError on an Int64Index because it's not defined
            # we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError, otherwise
# an AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
self.assertRaises(TypeError, lambda : getattr(o,op))
else:
self.assertRaises(AttributeError, lambda : getattr(o,op))
class TestIndexOps(Ops):
def setUp(self):
super(TestIndexOps, self).setUp()
self.is_valid_objs = [ o for o in self.objs if o._allow_index_ops ]
self.not_valid_objs = [ o for o in self.objs if not o._allow_index_ops ]
def test_ops(self):
tm._skip_if_not_numpy17_friendly()
for op in ['max','min']:
for o in self.objs:
result = getattr(o,op)()
if not isinstance(o, PeriodIndex):
expected = getattr(o.values, op)()
else:
expected = pd.Period(ordinal=getattr(o.values, op)(), freq=o.freq)
try:
self.assertEqual(result, expected)
except ValueError:
# comparing tz-aware series with np.array results in ValueError
expected = expected.astype('M8[ns]').astype('int64')
self.assertEqual(result.value, expected)
def test_nanops(self):
# GH 7261
for op in ['max','min']:
for klass in [Index, Series]:
obj = klass([np.nan, 2.0])
self.assertEqual(getattr(obj, op)(), 2.0)
obj = klass([np.nan])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
def test_value_counts_unique_nunique(self):
for o in self.objs:
klass = type(o)
values = o.values
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
# freq must be specified because repeat makes freq ambiguous
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
else:
o = klass(np.repeat(values, range(1, len(o) + 1)))
expected_s = Series(range(10, 0, -1), index=values[::-1], dtype='int64')
tm.assert_series_equal(o.value_counts(), expected_s)
if isinstance(o, DatetimeIndex):
# DatetimeIndex.unique returns DatetimeIndex
self.assertTrue(o.unique().equals(klass(values)))
else:
self.assert_numpy_array_equal(o.unique(), values)
self.assertEqual(o.nunique(), len(np.unique(o.values)))
for null_obj in [np.nan, None]:
for o in self.objs:
klass = type(o)
values = o.values
if o.values.dtype == 'int64':
# skips int64 because it doesn't allow to include nan or None
continue
if o.values.dtype == 'datetime64[ns]' and _np_version_under1p7:
# Unable to assign None
continue
# special assign to the numpy array
if o.values.dtype == 'datetime64[ns]':
values[0:2] = pd.tslib.iNaT
else:
values[0:2] = null_obj
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
else:
o = klass(np.repeat(values, range(1, len(o) + 1)))
if isinstance(o, DatetimeIndex):
expected_s_na = Series(list(range(10, 2, -1)) + [3], index=values[9:0:-1])
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1])
else:
expected_s_na = Series(list(range(10, 2, -1)) +[3], index=values[9:0:-1], dtype='int64')
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1], dtype='int64')
tm.assert_series_equal(o.value_counts(dropna=False), expected_s_na)
tm.assert_series_equal(o.value_counts(), expected_s)
# numpy_array_equal cannot compare arrays includes nan
result = o.unique()
self.assert_numpy_array_equal(result[1:], values[2:])
if isinstance(o, DatetimeIndex):
self.assertTrue(result[0] is pd.NaT)
else:
self.assertTrue(pd.isnull(result[0]))
self.assertEqual(o.nunique(), 8)
self.assertEqual(o.nunique(dropna=False), 9)
def test_value_counts_inferred(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.unique(s_values))
self.assertEqual(s.nunique(), 4)
# don't sort, have to sort after the fact as not sorting is platform-dep
hist = s.value_counts(sort=False)
hist.sort()
expected = Series([3, 1, 4, 2], index=list('acbd'))
expected.sort()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list('cdab'))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])
| tm.assert_series_equal(hist, expected) | pandas.util.testing.assert_series_equal |
import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
months = ['all','january','february','march','april','may','june','july','august','september','october','november','december']
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# Below while loops take user input, and check against valid entries. if in valid, will ask the user the enter another
# value. converts user input to lower to ensure interoperability with rest of the code.
# CITY Input
while True:
city = input("\nPlease select a city to view its data. Options include Chicago, New York City, and Washington\n").lower()
# is city in dict city data?
if city in CITY_DATA:
# echos user entry if valid and continues
print("You entered {}. Lets continue...".format(city.title()))
break
else:
# informs user their entry is invalid and asks for new input
print("\nYou did not enter a valid city. Please try again")
# Month Input
while True:
month = input("\nPlease select a month to view its data, or state all to view everything\n").lower()
if month in months:
# echos user entry if valid and continues
print("You entered {}. Lets continue...".format(month.title()))
break
else:
# informs user their entry is invalid and asks for new input
print("\nYou did not enter a valid option. Please try again")
# establish valid entries for day input
days = ['monday','tuesday','wednesday','thursday','friday','saturday','sunday','all']
# day input loop
while True:
day = input("\nPlease select a day to view its data, or state all to view everything\n").lower()
if day in days:
# echos user entry if valid and continues
print("You entered {}. Lets continue...".format(day.title()))
break
else:
# informs user their entry is invalid and asks for new input
print("\nYou did not enter a valid option. Please try again")
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
# load data file into a dataframe
df = | pd.read_csv(CITY_DATA[city]) | pandas.read_csv |
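# --- illustrative sketch ----------------------------------------------------
# The docstring above promises month/day filtering after the CSV is loaded.
# One common way to do that (the 'Start Time' column name and the exact
# filtering rules are assumptions for illustration, not taken from this file):
import pandas as pd

demo = pd.DataFrame({"Start Time": ["2017-01-01 09:07:57", "2017-02-03 18:10:00"]})
demo["Start Time"] = pd.to_datetime(demo["Start Time"])
demo["month"] = demo["Start Time"].dt.month_name().str.lower()
demo["day_of_week"] = demo["Start Time"].dt.day_name().str.lower()
filtered = demo[demo["month"] == "january"]
print(filtered)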
import pandas as pd
import networkx as nx
import logging
import math
import numpy as np
from statsmodels.stats.outliers_influence import variance_inflation_factor
def get_vif(df: pd.DataFrame, threshold: float = 5.0):
"""
Calculates the variance inflation factor (VIF) for each feature column. A VIF
value greater than a specific threshold (default: 5.0) can be considered problematic since
this column is likely correlated with other ones, i.e., we cannot properly
infer the effect of this column.
"""
vif_data = | pd.DataFrame() | pandas.DataFrame |
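# --- illustrative sketch ----------------------------------------------------
# A minimal, self-contained VIF computation matching the docstring above:
# one VIF per feature column via statsmodels' variance_inflation_factor.
# The synthetic data and the final threshold filter are assumptions about how
# get_vif might finish, not the actual implementation.
import numpy as np
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor

rng = np.random.default_rng(0)
x1 = rng.normal(size=200)
demo = pd.DataFrame({"x1": x1,
                     "x2": x1 * 0.9 + rng.normal(scale=0.1, size=200),
                     "x3": rng.normal(size=200)})
vifs = pd.DataFrame({
    "feature": demo.columns,
    "VIF": [variance_inflation_factor(demo.values, i) for i in range(demo.shape[1])],
})
print(vifs[vifs["VIF"] > 5.0])  # columns likely collinear with others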
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from decimal import Decimal
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from databricks import koalas
from databricks.koalas.testing.utils import ReusedSQLTestCase
class ReshapeTest(ReusedSQLTestCase):
def test_get_dummies(self):
for data in [pd.Series([1, 1, 1, 2, 2, 1, 3, 4]),
# pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype='category'),
# pd.Series(pd.Categorical([1, 1, 1, 2, 2, 1, 3, 4], categories=[4, 3, 2, 1])),
pd.DataFrame({'a': [1, 2, 3, 4, 4, 3, 2, 1],
# 'b': pd.Categorical(list('abcdabcd')),
'b': list('abcdabcd')})]:
exp = pd.get_dummies(data)
ddata = koalas.from_pandas(data)
res = koalas.get_dummies(ddata)
self.assertPandasAlmostEqual(res.toPandas(), exp)
def test_get_dummies_object(self):
df = pd.DataFrame({'a': [1, 2, 3, 4, 4, 3, 2, 1],
# 'a': pd.Categorical([1, 2, 3, 4, 4, 3, 2, 1]),
'b': list('abcdabcd'),
# 'c': pd.Categorical(list('abcdabcd')),
'c': list('abcdabcd')})
ddf = koalas.from_pandas(df)
# Explicitly exclude object columns
exp = pd.get_dummies(df, columns=['a', 'c'])
res = koalas.get_dummies(ddf, columns=['a', 'c'])
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df)
res = koalas.get_dummies(ddf)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df.b)
res = koalas.get_dummies(ddf.b)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df, columns=['b'])
res = koalas.get_dummies(ddf, columns=['b'])
self.assertPandasAlmostEqual(res.toPandas(), exp)
def test_get_dummies_date_datetime(self):
df = pd.DataFrame({'d': [datetime.date(2019, 1, 1),
datetime.date(2019, 1, 2),
datetime.date(2019, 1, 1)],
'dt': [datetime.datetime(2019, 1, 1, 0, 0, 0),
datetime.datetime(2019, 1, 1, 0, 0, 1),
datetime.datetime(2019, 1, 1, 0, 0, 0)]})
ddf = koalas.from_pandas(df)
exp = pd.get_dummies(df)
res = koalas.get_dummies(ddf)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df.d)
res = koalas.get_dummies(ddf.d)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df.dt)
res = koalas.get_dummies(ddf.dt)
self.assertPandasAlmostEqual(res.toPandas(), exp)
def test_get_dummies_boolean(self):
df = pd.DataFrame({'b': [True, False, True]})
ddf = koalas.from_pandas(df)
exp = pd.get_dummies(df)
res = koalas.get_dummies(ddf)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df.b)
res = koalas.get_dummies(ddf.b)
self.assertPandasAlmostEqual(res.toPandas(), exp)
def test_get_dummies_decimal(self):
df = pd.DataFrame({'d': [Decimal(1.0), Decimal(2.0), Decimal(1)]})
ddf = koalas.from_pandas(df)
exp = pd.get_dummies(df)
res = koalas.get_dummies(ddf)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = | pd.get_dummies(df.d) | pandas.get_dummies |
"""
This module allows to collect experimental variables from fits
to data that can then be used as input to simulations
"""
# Author: <NAME>, <NAME>, 2019
# License: MIT License
import numpy as np
import pandas as pd
import scipy.optimize
import scipy.stats
import colicycle.time_mat_operations as tmo
import colicycle.tools_GW as tgw
def load_data(file_to_load, size_scale = 0.065, period=None):
"""Loads a cell cycle dataframe and completes some information
Parameters
----------
file_to_load : str
path to file to load
size_scale : float
pixel to nm scaling
period : int
period of dataframe to keep
Returns
-------
colidata : Pandas dataframe
cell cycle dataframe
"""
colidata = | pd.read_pickle(file_to_load) | pandas.read_pickle |
"""
library for simulating semi-analytic mock maps of CMB secondary anisotropies
"""
__author__ = ["<NAME>", "<NAME>"]
__email__ = ["<EMAIL>", "<EMAIL>"]
import os
import warnings
from sys import getsizeof
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from warnings import warn
import inspect
from itertools import product
import operator
import re
from functools import partial
from tqdm.auto import tqdm
from astropaint.lib.log import CMBAlreadyAdded, NoiseAlreadyAdded
try:
import healpy as hp
except ModuleNotFoundError:
warn("Healpy is not installed. You cannot use the full sky canvas without it.")
from astropy.coordinates import cartesian_to_spherical
from .lib import transform, utils
# find the package path; same as __path__
path_dir = os.path.dirname(os.path.abspath(__file__))
#########################################################
# Halo Catalog Object
#########################################################
class Catalog:
"""halo catalog containing halo masses, locations, velocities, and redshifts
Units
-----
x, y, z: [Mpc]
v_x, v_y, v_z: [km/s]
M_200c: [M_sun]
"""
def __init__(self,
data=None,
calculate_redshifts=False,
default_redshift=0,
):
"""
Parameters
----------
data: dataframe or str
Input data can be either a pandas dataframe or any table with the
following columns:
["x", "y", "z", "v_x", "v_y", "M_200c"]
Alternatively data can be set to a string indicating the name of
a halo catalog to be loaded. There are various options for the input
string:
"random box" and "random shell" (case insensitive) respectively call
.generate_random_box() and .generate_random_shell() methods with the
default arguments.
"test" generates 6 test halos in the positive and negative x, y, z
directions. This is useful for testing and building prototypes.
Any other string will be looked up as the name of a csv file under
astropaint/data/
e.g. "websky", "MICE", or "Sehgal"
calculate_redshifts: bool
if True, redshifts of objects will be calculated from the comoving
distance according to the latest Planck cosmology (astropy.cosmo.Planck18_arXiv_v2)
This can be numerically expensive for large catalogs so if your
catalog already comes with redshifts, set this to False to save time.
default_redshift: float
If calculate_redshift is set to False, this value will be used as the
default redshift for all the halos.
"""
#TODO: define attribute dictionary with __slots__
self._build_counter = 0
self.calculate_redshifts = calculate_redshifts
# if calculate_redshifts==False, assume this redshift for everything
self.default_redshift = default_redshift
# if no input is provided generate a random catalog
if data is None:
self.data = self._initialize_catalog(1)
#self.generate_random_box()
elif isinstance(data, str):
if re.match(".*random.*box", data, re.IGNORECASE):
self.generate_random_box()
elif re.match(".*random.*shell", data, re.IGNORECASE):
self.generate_random_shell()
elif re.match(".*test.*", data, re.IGNORECASE):
self.generate_test_box(configuration=["all"])
else:
self.load_from_csv(data)
else:
#FIXME: check data type and columns
self.data = data
# .................
# octant signatures
# .................
# (x,y,z) signatures for each octant e.g. (+,+,+) , (+,+,-) etc.
self.octant_signature = self._get_octant_signatures(mode="user")
# same thing but for use in calculations
self._octant_shift_signature = self._get_octant_signatures(mode="shift")
self._octant_mirror_signature = self._get_octant_signatures(mode="mirror")
self._octant_rotate_signature = self._get_octant_signatures(mode="rotate")
# TODO: check input type/columns/etc
# ------------------------
# properties
# ------------------------
@property
def data(self):
return self._data
@data.setter
def data(self, val):
self._data = val
self._data = pd.DataFrame(self.data).reset_index(drop=True)
self.size = len(self._data)
self.box_size = self._get_box_size()
if self._build_counter>0:
print("Catalog data has been modified...\n")
# build the complete data frame
# e.g. angular distances, radii, etc.
self.build_dataframe(calculate_redshifts=self.calculate_redshifts,
default_redshift=self.default_redshift)
# ------------------------
# sample data
# ------------------------
#TODO: support inputs other than csv
def load_from_csv(self, sample_name="MICE"):
"""load sample data using the name of dataset"""
if not sample_name.endswith(".csv"):
sample_name += ".csv"
fname = os.path.join(path_dir, "data", f"{sample_name}")
print(f"Catalog loaded from:\n{fname}")
self.data = pd.read_csv(fname, index_col=0)
def save_to_csv(self, sample_name):
"""load sample data using the name of dataset"""
if not sample_name.endswith(".csv"):
sample_name += ".csv"
fname = os.path.join(path_dir, "data", f"{sample_name}")
self.data.to_csv(fname)
print(f"Catalog saved to:\n{fname}")
def generate_random_box(self,
box_size=50,
v_max=100,
mass_min=1E14,
mass_max=1E15,
n_tot=50000,
put_on_shell=False,
inplace=True,
):
catalog = self._initialize_catalog(n_tot)
print("generating random catalog...\n")
# generate random positions
x, y, z = np.random.uniform(low=-box_size/2,
high=box_size/2,
size=(3, n_tot))
if put_on_shell:
(x, y, z) = box_size * np.true_divide((x, y, z), np.linalg.norm((x, y, z), axis=0))
catalog["x"], catalog["y"], catalog["z"] = x, y, z
# generate random velocities
v_x, v_y, v_z = np.random.uniform(low=-v_max,
high=v_max,
size=(3, n_tot))
catalog["v_x"], catalog["v_y"], catalog["v_z"] = v_x, v_y, v_z
# generate random log uniform masses
catalog["M_200c"] = np.exp(np.random.uniform(low=np.log(mass_min),
high=np.log(mass_max),
size=n_tot))
if inplace:
self.data = pd.DataFrame(catalog)
else:
return | pd.DataFrame(catalog) | pandas.DataFrame |
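# --- illustrative sketch ----------------------------------------------------
# The put_on_shell branch above rescales every (x, y, z) to the same radius.
# A tiny standalone check of that normalization (box_size is arbitrary here):
import numpy as np

box_size = 50.0
xyz = np.random.uniform(-box_size / 2, box_size / 2, size=(3, 5))
shell = box_size * np.true_divide(xyz, np.linalg.norm(xyz, axis=0))
print(np.linalg.norm(shell, axis=0))  # all radii equal box_size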
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from sklearn.model_selection import StratifiedKFold
import numpy as np
import csv
import re
import pickle
import time
from datetime import timedelta
import pandas as pd
from pathlib import Path
import sys
sys.path.insert(0,'/nfs/ghome/live/yashm/Desktop/research/personality/utils')
import utils.gen_utils as utils
inp_dir, dataset, lr, batch_size, epochs, log_expdata, embed, layer, mode, embed_mode, jobid = utils.parse_args()
# embed_mode {mean, cls}
# mode {512_head, 512_tail, 256_head_tail}
network = 'LR'
print('{} : {} : {} : {} : {}'.format(dataset, embed, layer, mode, embed_mode))
n_classes = 2
seed = jobid
np.random.seed(seed)
tf.random.set_seed(seed)
start = time.time()
path = 'explogs/'
def merge_features(embedding, other_features):
df = pd.merge(embedding, other_features, left_index=True, right_index=True)
return df
if (re.search(r'base', embed)):
n_hl = 12
hidden_dim = 768
elif (re.search(r'large', embed)):
n_hl = 24
hidden_dim = 1024
file = open(inp_dir + dataset + '-' + embed + '-' + embed_mode + '-' + mode + '.pkl', 'rb')
data = pickle.load(file)
author_ids, data_x, data_y = list(zip(*data))
file.close()
# alphaW is responsible for which BERT layer embedding we will be using
if (layer == 'all'):
alphaW = np.full([n_hl], 1 / n_hl)
else:
alphaW = np.zeros([n_hl])
alphaW[int(layer) - 1] = 1
# just changing the way data is stored (tuples of minibatches) and getting the output for the required layer of BERT using alphaW
# data_x[ii].shape = (12, batch_size, 768)
inputs = []
targets = []
n_batches = len(data_y)
for ii in range(n_batches):
inputs.extend(np.einsum('k,kij->ij', alphaW, data_x[ii]))
targets.extend(data_y[ii])
inputs = np.array(inputs)
full_targets = np.array(targets)
trait_labels = ['EXT','NEU','AGR','CON','OPN']
n_splits = 10
fold_acc = {}
expdata = {}
expdata['acc'], expdata['trait'], expdata['fold'] = [],[],[]
for trait_idx in range(full_targets.shape[1]):
# convert targets to one-hot encoding
targets = full_targets[:, trait_idx]
n_data = targets.shape[0]
expdata['trait'].extend([trait_labels[trait_idx]] * n_splits)
expdata['fold'].extend(np.arange(1,n_splits+1))
skf = StratifiedKFold(n_splits=n_splits, shuffle=False)
k = -1
for train_index, test_index in skf.split(inputs, targets):
x_train, x_test = inputs[train_index], inputs[test_index]
y_train, y_test = targets[train_index], targets[test_index]
#converting to one-hot embedding
y_train = tf.keras.utils.to_categorical(y_train, num_classes=n_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=n_classes)
model = tf.keras.models.Sequential()
# define the neural network architecture
model.add(tf.keras.layers.Dense(n_classes, input_dim=hidden_dim))
# model.add(tf.keras.layers.Dense(50, activation='relu'))
k+=1
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['mse', 'accuracy'])
history = model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,
validation_data=(x_test, y_test), verbose=0)
# if(k==0):
# print(model.summary())
# print('fold : {} \ntrait : {}\n'.format(k+1, trait_labels[trait_idx]))
# print('\nacc: ', history.history['accuracy'])
# print('val acc: ', history.history['val_accuracy'])
# print('loss: ', history.history['loss'])
# print('val loss: ', history.history['val_loss'])
expdata['acc'].append(max(history.history['val_accuracy']))
# print(expdata)
# for trait in fold_acc.keys():
# fold_acc[trait] = np.mean(fold_acc[trait])
# print (expdata)
print ("done")
df = | pd.DataFrame.from_dict(expdata) | pandas.DataFrame.from_dict |
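# --- illustrative sketch ----------------------------------------------------
# The alphaW/einsum step above selects (or averages) one of the n_hl BERT
# layers for every example in a minibatch. Shapes below are synthetic.
import numpy as np

n_hl, batch_size, hidden_dim = 12, 4, 768
batch = np.random.randn(n_hl, batch_size, hidden_dim)   # per-layer embeddings
alphaW = np.zeros(n_hl)
alphaW[10] = 1                                           # keep layer 11 only
selected = np.einsum('k,kij->ij', alphaW, batch)         # -> (batch_size, hidden_dim)
assert np.allclose(selected, batch[10])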
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://stackoverflow.com/questions/7961363/removing-duplicates-in-lists
from collections import OrderedDict
from functools import reduce
# pip install pandas
import pandas as pd
# pip install numpy
import numpy as np
def remove_duplicates__list_set(items: list) -> list:
return list(set(items))
def remove_duplicates__OrderedDict_fromkeys(items: list) -> list:
return list(OrderedDict.fromkeys(items))
def remove_duplicates__dict_fromkeys(items: list) -> list:
return list(dict.fromkeys(items))
def remove_duplicates__OrderedDict_list_v1(items: list) -> list:
return list(OrderedDict((x, True) for x in items))
def remove_duplicates__OrderedDict_list_v2(items: list) -> list:
return list(OrderedDict((x, True) for x in items).keys())
def remove_duplicates__generate_new_list_v1(items: list) -> list:
out_list = []
added = set()
for val in items:
if val not in added:
out_list.append(val)
added.add(val)
return out_list
def remove_duplicates__generate_new_list_v2(items: list) -> list:
return [x for i, x in enumerate(items) if x not in items[:i]]
# SOURCE: https://stackoverflow.com/a/29898868/5909792
def remove_duplicates__reduce_v1(items: list) -> list:
return reduce(lambda r, v: v in r and r or r + [v], items, [])
# SOURCE: https://stackoverflow.com/a/29898868/5909792
def remove_duplicates__reduce_v2(items: list) -> list:
return reduce(lambda r, v: v in r[1] and r or (r[0].append(v) or r[1].add(v)) or r, items, ([], set()))[0]
def remove_duplicates__pandas(items: list) -> list:
return | pd.unique(items) | pandas.unique |
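# --- illustrative sketch ----------------------------------------------------
# Quick sanity check of a few of the order-preserving variants defined above
# on the same sample list (pure illustration, not part of the benchmark module):
items = [3, 1, 3, 2, 1, 4]
assert remove_duplicates__dict_fromkeys(items) == [3, 1, 2, 4]
assert remove_duplicates__generate_new_list_v1(items) == [3, 1, 2, 4]
assert remove_duplicates__generate_new_list_v2(items) == [3, 1, 2, 4]
print("order-preserving variants agree")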
# standard libraries
import enum
import glob
import os
import warnings
import zipfile
# third-party libraries
import matplotlib.pyplot as plt
import natsort
import pandas
def get_align_count_pipelines():
return enum.Enum('align_count_pipeline', 'STAR_HTSeq Kallisto') # SAMstats')
# Below is the complete list of labels in the summary file
def get_fastqc_summary_labels():
return ["Basic Statistics", "Per base sequence quality",
"Per tile sequence quality",
"Per sequence quality scores",
"Per base sequence content", "Per sequence GC content",
"Per base N content", "Sequence Length Distribution",
"Sequence Duplication Levels",
"Overrepresented sequences", "Adapter Content",
"Kmer Content"]
def get_sample_from_filename(filename):
return filename.replace(".fastq.gz", "").strip()
def get_fastqc_and_alignment_summary_stats(align_count_pipeline_val, pipeline_output_dir, num_total_threshold=None,
labels_of_interest=get_fastqc_summary_labels(), num_aligned_threshold=None,
num_unique_aligned_threshold=None, percent_aligned_threshold=None,
percent_unique_aligned_threshold=None):
fastqc_results_df = _get_fastqc_results_without_msgs(pipeline_output_dir, labels_of_interest)
alignment_stats_df = get_alignments_stats_df(align_count_pipeline_val, pipeline_output_dir, _get_default_fail_msg(),
num_total_threshold, num_aligned_threshold,
num_unique_aligned_threshold,
percent_aligned_threshold,
percent_unique_aligned_threshold)
result = _combine_fastqc_and_alignment_stats(fastqc_results_df, alignment_stats_df)
return result
def _get_default_fail_msg():
return "CHECK"
def get_fastqc_results(fastqc_results_dir, labels_of_interest, count_fail_threshold, fail_msg=_get_default_fail_msg()):
total_seqs_df = _get_fastqc_total_seqs(fastqc_results_dir)
statuses_df = _get_fastqc_statuses(fastqc_results_dir, labels_of_interest)
result = pandas.merge(statuses_df, total_seqs_df, on=_get_name_str(), how="outer")
result = result[[_get_name_str(), _get_fastqc_statuses_str(), _get_total_str()]]
total_fail_msg = _get_thresh_fail_msgs(count_fail_threshold, result, _get_total_str())
result = _combine_msgs_and_decide_status(result, fail_msg, total_fail_msg, result[_get_fastqc_statuses_str()])
result = result.drop(_get_fastqc_statuses_str(), axis=1)
if result.empty:
warnings.warn("No fastqc results were found in directory '{0}'".format(fastqc_results_dir))
return result
def _get_fastqc_results_without_msgs(fastqc_results_dir, labels_of_interest):
total_seqs_df = _get_fastqc_total_seqs(fastqc_results_dir)
statuses_df = _get_fastqc_statuses(fastqc_results_dir, labels_of_interest)
result = pandas.merge(statuses_df, total_seqs_df, on=_get_name_str(), how="outer")
result = result[[_get_name_str(), _get_fastqc_statuses_str(), _get_total_str()]]
return result
def get_alignments_stats_df(align_count_pipeline_val, pipeline_output_dir, fail_msg=_get_default_fail_msg(),
num_total_threshold=None, num_aligned_threshold=None, num_unique_aligned_threshold=None,
percent_aligned_threshold=None, percent_unique_aligned_threshold=None):
parse_stats_func = _get_parser_for_pipeline(align_count_pipeline_val)
basic_stats_df = parse_stats_func(pipeline_output_dir)
if basic_stats_df.empty:
warnings.warn("No alignment statistics were found in directory '{0}'".format(pipeline_output_dir))
result = _annotate_stats(basic_stats_df, fail_msg, num_total_threshold, num_aligned_threshold,
num_unique_aligned_threshold, percent_aligned_threshold, percent_unique_aligned_threshold)
return result
def make_aligned_reads_plot(summary_stats_df):
#Barplot of number of aligned reads per sample
plt.figure(figsize=(10,10))
ax = plt.subplot(111)
summary_stats_df[[_get_name_str(), _get_total_str(),
_get_uniquely_aligned_str()]].plot(ax=ax, kind='bar', title='# of Reads')
#ax.axis(x='off')
ax.axhline(y=10000000, linewidth=2, color='Red', zorder=0)
xTickMarks = [x for x in summary_stats_df.Sample.tolist()]
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=45, ha='right', fontsize=10)
def parse_star_alignment_stats(pipeline_output_dir):
# Look for each stats file in each relevant subdirectory of the results directory
summary_wildpath = os.path.join(pipeline_output_dir, '*/', "Log.final.out")
summary_filepaths = [x for x in glob.glob(summary_wildpath)]
alignment_stats = pandas.DataFrame()
for curr_summary_path in summary_filepaths:
sample_name = os.path.split(os.path.dirname(curr_summary_path))[1]
p = _parse_star_log_final_out(sample_name, curr_summary_path)
alignment_stats = alignment_stats.append(p)
return alignment_stats
def _parse_star_log_final_out(sample_name, curr_summary_path):
df = pandas.read_csv(curr_summary_path, sep="\t", header=None)
raw_reads = df.iloc[[4]]
y = raw_reads[1].to_frame()
aligned_reads = df.iloc[[7]]
z = aligned_reads[1].to_frame()
d = {_get_name_str(): pandas.Series(sample_name),
_get_total_str(): pandas.Series(float(y[1])),
_get_uniquely_aligned_str(): pandas.Series(float(z[1]))}
p = pandas.DataFrame(data=d)
return p
def parse_kallisto_alignment_stats(pipeline_output_dir):
counts_wildpath = os.path.join(pipeline_output_dir, "*_counts.txt")
counts_fps = [x for x in glob.glob(counts_wildpath)]
sample_stats = []
for count_fp in counts_fps:
_, count_filename = os.path.split(count_fp)
if count_filename == "all_gene_counts.txt": continue
df = pandas.read_csv(count_fp, sep="\t")
long_sample_name = df.columns.values[-1]
short_sample_name = long_sample_name.split("/")[-1]
no_nan_df = df.dropna(subset=["gene"]) # NaN in first col
        counts_series = no_nan_df.iloc[:, 3]
total_aligned_counts = counts_series.sum()
sample_stats.append({_get_name_str(): short_sample_name, _get_percent_align_str(): total_aligned_counts})
alignment_stats = pandas.DataFrame(sample_stats)
return alignment_stats
def _get_parser_for_pipeline(align_count_pipeline_val):
pipelines = get_align_count_pipelines()
if align_count_pipeline_val.name == pipelines.Kallisto.name:
result_func = parse_kallisto_alignment_stats
elif align_count_pipeline_val.name == pipelines.STAR_HTSeq.name:
result_func = parse_star_alignment_stats
# elif align_count_pipeline_val == ALIGN_COUNT_PIPELINES.SAMstats:
# result_func = parse_samstats_alignment_stats
else:
raise ValueError(("Unrecognized alignment and counting "
"pipeline specified: '{0}'").format(align_count_pipeline_val.name))
return result_func
def _get_name_str():
return "Sample"
def _get_total_str():
return "Total Reads"
def _get_align_str():
return "Aligned Reads"
def _get_percent_align_str():
return "Percent Aligned"
def _get_uniquely_aligned_str():
return "Uniquely Aligned Reads"
def _get_percent_unique_aligned_str():
return "Percent Uniquely Aligned"
def _get_status_str():
return "Status"
def _get_notes_str():
return "Notes"
def _get_unknown_str():
return "Unavailable"
def _get_fastqc_statuses_str():
return "FASTQC Messages"
def _get_fastqc_total_seqs(fastqc_results_dir, *func_args):
result = _loop_over_fastqc_files(fastqc_results_dir,
"fastqc_data.txt", _find_total_seqs_from_fastqc, *func_args)
return result
def _get_fastqc_statuses(fastqc_results_dir, *func_args):
result = _loop_over_fastqc_files(fastqc_results_dir,
"summary.txt", _find_fastqc_statuses_from_fastqc, *func_args)
temp_fastqc_statuses = [", ".join(filter(None, x)) for x in result[_get_fastqc_statuses_str()]]
result[_get_fastqc_statuses_str()] = temp_fastqc_statuses
return result
def _loop_over_fastqc_files(fastqc_results_dir, file_suffix, parse_func, *func_args):
rows_list = []
fastqc_suffix = "_fastqc"
zip_suffix = ".zip"
def collect_record(a_file_handle, *any_func_args):
curr_record = {}
for line in a_file_handle:
curr_record = parse_func(line, curr_record, *any_func_args)
if len(curr_record) > 0:
rows_list.append(curr_record)
fastqc_results_dir = os.path.abspath(fastqc_results_dir)
for root, dirnames, filenames in os.walk(fastqc_results_dir):
for dirname in dirnames:
if dirname.endswith(fastqc_suffix):
file_fp = os.path.join(root, dirname, file_suffix)
with open(file_fp, "rb") as summary_file:
collect_record(summary_file, *func_args)
for filename in filenames:
if filename.endswith(fastqc_suffix + zip_suffix):
file_fp = os.path.join(fastqc_results_dir, filename)
with zipfile.ZipFile(file_fp) as fastqc_zip:
summary_name = os.path.join(filename.replace(zip_suffix, ""), file_suffix)
with fastqc_zip.open(summary_name, 'r') as summary_file:
collect_record(summary_file, *func_args)
outputDf = | pandas.DataFrame(rows_list) | pandas.DataFrame |
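# --- illustrative sketch ----------------------------------------------------
# _parse_star_log_final_out() above grabs rows 4 and 7 of STAR's two-column,
# tab-separated Log.final.out (total input reads and uniquely mapped reads).
# The same positional lookup on a small synthetic frame; the row labels and
# counts below are made up for illustration.
import pandas

log_rows = ([["...", ""]] * 4 + [["Number of input reads |", "1000000"]]
            + [["...", ""]] * 2 + [["Uniquely mapped reads number |", "850000"]])
log_df = pandas.DataFrame(log_rows)
total_reads = float(log_df.iloc[4, 1])
unique_reads = float(log_df.iloc[7, 1])
print(total_reads, unique_reads)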
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
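# datetime64 dtype: masked entries become NaT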
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to the original do not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
                                     labels=[major_labels, minor_labels],
                                     names=self.index_names,
                                     verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
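# the label (code) dtype should scale with the number of level values
# (int8 / int16 / int32), as asserted below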
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for a level with a large number of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# scalar input should not raise an unhelpful error; it should demand list-like input
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# scalar input should not raise an unhelpful error; it should demand list-like input
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# scalar input should not raise an unhelpful error; it should demand list-like input
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
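# with copy=True, later mutation of the input levels/labels arrays
# must not leak into the constructed index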
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be copied (at least shallowly)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# For labels, it doesn't matter which way they are copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# For names, it doesn't matter which way they are copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
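# NaT does not become a level value; it is encoded as a missing label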
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is cast to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
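# method='pad'/'ffill' matches each target to the last position at or before it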
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
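# method='backfill'/'bfill' matches each target to the next position at or after it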
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# not just a smoke test: check that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME: data types change to float because
# of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01'))
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import pandas
TEST_DF = pandas.DataFrame([1, 2, 3])
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
from pandas.testing import assert_index_equal
import matplotlib
import modin.pandas as pd
import sys
from modin.pandas.test.utils import (
NROWS,
RAND_LOW,
RAND_HIGH,
df_equals,
arg_keys,
name_contains,
test_data,
test_data_values,
test_data_keys,
axis_keys,
axis_values,
int_arg_keys,
int_arg_values,
create_test_dfs,
eval_general,
generate_multiindex,
extra_test_parameters,
)
from modin.config import NPartitions
NPartitions.put(4)
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
def eval_setitem(md_df, pd_df, value, col=None, loc=None):
if loc is not None:
col = pd_df.columns[loc]
value_getter = value if callable(value) else (lambda *args, **kwargs: value)
eval_general(
md_df, pd_df, lambda df: df.__setitem__(col, value_getter(df)), __inplace__=True
)
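# Hypothetical usage sketch (not part of the original suite): passing a callable
# value lets a test assign a derived column on both frames at once, e.g.
#     eval_setitem(*create_test_dfs(test_data["int_data"]), value=lambda df: df.iloc[:, 0], loc=-1)
# which sets the last column to a copy of the first on both the Modin and pandas
# frames and relies on eval_general's __inplace__=True path to compare them.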
@pytest.mark.parametrize(
"dates",
[
["2018-02-27 09:03:30", "2018-02-27 09:04:30"],
["2018-02-27 09:03:00", "2018-02-27 09:05:00"],
],
)
@pytest.mark.parametrize("subset", ["a", "b", ["a", "b"], None])
def test_asof_with_nan(dates, subset):
data = {"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]}
index = pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
)
modin_where = pd.DatetimeIndex(dates)
pandas_where = pandas.DatetimeIndex(dates)
compare_asof(data, index, modin_where, pandas_where, subset)
@pytest.mark.parametrize(
"dates",
[
["2018-02-27 09:03:30", "2018-02-27 09:04:30"],
["2018-02-27 09:03:00", "2018-02-27 09:05:00"],
],
)
@pytest.mark.parametrize("subset", ["a", "b", ["a", "b"], None])
def test_asof_without_nan(dates, subset):
data = {"a": [10, 20, 30, 40, 50], "b": [70, 600, 30, -200, 500]}
index = pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
)
modin_where = pd.DatetimeIndex(dates)
pandas_where = pandas.DatetimeIndex(dates)
compare_asof(data, index, modin_where, pandas_where, subset)
@pytest.mark.parametrize(
"lookup",
[
[60, 70, 90],
[60.5, 70.5, 100],
],
)
@pytest.mark.parametrize("subset", ["col2", "col1", ["col1", "col2"], None])
def test_asof_large(lookup, subset):
data = test_data["float_nan_data"]
index = list(range(NROWS))
modin_where = pd.Index(lookup)
pandas_where = pandas.Index(lookup)
compare_asof(data, index, modin_where, pandas_where, subset)
def compare_asof(
data, index, modin_where: pd.Index, pandas_where: pandas.Index, subset
):
modin_df = pd.DataFrame(data, index=index)
pandas_df = pandas.DataFrame(data, index=index)
df_equals(
modin_df.asof(modin_where, subset=subset),
pandas_df.asof(pandas_where, subset=subset),
)
df_equals(
modin_df.asof(modin_where.values, subset=subset),
pandas_df.asof(pandas_where.values, subset=subset),
)
df_equals(
modin_df.asof(list(modin_where.values), subset=subset),
pandas_df.asof(list(pandas_where.values), subset=subset),
)
df_equals(
modin_df.asof(modin_where.values[0], subset=subset),
pandas_df.asof(pandas_where.values[0], subset=subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
# Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.iloc[0] = modin_df.iloc[1]
pandas_df.iloc[0] = pandas_df.iloc[1]
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.iloc[:, 0] = modin_df.iloc[:, 1]
pandas_df.iloc[:, 0] = pandas_df.iloc[:, 1]
df_equals(modin_df, pandas_df)
# From issue #1775
df_equals(
modin_df.iloc[lambda df: df.index.get_indexer_for(df.index[:5])],
pandas_df.iloc[lambda df: df.index.get_indexer_for(df.index[:5])],
)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
# Scalar
df_equals(modin_df.loc[0, key1], pandas_df.loc[0, key1])
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [i % 3 == 0 for i in range(len(modin_df.index))]
columns = [i % 5 == 0 for i in range(len(modin_df.columns))]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
modin_result = modin_df.loc[:, columns]
pandas_result = pandas_df.loc[:, columns]
df_equals(modin_result, pandas_result)
modin_result = modin_df.loc[indices]
pandas_result = pandas_df.loc[indices]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# From issue #1023
key1 = modin_df.columns[0]
key2 = modin_df.columns[-2]
df_equals(modin_df.loc[:, key1:key2], pandas_df.loc[:, key1:key2])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
# From issue #1775
df_equals(
modin_df.loc[lambda df: df.iloc[:, 0].isin(list(range(1000)))],
pandas_df.loc[lambda df: df.iloc[:, 0].isin(list(range(1000)))],
)
# From issue #1374
with pytest.raises(KeyError):
modin_df.loc["NO_EXIST"]
def test_loc_multi_index():
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"])
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert modin_df.loc[("bar", "one"), "col1"] == pandas_df.loc[("bar", "one"), "col1"]
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
# From issue #1456
transposed_modin = modin_df.T
transposed_pandas = pandas_df.T
df_equals(
transposed_modin.loc[transposed_modin.index[:-2], :],
transposed_pandas.loc[transposed_pandas.index[:-2], :],
)
# From issue #1610
df_equals(modin_df.loc[modin_df.index], pandas_df.loc[pandas_df.index])
df_equals(modin_df.loc[modin_df.index[:7]], pandas_df.loc[pandas_df.index[:7]])
@pytest.mark.parametrize("index", [["row1", "row2", "row3"]])
@pytest.mark.parametrize("columns", [["col1", "col2"]])
def test_loc_assignment(index, columns):
md_df, pd_df = create_test_dfs(index=index, columns=columns)
for i, ind in enumerate(index):
for j, col in enumerate(columns):
value_to_assign = int(str(i) + str(j))
md_df.loc[ind][col] = value_to_assign
pd_df.loc[ind][col] = value_to_assign
df_equals(md_df, pd_df)
@pytest.fixture
def loc_iter_dfs():
columns = ["col1", "col2", "col3"]
index = ["row1", "row2", "row3"]
return create_test_dfs(
{col: ([idx] * len(index)) for idx, col in enumerate(columns)},
columns=columns,
index=index,
)
@pytest.mark.parametrize("reverse_order", [False, True])
@pytest.mark.parametrize("axis", [0, 1])
def test_loc_iter_assignment(loc_iter_dfs, reverse_order, axis):
if reverse_order and axis:
pytest.xfail(
"Due to internal sorting of lookup values assignment order is lost, see GH-#2552"
)
md_df, pd_df = loc_iter_dfs
select = [slice(None), slice(None)]
select[axis] = sorted(pd_df.axes[axis][:-1], reverse=reverse_order)
select = tuple(select)
pd_df.loc[select] = pd_df.loc[select] + pd_df.loc[select]
md_df.loc[select] = md_df.loc[select] + md_df.loc[select]
df_equals(md_df, pd_df)
@pytest.mark.parametrize("reverse_order", [False, True])
@pytest.mark.parametrize("axis", [0, 1])
def test_loc_order(loc_iter_dfs, reverse_order, axis):
md_df, pd_df = loc_iter_dfs
select = [slice(None), slice(None)]
select[axis] = sorted(pd_df.axes[axis][:-1], reverse=reverse_order)
select = tuple(select)
df_equals(pd_df.loc[select], md_df.loc[select])
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc_nested_assignment(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
modin_df[key1].loc[0] = 500
pandas_df[key1].loc[0] = 500
df_equals(modin_df, pandas_df)
modin_df[key2].loc[0] = None
pandas_df[key2].loc[0] = None
df_equals(modin_df, pandas_df)
def test_iloc_assignment():
modin_df = pd.DataFrame(index=["row1", "row2", "row3"], columns=["col1", "col2"])
pandas_df = pandas.DataFrame(
index=["row1", "row2", "row3"], columns=["col1", "col2"]
)
modin_df.iloc[0]["col1"] = 11
modin_df.iloc[1]["col1"] = 21
modin_df.iloc[2]["col1"] = 31
modin_df.iloc[0]["col2"] = 12
modin_df.iloc[1]["col2"] = 22
modin_df.iloc[2]["col2"] = 32
pandas_df.iloc[0]["col1"] = 11
pandas_df.iloc[1]["col1"] = 21
pandas_df.iloc[2]["col1"] = 31
pandas_df.iloc[0]["col2"] = 12
pandas_df.iloc[1]["col2"] = 22
pandas_df.iloc[2]["col2"] = 32
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc_nested_assignment(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
modin_df[key1].iloc[0] = 500
pandas_df[key1].iloc[0] = 500
df_equals(modin_df, pandas_df)
modin_df[key2].iloc[0] = None
pandas_df[key2].iloc[0] = None
df_equals(modin_df, pandas_df)
def test_loc_series():
md_df, pd_df = create_test_dfs({"a": [1, 2], "b": [3, 4]})
pd_df.loc[pd_df["a"] > 1, "b"] = np.log(pd_df["b"])
md_df.loc[md_df["a"] > 1, "b"] = np.log(md_df["b"])
df_equals(pd_df, md_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
def test_reindex():
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like():
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity():
source_df = pandas.DataFrame(test_data["int_data"])[
["col1", "index", "col3", "col4"]
]
mapping = {"col1": "a", "index": "b", "col3": "c", "col4": "d"}
modin_df = pd.DataFrame(source_df)
df_equals(modin_df.rename(columns=mapping), source_df.rename(columns=mapping))
renamed2 = source_df.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper))
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetically
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# Using the `mapper` functionality with `axis`
assert_index_equal(
modin_df.rename(str.upper, axis=0).index, df.rename(str.upper, axis=0).index
)
assert_index_equal(
modin_df.rename(str.upper, axis=1).columns,
df.rename(str.upper, axis=1).columns,
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = source_df.rename(columns={"col3": "foo", "col4": "bar"})
modin_df = pd.DataFrame(source_df)
assert_index_equal(
modin_df.rename(columns={"col3": "foo", "col4": "bar"}).index,
source_df.rename(columns={"col3": "foo", "col4": "bar"}).index,
)
# other axis
renamed = source_df.T.rename(index={"col3": "foo", "col4": "bar"})
assert_index_equal(
source_df.T.rename(index={"col3": "foo", "col4": "bar"}).index,
modin_df.T.rename(index={"col3": "foo", "col4": "bar"}).index,
)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex():
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
assert_index_equal(renamed.columns, modin_renamed.columns)
import datetime
import inspect
import logging
import numpy.testing as npt
import os.path
import pandas as pd
import pkgutil
import sys
from tabulate import tabulate
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO, BytesIO
# #find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..iec_exe import Iec, IecOutputs
#print(sys.path)
#print(os.path)
# load transposed qaqc data for inputs and expected outputs
# this works for both local nosetests and travis deploy
#input details
try:
if __package__ is not None:
csv_data = pkgutil.get_data(__package__, 'iec_qaqc_in_transpose.csv')
data_inputs = BytesIO(csv_data)
pd_obj_inputs = pd.read_csv(data_inputs, index_col=0, engine='python')
else:
csv_transpose_path_in = os.path.join(os.path.dirname(__file__),"iec_qaqc_in_transpose.csv")
print(csv_transpose_path_in)
pd_obj_inputs = pd.read_csv(csv_transpose_path_in, index_col=0, engine='python')
'''
Created on 19 May 2018
@author: Ari-Tensors
Binary classification: Predict if an asset will fail within a certain time frame (e.g. cycles)
'''
import keras
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os, traceback
import json
# Setting seed for reproducibility
np.random.seed(1234)
PYTHONHASHSEED = 0
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix, recall_score, precision_score
from keras.models import Sequential,load_model,save_model
from keras.layers import Dense, Dropout, LSTM
##################################
# Data Ingestion
##################################
pd.options.display.max_rows = 500
pd.options.display.max_columns = 50
pd.options.display.width = 1000
class BinaryClassification:
#class attributes
# pick a large window size of 50 cycles
sequence_length = 25
#local model path
model_path = './server/Output/binary_model.h5'
# function to generate labels
def gen_labels(self,id_df, seq_length, label):
# For one id I put all the labels in a single matrix.
# For example:
# [[1]
# [4]
# [1]
# [5]
# [9]
# ...
# [200]]
data_matrix = id_df[label].values
num_elements = data_matrix.shape[0]
# I have to remove the first seq_length labels
# because for one id the first sequence of seq_length size has as target
# the last label (the previous ones are discarded).
# All the next id's sequences will have associated step by step one label as target.
return data_matrix[seq_length:num_elements, :]
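# Illustrative example (hypothetical numbers, not from the dataset): with
# seq_length=25 and an engine that has 120 cycle rows, gen_labels returns
# data_matrix[25:120, :], i.e. 95 label values, which lines up one-to-one
# with the 95 windows gen_sequence yields for that same engine.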
# function to reshape features into (samples, time steps, features)
def gen_sequence(self,id_df, seq_length, seq_cols):
""" Only sequences that meet the window-length are considered, no padding is used. This means for testing
Need to drop those which are below the window-length. An alternative would be to pad sequences so that
shorter ones can be used. """
# for one id I put all the rows in a single matrix
data_matrix = id_df[seq_cols].values
num_elements = data_matrix.shape[0]
# Iterate over two lists in parallel.
# For example id1 has 192 rows and sequence_length is equal to 50,
# so zip iterates over the two following ranges of numbers: (0,142),(50,192)
# 0 50 -> from row 0 to row 50
# 1 51 -> from row 1 to row 51
# 2 52 -> from row 2 to row 52
# ...
# 141 191 -> from row 141 to 191
for start, stop in zip(range(0, num_elements-seq_length), range(seq_length, num_elements)):
yield data_matrix[start:stop, :]
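# Shape sketch (derived from the code above, not from a measured run): each
# yielded window has shape (seq_length, len(seq_cols)). With the defaults used
# in startTraining -- 3 settings + cycle_norm + 21 sensors = 25 feature columns
# and sequence_length = 25 -- np.concatenate over all engines produces the
# (samples, 25, 25) array that feeds the first LSTM layer.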
def startTraining(self,w1,w0,epoch_val):
try:
# read training data - It is the aircraft engine run-to-failure data.
self.train_df = pd.read_csv('./server/Dataset/PM_train.txt', sep=" ", header=None)
self.train_df.drop(self.train_df.columns[[26, 27]], axis=1, inplace=True)
self.train_df.columns = ['id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
's15', 's16', 's17', 's18', 's19', 's20', 's21']
self.train_df = self.train_df.sort_values(['id','cycle'])
# TRAIN
# Data Labeling - generate column RUL (Remaining Useful Life or Time to Failure)
rul = pd.DataFrame(self.train_df.groupby('id')['cycle'].max()).reset_index()
rul.columns = ['id', 'max']
self.train_df = self.train_df.merge(rul, on=['id'], how='left')
self.train_df['RUL'] = self.train_df['max'] - self.train_df['cycle']
self.train_df.drop('max', axis=1, inplace=True)
# generate label columns for training data
# only use "label1" for binary classification,
# while trying to answer the question: is a specific engine going to fail within w1 cycles?
# w1 = 30
# w0 = 15
self.train_df['label1'] = np.where(self.train_df['RUL'] <= w1, 1, 0 )
self.train_df['label2'] = self.train_df['label1']
self.train_df.loc[self.train_df['RUL'] <= w0, 'label2'] = 2
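# Worked example of the labelling rule above (assuming w1=30 and w0=15 as in
# the commented-out defaults): a cycle with RUL=40 gets label1=0, label2=0;
# RUL=20 gets label1=1, label2=1; RUL=10 gets label1=1, label2=2.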
# MinMax normalization (from 0 to 1)
self.train_df['cycle_norm'] = self.train_df['cycle']
self.cols_normalize = self.train_df.columns.difference(['id','cycle','RUL','label1','label2'])
self.min_max_scaler = preprocessing.MinMaxScaler()
norm_train_df = pd.DataFrame(self.min_max_scaler.fit_transform(self.train_df[self.cols_normalize]),
columns=self.cols_normalize,
index=self.train_df.index)
join_df = self.train_df[self.train_df.columns.difference(self.cols_normalize)].join(norm_train_df)
self.train_df = join_df.reindex(columns = self.train_df.columns)
# pick the feature columns
sensor_cols = ['s' + str(i) for i in range(1,22)]
self.sequence_cols = ['setting1', 'setting2', 'setting3', 'cycle_norm']
self.sequence_cols.extend(sensor_cols)
# generator for the sequences
seq_gen = (list(self.gen_sequence(self.train_df[self.train_df['id']==id], self.sequence_length, self.sequence_cols))
for id in self.train_df['id'].unique())
# generate sequences and convert to numpy array
seq_array = np.concatenate(list(seq_gen)).astype(np.float32)
seq_array.shape
# generate labels
label_gen = [self.gen_labels(self.train_df[self.train_df['id']==id], self.sequence_length, ['label1'])
for id in self.train_df['id'].unique()]
label_array = np.concatenate(label_gen).astype(np.float32)
label_array.shape
# Next, Build a deep network.
# The first layer is an LSTM layer with 100 units followed by another LSTM layer with 50 units.
# Dropout is also applied after each LSTM layer to control overfitting.
# Final layer is a Dense output layer with single unit and sigmoid activation since this is a binary classification problem.
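# Rough size sketch for the stack described above (an estimate, not taken from
# model.summary()): LSTM(100) over 25 input features holds 4*100*(25+100+1) =
# 50,400 weights, LSTM(50) adds 4*50*(100+50+1) = 30,200, and the Dense(1)
# sigmoid head adds 51, so the network has roughly 80k trainable parameters.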
# build the network
nb_features = seq_array.shape[2]
nb_out = label_array.shape[1]
model = Sequential()
model.add(LSTM(
input_shape=(self.sequence_length, nb_features),
units=100,
return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
units=50,
return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(units=nb_out, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit the network
self.history = model.fit(seq_array, label_array, epochs=epoch_val, batch_size=200, validation_split=0.05, verbose=2,
callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='min'),
keras.callbacks.ModelCheckpoint(BinaryClassification.model_path,monitor='val_loss', save_best_only=True, mode='min', verbose=0)]
)
return True
except:
traceback.print_exc()
return False
def drawModelAccuracy(self):
# summarize history for Accuracy
fig_acc = plt.figure(figsize=(10, 10))
plt.plot(self.history.history['acc'])
plt.plot(self.history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
fig_acc.savefig("./server/Output/model_accuracy.png")
def drawModelLoss(self):
# summarize history for Loss
fig_acc = plt.figure(figsize=(10, 10))
plt.plot(self.history.history['loss'])
plt.plot(self.history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
fig_acc.savefig("./server/Output/model_loss.png")
def executeAIOnTest(self,w1,w0):
try:
self.startTraining(w1,w0,2)
# read ground truth data - It contains the information of true remaining cycles for each engine in the testing data.
truth_df = pd.read_csv('./server/Dataset/PM_truth.txt', sep=" ", header=None)
truth_df.drop(truth_df.columns[[1]], axis=1, inplace=True)
# read test data - It is the aircraft engine operating data without failure events recorded.
test_df = pd.read_csv('./server/Dataset/PM_test.txt', sep=" ", header=None)