prompt (string, length 19–1.03M) | completion (string, length 4–2.12k) | api (string, length 8–90)
---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 5 13:17:22 2021
@author: trduong
"""
# import os, sys;
# sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import pandas as pd
import logging
import argparse
import sys
from utils.evaluate_func import evaluate_distribution, evaluate_fairness, evaluate_classifier, classification_performance
from utils.helpers import load_config
from utils.helpers import features_setting
if __name__ == "__main__":
"""Parsing argument"""
parser = argparse.ArgumentParser()
# parser.add_argument('--mode', type=str, default='both')
parser.add_argument('--data_name', type=str, default='compas')
# parser.add_argument('--data_name', type=str, default='adult')
# parser.add_argument('--data_name', type=str, default='bank')
args = parser.parse_args()
data_name = args.data_name
"""Load configuration"""
config_path = "/home/trduong/Data/counterfactual_fairness_game_theoric/configuration.yml"
conf = load_config(config_path)
"""Set up logging"""
logger = logging.getLogger('genetic')
file_handler = logging.FileHandler(filename=conf['evaluate_compas_log'])
stdout_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
file_handler.setFormatter(formatter)
stdout_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stdout_handler)
logger.setLevel(logging.DEBUG)
"""Setup features"""
dict_ = features_setting(data_name)
sensitive_features = dict_["sensitive_features"]
normal_features = dict_["normal_features"]
categorical_features = dict_["categorical_features"]
continuous_features = dict_["continuous_features"]
full_features = dict_["full_features"]
target = dict_["target"]
baseline_path = conf['{}'.format(data_name)]
ivr_path = conf['ivr_{}'.format(data_name)]
evaluate_path = conf['evaluate_{}'.format(data_name)]
logger.debug("Baseline path: {}".format(baseline_path))
logger.debug("Evaluate path: {}".format(evaluate_path))
logger.debug("Invariant path: {}".format(ivr_path))
# baseline_columns = ['full', 'unaware', 'cf1', 'cf2']
# method_columns = ['AL_prediction', 'GL_prediction', 'GD_prediction']
# prediction_columns = baseline_columns + method_columns
"""Load data"""
df1 = pd.read_csv(baseline_path)
df2 = pd.read_csv(ivr_path)
df2 = df2.drop(columns=full_features+[target])
df = | pd.concat([df1, df2], axis=1) | pandas.concat |
# %%
"""You know what to do."""
import pandas as pd
# %%
people = {
"first": ["Corey", "Jane", "John"],
"last": ["Schafer", "Doe", "Doe"],
"email": ["<EMAIL>",
"<EMAIL>",
"<EMAIL>"]
}
# %%
df = pd.DataFrame(people)
# %%
df
# %%
df["first"] + " " + df["last"]
# %%
"""Add full_name column."""
df["full_name"] = df["first"] + " " + df["last"]
# %%
df
# %%
"""Remove first and last columns."""
df.drop(columns=["first", "last"], inplace=True)
# %%
df
# %%
df["full_name"].str.split(" ", expand=True)
# %%
df[["first", "last"]] = df["full_name"].str.split(" ", expand=True)
# %%
df
# %%
"""Adding a new row"""
"""If we don't use ignore_index=True, it will raise an error."""
df.append({"first": "Tony"}, ignore_index=True)
# %%
people = {
'first': ['Tony', 'Steve'],
'last': ['Stark', 'Rogers'],
'email': ['<EMAIL>', '<EMAIL>']
}
df2 = | pd.DataFrame(people) | pandas.DataFrame |
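# %%
# A plausible next step for this notebook snippet (not part of the original
# rows): append df2 to df. Rows coming from df2 get NaN in the full_name column
# since df2 does not carry it. On pandas >= 2.0, DataFrame.append has been
# removed, so the equivalent pd.concat call is shown as well.
df = df.append(df2, ignore_index=True, sort=False)
# df = pd.concat([df, df2], ignore_index=True, sort=False)
# %%
df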
from itertools import count, groupby
from typing import Any, Dict, List, Tuple, Union
import pandas as pd
import spacy
from spacy.tokens.doc import Doc
from spacy_streamlit.visualizer import TOKEN_ATTRS
from streamlit import cache as st_cache
from . import constants
@st_cache(allow_output_mutation=True)
def load_spacy_nlp_model() -> spacy.language.Language:
return spacy.load(constants.SPACY_MODEL)
def _doc_text(doc: Doc) -> str:
# a workaround to use st_cache for get_doc
return doc.text # type: ignore
@st_cache(hash_funcs={Doc: _doc_text})
def get_doc(text: str) -> spacy.tokens.Doc:
nlp = load_spacy_nlp_model()
return nlp(text)
def get_tokens_df(text: str) -> pd.DataFrame:
doc = get_doc(text)
return _get_tokens_df(doc)
def _get_tokens_df(
doc: Doc,
attrs: List[str] = TOKEN_ATTRS,
) -> pd.DataFrame:
data = [[str(getattr(token, attr)) for attr in attrs] for token in doc]
df = | pd.DataFrame(data, columns=attrs) | pandas.DataFrame |
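# Illustrative helper (not part of the original module) showing how the
# functions above might be exercised. Assumes the spaCy model named by
# constants.SPACY_MODEL is installed; the sample sentence is made up.
def example_tokens_table() -> pd.DataFrame:
    # One row per token, one column per attribute in TOKEN_ATTRS
    # (text, lemma_, pos_, dep_, ...).
    return get_tokens_df("Apple is looking at buying a U.K. startup.")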
import numpy as np
import pandas as pd
from scipy.spatial import distance_matrix
def euc(a, b):
    # Note: this returns the *squared* Euclidean distance (no square root is taken).
    return np.sum(np.square(a - b))
# Example usage: euc(np.array([1, 2, 3]), np.array([2, 3, 4]))
def kruskal_stress(adata, key_repr):
    """
    Compute the Kruskal stress of the low-dimensional representations
    stored in adata.obsm[key] for each key in key_repr.
    Parameters
    ----------
    adata
        Annotated data object whose .X attribute holds the high-dimensional
        counts and whose .obsm mapping holds the low-dimensional embeddings.
    key_repr
        Iterable of adata.obsm keys for which to compute the stress.
    Returns
    -------
    """
for key in key_repr:
print(key)
high_dim_counts = adata.X
low_dim_counts = adata.obsm[key]
high_dim_distances = distance_matrix(high_dim_counts, high_dim_counts)
low_dim_distances = distance_matrix(low_dim_counts, low_dim_counts)
stress = np.sqrt(
np.square(high_dim_distances - low_dim_distances).sum()
/ np.square(low_dim_distances).sum()
)
print(
f"dist_diff = "
f"{np.square(high_dim_distances - low_dim_distances).sum()}"
)
print(f"lowdimsum = {np.square(low_dim_distances).sum()}")
df = | pd.DataFrame(low_dim_distances) | pandas.DataFrame |
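# Illustrative call for kruskal_stress above (a sketch, not part of the original
# module). Any object exposing .X and a dict-like .obsm works, so a tiny
# stand-in is used here instead of a real AnnData object; the key name "X_pca"
# is an arbitrary placeholder.
class _FakeAnnData:
    def __init__(self, X, obsm):
        self.X = X
        self.obsm = obsm

def example_kruskal_stress():
    rng = np.random.default_rng(0)
    X = rng.normal(size=(50, 20))
    # Use the first two dimensions as a fake low-dimensional embedding.
    kruskal_stress(_FakeAnnData(X, {"X_pca": X[:, :2]}), key_repr=["X_pca"])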
import warnings
from functools import reduce
import os
import json
import numpy as np
import pandas as pd
from tqdm import tqdm
from qualipy.project import Project
from qualipy.util import set_value_type, set_metric_id
from qualipy.anomaly._isolation_forest import IsolationForestModel
from qualipy.anomaly._prophet import ProphetModel
from qualipy.anomaly._std import STDCheck
from qualipy.anomaly.base import LoadedModel
from qualipy.anomaly.trend_rules import trend_rules
anomaly_columns = [
"column_name",
"date",
"metric",
"arguments",
"return_format",
"value",
"severity",
"batch_name",
"insert_time",
"trend_function_name",
]
MODS = {
"IsolationForest": IsolationForestModel,
"prophet": ProphetModel,
"std": STDCheck,
}
class GenerateAnomalies:
def __init__(self, project_name, config_dir):
self.config_dir = config_dir
with open(os.path.join(config_dir, "config.json"), "r") as conf_file:
config = json.load(conf_file)
self.model_type = config[project_name].get("ANOMALY_MODEL", "std")
self.anom_args = config[project_name].get("ANOMALY_ARGS", {})
self.specific = self.anom_args.pop("specific", {})
self.project_name = project_name
self.project = Project(project_name, config_dir=config_dir, re_init=True)
df = self.project.get_project_table()
df["floored_datetime"] = df.date.dt.floor("T")
df = (
df.groupby("floored_datetime", as_index=False)
.apply(lambda g: g[g.insert_time == g.insert_time.max()])
.reset_index(drop=True)
)
df = df.drop("floored_datetime", axis=1)
df.column_name = df.column_name + "_" + df.run_name
df["metric_name"] = (
df.column_name
+ "_"
+ df.metric.astype(str)
+ "_"
+ np.where(df.arguments.isnull(), "", df.arguments)
)
df = set_metric_id(df)
df = df.sort_values("date")
self.df = df
def _num_train_and_save(self, data, all_rows, metric_name):
try:
metric_id = data.metric_id.iloc[0]
mod = MODS[self.model_type](
config_dir=self.config_dir,
metric_name=metric_id,
project_name=self.project_name,
)
mod.fit(data)
mod.save()
preds = mod.predict(data)
if isinstance(preds, tuple):
severity = preds[1]
preds = preds[0]
outlier_rows = data[preds == -1].copy()
outlier_rows["severity"] = severity[preds == -1]
else:
outlier_rows = data[preds == -1]
outlier_rows["severity"] = np.NaN
if outlier_rows.shape[0] > 0:
all_rows.append(outlier_rows)
except Exception as e:
print(str(e))
warnings.warn(f"Unable to create anomaly model for {metric_name}")
return all_rows
def _num_from_loaded_model(self, data, all_rows):
mod = LoadedModel(config_dir=self.config_dir)
mod.load(data.metric_id.iloc[0])
preds = mod.predict(data)
if isinstance(preds, tuple):
severity = preds[1]
preds = preds[0]
outlier_rows = data[preds == -1].copy()
outlier_rows["severity"] = severity[preds == -1]
else:
outlier_rows = data[preds == -1]
outlier_rows["severity"] = np.NaN
if outlier_rows.shape[0] > 0:
all_rows.append(outlier_rows)
return all_rows
def create_anom_num_table(self, retrain=False):
df = self.df.copy()
df = df[
(df["type"] == "numerical")
| (df["column_name"].isin(["rows", "columns"]))
| (df["metric"].isin(["perc_missing", "count"]))
]
df.value = df.value.astype(float)
all_rows = []
if self.model_type != "ignore":
for metric_name, data in tqdm(df.groupby("metric_name")):
if not retrain:
try:
all_rows = self._num_from_loaded_model(data, all_rows)
except ValueError:
warnings.warn(f"Unable to load anomaly model for {metric_name}")
except FileNotFoundError:
all_rows = self._num_train_and_save(data, all_rows, metric_name)
else:
all_rows = self._num_train_and_save(data, all_rows, metric_name)
try:
data = pd.concat(all_rows).sort_values("date", ascending=False)
data["trend_function_name"] = np.NaN
data = data[anomaly_columns]
data.value = data.value.astype(str)
except:
data = pd.DataFrame([], columns=anomaly_columns)
return data
def create_anom_cat_table(self, retrain=False):
df = self.df
df = df[df["type"] == "categorical"]
all_rows = []
if self.model_type != "ignore":
for metric_id, data in tqdm(df.groupby("metric_id")):
data = set_value_type(data.copy())
try:
data_values = [
(pd.Series(c) / pd.Series(c).sum()).to_dict()
for c in data["value"]
]
unique_vals = reduce(
lambda x, y: x.union(y), [set(i.keys()) for i in data_values]
)
non_diff_lines = []
potential_lines = []
for cat in unique_vals:
values = pd.Series([i.get(cat, 0) for i in data_values])
running_means = values.rolling(window=5).mean()
differences = values - running_means
sum_abs = np.abs(differences).sum()
potential_lines.append((cat, differences, sum_abs))
non_diff_lines.append((cat, values))
potential_lines = sorted(
potential_lines, key=lambda v: v[2], reverse=True
)
diffs_df = pd.DataFrame({i[0]: i[1] for i in potential_lines})
diffs_df["sum_of_changes"] = diffs_df.abs().sum(axis=1)
all_non_diff_lines = pd.DataFrame(
{i[0]: i[1] for i in non_diff_lines}
)
for col in all_non_diff_lines.columns:
mean = all_non_diff_lines[col].mean()
std = all_non_diff_lines[col].std()
if std > 0.05:
all_non_diff_lines[f"{col}_below"] = np.where(
all_non_diff_lines[col] < (mean - (4 * std)), 1, 0
)
all_non_diff_lines[f"{col}_above"] = np.where(
all_non_diff_lines[col] > (mean + (4 * std)), 1, 0
)
else:
all_non_diff_lines[f"{col}_below"] = 0
all_non_diff_lines[f"{col}_above"] = 0
std_sums = all_non_diff_lines[
[
col
for col in all_non_diff_lines.columns
if "_below" in str(col) or "_above" in str(col)
]
].sum(axis=1)
mod = IsolationForestModel(
config_dir=self.config_dir,
metric_name=metric_id,
arguments={
"contamination": 0.01,
"n_estimators": 50,
"multivariate": True,
"check_for_std": True,
},
)
outliers = mod.train_predict(all_non_diff_lines)
all_non_diff_lines["iso_outlier"] = outliers
data["severity"] = diffs_df.sum_of_changes.values
sample_size = data.value.apply(lambda v: sum(v.values()))
outlier_rows = data[
(outliers == -1) & (std_sums.values > 0) & (sample_size > 10)
]
if outlier_rows.shape[0] > 0:
all_rows.append(outlier_rows)
except ValueError:
pass
try:
data = pd.concat(all_rows).sort_values("date", ascending=False)
data["trend_function_name"] = np.NaN
data = data[anomaly_columns]
data.value = data.value.astype(str)
except:
data = pd.DataFrame([], columns=anomaly_columns)
return data
def create_error_check_table(self):
# obv only need to do this once
df = self.df
df = df[df["type"] == "boolean"]
if df.shape[0] > 0:
df = set_value_type(df)
df = df[~df.value]
df["severity"] = np.NaN
df["trend_function_name"] = np.NaN
df = df[anomaly_columns]
else:
df = pd.DataFrame([], columns=anomaly_columns)
return df
def create_trend_rule_table(self):
# obv only need to do this once
df = self.df
if len(self.specific) > 0:
all_rows = []
df = df[df.metric_id.isin(self.specific)]
for metric_id, group in df.groupby("metric_id"):
trend_functions = self.specific[metric_id]
group = set_value_type(group)
for fun, items in trend_functions.items():
outlier_data = trend_rules[fun]["function"](group.copy())
if outlier_data.shape[0] > 0:
outlier_data["severity"] = items.get("severity", np.NaN)
outlier_data["trend_function_name"] = fun
all_rows.append(outlier_data)
if len(all_rows) > 0:
data = pd.concat(all_rows).sort_values("date", ascending=False)
data = data[anomaly_columns]
data.value = data.value.astype(str)
else:
data = pd.DataFrame([], columns=anomaly_columns)
else:
data = | pd.DataFrame([], columns=anomaly_columns) | pandas.DataFrame |
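# For orientation only: a sketch of the config.json structure that the
# GenerateAnomalies constructor above reads, inferred from the attribute
# accesses in __init__ and create_trend_rule_table rather than from qualipy's
# documentation. The project name, metric id, rule name and severity value are
# made-up placeholders.
EXAMPLE_ANOMALY_CONFIG = {
    "my_project": {
        "ANOMALY_MODEL": "prophet",  # a key of MODS ("IsolationForest", "prophet", "std") or "ignore"
        "ANOMALY_ARGS": {
            "specific": {
                "some_metric_id": {"some_trend_rule": {"severity": 1.0}},
            },
        },
    },
}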
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function
import os
import pdb
import json
import math
import torch
import random
import logging
import argparse
import pickle
import numpy as np
import pandas as pd
import calculate_log as callog
from sklearn import svm
import sklearn
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
from simpletransformers.classification import ClassificationModel
from our_model import our_model
logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
transformers_logger.setLevel(logging.WARNING)
seed=42
def detect(all_test_deviations, all_ood_deviations, verbose=True, normalize=True):
average_results = {}
for i in range(1,11):
random.seed(i)
validation_indices = random.sample(range(len(all_test_deviations)),int(0.1*len(all_test_deviations)))
test_indices = sorted(list(set(range(len(all_test_deviations)))-set(validation_indices)))
validation = all_test_deviations[validation_indices]
test_deviations = all_test_deviations[test_indices]
t95 = validation.mean(axis=0)+10**-7
if not normalize:
t95 = np.ones_like(t95)
test_deviations = (test_deviations/t95[np.newaxis,:]).sum(axis=1)
ood_deviations = (all_ood_deviations/t95[np.newaxis,:]).sum(axis=1)
results = callog.compute_metric(-test_deviations,-ood_deviations)
for m in results:
average_results[m] = average_results.get(m,0)+results[m]
for m in average_results:
average_results[m] /= i
if verbose:
callog.print_results(average_results)
return average_results
def detection_performance(scores, Y, outf, tag='TMP'):
"""
Measure the detection performance
return: detection metrics
"""
os.makedirs(outf, exist_ok=True)
num_samples = scores.shape[0]
l1 = open('%s/confidence_%s_In.txt'%(outf, tag), 'w')
l2 = open('%s/confidence_%s_Out.txt'%(outf, tag), 'w')
y_pred = scores # regressor.predict_proba(X)[:, 1]
for i in range(num_samples):
if Y[i] == 0:
l1.write("{}\n".format(-y_pred[i]))
else:
l2.write("{}\n".format(-y_pred[i]))
l1.close()
l2.close()
results = callog.metric(outf, [tag])
return results
def load_sst_dataset():
train_df = load_extra_dataset("./dataset/sst/sst-train.txt", label=1)
test_df = load_extra_dataset("./dataset/sst/sst-test.txt", label=1)
ood_snli_df = load_extra_dataset("./dataset/sst/snli-dev.txt", drop_index=True, label=0)
ood_rte_df = load_extra_dataset("./dataset/sst/rte-dev.txt", drop_index=True, label=0)
ood_20ng_df = load_extra_dataset("./dataset/sst/20ng-test.txt", drop_index=True, label=0)
ood_multi30k_df = load_extra_dataset("./dataset/sst/multi30k-val.txt", drop_index=True, label=0)
ood_snli_df = ood_snli_df.sample(n=500, random_state=seed)
ood_rte_df = ood_rte_df.sample(n=500, random_state=seed)
ood_20ng_df = ood_20ng_df.sample(n=500, random_state=seed)
ood_multi30k_df = ood_multi30k_df.sample(n=500, random_state=seed)
ood_df = | pd.concat([ood_snli_df, ood_rte_df, ood_20ng_df, ood_multi30k_df]) | pandas.concat |
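# Minimal sketch (not part of the original script) of how detect() above can be
# exercised with random deviation matrices. The shapes and the +1.0 shift are
# illustrative only, and callog.compute_metric from the calculate_log module is
# still required for the reported metrics.
def example_detect():
    rng = np.random.RandomState(seed)
    test_devs = np.abs(rng.randn(1000, 8))
    ood_devs = np.abs(rng.randn(500, 8)) + 1.0  # shifted so it scores as OOD
    return detect(test_devs, ood_devs, verbose=False)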
from sklearn.linear_model import LinearRegression as Lr
from scipy.stats import f
import pandas as pd
def _calculate_rss(X_series: pd.DataFrame, y_series: pd.Series):
"""
This function returns the sum of squared residuals. The function firstly checks that the input
arguments are of the correct type, followed by fitting the linear regression model on the X_series
and y_series. The predicted values are then placed into the 'y_hat' column, after which the residuals
are calculated. Finally, the sum of squared residuals (rss) is calculated.
:param: X_series: the series or set of series denoting the X variable. (pd.DataFrame)
:param: y_series: the series denoting the y variable. (pd.Series)
:return: summary_result: a Pandas DataFrame summarising the result. (pd.DataFrame)
:return: rss: the sum of squared errors. (float)
"""
if not isinstance(X_series, pd.DataFrame):
raise TypeError("The 'X_series' argument should be a Pandas DataFrame.")
if not isinstance(y_series, pd.Series):
raise TypeError("The 'y_series' argument must be a Pandas Series.")
model = Lr().fit(X_series, y_series)
summary_result = pd.DataFrame()
summary_result['y_hat'] = list(model.predict(X_series))
summary_result['y_actual'] = y_series.values
summary_result['residuals'] = summary_result['y_actual'] - summary_result['y_hat']
summary_result['residuals_sq'] = (summary_result['y_actual'] - summary_result['y_hat']) ** 2
rss = float(summary_result['residuals_sq'].sum())
return summary_result, rss
def _data_preparation(X_series: (pd.Series, pd.DataFrame), y_series: pd.Series, last_index: int, first_index: int):
"""
This function prepares the data by splitting the X_series and y_series into two subsets. The function firstly checks
that the input arguments are of the expected types, followed by splitting the X_series and y_series into X_series_one,
    X_series_two, y_series_one and y_series_two respectively. The function then returns the four sub-series.
:param: y_series: the series denoting the y variable. (pd.Series)
:param: X_series: the series or set of series denoting the X variable. (pd.Series, pd.DataFrame)
:param: last_index: the final index value to be included before the data split. (int)
:param: first_index: the first index value to be included after the data split. (int)
:return: X_series_one: the Pandas DataFrame containing the pre-split X data. (pd.DataFrame)
:return: X_series_two: the Pandas DataFrame containing the post-split X data. (pd.DataFrame)
:return: y_series_one: the Pandas Series containing the pre-split y data. (pd.Series)
:return: y_series_two: the Pandas Series containing the post_split y data. (pd.Series)
"""
if not isinstance(y_series, pd.Series):
raise TypeError("The 'y_series' argument must be a Pandas Series.")
if not isinstance(X_series, (pd.Series, pd.DataFrame)):
raise TypeError("The 'X_series' argument must be a Pandas Series or a Pandas DataFrame.")
if not all(isinstance(v, int) for v in [last_index, first_index]):
raise TypeError("The 'last_index' and 'first_index' arguments must be integer types.")
X_series_one = X_series[: last_index]
X_series_two = X_series[first_index:]
y_series_one = y_series[: last_index]
y_series_two = y_series[first_index:]
return X_series_one, X_series_two, y_series_one, y_series_two
def _calculate_chow_statistic(pooled_rss_value: (int, float), rss_one: (int, float), rss_two: (int, float),
k_value: int, n_one_value: int, n_two_value: int):
"""
    This function calculates the Chow test statistic. The function first checks that the input arguments are of the
    correct type, then calculates the numerator of the Chow statistic, followed by the denominator. If the final
    division fails due to a zero-division error, the value 0 is returned instead.
    :param: pooled_rss_value: the sum of squared errors for the whole data series. (float)
    :param: rss_one: the sum of squared errors for the first series. (float)
    :param: rss_two: the sum of squared errors for the second series. (float)
:param: k_value: the number of degrees of freedom. (int)
:param: n_one_value: the length of the first series. (int)
:param: n_two_value: the length of the second series. (int)
:return: chow_test: the chow test statistic. (float)
"""
if not all(isinstance(v, (float, int)) for v in [pooled_rss_value, rss_one, rss_two]):
raise TypeError("The 'pooled_rss_value', 'rss_one' and 'rss_two' values must be either integers or floats.")
if not all(isinstance(v, int) for v in [k_value, n_one_value, n_two_value]):
raise TypeError("The 'k_value', 'n_one_value' and 'n_two_value' arguments must be integer types.")
numerator = (pooled_rss_value - (rss_one + rss_two)) / k_value
denominator = (rss_one + rss_two) / (n_one_value + n_two_value - (2 * k_value))
try:
return numerator / denominator
except ZeroDivisionError:
return 0
def _determine_p_value_significance(chow_statistic: (int, float), n_one_value: int, n_two_value: int, k_value: int,
significance_level: float, verbose: bool = True):
"""
This function determines the statistical significance of the chow_statistic passed as an input argument. The
function firstly checks that the input arguments are of the correct type, followed by defining the p-value with
respect to the f-distribution. The p-value is subsequently assessed against the significance_level argument,
printing the output if verbose is set to True. The chow_statistic and corresponding p-value are returned.
:param: chow_statistic: the chow statistic for which to assess the p-value. (float)
:param: n_one_value: the number of observations held within the first subset of data. (int)
:param: n_two_value: the number of observations held within the second subset of data. (int)
:param: k_value: the number of degrees of freedom. (int)
:param: significance_level: the significance level against which the p-value is assessed. (float)
:param: verbose: determines if progress is printed. (bool)
:return: chow_statistic: the chow statistic for which to assess the p-value. (float)
:return: p_value: the p-value associated with the chow statistic. (float)
"""
if not all(isinstance(v, int) for v in [n_one_value, n_two_value, k_value]):
raise TypeError("The 'n_one_value', 'n_two_value' and 'k_value' must be integer types.")
if not isinstance(chow_statistic, (int, float)):
raise TypeError("The 'chow_statistic' must be an integer or float type.")
p_value = float(1 - f.cdf(chow_statistic, dfn=k_value, dfd=((n_one_value + n_two_value) - 2 * k_value)))
if p_value <= significance_level and verbose:
print("Reject the null hypothesis of equality of regression coefficients in the two periods.")
elif p_value > significance_level and verbose:
print("Fail to reject the null hypothesis of equality of regression coefficients in the two periods.")
if verbose:
print("Chow Statistic: {}, P_value: {}".format(chow_statistic, p_value))
return chow_statistic, p_value
def chow_test(X_series: (pd.Series, pd.DataFrame), y_series: pd.Series, last_index: int, first_index: int,
significance: float):
"""
This function acts as the highest level of abstraction for the chow test. The function firstly checks that the
input arguments are of the correct type, followed by calculating the sum of squared residuals for the entire data
series, and the two sub-sets of data, as determined by the last_index and first_index arguments. The chow test is
then computed and assessed against the significance argument. Finally, the chow_test value and p_value are returned
from the function.
:param: X_series: the series or set of series denoting the X variable. (pd.DataFrame)
:param: y_series: the series denoting the y variable. (pd.Series)
:param: last_index: the final index value to be included before the data split. (int)
:param: first_index: the first index value to be included after the data split. (int)
:param: significance_level: the significance level against which the p-value is assessed. (float)
:return: chow_value: the chow test output value. (float)
:return: p_value: the associated p-value for the chow test. (float)
"""
if not isinstance(y_series, pd.Series):
raise TypeError("The 'y_series' argument must be a Pandas Series.")
if not isinstance(X_series, (pd.Series, pd.DataFrame)):
raise TypeError("The 'X_series' argument must be a Pandas Series or a Pandas DataFrame.")
if not all(isinstance(v, int)for v in [last_index, first_index]):
raise TypeError("The 'last_index' and 'first_index' arguments must be integer types.")
if not isinstance(significance, float):
raise TypeError("The 'significance' argument must be a float type.")
if significance not in [0.01, 0.05, 0.1]:
raise KeyError("The 'significance' argument must be 0.01, 0.05 or 0.1")
if isinstance(X_series, pd.Series):
X_series = | pd.DataFrame(X_series) | pandas.DataFrame |
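# Worked sketch (not part of the original module): a synthetic series with an
# obvious structural break at index 20, split via last_index=20 / first_index=20.
# The statistic computed above is
#   F = [(RSS_pooled - (RSS_1 + RSS_2)) / k] / [(RSS_1 + RSS_2) / (n_1 + n_2 - 2k)],
# and chow_test is documented to return the statistic together with its p-value.
def example_chow_test():
    x = pd.DataFrame({"x": [float(i) for i in range(40)]})
    y = pd.Series(
        [2.0 * i for i in range(20)] + [100.0 + 10.0 * i for i in range(20)]
    )
    return chow_test(x, y, last_index=20, first_index=20, significance=0.05)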
import copy
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series, isna, notna
import pandas._testing as tm
from pandas.core.window.common import _flex_binary_moment
from pandas.tests.window.common import (
Base,
check_pairwise_moment,
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
import pandas.tseries.offsets as offsets
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_centered_axis_validation(self):
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
with pytest.raises(ValueError):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
# bad axis
with pytest.raises(ValueError):
(DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
def test_rolling_sum(self, raw):
self._check_moment_func(
np.nansum, name="sum", zero_min_periods_equal=False, raw=raw
)
def test_rolling_count(self, raw):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(
counter, name="count", has_min_periods=False, fill_value=0, raw=raw
)
def test_rolling_mean(self, raw):
self._check_moment_func(np.mean, name="mean", raw=raw)
@td.skip_if_no_scipy
def test_cmov_mean(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner(self):
# GH 8238
# all nan
vals = pd.Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert np.isnan(result).all()
# empty
vals = pd.Series([], dtype=object)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert len(result) == 0
# shorter than window
vals = pd.Series(np.random.randn(5))
result = vals.rolling(10, win_type="boxcar").mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"f,xp",
[
(
"mean",
[
[np.nan, np.nan],
[np.nan, np.nan],
[9.252, 9.392],
[8.644, 9.906],
[8.87, 10.208],
[6.81, 8.588],
[7.792, 8.644],
[9.05, 7.824],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"std",
[
[np.nan, np.nan],
[np.nan, np.nan],
[3.789706, 4.068313],
[3.429232, 3.237411],
[3.589269, 3.220810],
[3.405195, 2.380655],
[3.281839, 2.369869],
[3.676846, 1.801799],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"var",
[
[np.nan, np.nan],
[np.nan, np.nan],
[14.36187, 16.55117],
[11.75963, 10.48083],
[12.88285, 10.37362],
[11.59535, 5.66752],
[10.77047, 5.61628],
[13.51920, 3.24648],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"sum",
[
[np.nan, np.nan],
[np.nan, np.nan],
[46.26, 46.96],
[43.22, 49.53],
[44.35, 51.04],
[34.05, 42.94],
[38.96, 43.22],
[45.25, 39.12],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
],
)
def test_cmov_window_frame(self, f, xp):
# Gh 8238
df = DataFrame(
np.array(
[
[12.18, 3.64],
[10.18, 9.16],
[13.24, 14.61],
[4.51, 8.11],
[6.15, 11.44],
[9.14, 6.21],
[11.31, 10.67],
[2.94, 6.51],
[9.42, 8.39],
[12.44, 7.34],
]
)
)
xp = DataFrame(np.array(xp))
roll = df.rolling(5, win_type="boxcar", center=True)
rs = getattr(roll, f)()
tm.assert_frame_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods(self):
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"hamming": [
np.nan,
np.nan,
8.71384,
9.56348,
12.38009,
14.03687,
13.8567,
11.81473,
np.nan,
np.nan,
],
"triang": [
np.nan,
np.nan,
9.28667,
10.34667,
12.00556,
13.33889,
13.38,
12.33667,
np.nan,
np.nan,
],
"barthann": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
"bohman": [
np.nan,
np.nan,
7.61599,
9.1764,
12.83559,
14.17267,
14.65923,
11.10401,
np.nan,
np.nan,
],
"blackmanharris": [
np.nan,
np.nan,
6.97691,
9.16438,
13.05052,
14.02156,
15.10512,
10.74574,
np.nan,
np.nan,
],
"nuttall": [
np.nan,
np.nan,
7.04618,
9.16786,
13.02671,
14.03559,
15.05657,
10.78514,
np.nan,
np.nan,
],
"blackman": [
np.nan,
np.nan,
7.73345,
9.17869,
12.79607,
14.20036,
14.57726,
11.16988,
np.nan,
np.nan,
],
"bartlett": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(self, win_types):
# GH 8238
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
)
xps = {
"bartlett": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"blackman": [
np.nan,
np.nan,
9.04582,
11.41536,
7.73345,
9.17869,
12.79607,
14.20036,
15.8706,
13.655,
],
"barthann": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"bohman": [
np.nan,
np.nan,
8.9444,
11.56327,
7.61599,
9.1764,
12.83559,
14.17267,
15.90976,
13.655,
],
"hamming": [
np.nan,
np.nan,
9.59321,
10.29694,
8.71384,
9.56348,
12.38009,
14.20565,
15.24694,
13.69758,
],
"nuttall": [
np.nan,
np.nan,
8.47693,
12.2821,
7.04618,
9.16786,
13.02671,
14.03673,
16.08759,
13.65553,
],
"triang": [
np.nan,
np.nan,
9.33167,
9.76125,
9.28667,
10.34667,
12.00556,
13.82125,
14.49429,
13.765,
],
"blackmanharris": [
np.nan,
np.nan,
8.42526,
12.36824,
6.97691,
9.16438,
13.05052,
14.02175,
16.1098,
13.65509,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"exponential": {"tau": 10},
}
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"gaussian": [
np.nan,
np.nan,
8.97297,
9.76077,
12.24763,
13.89053,
13.65671,
12.01002,
np.nan,
np.nan,
],
"general_gaussian": [
np.nan,
np.nan,
9.85011,
10.71589,
11.73161,
13.08516,
12.95111,
12.74577,
np.nan,
np.nan,
],
"kaiser": [
np.nan,
np.nan,
9.86851,
11.02969,
11.65161,
12.75129,
12.90702,
12.83757,
np.nan,
np.nan,
],
"exponential": [
np.nan,
np.nan,
9.83364,
11.10472,
11.64551,
12.66138,
12.92379,
12.83770,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types_special])
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"slepian": {"width": 0.5},
"exponential": {"tau": 10},
}
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
def test_rolling_median(self, raw):
self._check_moment_func(np.median, name="median", raw=raw)
def test_rolling_min(self, raw):
self._check_moment_func(np.min, name="min", raw=raw)
a = pd.Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
expected = pd.Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max(self, raw):
self._check_moment_func(np.max, name="max", raw=raw)
a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
with pytest.raises(ValueError):
| pd.Series([1, 2, 3]) | pandas.Series |
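# Standalone illustration (not one of the original pandas tests): with scipy
# installed, a centred "boxcar" window is just an unweighted moving average,
# which is why test_cmov_mean and test_cmov_window above share the same
# expected values.
def example_boxcar_equals_plain_mean():
    vals = Series([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
    plain = vals.rolling(5, center=True).mean()
    boxcar = vals.rolling(5, win_type="boxcar", center=True).mean()
    tm.assert_series_equal(plain, boxcar)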
from __future__ import print_function
import copy
import matplotlib.pyplot as plt
import numba as nb
import numpy as np
import pandas as pd
from astromodels import Model, PointSource
from threeML.classicMLE.goodness_of_fit import GoodnessOfFit
from threeML.classicMLE.joint_likelihood import JointLikelihood
from threeML.data_list import DataList
from threeML.io.logging import setup_logger
from threeML.io.package_data import get_path_of_data_file
from threeML.plugin_prototype import PluginPrototype
from threeML.utils.statistics.likelihood_functions import (
half_chi2, poisson_log_likelihood_ideal_bkg)
plt.style.use(str(get_path_of_data_file("threeml.mplstyle")))
log = setup_logger(__name__)
__instrument_name = "n.a."
class XYLike(PluginPrototype):
def __init__(
self,
name,
x,
y,
yerr=None,
poisson_data=False,
exposure=None,
quiet=False,
source_name=None,
):
nuisance_parameters = {}
super(XYLike, self).__init__(name, nuisance_parameters)
# Make x and y always arrays so we can handle them always in the same way
# even if they have only one element
self._x = np.array(x, ndmin=1)
self._y = np.array(y, ndmin=1)
# If there are specified errors, use those (assume Gaussian statistic)
# otherwise make sure that the user specified poisson_error = True and use
# Poisson statistic
if yerr is not None:
self._yerr = np.array(yerr, ndmin=1)
assert np.all(self._yerr > 0), "Errors cannot be negative or zero."
log.info(
"Using Gaussian statistic (equivalent to chi^2) with the provided errors."
)
self._is_poisson = False
self._has_errors = True
elif not poisson_data:
self._yerr = np.ones_like(self._y)
self._is_poisson = False
self._has_errors = False
log.info("Using unweighted Gaussian (equivalent to chi^2) statistic.")
else:
log.info("Using Poisson log-likelihood")
self._is_poisson = True
self._yerr = None
self._has_errors = True
self._y = self._y.astype(np.int64)
self._zeros = np.zeros_like(self._y)
# sets the exposure assuming eval at center
# of bin. this should probably be improved
# with a histogram plugin
if exposure is None:
self._has_exposure: bool = False
self._exposure = np.ones(len(self._x))
else:
self._has_exposure: bool = True
self._exposure = exposure
# This will keep track of the simulated datasets we generate
self._n_simulated_datasets = 0
# This will contain the JointLikelihood object after a call to .fit()
self._joint_like_obj = None
self._likelihood_model = None
# currently not used by XYLike, but needed for subclasses
self._mask = np.ones(self._x.shape, dtype=bool)
# This is the name of the source this SED refers to (if it is a SED)
self._source_name = source_name
@classmethod
def from_function(cls, name, function, x, yerr=None, exposure=None, **kwargs):
"""
Generate an XYLike plugin from an astromodels function instance
:param name: name of plugin
:param function: astromodels function instance
:param x: where to simulate
:param yerr: y errors or None for Poisson data
:param kwargs: kwargs from xylike constructor
:return: XYLike plugin
"""
y = function(x)
xyl_gen = XYLike("generator", x, y, yerr=yerr,
exposure=exposure, **kwargs)
pts = PointSource("fake", 0.0, 0.0, function)
model = Model(pts)
xyl_gen.set_model(model)
return xyl_gen.get_simulated_dataset(name)
@classmethod
def from_dataframe(
cls,
name,
dataframe,
x_column="x",
y_column="y",
err_column="yerr",
poisson=False,
):
"""
Generate a XYLike instance from a Pandas.DataFrame instance
:param name: the name for the XYLike instance
:param dataframe: the input data frame
:param x_column: name of the column to be used as x (default: 'x')
:param y_column: name of the column to be used as y (default: 'y')
:param err_column: name of the column to be used as error on y (default: 'yerr')
:param poisson: if True, then the err_column is ignored and data are treated as Poisson distributed
:return: a XYLike instance
"""
x = dataframe[x_column]
y = dataframe[y_column]
if poisson is False:
yerr = dataframe[err_column]
if np.all(yerr == -99):
# This is a dataframe generate with the to_dataframe method, which uses -99 to indicate that the
# data are Poisson
return cls(name, x=x, y=y, poisson_data=True)
else:
# A dataset with errors
return cls(name, x=x, y=y, yerr=yerr)
else:
return cls(name, x=x, y=y, poisson_data=True)
@classmethod
def from_text_file(cls, name, filename):
"""
Instance the plugin starting from a text file generated with the .to_txt() method. Note that a more general
way of creating a XYLike instance from a text file is to read the file using pandas.DataFrame.from_csv, and
then use the .from_dataframe method of the XYLike plugin:
> df = pd.DataFrame.from_csv(filename, ...)
> xyl = XYLike.from_dataframe("my instance", df)
:param name: the name for the new instance
:param filename: path to the file
:return:
"""
df = pd.read_csv(filename, sep=" ")
return cls.from_dataframe(name, df)
def to_dataframe(self):
"""
Returns a pandas.DataFrame instance with the data in the 'x', 'y', and 'yerr' column. If the data are Poisson,
the yerr column will be -99 for every entry
:return: a pandas.DataFrame instance
"""
x_series = pd.Series(self.x, name="x")
y_series = pd.Series(self.y, name="y")
if self._is_poisson:
# Since DataFrame does not support metadata, there is no way to save the information that the data
# are Poisson distributed. We use instead a value of -99 for the error, to indicate that the data
# are Poisson
yerr_series = pd.Series(np.ones_like(self.x) * (-99), name="yerr")
else:
yerr_series = | pd.Series(self.yerr, name="yerr") | pandas.Series |
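# Quick usage sketch for the plugin above (not part of the original module; the
# data values are invented and the round trip relies on to_dataframe() /
# from_dataframe() behaving as documented in this class).
def example_xylike_roundtrip():
    xy = XYLike(
        "demo", x=[1.0, 2.0, 3.0, 4.0], y=[1.1, 1.9, 3.2, 4.1], yerr=[0.1, 0.1, 0.1, 0.1]
    )
    df = xy.to_dataframe()  # columns: x, y, yerr
    return XYLike.from_dataframe("demo_copy", df)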
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Routines for managing the output of research projects running multiple simulations.
"""
import csv
import os.path
import pandas as pd
from pathlib import Path
from pandas import DataFrame
from lindbladmpo.plot_routines import *
def generate_paths(s_output_path: str, b_make_paths=True):
"""Concatenate a data directory and figures directory path, and optionally create the directories.
Args:
s_output_path: The output path, a base directory for the data and figures directories
b_make_paths: If True the directories are created if missing.
Returns:
A tuple with the data and plot directory strings.
"""
s_data_path = ""
s_plot_path = ""
if not os.path.exists(s_output_path):
if b_make_paths:
os.mkdir(s_output_path)
else:
return s_data_path, s_plot_path
s_data_path = s_output_path + "data/"
if not os.path.exists(s_data_path):
if b_make_paths:
os.mkdir(s_data_path)
s_plot_path = s_output_path + "figures/"
if not os.path.exists(s_plot_path):
if b_make_paths:
os.mkdir(s_plot_path)
return s_data_path, s_plot_path
def save_to_db(s_db_path: str, sim_metadata: dict):
"""Save a data line into the .csv dataframe file using pandas.
Args:
s_db_path: The full filename for the .csv dataframe file.
sim_metadata: The dictionary giving one line of db data.
"""
if not os.path.isfile(s_db_path):
# New database, write header line based on metadata keys
with open(s_db_path, "w") as f:
header = sim_metadata.keys()
writer = csv.writer(f)
writer.writerow(header)
f.close()
df = pd.read_csv(s_db_path)
db_line = {}
for key in sim_metadata.keys():
db_line[key] = [sim_metadata[key]]
db_data = | pd.DataFrame(db_line) | pandas.DataFrame |
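# Usage sketch for the two helpers above (not part of the original module; the
# output directory and the metadata keys are invented placeholders).
def example_record_simulation():
    s_data_path, s_plot_path = generate_paths("./output/")
    s_db_path = s_data_path + "simulations.csv"
    save_to_db(s_db_path, {"unique_id": "run-001", "n_qubits": 4, "t_final": 10.0})
    return pd.read_csv(s_db_path)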
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.index import Index, Int64Index
from pandas.tseries.frequencies import infer_freq, to_offset
from pandas.tseries.offsets import DateOffset, generate_range, Tick
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if isinstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if isinstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if isinstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, datetime):
func = getattr(self, opname)
result = func(_to_m8(other))
elif isinstance(other, np.ndarray):
func = getattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = getattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if isinstance(other, timedelta):
func = getattr(self, opname)
return func(np.timedelta64(other))
else:
func = getattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeSeriesError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_algos.left_join_indexer_unique_int64, with_indexers=False)
_groupby = lib.groupby_arrays # _wrap_i8_function(lib.groupby_int64)
_arrmap = _wrap_dt_function(_algos.arrmap_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not isinstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if isinstance(freq, basestring):
freq = to_offset(freq)
else:
if isinstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
if isinstance(data, datetime):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.to_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if isinstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', copy=copy)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', copy=copy)
else:
subarr = tools.to_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_get_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and len(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not isinstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not isinstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None:
# Convert local to UTC
ints = index.view('i8')
lib.tz_localize_check(ints, tz)
index = lib.tz_convert(ints, tz, _utc())
index = index.view('M8[ns]')
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_get_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if offset is None:
raise Exception('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = np.array(_to_m8_array(list(xdr)),
dtype='M8[ns]', copy=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(isinstance(end, Timestamp))
end = offset.rollback(end)
endLoc = cachedRange.get_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
assert(isinstance(start, Timestamp))
start = offset.rollforward(start)
startLoc = cachedRange.get_loc(start)
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.get_loc(start)
endLoc = cachedRange.get_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return lib.ints_to_pydatetime(self.asi8)
def __repr__(self):
from pandas.core.format import _format_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
summary = str(self.__class__)
if len(self) > 0:
first = _format_datetime64(values[0], tz=self.tz)
last = _format_datetime64(values[-1], tz=self.tz)
summary += '\n[%s, ..., %s]' % (first, last)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
summary += tagline % (len(self), freq, self.tz)
return summary
__str__ = __repr__
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if len(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
elif len(state) == 3:
# legacy format: daterange
offset = state[1]
if len(state) > 2:
tzinfo = state[2]
else: # pragma: no cover
tzinfo = None
self.offset = offset
self.tzinfo = tzinfo
# extract the raw datetime data, turn into datetime64
index_state = state[0]
raw_data = index_state[0][4]
raw_data = np.array(raw_data, dtype='M8[ns]')
new_state = raw_data.__reduce__()
np.ndarray.__setstate__(self, new_state[2])
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if isinstance(other, Index):
return self.union(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif com.is_integer(other):
return self.shift(other)
else:
return Index(self.view(np.ndarray) + other)
def __sub__(self, other):
if isinstance(other, Index):
return self.diff(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shift(-other)
else:
return Index(self.view(np.ndarray) - other)
def _add_delta(self, delta):
if isinstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view('M8[ns]')
else:
new_values = self.astype('O') + delta
return DatetimeIndex(new_values, tz=self.tz, freq='infer')
def summary(self, name=None):
if len(self) > 0:
index_summary = ', %s to %s' % (str(self[0]), str(self[-1]))
else:
index_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (name, len(self), index_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
return Index.astype(self, dtype)
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def asstruct(self):
if self._sarr_cache is None:
self._sarr_cache = lib.build_field_sarray(self.asi8)
return self._sarr_cache
@property
def asobject(self):
"""
Convert to Index of datetime objects
"""
boxed_values = _dt_box_array(self.asi8, self.offset, self.tz)
return Index(boxed_values, dtype=object)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from pandas.tseries.period import PeriodIndex
if self.freq is None and freq is None:
msg = "You must pass a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = self.freqstr
return PeriodIndex(self.values, freq=freq)
def order(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
        Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on any optimizing
freq = to_offset(freq)
snapped = np.empty(len(self), dtype='M8[ns]')
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
def shift(self, n, freq=None):
"""
Specialized shift which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shift by
freq : DateOffset or timedelta-like, optional
Returns
-------
shifted : DatetimeIndex
"""
if freq is not None and freq != self.offset:
if isinstance(freq, basestring):
freq = to_offset(freq)
return Index.shift(self, n, freq)
if n == 0:
# immutable so OK
return self
if self.offset is None:
raise ValueError("Cannot shift with no offset")
start = self[0] + n * self.offset
end = self[-1] + n * self.offset
return DatetimeIndex(start=start, end=end, freq=self.offset,
name=self.name)
def repeat(self, repeats, axis=None):
"""
Analogous to ndarray.repeat
"""
return DatetimeIndex(self.values.repeat(repeats),
name=self.name)
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return DatetimeIndex(taken, tz=self.tz, name=self.name)
def union(self, other):
"""
        Specialized union for DatetimeIndex objects. If combining
        overlapping ranges with the same DateOffset, this will be much
        faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, DatetimeIndex):
result.tz = self.tz
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if not isinstance(other, DatetimeIndex) and len(other) > 0:
try:
other = DatetimeIndex(other)
except ValueError:
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers)
def _maybe_utc_convert(self, other):
this = self
if isinstance(other, DatetimeIndex):
if self.tz != other.tz:
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, DatetimeIndex)
and self.offset == other.offset
and self._can_fast_union(other)):
joined = self._view_like(joined)
joined.name = name
return joined
else:
return DatetimeIndex(joined, name=name)
def _can_fast_union(self, other):
if not isinstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_start = right[0]
# Only need to "adjoin", not overlap
return (left_end + offset) >= right_start
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = np.concatenate((left.values, right_chunk))
return self._view_like(dates)
else:
return left
else:
return type(self)(start=left_start,
end=max(left_end, right_end),
freq=left.offset)
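A minimal usage sketch of the fast-union path implemented above (assuming a current pandas install): two adjoining daily ranges with the same frequency collapse into one contiguous, still-regular index.

import pandas as pd

left = pd.date_range("2000-01-01", periods=5, freq="D")
right = pd.date_range("2000-01-04", periods=5, freq="D")
# the ranges adjoin and share a freq, so the union stays a regular daily index
print(left.union(right))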
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = getattr(obj, 'offset', None)
self.tz = getattr(obj, 'tz', None)
def intersection(self, other):
"""
Specialized intersection for DatetimeIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
elif other.offset != self.offset or (not self.is_monotonic or
not other.is_monotonic):
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._view_like(left_chunk)
def _partial_date_slice(self, reso, parsed):
if not self.is_monotonic:
raise TimeSeriesError('Partial indexing only valid for ordered time'
' series')
if reso == 'year':
t1 = Timestamp(datetime(parsed.year, 1, 1))
t2 = Timestamp(datetime(parsed.year, 12, 31))
elif reso == 'month':
d = lib.monthrange(parsed.year, parsed.month)[1]
t1 = Timestamp(datetime(parsed.year, parsed.month, 1))
t2 = Timestamp(datetime(parsed.year, parsed.month, d))
elif reso == 'quarter':
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = lib.monthrange(parsed.year, qe)[1] # at end of month
t1 = Timestamp(datetime(parsed.year, parsed.month, 1))
t2 = Timestamp(datetime(parsed.year, qe, d))
else:
raise KeyError
stamps = self.asi8
left = stamps.searchsorted(t1.value, side='left')
right = stamps.searchsorted(t2.value, side='right')
return slice(left, right)
def _possibly_promote(self, other):
if other.inferred_type == 'date':
other = DatetimeIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
return Index.get_value(self, series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
if isinstance(key, time):
locs = self._indices_at_time(key)
return series.take(locs)
stamp = Timestamp(key)
try:
return self._engine.get_value(series, stamp)
except KeyError:
raise KeyError(stamp)
def get_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
try:
return self._get_string_slice(key)
except (TypeError, KeyError):
pass
if isinstance(key, time):
return self._indices_at_time(key)
stamp = Timestamp(key)
try:
return self._engine.get_loc(stamp)
except KeyError:
raise KeyError(stamp)
def _indices_at_time(self, key):
from dateutil.parser import parse
# TODO: time object with tzinfo?
nanos = _time_to_nanosecond(key)
indexer = lib.values_at_time(self.asi8, nanos)
return com._ensure_platform_int(indexer)
def _get_string_slice(self, key):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
asdt, parsed, reso = parse_time_string(key, freq)
key = asdt
loc = self._partial_date_slice(reso, parsed)
return loc
def slice_locs(self, start=None, end=None):
"""
Index.slice_locs, customized to handle partial ISO-8601 string slicing
"""
if isinstance(start, basestring) or isinstance(end, basestring):
try:
if start:
start_loc = self._get_string_slice(start).start
else:
start_loc = 0
if end:
end_loc = self._get_string_slice(end).stop
else:
end_loc = len(self)
return start_loc, end_loc
except KeyError:
pass
return | Index.slice_locs(self, start, end) | pandas.core.index.Index.slice_locs |
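The slice_locs override in the row above is what backs partial ISO-8601 string slicing; a small, hedged sketch against a current pandas install:

import pandas as pd

idx = pd.date_range("2000-01-01", periods=90, freq="D")
ts = pd.Series(range(90), index=idx)
print(ts.loc["2000-02"])                           # the partial string selects all of February
print(idx.slice_locs("2000-02-01", "2000-02-10"))  # integer bounds used for the slice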
''' functions to get movie aspects, compute similarity, get ratings to test '''
import time
import scipy
import numpy as np
import pandas as pd
from collections import Counter
import sklearn.preprocessing as pp
from multiprocessing import cpu_count
from concurrent.futures import ProcessPoolExecutor
THREADS = cpu_count() - 1
def map_aspect_values_to_movies(x):
(film, meta), aspect = x
aspects = dict()
if aspect == "director" and type(meta[aspect]) is str:
aspects[meta[aspect]] = 1
else:
for g in meta[aspect]:
aspects[g] = 1
return film, meta, aspects
def dict_movie_aspect(paper_films, aspect):
paper_films_aspect_prepended = map(lambda e: (e, aspect), list(paper_films.items()))
aspect_dict = dict()
with ProcessPoolExecutor(max_workers=THREADS) as executor:
results = executor.map(map_aspect_values_to_movies, paper_films_aspect_prepended)
for film, meta, aspects in results:
aspect_dict[film + "_" + meta["title"]] = aspects
return aspect_dict
def viewed_matrix(ratings_cold_start, all_films, data_origin):
user_ids = ratings_cold_start["userID"]
item_ids = ratings_cold_start["itemID"]
train_ratings = ratings_cold_start["rating"]
assert len(user_ids) == len(item_ids) == len(train_ratings)
movies_watched = dict()
for uid in all_films.keys():
movies_watched[uid + "_" + all_films[uid]["title"]] = dict()
for i in range(len(item_ids)):
current_user_id = user_ids[i]
current_item_id = item_ids[i]
if data_origin == 'netflix':
current_rating = int(train_ratings[i])
elif data_origin == 'small':
current_rating = float(train_ratings[i])
elif data_origin == '100k':
current_rating = int(train_ratings[i])
try:
movies_watched[current_item_id + "_" + all_films[current_item_id]["title"]][current_user_id] = current_rating
except Exception:
print('item id missing %s' % current_item_id)  # possibly a movie lacking info (e.g. actors) that was discarded earlier
return movies_watched
def get_movies_aspect_matrix(films, aspect_type):
aspects_associated_to_movies = dict_movie_aspect(films, aspect_type)
movies_all_aspects_matrix = | pd.DataFrame.from_dict(aspects_associated_to_movies, dtype='int64', orient='index') | pandas.DataFrame.from_dict |
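A minimal sketch of the movies-by-aspects incidence matrix built above, using a tiny hypothetical aspect dict in place of the output of dict_movie_aspect:

import pandas as pd

aspects = {"m1_Heat": {"Crime": 1, "Thriller": 1}, "m2_Up": {"Animation": 1}}
# rows are movies, columns are aspect values; aspects a movie lacks become 0
matrix = pd.DataFrame.from_dict(aspects, orient="index").fillna(0).astype("int64")
print(matrix)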
import datetime
from collections import OrderedDict
import warnings
import numpy as np
from numpy import array, nan
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from conftest import assert_frame_equal, assert_series_equal
from pvlib import irradiance
from conftest import requires_ephem, requires_numba
# fixtures create realistic test input data
# test input data generated at Location(32.2, -111, 'US/Arizona', 700)
# test input data is hard coded to avoid dependencies on other parts of pvlib
@pytest.fixture
def times():
# must include night values
return pd.date_range(start='20140624', freq='6H', periods=4,
tz='US/Arizona')
@pytest.fixture
def irrad_data(times):
return pd.DataFrame(np.array(
[[ 0. , 0. , 0. ],
[ 79.73860422, 316.1949056 , 40.46149818],
[1042.48031487, 939.95469881, 118.45831879],
[ 257.20751138, 646.22886049, 62.03376265]]),
columns=['ghi', 'dni', 'dhi'], index=times)
@pytest.fixture
def ephem_data(times):
return pd.DataFrame(np.array(
[[124.0390863 , 124.0390863 , -34.0390863 , -34.0390863 ,
352.69550699, -2.36677158],
[ 82.85457044, 82.97705621, 7.14542956, 7.02294379,
66.71410338, -2.42072165],
[ 10.56413562, 10.56725766, 79.43586438, 79.43274234,
144.76567754, -2.47457321],
[ 72.41687122, 72.46903556, 17.58312878, 17.53096444,
287.04104128, -2.52831909]]),
columns=['apparent_zenith', 'zenith', 'apparent_elevation',
'elevation', 'azimuth', 'equation_of_time'],
index=times)
@pytest.fixture
def dni_et(times):
return np.array(
[1321.1655834833093, 1321.1655834833093, 1321.1655834833093,
1321.1655834833093])
@pytest.fixture
def relative_airmass(times):
return pd.Series([np.nan, 7.58831596, 1.01688136, 3.27930443], times)
# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203
@pytest.mark.parametrize('testval, expected', [
(doy, value),
(np.float64(doy), value),
(dt_date, value),
(dt_datetime, value),
(dt_np64, value),
(np.array([doy]), np.array([value])),
(pd.Series([doy]), np.array([value])),
(dt_index, | pd.Series([value], index=dt_index) | pandas.Series |
'''
###################################
Modified from Mike's predict_acc.py
###################################
'''
import os
import sys
import random
import pickle
import numpy as np
import pandas as pd
from keras.utils import to_categorical
from keras.models import load_model
base_path = '/home/tyt/how2ml/mfcc4'
base_data_path = os.path.join(base_path, 'data')
def getUnData():
fileX = os.path.join(base_data_path, 'X_unverified.npy')
fileY = os.path.join(base_data_path, 'y_unverified.npy')
filefname = os.path.join(base_data_path, 'fname_unverified.npy')
X_un = np.load(fileX)
y_un = np.load(fileY)
fname_un = np.load(filefname)
return X_un, y_un, fname_un
def getTestData():
fileX = os.path.join(base_data_path, 'X_test.npy')
filefname = os.path.join('./', 'fname_test.npy')
X_test = np.load(fileX)
fname_test = np.load(filefname)
return X_test, fname_test
if __name__ == '__main__':
X_test, fname_test = getTestData()
# X_un, y_un, fname_un = getUnData()
# X_all = np.concatenate((X_un, X_test))
# fname_all = np.concatenate((fname_un, fname_test))
X_all = X_test
fname_all = fname_test
for i in range(10):
base_model_path = os.path.join(base_path, 'cnn_model_152')
model_name = 'model{}'.format(i)
filename = os.path.join(base_model_path, model_name)
npy_predict = os.path.join(base_path, 'final_npy_predict_phase3_152')
if not os.path.exists(npy_predict):
os.makedirs(npy_predict)
csv_predict = os.path.join(base_path, 'final_csv_predict_phase3_152')
if not os.path.exists(csv_predict):
os.makedirs(csv_predict)
model = load_model(filename)
print('Predicting X_all...')
result = model.predict(X_all)
np.save(os.path.join(npy_predict, 'mow_mfcc4_resnet152_phase3_test_{}.npy'.format(i+1)), result)
df = | pd.DataFrame(result) | pandas.DataFrame |
import os
from common.score import scorePredict
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from simpletransformers.classification.classification_model import ClassificationModel
def train_predict_model(df_train, df_test, is_predict, use_cuda):
labels_test = | pd.Series(df_test['labels']) | pandas.Series |
import pandas as pd
import numpy as np
from MachineLearning.project.generate_dataset import project_dummify
def pre_processor(df, drop_duration=False):
def func(x):
if x < 30:
return 'below30'
elif 30 <= x <= 65:
return '30to65'
elif x > 65:
return 'over65'
df_age_raw = df[['age']]
df_age = df_age_raw.applymap(lambda x: func(x))
df_obj = df[['job', 'marital', 'education', 'default',
'housing', 'loan', 'contact', 'month', 'day_of_week', 'poutcome']]
concat = pd.concat([df_age, df_obj], axis=1)
df_dummies = pd.get_dummies(concat)
if drop_duration is True:
df_num = df[['duration', 'campaign', 'pdays', 'previous',
'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed']]
else:
df_num = df[['campaign', 'pdays', 'previous',
'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed']]
df_norm = df_num.apply(lambda x: (x - np.mean(x)) / (np.max(x) - np.min(x)))
df_label = df['y'].apply(lambda x: 1 if x == 'yes' else 0)
# df_out = pd.concat([df_dummies, df_norm, df_label], axis=1)
df_out = pd.concat([df_dummies, df_label], axis=1)
return df_out
reader_train = | pd.read_csv('C:/bank/data_set/origin_data_sets/bank-additional-full.csv', sep=";") | pandas.read_csv |
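A hedged sketch of the dummify/normalize/label steps in pre_processor above, on a tiny hypothetical frame:

import numpy as np
import pandas as pd

toy = pd.DataFrame({"job": ["admin.", "technician"], "campaign": [1, 3], "y": ["no", "yes"]})
dummies = pd.get_dummies(toy[["job"]])                                                 # one-hot categoricals
norm = toy[["campaign"]].apply(lambda x: (x - np.mean(x)) / (np.max(x) - np.min(x)))   # centered min-max scaling
label = toy["y"].apply(lambda x: 1 if x == "yes" else 0)                               # binary target
out = pd.concat([dummies, norm, label], axis=1)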
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import requests
import numpy as np
from EAR_calculator import *
from imutils import face_utils
from imutils.video import VideoStream
import imutils
import dlib
import time
import argparse
import cv2
import pandas as pd
import csv
from playsound import playsound
from scipy.spatial import distance as dist
import os
from datetime import datetime
# Creating the dataset
def assure_path_exists(path):
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
#all eye and mouth aspect ratio with time
ear_list=[]
total_ear=[]
mar_list=[]
total_mar=[]
ts=[]
total_ts=[]
url = "http://192.168.0.100:8080/shot.jpg"
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape_predictor", required = True, help = "path to dlib's facial landmark predictor")
ap.add_argument("-r", "--picamera", type = int, default = -1, help = "whether raspberry pi camera shall be used or not")
args = vars(ap.parse_args())
# Declare a constant which will work as the threshold for the EAR value, below which it will be regarded as a blink
EAR_THRESHOLD = 0.3
# Declare another constant to hold the consecutive number of frames to consider for a blink
CONSECUTIVE_FRAMES = 15
# Another constant which will work as a threshold for MAR value
MAR_THRESHOLD = 14
# Initialize two counters
BLINK_COUNT = 0
FRAME_COUNT = 0
# Now, initialize dlib's face detector model as 'detector' and the landmark predictor model as 'predictor'
print("[INFO]Loading the predictor.....")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# Grab the indexes of the facial landmarks for the left and right eye respectively
(lstart, lend) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rstart, rend) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(mstart, mend) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
# Now start the video stream and allow the camera to warm-up
print("[INFO]Loading Camera.....")
time.sleep(2)
assure_path_exists("dataset_phonecam/")
count_sleep = 0
count_yawn = 0
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
xs = []
ys = []
while True:
img_resp = requests.get(url)
img_arr = np.array(bytearray(img_resp.content), dtype = np.uint8)
frame = cv2.imdecode(img_arr, -1)
frame = imutils.resize(frame, width = 875)
frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
cv2.putText(frame, "PRESS 'q' TO EXIT", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 3)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect faces
rects = detector(frame, 1)
# Now loop over all the face detections and apply the predictor
for (i, rect) in enumerate(rects):
shape = predictor(gray, rect)
# Convert it to a (68, 2) size numpy array
shape = face_utils.shape_to_np(shape)
# Draw a rectangle over the detected face
(x, y, w, h) = face_utils.rect_to_bb(rect)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Put a number
cv2.putText(frame, "Driver", (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
leftEye = shape[lstart:lend]
rightEye = shape[rstart:rend]
mouth = shape[mstart:mend]
# Compute the EAR for both the eyes
leftEAR = eye_aspect_ratio(leftEye)
rightEAR = eye_aspect_ratio(rightEye)
# Take the average of both the EAR
EAR = (leftEAR + rightEAR) / 2.0
# live data write to csv
ear_list.append(EAR)
ts.append(dt.datetime.now().strftime('%H:%M:%S.%f'))
# Compute the convex hull for both the eyes and then visualize it
leftEyeHull = cv2.convexHull(leftEye)
rightEyeHull = cv2.convexHull(rightEye)
# Draw the contours
cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [mouth], -1, (0, 255, 0), 1)
MAR = mouth_aspect_ratio(mouth)
mar_list.append(MAR/10)
# Check if EAR < EAR_THRESHOLD, if so then it indicates that a blink is taking place
# Thus, count the number of frames for which the eye remains closed
if EAR < EAR_THRESHOLD:
FRAME_COUNT += 1
cv2.drawContours(frame, [leftEyeHull], -1, (0, 0, 255), 1)
cv2.drawContours(frame, [rightEyeHull], -1, (0, 0, 255), 1)
if FRAME_COUNT >= CONSECUTIVE_FRAMES:
count_sleep += 1
# Add the frame to the dataset as proof of drowsy driving
cv2.imwrite("dataset_phonecam/frame_sleep%d.jpg" % count_sleep, frame)
playsound('sound files/alarm.mp3')
cv2.putText(frame, "DROWSINESS ALERT!", (270, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
else:
if FRAME_COUNT >= CONSECUTIVE_FRAMES:
playsound('sound files/warning.mp3')
FRAME_COUNT = 0
# Check if the person is yawning
if MAR > MAR_THRESHOLD:
count_yawn += 1
cv2.drawContours(frame, [mouth], -1, (0, 0, 255), 1)
cv2.putText(frame, "DROWSINESS ALERT!", (270, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.imwrite("dataset_phonecam/frame_yawn%d.jpg" % count_yawn, frame)
playsound('sound files/alarm.mp3')
playsound('sound files/warning_yawn.mp3')
#total data collection for plotting
for i in ear_list:
total_ear.append(i)
for i in mar_list:
total_mar.append(i)
for i in ts:
total_ts.append(i)
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
a = total_ear
b=total_mar
c = total_ts
df = pd.DataFrame({"EAR" : a, "MAR":b,"TIME" : c})
df.to_csv("op_phonecam.csv", index=False)
df= | pd.read_csv("op_phonecam.csv") | pandas.read_csv |
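EAR_calculator is imported above but not shown; a hedged sketch of the helper, assuming the standard eye-aspect-ratio formulation (two vertical landmark distances over the horizontal one):

from scipy.spatial import distance as dist

def eye_aspect_ratio_sketch(eye):
    # eye: six (x, y) landmark points for one eye
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    C = dist.euclidean(eye[0], eye[3])
    return (A + B) / (2.0 * C)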
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,confusion_matrix
from sklearn.externals import joblib
import pandas as pd
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--model_dir", type=str, default="./model/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--target", type=str, default="virginica")
args = parser.parse_args()
test_dataset = None
if os.path.exists(os.path.join(args.data_dir,'test.csv')):
test_dataset = os.path.join(args.data_dir,'test.csv')
elif os.path.exists(os.path.join(args.data_dir,'val.csv')):
test_dataset = os.path.join(args.data_dir,'val.csv')
elif os.path.exists(os.path.join(args.data_dir,'train.csv')):
test_dataset = os.path.join(args.data_dir,'train.csv')
else:
print("ERROR:test file invalid!")
exit()
test_data = | pd.read_csv(test_dataset) | pandas.read_csv |
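A minimal sketch of the metric functions imported above, on hypothetical labels:

from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

y_true = [0, 1, 1, 0]
y_pred = [0, 1, 0, 0]
print(accuracy_score(y_true, y_pred),    # 0.75
      precision_score(y_true, y_pred),   # 1.0
      recall_score(y_true, y_pred),      # 0.5
      f1_score(y_true, y_pred))          # ~0.667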
import os
import keras
import keras.backend as backend
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras.callbacks import CSVLogger, History
from keras.layers import Input, Dense, Dropout, BatchNormalization
from keras.models import Model
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import normalize, LabelEncoder, label_binarize
"""
Created by <NAME> on 8/1/18.
Email : <EMAIL> or <EMAIL>
Website: http://ce.sharif.edu/~naghipourfar
Github: https://github.com/naghipourfar
Skype: mn7697np
"""
n_epochs = 300
batch_size = 32
def create_regressor(n_features, layers, n_outputs, optimizer=None):
input_layer = Input(shape=(n_features,))
dense = Dense(layers[0], activation='relu', name="dense_0")(input_layer)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
for i, layer in enumerate(layers[1:]):
dense = Dense(layer, activation='relu', name="dense_{0}".format(i + 1))(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
dense = Dense(n_outputs, activation='sigmoid', name="output")(dense)
model = Model(inputs=input_layer, outputs=dense)
if optimizer is None:
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
model.compile(optimizer=optimizer, loss=["mse"], metrics=["mae"])
return model
def random_classifier(drug_name=None, prediction_class=None):
accuracies = {}
data_directory = '../Data/CCLE/Classification/FS/'
if drug_name:
compounds = [drug_name + ".csv"]
else:
compounds = os.listdir(data_directory)
print("All Compounds:")
print(compounds)
for compound in compounds:
if compound.endswith(".csv") and not (
compound.__contains__("PLX4720") or compound.__contains__("Panobinostat")):
name = compound.split(".")[0]
print("*" * 50)
print(compound)
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound, feature_selection=True)
print("Data has been Loaded!")
x_data = normalize_data(x_data)
print("Data has been normalized!")
n_samples = x_data.shape[0]
if prediction_class is None:
y_pred = np.random.random_integers(low=0, high=1, size=(n_samples, 1))
else:
if prediction_class == 1:
y_pred = np.ones(shape=[n_samples, 1])
else:
y_pred = np.zeros(shape=[n_samples, 1])
accuracies[name] = accuracy_score(y_data, y_pred)
print("%s's Accuracy\t:\t%.4f%%" % (compound.split(".")[0], 100 * accuracy_score(y_data, y_pred)))
log_path = "../Results/Classification/ML/"
log_name = "Random" + "-" + str(prediction_class) + ".csv" if prediction_class is not None else "Random.csv"
accuracies = pd.DataFrame(accuracies, index=[0])
accuracies.to_csv(log_path + log_name)
def create_SAE(n_features=50000, n_code=12):
input_layer = Input(shape=(n_features,))
dense = Dense(2048, activation='relu', name="dense_0")(input_layer)
dense = BatchNormalization()(dense)
dense = Dropout(0.2)(dense)
dense = Dense(1024, activation='relu', name="dense_1")(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
dense = Dense(256, activation='relu', name="dense_2")(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
dense = Dense(64, activation='relu', name="dense_3")(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
encoded = Dense(n_code, activation='relu', name="encoded")(dense)
dense = Dense(512, activation="relu", name="dense_4")(encoded)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
decoded = Dense(n_features, activation='sigmoid', name="decoded")(dense)
cl_output = Dense(2, activation="softmax", name="classifier")(encoded)
model = Model(inputs=input_layer, outputs=[decoded, cl_output])
model.summary()
lambda_value = 9.5581e-3
def contractive_loss(y_pred, y_true):
mse = backend.mean(backend.square(y_true - y_pred), axis=1)
w = backend.variable(value=model.get_layer('encoded').get_weights()[0])  # N x N_hidden
w = backend.transpose(w)  # N_hidden x N
h = model.get_layer('encoded').output
dh = h * (1 - h)  # N_batch x N_hidden
# N_batch x N_hidden * N_hidden x 1 = N_batch x 1
contractive = lambda_value * backend.sum(dh ** 2 * backend.sum(w ** 2, axis=1), axis=1)
return mse + contractive
reconstructor_loss = contractive_loss
classifier_loss = "categorical_crossentropy"
optimizer = keras.optimizers.Nadam(lr=0.005, beta_1=0.95)
model.compile(optimizer=optimizer, loss=[reconstructor_loss, classifier_loss],
loss_weights=[0.005, 0.005],
metrics={"decoded": ["mae", "mse", "mape"], "classifier": "acc"})
return model
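As a sketch, the penalty contractive_loss adds on top of the reconstruction MSE is lambda * sum_j [h_j (1 - h_j)]^2 * sum_i W_ij^2, an approximation of the squared Frobenius norm of the encoder Jacobian; the h(1-h) factor is the exact derivative only for a sigmoid encoding, so with the relu 'encoded' layer used here it acts as a surrogate penalty.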
def create_classifier(n_features=51, layers=None, n_outputs=1):
if layers is None:
layers = [1024, 256, 64, 16, 4]
input_layer = Input(shape=(n_features,))
dense = Dense(layers[0], activation='relu', name="dense_0")(input_layer)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
for i, layer in enumerate(layers[1:]):
dense = Dense(layer, activation='relu', name="dense_{0}".format(i + 1))(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
optimizer = keras.optimizers.adamax()
if n_outputs > 1:
dense = Dense(n_outputs, activation='softmax', name="output")(dense)
loss = keras.losses.categorical_crossentropy
else:
dense = Dense(n_outputs, activation='sigmoid', name="output")(dense)
loss = keras.losses.binary_crossentropy
model = Model(inputs=input_layer, outputs=dense)
model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
return model
def load_data(data_path="../Data/CCLE/drug_response.csv", feature_selection=False):
if data_path.__contains__("/FS/"):
data = pd.read_csv(data_path)
else:
data = pd.read_csv(data_path, index_col="Cell Line")
if data_path.__contains__("Regression"):
y_data = data['IC50 (uM)']
x_data = data.drop(['IC50 (uM)'], axis=1)
else:
y_data = data['class']
x_data = data.drop(['class'], axis=1)
label_encoder = LabelEncoder()
y_data = label_encoder.fit_transform(y_data)
y_data = np.reshape(y_data, (-1, 1))
y_data = keras.utils.to_categorical(y_data, 2)
if feature_selection and not data_path.__contains__("/FS/"):
feature_names = list(pd.read_csv("../Data/BestFeatures.csv", header=None).loc[0, :])
x_data = data[feature_names]
return np.array(x_data), np.array(y_data)
def produce_classification_data(compounds):
for compound in compounds:
name = compound.split(".")[0]
print(compound, end="\t")
data = pd.read_csv("../Data/CCLE/Regression/" + name + "_preprocessed.csv")
data['class'] = np.nan
data.loc[data['IC50 (uM)'] >= 8, 'class'] = 1 # resistant
data.loc[data['IC50 (uM)'] < 8, 'class'] = 0  # sensitive
data.dropna(how='any', axis=0, inplace=True)
data.drop(["IC50 (uM)"], axis=1, inplace=True)
data.to_csv("../Data/CCLE/Classification/" + name + ".csv", index_label="Cell Line")
print("Finished!")
def normalize_data(x_data, y_data=None):
x_data = pd.DataFrame(normalize(np.array(x_data), axis=0, norm='max')).values
if y_data is not None:
y_data = pd.DataFrame(np.reshape(np.array(y_data), (-1, 1)))
y_data = pd.DataFrame(normalize(np.array(y_data), axis=0, norm='max'))
return np.array(x_data), np.array(y_data)
return np.array(x_data)
def regressor(drug_name=None):
data_directory = '../Data/CCLE/Regression/'
if drug_name:
compounds = [drug_name + ".csv"]
else:
compounds = os.listdir(data_directory)
print("All Compounds:")
print(compounds)
for compound in compounds:
if compound.endswith("_preprocessed.csv"):
print("*" * 50)
print(compound)
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound, feature_selection=True)
print("Data has been Loaded!")
x_data, y_data = normalize_data(x_data, y_data)
print("Data has been normalized!")
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.15, shuffle=True)
print("x_train shape\t:\t" + str(x_train.shape))
print("y_train shape\t:\t" + str(y_train.shape))
print("x_test shape\t:\t" + str(x_test.shape))
print("y_test shape\t:\t" + str(y_test.shape))
# for optimizer in optimizers:
model = create_regressor(x_train.shape[1], [1024, 256, 64, 4], 1, None)
logger_path = '../Results/Regression/' + compound.split(".")[0] + ".log"
csv_logger = CSVLogger(logger_path)
model.summary()
model.fit(x=x_train,
y=y_train,
batch_size=batch_size,
epochs=n_epochs,
validation_data=(x_test, y_test),
verbose=2,
shuffle=True,
callbacks=[csv_logger])
result = pd.read_csv(logger_path, delimiter=',')
plt.figure(figsize=(15, 10))
plt.plot(result['epoch'], result["loss"], label="Training Loss")
plt.plot(result['epoch'], result["val_loss"], label="Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("MSE Loss")
plt.xticks([i for i in range(0, n_epochs + 5, 5)])
plt.yticks(np.arange(0.25, -0.05, -0.05).tolist())
plt.title(compound.split(".")[0])
plt.grid()
plt.savefig("../Results/Regression/images/%s.png" % compound.split(".")[0])
plt.close("all")
model.save("../Results/Regression/%s.h5" % compound.split(".")[0])
def regressor_with_different_optimizers():
data_path = "../Data/CCLE/Regression/ZD-6474_preprocessed.csv"
optimizers = [
keras.optimizers.SGD(lr=0.1, momentum=0.9, decay=1e-6, nesterov=True),
keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True),
keras.optimizers.SGD(lr=0.001, momentum=0.9, decay=1e-6, nesterov=True),
keras.optimizers.Adagrad(lr=0.01, decay=1e-6),
keras.optimizers.Adadelta(lr=1.0, rho=0.95, decay=1e-6),
keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.99, decay=1e-6),
keras.optimizers.Nadam(lr=0.001, beta_1=0.9, beta_2=0.999)
]
print("Loading Data...")
x_data, y_data = load_data(data_path, feature_selection=True)
print("Data has been Loaded.")
print("Normalizing Data...")
x_data, y_data = normalize_data(x_data, y_data)
print("Data has been normalized.")
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3, shuffle=True)
print("x_train shape\t:\t" + str(x_train.shape))
print("y_train shape\t:\t" + str(y_train.shape))
print("x_test shape\t:\t" + str(x_test.shape))
print("y_test shape\t:\t" + str(y_test.shape))
n_features = x_train.shape[1]
layers = [1024, 256, 64, 8]
n_outputs = 1
for idx, optimizer in enumerate(optimizers):
model = create_regressor(n_features, layers, n_outputs, optimizer)
logger_path = "../Results/Optimizers/"
optimizer_name = str(optimizer.__class__).split(".")[-1].split("\'")[0] + "_"
optimizer_name += '_'.join(
["%s_%.4f" % (key, value) for (key, value) in optimizer.get_config().items()])
optimizer_name += '.log'
csv_logger = CSVLogger(logger_path + optimizer_name)
model.summary()
model.fit(x=x_train,
y=y_train,
batch_size=batch_size,
epochs=n_epochs,
validation_data=(x_test, y_test),
verbose=2,
shuffle=True,
callbacks=[csv_logger])
def regressor_with_k_best_features(k=50):
data_directory = '../Data/CCLE/'
compounds = os.listdir(data_directory)
feature_names = list(pd.read_csv("../Data/BestFeatures.csv", header=None).loc[0, :])
for compound in compounds:
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound)
print("Data has been Loaded!")
x_data = x_data[feature_names]
x_data, y_data = normalize_data(x_data, y_data)
print("Data has been normalized!")
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3, shuffle=True)
print("x_train shape\t:\t" + str(x_train.shape))
print("y_train shape\t:\t" + str(y_train.shape))
print("x_test shape\t:\t" + str(x_test.shape))
print("y_test shape\t:\t" + str(y_test.shape))
for k in [50, 40, 30, 20, 10, 5, 4, 3, 2, 1]:
model = create_regressor(x_train.shape[1], [32, 16, 4], 1)
dir_name = "../Results/Drugs/%s/%dFeaturesSelection" % (compound.split(".")[0], k)
os.makedirs(dir_name)
csv_logger = CSVLogger(dir_name + '/best_%s_%d.log' % (compound.split(".")[0], k))
model.fit(x=x_train,
y=y_train,
batch_size=batch_size,
epochs=n_epochs,
validation_data=(x_test, y_test),
verbose=2,
shuffle=True,
callbacks=[csv_logger])
import csv
with open("../Results/Drugs/%s/%s.csv" % (compound.split(".")[0], compound.split(".")[0]), 'a') as file:
writer = csv.writer(file)
loss = model.evaluate(x_test.as_matrix(), y_test.as_matrix(), verbose=0)
loss.insert(0, k)
writer.writerow(loss)
df = pd.read_csv("../Results/Drugs/%s/%s.csv" % (compound.split(".")[0], compound.split(".")[0]), header=None)
plt.figure()
plt.plot(df[0], df[1], "-o")
plt.xlabel("# of Features")
plt.ylabel("Mean Absolute Error")
plt.title(compound.split(".")[0])
plt.savefig("../Results/Drugs/%s/%s.png" % (compound.split(".")[0], compound.split(".")[0]))
def classifier(drug_name=None):
data_directory = '../Data/CCLE/Classification/FS/'
if drug_name:
compounds = [drug_name + ".csv"]
else:
compounds = os.listdir(data_directory)
print("All Compounds:")
print(compounds)
for compound in compounds:
if compound.endswith(".csv"):
print("*" * 50)
print(compound)
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound, feature_selection=True)
print("Data has been Loaded!")
x_data = normalize_data(x_data)
print("Data has been normalized!")
# x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.05, shuffle=True)
# print("x_train shape\t:\t" + str(x_train.shape))
# print("y_train shape\t:\t" + str(y_train.shape))
# print("x_test shape\t:\t" + str(x_test.shape))
# print("y_test shape\t:\t" + str(y_test.shape))
logger_path = "../Results/Classification/CV/"
# plt.figure(figsize=(15, 10))
# plt.title(compound.split(".")[0])
model = None
for k in range(10, 15, 5):
model = KerasClassifier(build_fn=create_classifier,
epochs=500,
batch_size=64,
verbose=2,
)
# y_data = encode_labels(y_data, 2)
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.25)
model.fit(x_train, y_train, validation_data=(x_test, y_test))
print(x_test.shape)
print(y_test.shape)
y_pred = model.predict(x_test)
y_pred = np.reshape(y_pred, (-1, 1))
y_test = np.reshape(y_test, (-1, 1))
print("Accuracy: %.4f %%" % (accuracy_score(y_test, y_pred) * 100))
print(y_pred.shape)
print(y_test.shape)
n_classes = y_pred.shape[1]
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_pred.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
plt.close("all")
plt.figure()
lw = 2
plt.plot(fpr[0], tpr[0], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[0])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for %s' % compound.split(".")[0])
plt.legend(loc="lower right")
# plt.show()
plt.savefig("../Results/Classification/ML/ROC/Deep Learning/%s.pdf" % (compound.split(".")[0]))
# log_name = "Stratified %s-%d-cv.csv" % (compound.split(".")[0], k)
# for x_train_cv, x_validation, y_train_cv, y_validation in stratified_kfold(x_data, y_data, k=k):
# label_encoder = LabelEncoder()
# y_train_cv = label_encoder.fit_transform(y_train_cv)
# y_train_cv = np.reshape(y_train_cv, (-1, 1))
# y_train_cv = keras.utils.to_categorical(y_train_cv, 2)
#
# y_validation = label_encoder.transform(y_validation)
# y_validation = np.reshape(y_validation, (-1, 1))
# y_validation = keras.utils.to_categorical(y_validation, 2)
# model.fit(x=x_train_cv,
# y=y_train_cv,
# batch_size=batch_size,
# epochs=n_epochs,
# validation_data=(x_validation, y_validation),
# verbose=0,
# shuffle=True)
# score = model.evaluate(x_validation, y_validation, verbose=0)
# print("Stratified %d-fold %s %s: %.2f%%" % (
# k, compound.split(".")[0], model.metrics_names[1], score[1] * 100))
# cross_validation_scores.append(score[1] * 100)
# model.save(filepath="../Results/Classification/%s.h5" % compound.split(".")[0])
# np.savetxt(fname=logger_path + log_name, X=np.array(cross_validation_scores), delimiter=',')
# plt.plot(cross_validation_scores, label="%d-fold cross validation")
# result = pd.read_csv(logger_path, delimiter=',')
# plt.xlabel("Folds")
# plt.ylabel("Accuracy")
# plt.xticks([i for i in range(0, n_epochs + 5, 5)], rotation=90)
# plt.yticks(np.arange(0, 1.05, 0.05).tolist())
# plt.title(compound.split(".")[0])
# plt.grid()
# plt.legend(loc="upper right")
# plt.savefig("../Results/Classification/images/%s.png" % compound.split(".")[0])
# plt.close("all")
print("Finished!")
def encode_labels(y_data, n_classes=2):
label_encoder = LabelEncoder()
y_data = label_encoder.fit_transform(y_data)
y_data = np.reshape(y_data, (-1, 1))
y_data = keras.utils.to_categorical(y_data, n_classes)
return y_data
def plot_results(path="../Results/Classification/"):
logs = os.listdir(path)
print(logs)
for log in logs:
if os.path.isfile(path + log) and log.endswith(".log"):
result = pd.read_csv(path + log, delimiter=',')
plt.figure(figsize=(15, 10))
plt.plot(result['epoch'], result["val_acc"])
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.title(log.split(".")[0])
plt.savefig("../Results/Classification/images/%s.png" % log.split(".")[0])
plt.close("all")
def plot_roc_curve(path="../Results/Classification/"):
models = os.listdir(path)
models = [models[i] for i in range(len(models)) if models[i].endswith(".h5")]
for model in models:
drug_name = model.split(".")[0]
# print(drug_name + "\t:\t", end="")
model = keras.models.load_model(path + model)
x_data, y_data = load_data(data_path='../Data/CCLE/Classification/' + drug_name + ".csv",
feature_selection=True)
y_pred = model.predict(x_data.as_matrix())
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(2):
fpr[i], tpr[i], _ = roc_curve(y_data[:, i], y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
plt.figure()
lw = 2
plt.plot(fpr[0], tpr[0], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[0])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
def machine_learning_classifiers(drug_name=None, alg_name="SVM"):
data_directory = '../Data/CCLE/Classification/FS/'
if not drug_name:
compounds = os.listdir(data_directory)
else:
compounds = [drug_name + ".csv"]
log_path = "../Results/Classification/ML/"
accuracies = {}
for k in range(5, 15, 5):
log_name = alg_name + "-Stratified-%d-cv.csv" % k
for compound in compounds:
if compound.endswith(".csv") and not (
compound.__contains__("PLX4720") or compound.__contains__("Panobinostat")):
name = compound.split(".")[0]
print("*" * 50)
print(compound)
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound, feature_selection=True)
print("Data has been Loaded!")
x_data = normalize_data(x_data)
print("Data has been normalized!")
accuracies[name] = 0.0
ml_classifier = OneVsRestClassifier(svm.SVC(C=1.0, kernel='rbf', probability=True))
if alg_name == "SVM":
ml_classifier = OneVsRestClassifier(svm.SVC(C=1.0, kernel='rbf', probability=True))
elif alg_name == "RandomForest":
ml_classifier = OneVsRestClassifier(RandomForestClassifier())
elif alg_name == "GradientBoosting":
ml_classifier = OneVsRestClassifier(GradientBoostingClassifier(learning_rate=0.01))
y_data = label_binarize(y_data, classes=[0, 1])
print(y_data.shape)
n_classes = y_data.shape[1]
# x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.25)
for x_train_cv, x_validation, y_train_cv, y_validation in stratified_kfold(x_data, y_data, k=k):
ml_classifier = ml_classifier.fit(x_train_cv, y_train_cv)
y_pred = ml_classifier.predict(x_validation)
accuracies[name] += accuracy_score(y_validation, y_pred)
print(name, k, accuracy_score(y_validation, y_pred) * 100)
# ml_classifier.fit(x_train, y_train)
# y_pred = ml_classifier.predict(x_test)
# y_pred = np.reshape(y_pred, (-1, 1))
# target_names = ["class_0: Resistant", "class_1: Sensitive"]
# string = classification_report(y_test, y_pred, target_names=target_names)
# with open("../Results/Classification/ML/Classification Report/%s/%s.txt" % (alg_name, name), "w") as f:
# f.write(string)
# fpr = dict()
# tpr = dict()
# roc_auc = dict()
# for i in range(n_classes):
# fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i])
# roc_auc[i] = auc(fpr[i], tpr[i])
#
# # Compute micro-average ROC curve and ROC area
# fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_pred.ravel())
# roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# plt.close("all")
# plt.figure()
# lw = 2
# plt.plot(fpr[0], tpr[0], color='darkorange',
# lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[0])
# plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.title('Receiver operating characteristic for %s' % name)
# plt.legend(loc="lower right")
# plt.show()
# plt.savefig("../Results/Classification/ML/ROC/%s/%s.pdf" % (alg_name, name))
accuracies[name] /= k
print("Mean Accuracy = %.4f" % accuracies[name])
results = pd.DataFrame(data=accuracies, index=[0])
results.to_csv(log_path + log_name)
print("Finished!")
def kfold_cross_validation(x_data, y_data, k=10):
kf = KFold(n_splits=k, shuffle=True)
for train_idx, test_idx in kf.split(x_data):
train_idx = list(train_idx)
test_idx = list(test_idx)
x_train, x_test = x_data[train_idx], x_data[test_idx]
y_train, y_test = y_data[train_idx], y_data[test_idx]
yield x_train, x_test, y_train, y_test
data_directory = '../Data/CCLE/Classification/'
compounds = os.listdir(data_directory)
log_path = "../Results/Classification/ML/"
def stratified_kfold(x_data, y_data, k=10):
skfold = StratifiedKFold(n_splits=k, shuffle=True)
for train_idx, test_idx in skfold.split(x_data, y_data):
train_idx = list(train_idx)
test_idx = list(test_idx)
x_train, x_test = x_data[train_idx], x_data[test_idx]
y_train, y_test = y_data[train_idx], y_data[test_idx]
yield x_train, x_test, y_train, y_test
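A minimal usage sketch of the stratified_kfold generator above, on hypothetical arrays:

import numpy as np

X = np.random.rand(20, 5)
y = np.array([0, 1] * 10)
for x_tr, x_te, y_tr, y_te in stratified_kfold(X, y, k=5):
    print(x_tr.shape, x_te.shape)   # (16, 5) (4, 5) per fold, class balance preserved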
def generate_small_datas():
data_directory = '../Data/CCLE/Classification/'
compounds = ["AEW541.csv"]
print("All Compounds:")
print(compounds)
for compound in compounds:
if compound.endswith(".csv"):
print(compound, end="\t")
x_data, y_data = load_data(data_directory + compound, feature_selection=True)
data = | pd.DataFrame(x_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 20:47:09 2020
@author: <NAME>
"""
from extraction import log_replayer as rpl
from extraction import interarrival_definition as arr
from extraction import gateways_probabilities as gt
from extraction import role_discovery as rl
from extraction import schedule_tables as sch
from extraction import tasks_evaluator as te
import pandas as pd
class ParameterMiner():
"""
This class extracts all the BPS parameters
"""
def __init__(self, log, bpmn, process_graph, settings):
"""constructor"""
self.log = log
self.bpmn = bpmn
self.process_graph = process_graph
self.settings = settings
self.process_stats = list()
self.parameters = dict()
self.conformant_traces = list()
self.resource_table = | pd.DataFrame() | pandas.DataFrame |
from io import StringIO
import os
import pandas as pd
from pathlib import Path
import PySimpleGUI as sg
import subprocess
import sys
# Constants
encoding = 'CP1250'
file_types = (("(Sparda) CSV", "*.csv"),)
# Variables
data_dir_path = None
account_txs_files = None
def read_sparda_tx_csv(filename):
lines = None
with open(filename, 'r', encoding=encoding) as f:
lines = f.readlines()
account_name = f'sparda-blz{lines[4].split(";")[1].strip()}-konto{lines[5].split(";")[1].strip()}'
csv_text = ''.join(lines[15:-3])
get_saldo = lambda s: float(s.split(';')[-2].replace('.', '').replace(',', '.')) * (-1 if s.strip().endswith('S') else 1)
anfangssaldo = get_saldo(lines[-2])
endsaldo = get_saldo(lines[-1])
df = pd.read_csv(StringIO(csv_text), sep=';', decimal=',', thousands='.')
values = df['Umsatz'] * df['Soll/Haben'].map(lambda s: -1 if s == 'S' else 1)
df['Umsatz'] = values
df.drop('Soll/Haben', axis=1, inplace=True)
saldo = [0] * len(values)
saldo[0] = anfangssaldo + values[0]
for i in range(1, len(values)):
saldo[i] = saldo[i-1] + values[i]
assert(abs(endsaldo - saldo[-1]) < 0.01)
df['Saldo'] = saldo
return account_name, df
def join_unique_and_sort_dfs(dfs):
df = pd.concat(dfs)
df.drop_duplicates(inplace=True)
df.reset_index(drop=True, inplace=True)
df.sort_values(by=['Buchungstag', 'Valuta'], inplace=True)
return df
def format_date_in_place_DE(df, column_names):
for column_name in column_names:
df[column_name] = pd.to_datetime(df[column_name].astype(str), format='%d%m%Y')
df[column_name] = df[column_name].dt.strftime('%d.%m.%Y')
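A hedged sketch of join_unique_and_sort_dfs on two tiny hypothetical statements (column values are made up):

import pandas as pd

a = pd.DataFrame({"Buchungstag": [20210104, 20210102], "Valuta": [20210104, 20210102], "Umsatz": [-10.0, 50.0]})
b = pd.DataFrame({"Buchungstag": [20210102, 20210106], "Valuta": [20210102, 20210106], "Umsatz": [50.0, -5.0]})
merged = join_unique_and_sort_dfs([a, b])   # the duplicated booking is dropped, rows sorted by Buchungstag/Valuta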
def import_txs(main_window, file):
try:
account_name, df_b = read_sparda_tx_csv(file)
except:
main_window.ding()
sg.Popup('Fehler beim Lesen der Datei')
return None
# check if there is a file with the name of account_name in data_dir_path
if account_name in account_txs_files:
df_a = pd.read_csv(data_dir_path / (account_name + '.csv'), sep=';', decimal=',', thousands='.', encoding=encoding)
try:
df_b = join_unique_and_sort_dfs([df_a, df_b])
except:
main_window.ding()
sg.Popup('Fehler beim Zusammenführen der Dateien')
return None
df_b.to_csv(data_dir_path / (account_name + '.csv'), sep=';', decimal=',', index=False, encoding=encoding)
sg.Popup('Datei wurde importiert')
return account_name
def export_txs(main_window, file):
layout = [
[sg.Input(), sg.FileSaveAs(button_text='Speicherort auswählen', file_types=file_types, initial_folder=os.path.expanduser('~'))],
[sg.OK(), sg.Cancel(button_text='Abbrechen')],
]
window = sg.Window(f'{file} exportieren', layout)
event, values = window.read()
window.close()
if event == sg.WIN_CLOSED or event == 'Abbrechen':
return
file_out = values[0].strip()
df_out = | pd.read_csv(data_dir_path / (file + '.csv'), sep=';', decimal=',', thousands='.', encoding=encoding) | pandas.read_csv |
# Variables
base_list = 'List_1' # this is the base list, each item in this list is checked for a match in the other
list_2 = 'List_2' # List_2 is the name of the list in the excel file
xlfile = 'DATA_IN.xlsx'
# Importing Libs
import pandas as pd
import numpy as np
# Smart Stuff
df_0 = | pd.read_excel(xlfile, dtype=str) | pandas.read_excel |
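A hedged sketch of the intended membership check, assuming df_0 actually contains columns named 'List_1' and 'List_2' (the output file name is hypothetical):

df_0["match"] = df_0[base_list].isin(df_0[list_2])   # True where a List_1 item occurs anywhere in List_2
df_0.to_excel("DATA_OUT.xlsx", index=False)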
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
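A small sketch of the duplicate-date behaviour the test class above exercises:

from pandas import Series, DatetimeIndex

dups = Series(range(3), index=DatetimeIndex(['2000-01-02', '2000-01-02', '2000-01-03']))
print(dups['2000-01-02'])     # two matching rows come back as a Series
print(dups.index.unique())    # de-duplicated DatetimeIndex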
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
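# 1370745748 seconds after the Unix epoch is 2013-06-09 02:42:28 UTC, matching the expected Timestamps below.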
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
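# Values are nanosecond offsets from the Unix epoch; iNaT marks the slot that should render as NaT in the repr below.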
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
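# ts2 is now indexed by datetime.date objects; the additions below rely on dates being promoted back to datetime64 so the two series align.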
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = | DataFrame(vals, index=rng) | pandas.DataFrame |
#!/usr/bin/env python
import numpy as np
import sklearn.metrics
import sklearn.metrics.pairwise
from scipy.stats.stats import pearsonr
from sklearn.metrics.cluster import normalized_mutual_info_score
import matplotlib.pyplot as plt; plt.rcdefaults()
from sklearn.preprocessing import KBinsDiscretizer
from opt_gaussian import *
import pandas as pd
import ppscore as pps
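# The script below contrasts Pearson correlation, normalized mutual information (NMI), predictive power score (pps) and normalized HSIC on synthetic perfectly linear, noisy linear and sine-shaped relationships.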
# compute normalized HSIC between X and Y
# if sigma_type = 'mpd', the Gaussian width σ is the median of pairwise distances
# if sigma_type = 'opt', σ is chosen by the opt_gaussian optimizer
def ℍ(X,Y, X_kernel='Gaussian', Y_kernel='Gaussian', sigma_type='opt'):
def get_γ(X,Y, sigma_type):
if sigma_type == 'mpd':
σ = np.median(sklearn.metrics.pairwise_distances(X)) # use the median of pairwise distances as σ
else:
optimizer = opt_gaussian(X,Y, Y_kernel=Y_kernel)
optimizer.minimize_H()
σ = optimizer.result.x[0]
if σ < 0.01: σ = 0.05 # ensure that σ is not too low
γ = 1.0/(2*σ*σ)
return γ
if len(X.shape) == 1: X = np.reshape(X, (X.size, 1))
if len(Y.shape) == 1: Y = np.reshape(Y, (Y.size, 1))
n = X.shape[0]
if X_kernel == 'linear': Kᵪ = X.dot(X.T)
if Y_kernel == 'linear': Kᵧ = Y.dot(Y.T)
if X_kernel == 'Gaussian':
γ = get_γ(X,Y, sigma_type)
Kᵪ = sklearn.metrics.pairwise.rbf_kernel(X, gamma=γ)
if Y_kernel == 'Gaussian':
γ = get_γ(X, Y, sigma_type)
Kᵧ = sklearn.metrics.pairwise.rbf_kernel(Y, gamma=γ)
#np.fill_diagonal(Kᵪ, 0)
#np.fill_diagonal(Kᵧ, 0)
HKᵪ = Kᵪ - np.mean(Kᵪ, axis=0) # equivalent to HKᵪ = H.dot(Kᵪ)
HKᵧ = Kᵧ - np.mean(Kᵧ, axis=0) # equivalent to HKᵧ = H.dot(Kᵧ)
Hᵪᵧ= np.sum(HKᵪ*HKᵧ)
Hᵪ = np.linalg.norm(HKᵪ) # equivalent to np.sqrt(np.sum(KᵪH*KᵪH))
Hᵧ = np.linalg.norm(HKᵧ) # equivalent to np.sqrt(np.sum(KᵧH*KᵧH))
H = Hᵪᵧ/( Hᵪ * Hᵧ )
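# H is the HSIC value normalized by the Hilbert-Schmidt norms of the centered kernels, so it lies in [0, 1], with larger values indicating stronger dependence.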
return H
def double_center(Ψ):
HΨ = Ψ - np.mean(Ψ, axis=0) # equivalent to Γ = Ⲏ.dot(Kᵧ).dot(Ⲏ)
HΨH = (HΨ.T - np.mean(HΨ.T, axis=0)).T
return HΨH
if __name__ == '__main__':
n = 300
# Perfect Linear Data
dat = np.random.rand(n,1)
plinear_data = np.hstack((dat,dat)) + 1
df = pd.DataFrame(data=plinear_data, columns=["x", "y"])
enc = KBinsDiscretizer(n_bins=10, encode='ordinal')
XP_data_nmi = np.squeeze(enc.fit_transform(np.atleast_2d(plinear_data[:,0]).T))
enc = KBinsDiscretizer(n_bins=10, encode='ordinal')
YP_data_nmi = np.squeeze(enc.fit_transform(np.atleast_2d(plinear_data[:,1]).T))
plinear_pc = np.round(pearsonr(plinear_data[:,0], plinear_data[:,1])[0], 2)
plinear_nmi = np.round(normalized_mutual_info_score(XP_data_nmi, YP_data_nmi),2)
plinear_hsic = np.round(ℍ(plinear_data[:,0], plinear_data[:,1]),2)
plinear_pps = np.round(pps.score(df, "x", "y")['ppscore'],2)
print('Perfect Linear Relationship:')
print('\tCorrelation : ', plinear_pc)
print('\tNMI : ', plinear_nmi)
print('\tpps : ', plinear_pps)
print('\tHSIC : ', plinear_hsic)
# Linear Data
dat = np.random.rand(n,1)
linear_data = np.hstack((dat,dat)) + 0.04*np.random.randn(n,2)
df = pd.DataFrame(data=linear_data, columns=["x", "y"])
enc = KBinsDiscretizer(n_bins=10, encode='ordinal')
XL_data_nmi = np.squeeze(enc.fit_transform(np.atleast_2d(linear_data[:,0]).T))
enc = KBinsDiscretizer(n_bins=10, encode='ordinal')
YL_data_nmi = np.squeeze(enc.fit_transform(np.atleast_2d(linear_data[:,1]).T))
linear_pc = np.round(pearsonr(linear_data[:,0], linear_data[:,1])[0], 2)
linear_nmi = np.round(normalized_mutual_info_score(XL_data_nmi, YL_data_nmi),2)
linear_hsic = np.round(ℍ(linear_data[:,0], linear_data[:,1]),2)
linear_pps = np.round(pps.score(df, "x", "y")['ppscore'],2)
print('Linear Relationship:')
print('\tCorrelation : ', linear_pc)
print('\tNMI : ', linear_nmi)
print('\tpps : ', linear_pps)
print('\tHSIC : ', linear_hsic)
# Sine Data
dat_x = 9.3*np.random.rand(n,1)
dat_y = np.sin(dat_x)
sine_data = np.hstack((dat_x,dat_y)) + 0.06*np.random.randn(n,2)
df = | pd.DataFrame(data=sine_data, columns=["x", "y"]) | pandas.DataFrame |
#############################################################################
# Copyright (C) 2020-2021 German Aerospace Center (DLR-SC)
#
# Authors:
#
# Contact: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
import unittest
from pyfakefs import fake_filesystem_unittest
import numpy as np
import io
from datetime import date, timedelta
import os
import pandas as pd
from memilio.epidata import getRKIDatawithEstimations as grdwd
from memilio.epidata import getDataIntoPandasDataFrame as gd
from memilio.epidata import defaultDict as dd
from unittest.mock import patch, call
class TestGetRKIDatawithEstimations(fake_filesystem_unittest.TestCase):
path = '/home/RKIEstimationData'
# Notice data is not realistic
str_whole_country_Germany_jh = \
("""[{"CountryRegion":"Germany","Date":"2020-01-22","Confirmed":0,"Recovered":0.0,"Deaths":0.0},\
{"CountryRegion":"Germany","Date":"2020-01-23","Confirmed":0,"Recovered":0.0,"Deaths":0.0},\
{"CountryRegion":"Germany","Date":"2020-01-24","Confirmed":0,"Recovered":0.0,"Deaths":0.0},\
{"CountryRegion":"Germany","Date":"2020-01-25","Confirmed":0,"Recovered":0.0,"Deaths":0.0},\
{"CountryRegion":"Germany","Date":"2020-01-26","Confirmed":0,"Recovered":1.0,"Deaths":1.0},\
{"CountryRegion":"Germany","Date":"2020-01-27","Confirmed":5,"Recovered":2.0,"Deaths":1.0},\
{"CountryRegion":"Germany","Date":"2020-01-28","Confirmed":6,"Recovered":3.0,"Deaths":1.0},\
{"CountryRegion":"Germany","Date":"2020-01-29","Confirmed":7,"Recovered":3.0,"Deaths":1.0},\
{"CountryRegion":"Germany","Date":"2020-01-30","Confirmed":8,"Recovered":3.0,"Deaths":1.0},\
{"CountryRegion":"Germany","Date":"2020-01-31","Confirmed":9,"Recovered":3.0,"Deaths":2.0}]""")
str_all_germany_rki = ("""[{"Date":1577836800000,"Confirmed":21,"Deaths":0,"Recovered":21},
{"Date":1577923200000,"Confirmed":23,"Deaths":0,"Recovered":23},
{"Date":1578009600000,"Confirmed":28,"Deaths":0,"Recovered":28},
{"Date":1578096000000,"Confirmed":32,"Deaths":0,"Recovered":32},
{"Date":1578268800000,"Confirmed":33,"Deaths":0,"Recovered":33},
{"Date":1578441600000,"Confirmed":34,"Deaths":0,"Recovered":34},
{"Date":1578528000000,"Confirmed":35,"Deaths":0,"Recovered":35},
{"Date":1578614400000,"Confirmed":37,"Deaths":0,"Recovered":37},
{"Date":1578700800000,"Confirmed":40,"Deaths":0,"Recovered":40},
{"Date":1578873600000,"Confirmed":41,"Deaths":0,"Recovered":41},
{"Date":1578960000000,"Confirmed":42,"Deaths":0,"Recovered":42},
{"Date":1579046400000,"Confirmed":43,"Deaths":0,"Recovered":43},
{"Date":1579132800000,"Confirmed":44,"Deaths":0,"Recovered":44},
{"Date":1579219200000,"Confirmed":47,"Deaths":1,"Recovered":46},
{"Date":1579305600000,"Confirmed":50,"Deaths":1,"Recovered":49},
{"Date":1579392000000,"Confirmed":51,"Deaths":1,"Recovered":50},
{"Date":1579478400000,"Confirmed":52,"Deaths":1,"Recovered":51},
{"Date":1579564800000,"Confirmed":53,"Deaths":1,"Recovered":52},
{"Date":1579651200000,"Confirmed":57,"Deaths":1,"Recovered":56},
{"Date":1579737600000,"Confirmed":59,"Deaths":1,"Recovered":58},
{"Date":1579824000000,"Confirmed":60,"Deaths":1,"Recovered":59},
{"Date":1579910400000,"Confirmed":62,"Deaths":1,"Recovered":61},
{"Date":1579996800000,"Confirmed":63,"Deaths":1,"Recovered":62},
{"Date":1580083200000,"Confirmed":68,"Deaths":1,"Recovered":67},
{"Date":1580169600000,"Confirmed":70,"Deaths":1,"Recovered":69},
{"Date":1580256000000,"Confirmed":73,"Deaths":1,"Recovered":72},
{"Date":1580342400000,"Confirmed":84,"Deaths":1,"Recovered":83},
{"Date":1580428800000,"Confirmed":96,"Deaths":1,"Recovered":95}]""")
str_all_gender_rki = (""" [{"Gender": "female", "Date": 1577836800000, "Confirmed": 7, "Deaths": 0, "Recovered": 7},
{"Gender": "female", "Date": 1577923200000, "Confirmed": 9, "Deaths": 0, "Recovered": 9},
{"Gender": "female", "Date": 1578009600000, "Confirmed": 13, "Deaths": 0, "Recovered": 13},
{"Gender": "female", "Date": 1578096000000, "Confirmed": 16, "Deaths": 0, "Recovered": 16},
{"Gender": "female", "Date": 1578441600000, "Confirmed": 17, "Deaths": 0, "Recovered": 17},
{"Gender": "female", "Date": 1578614400000, "Confirmed": 18, "Deaths": 0, "Recovered": 18},
{"Gender": "female", "Date": 1578700800000, "Confirmed": 20, "Deaths": 0, "Recovered": 20},
{"Gender": "female", "Date": 1578873600000, "Confirmed": 21, "Deaths": 0, "Recovered": 21},
{"Gender": "female", "Date": 1579046400000, "Confirmed": 22, "Deaths": 0, "Recovered": 22},
{"Gender": "female", "Date": 1579132800000, "Confirmed": 23, "Deaths": 0, "Recovered": 23},
{"Gender": "female", "Date": 1579219200000, "Confirmed": 25, "Deaths": 1, "Recovered": 24},
{"Gender": "female", "Date": 1579392000000, "Confirmed": 26, "Deaths": 1, "Recovered": 25},
{"Gender": "female", "Date": 1579737600000, "Confirmed": 27, "Deaths": 1, "Recovered": 26},
{"Gender": "female", "Date": 1580083200000, "Confirmed": 30, "Deaths": 1, "Recovered": 29},
{"Gender": "female", "Date": 1580256000000, "Confirmed": 33, "Deaths": 1, "Recovered": 32},
{"Gender": "female", "Date": 1580342400000, "Confirmed": 38, "Deaths": 1, "Recovered": 37},
{"Gender": "female", "Date": 1580428800000, "Confirmed": 43, "Deaths": 1, "Recovered": 42},
{"Gender":"male","Date":1577836800000,"Confirmed":14,"Deaths":0,"Recovered":14},
{"Gender":"male","Date":1578009600000,"Confirmed":15,"Deaths":0,"Recovered":15},
{"Gender":"male","Date":1578096000000,"Confirmed":16,"Deaths":0,"Recovered":16},
{"Gender":"male","Date":1578268800000,"Confirmed":17,"Deaths":0,"Recovered":17},
{"Gender":"male","Date":1578528000000,"Confirmed":18,"Deaths":0,"Recovered":18},
{"Gender":"male","Date":1578614400000,"Confirmed":19,"Deaths":0,"Recovered":19},
{"Gender":"male","Date":1578700800000,"Confirmed":20,"Deaths":0,"Recovered":20},
{"Gender":"male","Date":1578960000000,"Confirmed":21,"Deaths":0,"Recovered":21},
{"Gender":"male","Date":1579219200000,"Confirmed":22,"Deaths":0,"Recovered":22},
{"Gender":"male","Date":1579305600000,"Confirmed":25,"Deaths":0,"Recovered":25},
{"Gender":"male","Date":1579478400000,"Confirmed":26,"Deaths":0,"Recovered":26},
{"Gender":"male","Date":1579564800000,"Confirmed":27,"Deaths":0,"Recovered":27},
{"Gender":"male","Date":1579651200000,"Confirmed":31,"Deaths":0,"Recovered":31},
{"Gender":"male","Date":1579737600000,"Confirmed":32,"Deaths":0,"Recovered":32},
{"Gender":"male","Date":1579824000000,"Confirmed":33,"Deaths":0,"Recovered":33},
{"Gender":"male","Date":1579910400000,"Confirmed":35,"Deaths":0,"Recovered":35},
{"Gender":"male","Date":1579996800000,"Confirmed":36,"Deaths":0,"Recovered":36},
{"Gender":"male","Date":1580083200000,"Confirmed":38,"Deaths":0,"Recovered":38},
{"Gender":"male","Date":1580169600000,"Confirmed":40,"Deaths":0,"Recovered":40},
{"Gender":"male","Date":1580342400000,"Confirmed":46,"Deaths":0,"Recovered":46},
{"Gender":"male","Date":1580428800000,"Confirmed":53,"Deaths":0,"Recovered":53}]""")
str_age_germany = ("""[
{"Age_RKI":"A00-A04","Date":1579824000000,"Confirmed":1,"Deaths":0,"Recovered":1},
{"Age_RKI":"A00-A04","Date":1580256000000,"Confirmed":2,"Deaths":0,"Recovered":2},
{"Age_RKI":"A00-A04","Date":1580428800000,"Confirmed":3,"Deaths":0,"Recovered":3},
{"Age_RKI":"A05-A14","Date":1580256000000,"Confirmed":10,"Deaths":0,"Recovered":10},
{"Age_RKI":"A05-A14","Date":1580342400000,"Confirmed":11,"Deaths":0,"Recovered":11},
{"Age_RKI":"A05-A14","Date":1580428800000,"Confirmed":12,"Deaths":0,"Recovered":12},
{"Age_RKI":"A15-A34","Date":1579737600000,"Confirmed":42,"Deaths":0,"Recovered":42},
{"Age_RKI":"A15-A34","Date":1579824000000,"Confirmed":43,"Deaths":0,"Recovered":43},
{"Age_RKI":"A15-A34","Date":1579910400000,"Confirmed":44,"Deaths":0,"Recovered":44},
{"Age_RKI":"A15-A34","Date":1580083200000,"Confirmed":47,"Deaths":0,"Recovered":47},
{"Age_RKI":"A15-A34","Date":1580169600000,"Confirmed":48,"Deaths":0,"Recovered":48},
{"Age_RKI":"A15-A34","Date":1580256000000,"Confirmed":51,"Deaths":0,"Recovered":51},
{"Age_RKI":"A15-A34","Date":1580342400000,"Confirmed":56,"Deaths":0,"Recovered":56},
{"Age_RKI":"A15-A34","Date":1580428800000,"Confirmed":58,"Deaths":0,"Recovered":58},
{"Age_RKI":"A35-A59","Date":1579824000000,"Confirmed":69,"Deaths":0,"Recovered":69},
{"Age_RKI":"A35-A59","Date":1579910400000,"Confirmed":70,"Deaths":0,"Recovered":70},
{"Age_RKI":"A35-A59","Date":1579996800000,"Confirmed":74,"Deaths":0,"Recovered":74},
{"Age_RKI":"A35-A59","Date":1580083200000,"Confirmed":76,"Deaths":0,"Recovered":76},
{"Age_RKI":"A35-A59","Date":1580169600000,"Confirmed":78,"Deaths":0,"Recovered":78},
{"Age_RKI":"A35-A59","Date":1580256000000,"Confirmed":80,"Deaths":0,"Recovered":80},
{"Age_RKI":"A35-A59","Date":1580342400000,"Confirmed":81,"Deaths":0,"Recovered":81},
{"Age_RKI":"A35-A59","Date":1580428800000,"Confirmed":86,"Deaths":0,"Recovered":86},
{"Age_RKI":"A60-A79","Date":1579824000000,"Confirmed":31,"Deaths":3,"Recovered":28},
{"Age_RKI":"A60-A79","Date":1579910400000,"Confirmed":32,"Deaths":3,"Recovered":29},
{"Age_RKI":"A60-A79","Date":1580083200000,"Confirmed":33,"Deaths":3,"Recovered":30},
{"Age_RKI":"A60-A79","Date":1580169600000,"Confirmed":34,"Deaths":3,"Recovered":31},
{"Age_RKI":"A60-A79","Date":1580342400000,"Confirmed":35,"Deaths":3,"Recovered":32},
{"Age_RKI":"A80+","Date":1579737600000,"Confirmed":20,"Deaths":1,"Recovered":19},
{"Age_RKI":"A80+","Date":1579910400000,"Confirmed":21,"Deaths":1,"Recovered":20},
{"Age_RKI":"A80+","Date":1580083200000,"Confirmed":22,"Deaths":1,"Recovered":21},
{"Age_RKI":"A80+","Date":1580256000000,"Confirmed":23,"Deaths":1,"Recovered":22}]""")
rki_files_to_change = ["all_germany_rki", "all_gender_rki", "all_age_rki",
"all_state_rki", "all_state_gender_rki", "all_state_age_rki",
"all_county_rki", "all_county_gender_rki", "all_county_age_rki"]
def setUp(self):
self.setUpPyfakefs()
def write_rki_data(self, out_folder):
for file_to_change in self.rki_files_to_change:
file_rki = file_to_change + ".json"
file_rki_with_path = os.path.join(out_folder, file_rki)
if file_to_change == "all_gender_rki":
with open(file_rki_with_path, 'w') as f:
f.write(self.str_all_gender_rki)
elif file_to_change == "all_age_rki":
with open(file_rki_with_path, 'w') as f:
f.write(self.str_age_germany)
else:
with open(file_rki_with_path, 'w') as f:
f.write(self.str_all_germany_rki)
def write_jh_data(self, out_folder):
file_jh = "whole_country_Germany_jh.json"
file_jh_with_path = os.path.join(out_folder, file_jh)
with open(file_jh_with_path, 'w') as f:
f.write(self.str_whole_country_Germany_jh)
def write_weekly_deaths_xlsx_data(self, out_folder, file_name='RKI_deaths_weekly.xlsx'):
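# Builds a minimal fake of the RKI weekly-deaths workbook; the '<4' entries mimic how the published file masks small counts.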
sheet1 = pd.DataFrame({'Sterbejahr': ['2020', '2020', '2020', '2020'], 'Sterbewoche': ['1', '3', '10', '51'],
'Anzahl verstorbene COVID-19 Fälle': ['0', '<4', '18', '3000']})
sheet2 = pd.DataFrame({'Sterbejahr': ['2020', '2020', '2020', '2020'], 'Sterbewoche': ['1', '3', '10', '51'],
'AG 0-9 Jahre': ['0', '<4', '30', '10'], 'AG 10-19 Jahre': ['0', '<4', '30', '10'],
'AG 20-29 Jahre': ['0', '<4', '30', '10'], 'AG 30-39 Jahre': ['0', '<4', '30', '10'],
'AG 40-49 Jahre': ['0', '<4', '30', '10'], 'AG 50-59 Jahre': ['0', '<4', '30', '10'],
'AG 60-69 Jahre': ['0', '<4', '30', '10'], 'AG 70-79 Jahre': ['0', '<4', '30', '10'],
'AG 80-89 Jahre': ['0', '<4', '30', '10'], 'AG 90+ Jahre': ['0', '<4', '30', '10']})
sheet3 = pd.DataFrame({'Sterbejahr': ['2020', '2020', '2020', '2020'], 'Sterbewoche': ['1', '3', '10', '51'],
'Männer, AG 0-19 Jahre': ['0', '<4', '30', '10'],
'Männer, AG 20-39 Jahre': ['0', '<4', '30', '10'],
'Männer, AG 40-59 Jahre': ['0', '<4', '30', '10'],
'Männer, AG 60-79 Jahre': ['0', '<4', '30', '10'],
'Männer, AG 80+ Jahre': ['0', '<4', '30', '10'],
'Frauen, AG 0-19 Jahre': ['0', '<4', '30', '10'],
'Frauen, AG 20-39 Jahre': ['0', '<4', '30', '10'],
'Frauen, AG 40-59 Jahre': ['0', '<4', '30', '10'],
'Frauen, AG 60-79 Jahre': ['0', '<4', '30', '10'],
'Frauen, AG 80+ Jahre': ['0', '<4', '30', '10']})
income_sheets = {'COVID_Todesfälle': sheet1, 'COVID_Todesfälle_KW_AG10': sheet2,
'COVID_Todesfälle_KW_AG20_G': sheet3}
path = os.path.join(out_folder, file_name)
dummy = pd.ExcelWriter(path)
for sheet_name in income_sheets.keys():
income_sheets[sheet_name].to_excel(dummy, sheet_name=sheet_name, index=False)
dummy.save()
def test_get_rki_data_with_estimations(self):
[read_data, make_plot, file_format, out_folder, no_raw] \
= [True, False, "json", self.path, False]
# write files which should be read in by program
directory = os.path.join(out_folder, 'Germany/')
gd.check_dir(directory)
self.write_rki_data(directory)
self.write_jh_data(directory)
self.write_weekly_deaths_xlsx_data(directory)
# check if expected files are written
self.assertEqual(len(os.listdir(self.path)), 1)
self.assertEqual(len(os.listdir(directory)), 2 + len(self.rki_files_to_change))
grdwd.get_rki_data_with_estimations(read_data, file_format, out_folder, no_raw, make_plot)
# check if expected files are written
self.assertEqual(len(os.listdir(self.path)), 1)
# 1 jh-file, 2*len(rki): original+estimated, 4 weekly deaths original+original&estimated+ageresolved+genderresolved
self.assertEqual(len(os.listdir(directory)), 1 + 2 * len(self.rki_files_to_change) + 4)
f_read = os.path.join(directory, "all_germany_rki_estimated.json")
df = pd.read_json(f_read)
confirmed = dd.EngEng['confirmed']
recovered = dd.EngEng['recovered']
deaths = dd.EngEng['deaths']
date = dd.EngEng['date']
recovered_estimated = recovered + "_estimated"
deaths_estimated = deaths + "_estimated"
data_list = df.columns.values.tolist()
self.assertEqual(data_list, [date, confirmed, deaths, recovered, recovered_estimated, deaths_estimated])
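# Estimated columns scale the RKI confirmed count by the JH recovered/confirmed (or deaths/confirmed) ratio of the same day; e.g. on 2020-01-30 JH reports confirmed=8, recovered=3, deaths=1.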
self.assertEqual(df[(df[date] == "2020-01-30")][recovered_estimated].item(), np.round(84 * 3. / 8.))
self.assertEqual(df[(df[date] == "2020-01-31")][recovered_estimated].item(), np.round(96 * 3. / 9.))
self.assertEqual(df[(df[date] == "2020-01-30")][deaths_estimated].item(), np.round(84 * 1. / 8.))
self.assertEqual(df[(df[date] == "2020-01-31")][deaths_estimated].item(), np.round(96 * 2. / 9.))
# gender specific data
gender = dd.EngEng['gender']
f_read = os.path.join(directory, "all_gender_rki_estimated.json")
df = pd.read_json(f_read)
data_list = df.columns.values.tolist()
self.assertEqual(data_list, [gender, date, confirmed, deaths, recovered, recovered_estimated, deaths_estimated])
self.assertEqual(df[(df[date] == "2020-01-28") & (df[gender] == "male")][recovered_estimated].item(),
np.round(40 * 3. / 6.))
self.assertEqual(df[(df[date] == "2020-01-29") & (df[gender] == "female")][recovered_estimated].item(),
np.round(33 * 3. / 7.))
self.assertEqual(df[(df[date] == "2020-01-31") & (df[gender] == "male")][recovered_estimated].item(),
np.round(53 * 3. / 9.))
self.assertEqual(df[(df[date] == "2020-01-31") & (df[gender] == "female")][recovered_estimated].item(),
np.round(43 * 3. / 9.))
self.assertEqual(df[(df[date] == "2020-01-28") & (df[gender] == "male")][deaths_estimated].item(),
np.round(40 * 1. / 6.))
self.assertEqual(df[(df[date] == "2020-01-29") & (df[gender] == "female")][deaths_estimated].item(),
np.round(33 * 1. / 7.))
self.assertEqual(df[(df[date] == "2020-01-31") & (df[gender] == "male")][deaths_estimated].item(),
np.round(53 * 2. / 9.))
self.assertEqual(df[(df[date] == "2020-01-31") & (df[gender] == "female")][deaths_estimated].item(),
np.round(43 * 2. / 9.))
def test_get_rki_data_with_estimations_age_data(self):
[read_data, make_plot, file_format, out_folder, no_raw] \
= [True, False, "json", self.path, False]
# write files which should be read in by program
directory = os.path.join(out_folder, 'Germany/')
gd.check_dir(directory)
self.write_rki_data(directory)
self.write_jh_data(directory)
self.write_weekly_deaths_xlsx_data(directory)
# check if expected files are written
self.assertEqual(len(os.listdir(self.path)), 1)
self.assertEqual(len(os.listdir(directory)), 2 + len(self.rki_files_to_change))
grdwd.get_rki_data_with_estimations(read_data, file_format, out_folder, no_raw, make_plot)
# check if expected files are written
self.assertEqual(len(os.listdir(self.path)), 1)
# 1 jh-file, 2*len(rki): original+estimated, 4 weekly deaths original+original&estimated+ageresolved+genderresolved
self.assertEqual(len(os.listdir(directory)), 1 + 2 * len(self.rki_files_to_change) + 4)
f_read = os.path.join(directory, "all_age_rki_estimated.json")
df = pd.read_json(f_read)
confirmed = dd.EngEng['confirmed']
recovered = dd.EngEng['recovered']
deaths = dd.EngEng['deaths']
date = dd.EngEng['date']
recovered_estimated = recovered + "_estimated"
deaths_estimated = deaths + "_estimated"
ages = ["A0-A04", "A05-A14", "A15-A34", "A35-A59", "A60-A79", "A80+"]
data_list = df.columns.values.tolist()
self.assertEqual(sorted(data_list),
sorted([date, confirmed, deaths, recovered, recovered_estimated, deaths_estimated, "Age_RKI"]))
age_values_rec = [3, 12, 58, 86]
index = 0
for value in ages:
mask = (df['Age_RKI'] == value) & (df[date] == "2020-01-31")
try:
self.assertEqual(df.loc[mask][recovered_estimated].item(), np.round(age_values_rec[index] * 3. / 9.))
self.assertEqual(df.loc[mask][deaths_estimated].item(), np.round(age_values_rec[index] * 2. / 9.))
except ValueError:
pass
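# Age groups with no entry for 2020-01-31 (A60-A79, A80+) yield an empty selection, so .item() raises ValueError and that check is skipped.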
index = index + 1
@patch('memilio.epidata.getRKIDatawithEstimations.grd.get_rki_data')
@patch('memilio.epidata.getRKIDatawithEstimations.gjd.get_jh_data')
@patch('memilio.epidata.getRKIDatawithEstimations.download_weekly_deaths_numbers_rki')
def test_get_rki_data_with_estimations_download(self, mock_get_jh_data, mock_get_rki_data,
mock_download_weekly_deaths_numbers_rki):
[read_data, make_plot, file_format, out_folder, no_raw] \
= [False, False, "json", self.path, False]
directory = os.path.join(out_folder, 'Germany/')
gd.check_dir(directory)
mock_get_rki_data.side_effect = self.write_rki_data(directory)
mock_get_jh_data.side_effect = self.write_jh_data(directory)
mock_download_weekly_deaths_numbers_rki.side_effect = self.write_weekly_deaths_xlsx_data(directory)
# write files which should be read in by program
directory = os.path.join(out_folder, 'Germany/')
gd.check_dir(directory)
rki_files_to_change = ["all_germany_rki", "all_gender_rki", "all_age_rki",
"all_state_rki", "all_state_gender_rki", "all_state_age_rki",
"all_county_rki", "all_county_gender_rki", "all_county_age_rki"]
for file_to_change in rki_files_to_change:
file_rki = file_to_change + ".json"
file_rki_with_path = os.path.join(directory, file_rki)
if file_to_change == "all_gender_rki":
with open(file_rki_with_path, 'w') as f:
f.write(self.str_all_gender_rki)
else:
with open(file_rki_with_path, 'w') as f:
f.write(self.str_all_germany_rki)
file_jh = "whole_country_Germany_jh.json"
file_jh_with_path = os.path.join(directory, file_jh)
with open(file_jh_with_path, 'w') as f:
f.write(self.str_whole_country_Germany_jh)
# check if expected files are written
self.assertEqual(len(os.listdir(self.path)), 1)
self.assertEqual(len(os.listdir(directory)), 2 + len(rki_files_to_change))
grdwd.get_rki_data_with_estimations(read_data, file_format, out_folder, no_raw, make_plot)
# check if expected files are written
self.assertEqual(len(os.listdir(self.path)), 1)
self.assertEqual(len(os.listdir(directory)), 1 + 2 * len(rki_files_to_change) + 4)
confirmed = dd.EngEng['confirmed']
recovered = dd.EngEng['recovered']
deaths = dd.EngEng['deaths']
date = dd.EngEng['date']
recovered_estimated = recovered + "_estimated"
deaths_estimated = deaths + "_estimated"
f_read = os.path.join(directory, "all_germany_rki_estimated.json")
df = pd.read_json(f_read)
data_list = df.columns.values.tolist()
self.assertEqual(data_list, [date, confirmed, deaths, recovered, recovered_estimated, deaths_estimated])
self.assertEqual(df[(df[date] == "2020-01-30")][recovered_estimated].item(), np.round(84 * 3. / 8.))
self.assertEqual(df[(df[date] == "2020-01-31")][recovered_estimated].item(), np.round(96 * 3. / 9.))
self.assertEqual(df[(df[date] == "2020-01-30")][deaths_estimated].item(), np.round(84 * 1. / 8.))
self.assertEqual(df[(df[date] == "2020-01-31")][deaths_estimated].item(), np.round(96 * 2. / 9.))
# gender specific data
gender = dd.EngEng['gender']
f_read = os.path.join(directory, "all_gender_rki_estimated.json")
df = pd.read_json(f_read)
data_list = df.columns.values.tolist()
self.assertEqual(data_list, [gender, date, confirmed, deaths, recovered, recovered_estimated, deaths_estimated])
self.assertEqual(df[(df[date] == "2020-01-28") & (df[gender] == "male")][recovered_estimated].item(),
np.round(40 * 3. / 6.))
self.assertEqual(df[(df[date] == "2020-01-29") & (df[gender] == "female")][recovered_estimated].item(),
np.round(33 * 3. / 7.))
self.assertEqual(df[(df[date] == "2020-01-31") & (df[gender] == "male")][recovered_estimated].item(),
np.round(53 * 3. / 9.))
self.assertEqual(df[(df[date] == "2020-01-31") & (df[gender] == "female")][recovered_estimated].item(),
np.round(43 * 3. / 9.))
self.assertEqual(df[(df[date] == "2020-01-28") & (df[gender] == "male")][deaths_estimated].item(),
np.round(40 * 1. / 6.))
self.assertEqual(df[(df[date] == "2020-01-29") & (df[gender] == "female")][deaths_estimated].item(),
np.round(33 * 1. / 7.))
self.assertEqual(df[(df[date] == "2020-01-31") & (df[gender] == "male")][deaths_estimated].item(),
np.round(53 * 2. / 9.))
self.assertEqual(df[(df[date] == "2020-01-31") & (df[gender] == "female")][deaths_estimated].item(),
np.round(43 * 2. / 9.))
def test_download_weekly_rki(self):
directory = os.path.join(self.path, 'Germany/')
gd.check_dir(directory)
self.write_weekly_deaths_xlsx_data(directory, file_name='RKI_deaths_weekly_fake.xlsx')
self.assertEqual(len(os.listdir(self.path)), 1)
self.assertEqual(len(os.listdir(directory)), 1)
with patch('requests.get') as mock_request:
df = gd.loadExcel(
'RKI_deaths_weekly_fake', apiUrl=directory, extension='.xlsx',
param_dict={"sheet_name": 'COVID_Todesfälle', "header": 0,
"engine": 'openpyxl'})
towrite = io.BytesIO()
df.to_excel(towrite, index=False)
towrite.seek(0)
mock_request.return_value.content = towrite.read()
grdwd.download_weekly_deaths_numbers_rki(directory)
self.assertEqual(len(os.listdir(self.path)), 1)
self.assertEqual(len(os.listdir(directory)), 2)
df_real_deaths_per_week = gd.loadExcel(
'RKI_deaths_weekly', apiUrl=directory, extension='.xlsx',
param_dict={"sheet_name": 0, "header": 0, "engine": 'openpyxl'})
self.assertEqual(df_real_deaths_per_week.shape, (4, 3))
self.assertEqual( | pd.to_numeric(df_real_deaths_per_week['Sterbejahr']) | pandas.to_numeric |
# Copyright 2019-2020 the ProGraML authors.
#
# Contact <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export per-epoch statistics from machine learning logs.
Machine learning jobs write log files summarizing the performance of the model
after each training epoch. The log directory is printed at the start of
execution of a machine learning job, for example:
$ bazel run //tasks/dataflow:train_ggnn
Writing logs to ~/programl/dataflow/logs/ggnn/reachability/foo@20:05:16T12:53:42
...
This script reads one of these log directories and prints a table of per-epoch
stats to stdout. For example:
$ export-ml-logs --path=~/programl/dataflow/ml/logs/foo@20:05:16T12:53:42
CSV format can be exported using --fmt=csv:
$ export-ml-logs --path=~/programl/dataflow/ml/logs/foo@20:05:16T12:53:42 \\
--fmt=csv > stats.csv
"""
import subprocess
import sys
from pathlib import Path
from typing import Optional
import pandas as pd
from absl import app, flags, logging
from tabulate import tabulate
from programl.proto import epoch_pb2
from programl.util.py import pbutil, progress
from programl.util.py.init_app import init_app
flags.DEFINE_string(
"path",
str(Path("~/programl/dataflow").expanduser()),
"The dataset directory root.",
)
flags.DEFINE_string("fmt", "txt", "Stdout format.")
flags.DEFINE_string("worksheet", "Sheet1", "The name of the worksheet to export to")
FLAGS = flags.FLAGS
def ReadEpochLogs(path: Path) -> Optional[epoch_pb2.EpochList]:
if not (path / "epochs").is_dir():
return None
epochs = []
for path in (path / "epochs").iterdir():
epoch = pbutil.FromFile(path, epoch_pb2.EpochList())
# Skip files without data.
if not len(epoch.epoch):
continue
epochs += list(epoch.epoch)
return epoch_pb2.EpochList(epoch=sorted(epochs, key=lambda x: x.epoch_num))
def EpochsToDataFrame(epochs: epoch_pb2.EpochList) -> Optional[pd.DataFrame]:
def V(results, field):
if results.batch_count:
return getattr(results, field)
else:
return None
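# V() yields None for splits that produced no batches, so missing train/val/test results show up as null values in the DataFrame.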
rows = []
for e in epochs.epoch:
rows.append(
{
"epoch_num": e.epoch_num,
"walltime_seconds": e.walltime_seconds,
"train_graph_count": V(e.train_results, "graph_count"),
"train_batch_count": V(e.train_results, "batch_count"),
"train_target_count": V(e.train_results, "target_count"),
"train_learning_rate": V(e.train_results, "mean_learning_rate"),
"train_loss": V(e.train_results, "mean_loss"),
"train_accuracy": V(e.train_results, "mean_accuracy"),
"train_precision": V(e.train_results, "mean_precision"),
"train_recall": V(e.train_results, "mean_recall"),
"train_f1": V(e.train_results, "mean_f1"),
"train_walltime_seconds": V(e.train_results, "walltime_seconds"),
"val_graph_count": V(e.val_results, "graph_count"),
"val_batch_count": V(e.val_results, "batch_count"),
"val_target_count": V(e.val_results, "target_count"),
"val_loss": V(e.val_results, "mean_loss"),
"val_accuracy": V(e.val_results, "mean_accuracy"),
"val_precision": V(e.val_results, "mean_precision"),
"val_recall": V(e.val_results, "mean_recall"),
"val_f1": V(e.val_results, "mean_f1"),
"val_walltime_seconds": V(e.val_results, "walltime_seconds"),
"test_graph_count": V(e.test_results, "graph_count"),
"test_batch_count": V(e.test_results, "batch_count"),
"test_target_count": V(e.test_results, "target_count"),
"test_loss": V(e.test_results, "mean_loss"),
"test_accuracy": V(e.test_results, "mean_accuracy"),
"test_precision": V(e.test_results, "mean_precision"),
"test_recall": V(e.test_results, "mean_recall"),
"test_f1": V(e.test_results, "mean_f1"),
"test_walltime_seconds": V(e.test_results, "walltime_seconds"),
}
)
if not len(rows):
return
df = | pd.DataFrame(rows) | pandas.DataFrame |
#!/usr/bin/env python3
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 2 07:38:29 2018
@author: peifeng
"""
from __future__ import print_function, absolute_import, division
import re
from datetime import datetime
from collections import defaultdict
import multiprocessing as mp
from pathlib import Path
import pandas as pd
#import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import plotutils as pu
import compmem as cm
# 2018-09-01 06:22:21.180029: Step 341, loss=3.60 (33.8 examples/sec; 0.740 sec/batch)
ptn_iter = re.compile(r"""(?P<timestamp>.+): \s [sS]tep \s (?P<Step>\d+),\s
(loss|perplexity) .* \(
(?P<Speed>[\d.]+) \s examples/sec; \s
(?P<Duration>[\d.]+) \s sec/batch\)?""", re.VERBOSE)
def parse_iterations(path):
path, _ = cm.find_file(path)
iterations = []
with cm.open_file(path) as f:
for line in f:
line = line.rstrip('\n')
m = ptn_iter.match(line)
if m:
iterations.append(m.groupdict())
assert len(iterations) > 0
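# Presumably so downstream throughput plots start and end at zero, a zero-speed copy of the last entry is appended 1 microsecond later and a zero-speed copy of the first entry is prepended 1 microsecond earlier.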
fake = {}
fake.update(iterations[-1])
fake['Speed'] = 0
fake['timestamp'] = (pd.to_datetime(fake['timestamp']) + pd.Timedelta(1, 'us')).strftime('%Y-%m-%d %H:%M:%S.%f')
iterations.append(fake)
fake = {}
fake.update(iterations[0])
fake['Speed'] = 0
fake['timestamp'] = (pd.to_datetime(fake['timestamp']) - pd.Timedelta(1, 'us')).strftime('%Y-%m-%d %H:%M:%S.%f')
iterations[:0] = [fake]
df = | pd.DataFrame(iterations) | pandas.DataFrame |
from surprise import Reader, Dataset, SVDpp, accuracy
from surprise.model_selection import cross_validate, train_test_split
import pandas as pd
users_data = "BX-Users.csv"
books_data = "BX-Books.csv"
ratings_data = "BX-Books-Ratings.csv"
users = pd.read_csv("BX-Users.csv", sep=';', error_bad_lines=False, encoding="latin-1")
users.columns = ['userID', 'Location', 'Age']
books_column = ['']
rating = | pd.read_csv("BX-Book-Ratings.csv", sep=';', error_bad_lines=False, encoding='latin-1', low_memory=False) | pandas.read_csv |
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pypatent
def query_patents():
"""
Find Rooster customers in the patent databases
Find all operators in the mesenchymal/exosome sector
Identify operators not citing Rooster
"""
print("running search_patents")
# Name searchers
searchNames = []
searchNames.append('allogenic') # Identify domain space
searchNames.append('autologous') # Identify domain space
for name in searchNames:
# help on USPTO search terms
# https://patft.uspto.gov/netahtml/PTO/help/helpflds.htm
searchTerms = [name, 'mesenchymal']
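# Each USPTO query pairs one therapy keyword with 'mesenchymal' to narrow the search to the target domain.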
query_USPTO(name, searchTerms)
print("completed search_patents")
def query_USPTO(searchName, searchTerms):
"""
Query the USPTO with the search terms
Save as a dataframe
"""
df = | pd.DataFrame() | pandas.DataFrame |
from tests.deap.conftest import building_area, building_volume
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from ber_public.deap import vent
def test_calculate_infiltration_rate_due_to_openings():
"""Output is equivalent to DEAP 4.2.0 example A"""
building_volume = pd.Series([321, 0, 100, 200])
no_chimneys = pd.Series([0, 0, 0, 1])
no_open_flues = pd.Series([0, 0, 0, 1])
no_fans = pd.Series([1, 0, 0, 1])
no_room_heaters = pd.Series([0, 0, 0, 1])
is_draught_lobby = pd.Series(["NO", "NO", "YES", "NO"])
expected_output = pd.Series([0.08, 0, 0, 0.6])
output = vent._calculate_infiltration_rate_due_to_openings(
building_volume=building_volume,
no_chimneys=no_chimneys,
no_open_flues=no_open_flues,
no_fans=no_fans,
no_room_heaters=no_room_heaters,
is_draught_lobby=is_draught_lobby,
draught_lobby_boolean=vent.YES_NO,
)
assert_series_equal(output.round(2), expected_output)
def test_calculate_infiltration_rate_due_to_structure():
"""Output is equivalent to DEAP 4.2.0 example A"""
is_permeability_tested = pd.Series(["YES", "NO", "NO"])
permeability_test_result = pd.Series([0.15, np.nan, np.nan])
no_storeys = pd.Series([np.nan, 2, 1])
percentage_draught_stripped = pd.Series([np.nan, 100, 75])
is_floor_suspended = pd.Series(
[np.nan, "No ", "Yes (Unsealed) "]
)
structure_type = pd.Series(
[np.nan, "Masonry ", "Timber or Steel Frame "]
)
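# Row 1 has a permeability test, so its measured value is used directly; rows 2-3 fall back to the estimate built from storeys, draught stripping, floor and structure type.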
expected_output = pd.Series([0.15, 0.5, 0.55])
output = vent._calculate_infiltration_rate_due_to_structure(
is_permeability_tested=is_permeability_tested,
permeability_test_result=permeability_test_result,
no_storeys=no_storeys,
percentage_draught_stripped=percentage_draught_stripped,
is_floor_suspended=is_floor_suspended,
structure_type=structure_type,
suspended_floor_types=vent.SUSPENDED_FLOOR_TYPES,
structure_types=vent.STRUCTURE_TYPES,
permeability_test_boolean=vent.YES_NO,
)
assert_series_equal(output.round(2), expected_output)
def test_calculate_infiltration_rate(monkeypatch):
"""Output is equivalent to DEAP 4.2.0 example A"""
no_sides_sheltered = pd.Series([2, 2])
def _mock_calculate_infiltration_rate_due_to_openings(*args, **kwargs):
return pd.Series([0.08, 0.08])
def _mock_calculate_infiltration_rate_due_to_structure(*args, **kwargs):
return pd.Series([0.15, 0.5])
monkeypatch.setattr(
vent,
"_calculate_infiltration_rate_due_to_openings",
_mock_calculate_infiltration_rate_due_to_openings,
)
monkeypatch.setattr(
vent,
"_calculate_infiltration_rate_due_to_structure",
_mock_calculate_infiltration_rate_due_to_structure,
)
expected_output = pd.Series([0.2, 0.49])
output = vent.calculate_infiltration_rate(
no_sides_sheltered=no_sides_sheltered,
building_volume=None,
no_chimneys=None,
no_open_flues=None,
no_fans=None,
no_room_heaters=None,
is_draught_lobby=None,
is_permeability_tested=None,
permeability_test_result=None,
no_storeys=None,
percentage_draught_stripped=None,
is_floor_suspended=None,
structure_type=None,
draught_lobby_boolean=None,
suspended_floor_types=None,
structure_types=None,
permeability_test_boolean=None,
)
assert_series_equal(output.round(2), expected_output)
def test_calculate_effective_air_rate_change():
"""Output is equivalent to DEAP 4.2.0 example A"""
n_methods = 6
ventilation_method = pd.Series(
[
"Natural vent.",
"Pos input vent.- loft",
"Pos input vent.- outside",
"Whole house extract vent.",
"Bal.whole mech.vent no heat re",
"Bal.whole mech.vent heat recvr",
]
)
building_volume = pd.Series([321] * n_methods)
infiltration_rate = pd.Series([0.2] * n_methods)
heat_exchanger_efficiency = pd.Series([0] * n_methods)
expected_output = pd.Series([0.52, 0.58, 0.5, 0.5, 0.7, 0.7])
output = vent.calculate_effective_air_rate_change(
ventilation_method=ventilation_method,
building_volume=building_volume,
infiltration_rate=infiltration_rate,
heat_exchanger_efficiency=heat_exchanger_efficiency,
ventilation_method_names=vent.VENTILATION_METHODS,
)
assert_series_equal(output.round(2), expected_output)
def test_calculate_ventilation_heat_loss(monkeypatch):
"""Output is equivalent to DEAP 4.2.0 example A"""
building_volume = | pd.Series([321]) | pandas.Series |
import pandas
import numpy
import numpy.random
import typing
import itertools
import matplotlib.pyplot
import scipy.stats
DataFrame = typing.TypeVar('pandas.core.frame.DataFrame')
class Corpus(object):
def __init__(self, design_file_name: str, participant_file_name: str, genealogy_levels: list, weights: list) -> None:
# Read in the data
self.design_data = | pandas.read_csv(design_file_name) | pandas.read_csv |
import os
from datetime import datetime
import joblib
import numpy as np
import pandas as pd
from rfpimp import permutation_importances
from sklearn.metrics import mean_absolute_error
from sklearn.base import clone
from _08_stats import imp_df, drop_col_feat_imp, mae
"""
Module that generates statistics of the trained model.
"""
def generate_stats(simulation_folder):
"""
Generates statistics for the trained model.
:param simulation_folder: str, name of the subfolder for the given data sets
:return: None; statistics are saved to disk as a side effect
"""
Start = datetime.now()
project_directory = os.path.dirname(os.getcwd())
path_to_data = os.path.join(project_directory, "Data", simulation_folder)
path_to_characteristics_data = os.path.join(path_to_data, "Characteristics")
path_to_scenario = os.path.join(project_directory, "Models", simulation_folder,
"XGB", "Model")
path_to_stats = os.path.join(path_to_scenario, "Stats")
if not os.path.exists(path_to_stats):
os.makedirs(path_to_stats)
path_to_model = os.path.join(path_to_scenario, "model.sav")
X_train = np.load(os.path.join(path_to_characteristics_data, "X_train.npy"), allow_pickle=True)
X_test = np.load(os.path.join(path_to_characteristics_data, "X_test.npy"), allow_pickle=True)
y_train = np.load(os.path.join(path_to_characteristics_data, "y_train.npy"), allow_pickle=True)
y_test = np.load(os.path.join(path_to_characteristics_data, "y_test.npy"), allow_pickle=True)
# TODO: fix the save of the data to get variable names from there
characteristics_data = pd.read_csv(os.path.join(path_to_characteristics_data, "characteristics.csv"))
model = joblib.load(path_to_model)
data_type = ["Train", "Test"]
for dt in data_type:
X = X_train if dt == "Train" else X_test
y = y_train if dt == "Train" else y_test
# Making the Confusion Matrix
y_pred = model.predict(X)
## TODO: mae per class
print("mae")
test_train_mae = mean_absolute_error(y, y_pred)
df = pd.DataFrame({'mae': [test_train_mae]})
df.to_csv(os.path.join(path_to_stats, "MAE_" + dt + ".csv"))
# feature importances
importances = model.feature_importances_
column_names = characteristics_data.drop(["file", "motion", "diff_type"], axis=1).columns.values
df = imp_df(column_names, importances)
df.to_csv(os.path.join(path_to_stats, "Feature_importances.csv"), index=False)
# permutation importances
X_train_df = pd.DataFrame(X_train, columns=column_names)
y_train_df = pd.DataFrame(y_train)
md = clone(model)
md.fit(X_train_df,y_train_df)
df = permutation_importances(md, X_train_df, y_train_df, mae)
df.to_csv(os.path.join(path_to_stats, "Permutation_fi.csv"), index=True)
# drop column feature importance
X_train_df = pd.DataFrame(X_train, columns=column_names)
df = drop_col_feat_imp(model, X_train_df, y_train)
df.to_csv(os.path.join(path_to_stats, "Drop_column_fi.csv"), index=False)
End = datetime.now()
ExecutedTime = End - Start
df = | pd.DataFrame({'ExecutedTime': [ExecutedTime]}) | pandas.DataFrame |
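# --- Illustrative sketch (not part of the original project) ---
# `imp_df`, `drop_col_feat_imp` and `mae` are imported from _08_stats and are not
# shown here. A drop-column importance helper is typically implemented roughly as
# below (a sketch under assumptions, not the authors' actual code); for an error
# metric such as MAE, flip the sign so that larger values mean "more important".
def drop_column_importance_sketch(model, X_train, y_train, scorer):
    """Refit once per dropped column and report the change in scorer(model, X, y)."""
    base = clone(model)
    base.fit(X_train, y_train)
    baseline = scorer(base, X_train, y_train)
    rows = []
    for col in X_train.columns:
        reduced_X = X_train.drop(columns=[col])
        reduced = clone(model)
        reduced.fit(reduced_X, y_train)
        rows.append({"feature": col, "importance": baseline - scorer(reduced, reduced_X, y_train)})
    return pd.DataFrame(rows).sort_values("importance", ascending=False)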
__author__ = 'brendan'
import main
import pandas as pd
import numpy as np
from datetime import datetime as dt
from matplotlib import pyplot as plt
import random
import itertools
import time
import dateutil
from datetime import timedelta
cols = ['BoP FA Net', 'BoP FA OI Net', 'BoP FA PI Net', 'CA % GDP']
raw_data = pd.read_csv('raw_data/BoP_UK.csv', index_col=0, parse_dates=True)
data = pd.DataFrame(raw_data.iloc[:240, :4].fillna(0)).astype(float)
data.columns = cols
data.index = pd.date_range('1955-01-01', '2014-12-31', freq='Q')
raw_eur = pd.read_csv('raw_data/EUR_CA.csv', index_col=0, parse_dates=True)
raw_eur = raw_eur[::-1]
raw_eur.index = pd.date_range('1999-01-01', '2015-03-01', freq='M')
raw_eur.index.name = 'Date'
raw_eur = raw_eur.resample('Q', how='sum')
data_eur_gdp_q = pd.read_csv('raw_data/MACRO_DATA.csv', index_col=0, parse_dates=True)['EUR_GDP_Q'].dropna()
data_eur_gdp_q.columns = ['EUR_GDP_Q']
data_eur_gdp_q.index.name = 'Date'
data_eur_gdp_q = data_eur_gdp_q.loc['1999-03-31':]
end_gdp = pd.DataFrame(data=[data_eur_gdp_q.iloc[-1], data_eur_gdp_q.iloc[-1],
data_eur_gdp_q.iloc[-1], data_eur_gdp_q.iloc[-1]],
index=pd.date_range('2014-06-30', '2015-03-31', freq='Q'))
eur_gdp = pd.concat([data_eur_gdp_q, end_gdp])
eur_gdp.columns = ['EUR_CA']
eur_ca = raw_eur.div(eur_gdp)
eur_ca.columns = ['EUR CA']
uk_ca = data['CA % GDP'] / 100.0
uk_ca.columns = ['UK CA']
uk_fa = pd.DataFrame(data.iloc[:, :3])
uk_gdp = pd.read_csv('raw_data/MACRO_DATA.csv', index_col=0, parse_dates=True)['UK_GDP_Q'].dropna()
uk_gdp_final = pd.concat([uk_gdp, pd.DataFrame(data=[uk_gdp.iloc[-1], uk_gdp.iloc[-1]],
index=pd.date_range('2014-09-01', '2014-12-31', freq='Q'))])
uk_fa_gdp = pd.DataFrame(index=uk_gdp_final.index)
uk_fa_gdp['UK FA Net'] = uk_fa['BoP FA Net'] / uk_gdp_final
uk_fa_gdp['UK FA OI'] = uk_fa['BoP FA OI Net'] / uk_gdp_final
uk_fa_gdp['UK FA PI'] = uk_fa['BoP FA PI Net'] / uk_gdp_final
print(eur_gdp)
eur_fa = pd.read_csv('raw_data/EUR_FA.csv', index_col=0, header=0, parse_dates=True).dropna().astype(float)
eur_fa = eur_fa.iloc[::-1]
print(eur_fa)
eur_fa.index = | pd.date_range('2009-01-01', '2015-02-28', freq='M') | pandas.date_range |
import pandas as pd
import numpy as np
import sys
import os
from datetime import datetime, timedelta
from email.utils import parsedate_tz
pathN = sys.argv[1]
directory = os.fsencode(pathN)
dset = | pd.DataFrame(columns=['tweet_id','date','time']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
author: <NAME>
email: <EMAIL>
license: Apache License 2.0
"""
import pandas as pd
import xgboost as xgb
import pickle
from sklearn.metrics import recall_score
from gossipcat.Logging import get_logger
from gossipcat.Report import Visual
def date_delta(date, delta, ago=True, format='%Y-%m-%d'):
from datetime import datetime, timedelta
if ago:
return (datetime.strptime(date, format) - timedelta(days=delta)).strftime(format)
else:
return (datetime.strptime(date, format) + timedelta(days=delta)).strftime(format)
def time_fold(df, col_date, n_splits=12, delta=30*3, step=30*1):
"""provides train/valid indices to split time series data samples that are observed at
fixed time intervals, in train/valid sets.
Arg:
df: training data frame
col_date: date column for splitting
n_splits: number of splits
delta: test size in days
step: length of sliding window
Returns:
folds: train/valid indices
"""
date_max = df[col_date].max()
date_min = df[col_date].min()
date_range = round((pd.to_datetime(date_max) - | pd.to_datetime(date_min) | pandas.to_datetime |
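# --- Illustrative sketch (not part of the original file) ---
# time_fold is truncated above. A minimal sliding-window splitter with the same
# intent (train on everything before a cutoff, validate on the following `delta`
# days, then slide the window back by `step` days per fold) might look like this:
def sliding_time_folds(df, col_date, n_splits=12, delta=90, step=30):
    dates = pd.to_datetime(df[col_date])
    last_date = dates.max()
    folds = []
    for i in range(n_splits):
        valid_end = last_date - pd.Timedelta(days=i * step)
        valid_start = valid_end - pd.Timedelta(days=delta)
        train_mask = (dates < valid_start).to_numpy()
        valid_mask = ((dates >= valid_start) & (dates <= valid_end)).to_numpy()
        folds.append((df.index[train_mask], df.index[valid_mask]))
    return folds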
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [md]
# # Predicting costumer subscription
# ## Training ML Models
# %%
# Load the preprocessed data
import pandas as pd
data = pd.read_pickle("data/preprocessed_data.pkl")
data.head()
# %%
# Since our dataset is imbalanced, calculate a majority baseline.
from sklearn.dummy import DummyClassifier
SEED = 42
majority = DummyClassifier(random_state=SEED)
# %%
# Use SVM to train the model.
# SVM typically leads to near-optimal results in linearly separable problems.
from sklearn.svm import LinearSVC
svm = LinearSVC(dual=False, random_state=SEED)
# %%
# Use kNN to train the model.
# kNN may work well when a general function cannot be learned
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
# %%
# Use a random forest to train the model.
# Random forests may offer explainable solutions.
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=10, random_state=SEED)
# %%
# Use logistic regression to train the model.
# Logistic regression is a strong baseline for binary classification problems
# and also provides an explainable model.
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(random_state=SEED, max_iter=1000)
# %%
# Use a boosting algorithm to train the model.
# Boosting may generalize better on test data.
from sklearn.ensemble import GradientBoostingClassifier
# HistGradientBoostingClassifier(categorical_features=[0])
gb = GradientBoostingClassifier(random_state=SEED)
# %%
# Drop columns used only for visualization
data = data.drop(['y_yes', 'age_group', 'duration_min_group'], axis=1)
# %%
# Encode categorical data
dummies = pd.get_dummies(data)
dummies.head()
# %%
# Create training and test sets
X_train = dummies.drop(['y_no', 'y_yes'], axis=1)
y_train = dummies['y_yes']
# %%
# Persist data for reuse
X_train.to_pickle("data/X_train.pkl")
y_train.to_pickle("data/y_train.pkl")
# %%
# Iterate over classifier to generate repors
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.metrics import classification_report
from notebooks.util import plot_confusion_matrix
SCORING = 'f1_macro'
classifiers = {
'Majority': majority,
'SVM': svm,
'kNN': knn,
'Random Forest': rf,
'LR': lr,
'Gradient Boosting': gb
}
results = pd.DataFrame(columns=[SCORING, "model"])
for k, v in classifiers.items():
# Cross-validate the dataset.
clf_scores = cross_val_score(v, X_train, y_train, scoring=SCORING, cv=10)
results = pd.concat([results, | pd.DataFrame({SCORING: clf_scores, 'model': k}) | pandas.DataFrame |
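# --- Illustrative note (not part of the original notebook) ---
# The cell above is truncated mid-statement. Once `results` holds one row per
# fold and model, a typical comparison is a simple aggregation, e.g.:
# results.groupby("model")[SCORING].agg(["mean", "std"]).sort_values("mean", ascending=False)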
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
from pkg_resources import resource_stream
import numpy as np
import pandas as pd
import pytest
import pytz
from eemeter.transform import (
as_freq,
clean_caltrack_billing_data,
downsample_and_clean_caltrack_daily_data,
clean_caltrack_billing_daily_data,
day_counts,
get_baseline_data,
get_reporting_data,
get_terms,
remove_duplicates,
NoBaselineDataError,
NoReportingDataError,
overwrite_partial_rows_with_nan,
)
def test_as_freq_not_series(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
with pytest.raises(ValueError):
as_freq(meter_data, freq="H")
def test_as_freq_hourly(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_hourly = as_freq(meter_data.value, freq="H")
assert as_hourly.shape == (18961,)
assert round(meter_data.value.sum(), 1) == round(as_hourly.sum(), 1) == 21290.2
def test_as_freq_daily(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 21290.2
def test_as_freq_daily_all_nones_instantaneous(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data["value"] = np.nan
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D", series_type="instantaneous")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 0
def test_as_freq_daily_all_nones(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data["value"] = np.nan
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 0
def test_as_freq_month_start(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_month_start = as_freq(meter_data.value, freq="MS")
assert as_month_start.shape == (28,)
assert round(meter_data.value.sum(), 1) == round(as_month_start.sum(), 1) == 21290.2
def test_as_freq_hourly_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_hourly = as_freq(temperature_data, freq="H", series_type="instantaneous")
assert as_hourly.shape == (19417,)
assert round(temperature_data.mean(), 1) == round(as_hourly.mean(), 1) == 54.6
def test_as_freq_daily_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_daily = as_freq(temperature_data, freq="D", series_type="instantaneous")
assert as_daily.shape == (811,)
assert abs(temperature_data.mean() - as_daily.mean()) <= 0.1
def test_as_freq_month_start_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_month_start = as_freq(temperature_data, freq="MS", series_type="instantaneous")
assert as_month_start.shape == (29,)
assert round(as_month_start.mean(), 1) == 53.4
def test_as_freq_daily_temperature_monthly(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_data = temperature_data.groupby(pd.Grouper(freq="MS")).mean()
assert temperature_data.shape == (28,)
as_daily = as_freq(temperature_data, freq="D", series_type="instantaneous")
assert as_daily.shape == (824,)
assert round(as_daily.mean(), 1) == 54.5
def test_as_freq_empty():
meter_data = pd.DataFrame({"value": []})
empty_meter_data = as_freq(meter_data.value, freq="H")
assert empty_meter_data.empty
def test_as_freq_perserves_nulls(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
monthly_with_nulls = meter_data[meter_data.index.year != 2016].reindex(
meter_data.index
)
daily_with_nulls = as_freq(monthly_with_nulls.value, freq="D")
assert (
round(monthly_with_nulls.value.sum(), 2)
== round(daily_with_nulls.sum(), 2)
== 11094.05
)
assert monthly_with_nulls.value.isnull().sum() == 13
assert daily_with_nulls.isnull().sum() == 365
def test_day_counts(il_electricity_cdd_hdd_billing_monthly):
data = il_electricity_cdd_hdd_billing_monthly["meter_data"].value
counts = day_counts(data.index)
assert counts.shape == (27,)
assert counts.iloc[0] == 29.0
assert pd.isnull(counts.iloc[-1])
assert counts.sum() == 790.0
def test_day_counts_empty_series():
index = pd.DatetimeIndex([])
index.freq = None
data = pd.Series([], index=index)
counts = day_counts(data.index)
assert counts.shape == (0,)
def test_get_baseline_data(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
baseline_data, warnings = get_baseline_data(meter_data)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 0
def test_get_baseline_data_with_timezones(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data.tz_convert("America/New_York")
)
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data.tz_convert("Australia/Sydney")
)
assert len(warnings) == 0
def test_get_baseline_data_with_end(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_data, warnings = get_baseline_data(meter_data, end=blackout_start_date)
assert meter_data.shape != baseline_data.shape == (8761, 1)
assert len(warnings) == 0
def test_get_baseline_data_with_end_no_max_days(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_data, warnings = get_baseline_data(
meter_data, end=blackout_start_date, max_days=None
)
assert meter_data.shape != baseline_data.shape == (9595, 1)
assert len(warnings) == 0
def test_get_baseline_data_empty(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
with pytest.raises(NoBaselineDataError):
get_baseline_data(meter_data, end=pd.Timestamp("2000").tz_localize("UTC"))
def test_get_baseline_data_start_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
start = meter_data.index.min() - timedelta(days=1)
baseline_data, warnings = get_baseline_data(meter_data, start=start, max_days=None)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_baseline_data.gap_at_baseline_start"
assert (
warning.description
== "Data does not have coverage at requested baseline start date."
)
assert warning.data == {
"data_start": "2015-11-22T06:00:00+00:00",
"requested_start": "2015-11-21T06:00:00+00:00",
}
def test_get_baseline_data_end_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
end = meter_data.index.max() + timedelta(days=1)
baseline_data, warnings = get_baseline_data(meter_data, end=end, max_days=None)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_baseline_data.gap_at_baseline_end"
assert (
warning.description
== "Data does not have coverage at requested baseline end date."
)
assert warning.data == {
"data_end": "2018-02-08T06:00:00+00:00",
"requested_end": "2018-02-09T06:00:00+00:00",
}
def test_get_baseline_data_with_overshoot(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=32,
allow_billing_period_overshoot=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=32,
allow_billing_period_overshoot=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_with_ignored_gap(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_with_overshoot_and_ignored_gap(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_n_days_billing_period_overshoot(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2017, 11, 9, tzinfo=pytz.UTC),
max_days=45,
allow_billing_period_overshoot=True,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 526.25
assert len(warnings) == 0
def test_get_baseline_data_too_far_from_date(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
end_date = datetime(2020, 11, 9, tzinfo=pytz.UTC)
max_days = 45
baseline_data, warnings = get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 1393.4
assert len(warnings) == 0
with pytest.raises(NoBaselineDataError):
get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
baseline_data, warnings = get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (3, 1)
assert round(baseline_data.value.sum(), 2) == 2043.92
assert len(warnings) == 0
# Includes 3 data points because data at index -3 is closer to start target
# then data at index -2
start_target = baseline_data.index[-1] - timedelta(days=max_days)
assert abs((baseline_data.index[0] - start_target).days) < abs(
(baseline_data.index[1] - start_target).days
)
with pytest.raises(NoBaselineDataError):
get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
allow_billing_period_overshoot=True,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
def test_get_reporting_data(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
reporting_data, warnings = get_reporting_data(meter_data)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 0
def test_get_reporting_data_with_timezones(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data.tz_convert("America/New_York")
)
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data.tz_convert("Australia/Sydney")
)
assert len(warnings) == 0
def test_get_reporting_data_with_start(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_data, warnings = get_reporting_data(meter_data, start=blackout_end_date)
assert meter_data.shape != reporting_data.shape == (8761, 1)
assert len(warnings) == 0
def test_get_reporting_data_with_start_no_max_days(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_data, warnings = get_reporting_data(
meter_data, start=blackout_end_date, max_days=None
)
assert meter_data.shape != reporting_data.shape == (9607, 1)
assert len(warnings) == 0
def test_get_reporting_data_empty(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
with pytest.raises(NoReportingDataError):
get_reporting_data(meter_data, start=pd.Timestamp("2030").tz_localize("UTC"))
def test_get_reporting_data_start_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
start = meter_data.index.min() - timedelta(days=1)
reporting_data, warnings = get_reporting_data(
meter_data, start=start, max_days=None
)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_reporting_data.gap_at_reporting_start"
assert (
warning.description
== "Data does not have coverage at requested reporting start date."
)
assert warning.data == {
"data_start": "2015-11-22T06:00:00+00:00",
"requested_start": "2015-11-21T06:00:00+00:00",
}
def test_get_reporting_data_end_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
end = meter_data.index.max() + timedelta(days=1)
reporting_data, warnings = get_reporting_data(meter_data, end=end, max_days=None)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_reporting_data.gap_at_reporting_end"
assert (
warning.description
== "Data does not have coverage at requested reporting end date."
)
assert warning.data == {
"data_end": "2018-02-08T06:00:00+00:00",
"requested_end": "2018-02-09T06:00:00+00:00",
}
def test_get_reporting_data_with_overshoot(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=30,
allow_billing_period_overshoot=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=30,
allow_billing_period_overshoot=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_reporting_data_with_ignored_gap(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_reporting_data_with_overshoot_and_ignored_gap(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_terms_unrecognized_method(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
get_terms(meter_data.index, term_lengths=[365], method="unrecognized")
def test_get_terms_unsorted_index(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
get_terms(meter_data.index[::-1], term_lengths=[365])
def test_get_terms_bad_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
terms = get_terms(
meter_data.index,
term_lengths=[60, 60, 60],
term_labels=["abc", "def"], # too short
)
def test_get_terms_default_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(meter_data.index, term_lengths=[60, 60, 60])
assert [t.label for t in terms] == ["term_001", "term_002", "term_003"]
def test_get_terms_custom_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(
meter_data.index, term_lengths=[60, 60, 60], term_labels=["abc", "def", "ghi"]
)
assert [t.label for t in terms] == ["abc", "def", "ghi"]
def test_get_terms_empty_index_input(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(meter_data.index[:0], term_lengths=[60, 60, 60])
assert len(terms) == 0
def test_get_terms_strict(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
strict_terms = get_terms(
meter_data.index,
term_lengths=[365, 365],
term_labels=["year1", "year2"],
start=datetime(2016, 1, 15, tzinfo=pytz.UTC),
method="strict",
)
assert len(strict_terms) == 2
year1 = strict_terms[0]
assert year1.label == "year1"
assert year1.index.shape == (12,)
assert (
year1.target_start_date
== pd.Timestamp("2016-01-15 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert (
year1.target_end_date
== pd.Timestamp("2017-01-14 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert year1.target_term_length_days == 365
assert (
year1.actual_start_date
== year1.index[0]
== pd.Timestamp("2016-01-22 06:00:00+0000", tz="UTC")
)
assert (
year1.actual_end_date
== year1.index[-1]
== pd.Timestamp("2016-12-19 06:00:00+0000", tz="UTC")
)
assert year1.actual_term_length_days == 332
assert year1.complete
year2 = strict_terms[1]
assert year2.index.shape == (13,)
assert year2.label == "year2"
assert year2.target_start_date == pd.Timestamp("2016-12-19 06:00:00+0000", tz="UTC")
assert (
year2.target_end_date
== pd.Timestamp("2018-01-14 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert year2.target_term_length_days == 365
assert (
year2.actual_start_date
== year2.index[0]
== pd.Timestamp("2016-12-19 06:00:00+00:00", tz="UTC")
)
assert (
year2.actual_end_date
== year2.index[-1]
== pd.Timestamp("2017-12-22 06:00:00+0000", tz="UTC")
)
assert year2.actual_term_length_days == 368
assert year2.complete
def test_get_terms_nearest(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
nearest_terms = get_terms(
meter_data.index,
term_lengths=[365, 365],
term_labels=["year1", "year2"],
start=datetime(2016, 1, 15, tzinfo=pytz.UTC),
method="nearest",
)
assert len(nearest_terms) == 2
year1 = nearest_terms[0]
assert year1.label == "year1"
assert year1.index.shape == (13,)
assert year1.index[0] == pd.Timestamp("2016-01-22 06:00:00+0000", tz="UTC")
assert year1.index[-1] == pd.Timestamp("2017-01-21 06:00:00+0000", tz="UTC")
assert (
year1.target_start_date
== pd.Timestamp("2016-01-15 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert year1.target_term_length_days == 365
assert year1.actual_term_length_days == 365
assert year1.complete
year2 = nearest_terms[1]
assert year2.label == "year2"
assert year2.index.shape == (13,)
assert year2.index[0] == pd.Timestamp("2017-01-21 06:00:00+0000", tz="UTC")
assert year2.index[-1] == pd.Timestamp("2018-01-20 06:00:00+0000", tz="UTC")
assert year2.target_start_date == pd.Timestamp("2017-01-21 06:00:00+0000", tz="UTC")
assert year1.target_term_length_days == 365
assert year2.actual_term_length_days == 364
assert not year2.complete # no remaining index
# check completeness case with a shorter final term
nearest_terms = get_terms(
meter_data.index,
term_lengths=[365, 340],
term_labels=["year1", "year2"],
start=datetime(2016, 1, 15, tzinfo=pytz.UTC),
method="nearest",
)
year2 = nearest_terms[1]
assert year2.label == "year2"
assert year2.index.shape == (12,)
assert year2.index[0] == pd.Timestamp("2017-01-21 06:00:00+0000", tz="UTC")
assert year2.index[-1] == pd.Timestamp("2017-12-22 06:00:00+00:00", tz="UTC")
assert year2.target_start_date == pd.Timestamp("2017-01-21 06:00:00+0000", tz="UTC")
assert year2.target_term_length_days == 340
assert year2.actual_term_length_days == 335
assert year2.complete # has remaining index
def test_term_repr(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(meter_data.index, term_lengths=[60, 60, 60])
assert repr(terms[0]) == (
"Term(label=term_001, target_term_length_days=60, actual_term_length_days=29,"
" complete=True)"
)
def test_remove_duplicates_df():
index = pd.DatetimeIndex(["2017-01-01", "2017-01-02", "2017-01-02"])
df = pd.DataFrame({"value": [1, 2, 3]}, index=index)
assert df.shape == (3, 1)
df_dedupe = remove_duplicates(df)
assert df_dedupe.shape == (2, 1)
assert list(df_dedupe.value) == [1, 2]
def test_remove_duplicates_series():
index = pd.DatetimeIndex(["2017-01-01", "2017-01-02", "2017-01-02"])
series = pd.Series([1, 2, 3], index=index)
assert series.shape == (3,)
series_dedupe = remove_duplicates(series)
assert series_dedupe.shape == (2,)
assert list(series_dedupe) == [1, 2]
def test_as_freq_hourly_to_daily(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
meter_data.iloc[-1]["value"] = np.nan
assert meter_data.shape == (19417, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (811,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 21926.0
def test_as_freq_daily_to_daily(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
assert meter_data.shape == (810, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (810,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 21925.8
def test_as_freq_hourly_to_daily_include_coverage(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
meter_data.iloc[-1]["value"] = np.nan
assert meter_data.shape == (19417, 1)
as_daily = as_freq(meter_data.value, freq="D", include_coverage=True)
assert as_daily.shape == (811, 2)
assert round(meter_data.value.sum(), 1) == round(as_daily.value.sum(), 1) == 21926.0
def test_clean_caltrack_billing_daily_data_billing(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
cleaned_data = clean_caltrack_billing_daily_data(meter_data, "billing_monthly")
assert cleaned_data.shape == (27, 1)
| pd.testing.assert_frame_equal(meter_data, cleaned_data) | pandas.testing.assert_frame_equal |
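# --- Illustrative note (not part of the original test file) ---
# Grounded only in calls exercised by the tests above, a typical eemeter flow is
# to slice a baseline period from metered data and resample it to a fixed
# frequency (fixture loading is assumed; `meter_data` has a `value` column):
# baseline, warnings = get_baseline_data(meter_data, end=blackout_start_date, max_days=365)
# baseline_daily = as_freq(baseline.value, freq="D")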
#
# Copyright (c) 2018 <NAME> <<EMAIL>>
#
# See the file LICENSE for your rights.
#
"""
Methods for processing VERIFICATION data.
"""
import os
import re
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import pickle
import requests
from collections import OrderedDict
from mosx.MesoPy import Meso
from mosx.obs.methods import get_obs_hourly, reindex_hourly
from mosx.util import generate_dates, get_array, get_ghcn_stid
def get_cf6_files(config, num_files=1):
"""
After code by Luke Madaus
Retrieves CF6 climate verification data released by the NWS. Parameter num_files determines how many recent files
are downloaded.
"""
# Create directory if it does not exist
site_directory = config['SITE_ROOT']
# Construct the web url address. Check if a special 3-letter station ID is provided.
nws_url = 'http://forecast.weather.gov/product.php?site=NWS&issuedby=%s&product=CF6&format=TXT'
try:
stid3 = config['station_id3']
except KeyError:
stid3 = config['station_id'][1:].upper()
nws_url = nws_url % stid3
# Determine how many files (iterations of product) we want to fetch
if num_files == 1:
if config['verbose']:
print('get_cf6_files: retrieving latest CF6 file for %s' % config['station_id'])
else:
if config['verbose']:
print('get_cf6_files: retrieving %s archived CF6 files for %s' % (num_files, config['station_id']))
# Fetch files
for r in range(1, num_files + 1):
# Format the web address: goes through 'versions' on NWS site which correspond to increasingly older files
version = 'version=%d&glossary=0' % r
nws_site = '&'.join((nws_url, version))
response = requests.get(nws_site)
cf6_data = response.text
# Remove the header
try:
body_and_footer = cf6_data.split('CXUS')[1] # Mainland US
except IndexError:
try:
body_and_footer = cf6_data.split('CXHW')[1] # Hawaii
except IndexError:
body_and_footer = cf6_data.split('CXAK')[1] # Alaska
body_and_footer_lines = body_and_footer.splitlines()
if len(body_and_footer_lines) <= 2:
body_and_footer = cf6_data.split('000')[2]
# Remove the footer
body = body_and_footer.split('[REMARKS]')[0]
# Find the month and year of the file
current_year = re.search('YEAR: *(\d{4})', body).groups()[0]
try:
current_month = re.search('MONTH: *(\D{3,9})', body).groups()[0]
current_month = current_month.strip() # Gets rid of newlines and whitespace
datestr = '%s %s' % (current_month, current_year)
file_date = datetime.strptime(datestr, '%B %Y')
except: # Some files have a different formatting, although this may be fixed now.
current_month = re.search('MONTH: *(\d{2})', body).groups()[0]
current_month = current_month.strip()
datestr = '%s %s' % (current_month, current_year)
file_date = datetime.strptime(datestr, '%m %Y')
# Write to a temporary file, check if output file exists, and if so, make sure the new one has more data
datestr = file_date.strftime('%Y%m')
filename = '%s/%s_%s.cli' % (site_directory, config['station_id'].upper(), datestr)
temp_file = '%s/temp.cli' % site_directory
with open(temp_file, 'w') as out:
out.write(body)
def file_len(file_name):
with open(file_name) as f:
for i, l in enumerate(f):
pass
return i + 1
if os.path.isfile(filename):
old_file_len = file_len(filename)
new_file_len = file_len(temp_file)
if old_file_len < new_file_len:
if config['verbose']:
print('get_cf6_files: overwriting %s' % filename)
os.remove(filename)
os.rename(temp_file, filename)
else:
if config['verbose']:
print('get_cf6_files: %s already exists' % filename)
else:
if config['verbose']:
print('get_cf6_files: writing %s' % filename)
os.rename(temp_file, filename)
def _cf6_wind(config):
"""
After code by <NAME>
This function is used internally only.
Generates wind verification values from climate CF6 files stored in SITE_ROOT. These files can be generated
externally by get_cf6_files.py. This function is not necessary if climo data from _climo_wind is found, except for
recent values which may not be in the NCDC database yet.
:param config:
:return: dict: wind values from CF6 files
"""
if config['verbose']:
print('_cf6_wind: searching for CF6 files in %s' % config['SITE_ROOT'])
allfiles = os.listdir(config['SITE_ROOT'])
filelist = [f for f in allfiles if f.startswith(config['station_id'].upper()) and f.endswith('.cli')]
filelist.sort()
if len(filelist) == 0:
raise IOError('No CF6 files found.')
if config['verbose']:
print('_cf6_wind: found %d CF6 files.' % len(filelist))
# Interpret CF6 files
if config['verbose']:
print('_cf6_wind: reading CF6 files')
cf6_values = {}
for file in filelist:
year, month = re.search('(\d{4})(\d{2})', file).groups()
infile = open('%s/%s' % (config['SITE_ROOT'], file), 'r')
for line in infile:
matcher = re.compile(
'( \d|\d{2}) ( \d{2}|-\d{2}| \d| -\d|\d{3})')
if matcher.match(line):
# We've found an ob line!
lsp = line.split()
day = int(lsp[0])
curdt = datetime(int(year), int(month), day)
cf6_values[curdt] = {}
# Wind
if lsp[11] == 'M':
cf6_values[curdt]['wind'] = 0.0
else:
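# CF6 reports wind in mph; 0.868976 converts mph to knots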
cf6_values[curdt]['wind'] = float(lsp[11]) * 0.868976
return cf6_values
def _climo_wind(config, dates=None):
"""
Fetches climatological wind data using ulmo package to retrieve NCDC archives.
:param config:
:param dates: list of datetime objects
:return: dict: dictionary of wind values
"""
import ulmo
if config['verbose']:
print('_climo_wind: fetching data from NCDC (may take a while)...')
v = 'WSF2'
wind_dict = {}
D = ulmo.ncdc.ghcn_daily.get_data(get_ghcn_stid(config), as_dataframe=True, elements=[v])
if dates is None:
dates = list(D[v].index.to_timestamp().to_pydatetime())
for date in dates:
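# GHCN-Daily stores WSF2 in tenths of m/s, so /10 recovers m/s and 1.94384 converts m/s to knots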
wind_dict[date] = {'wind': D[v].loc[date]['value'] / 10. * 1.94384}
return wind_dict
def pop_rain(series):
"""
Converts a series of rain values into 0 or 1 depending on whether there is measurable rain
:param series:
:return:
"""
new_series = series.copy()
new_series[series >= 0.01] = 1.
new_series[series < 0.01] = 0.
return new_series
def categorical_rain(series):
"""
Converts a series of rain values into categorical precipitation quantities a la MOS.
:param series:
:return:
"""
new_series = series.copy()
for j in range(len(series)):
if series.iloc[j] < 0.01:
new_series.iloc[j] = 0.
elif series.iloc[j] < 0.10:
new_series.iloc[j] = 1.
elif series.iloc[j] < 0.25:
new_series.iloc[j] = 2.
elif series.iloc[j] < 0.50:
new_series.iloc[j] = 3.
elif series.iloc[j] < 1.00:
new_series.iloc[j] = 4.
elif series.iloc[j] < 2.00:
new_series.iloc[j] = 5.
elif series.iloc[j] >= 2.00:
new_series.iloc[j] = 6.
else: # missing, or something else that's strange
new_series.iloc[j] = 0.
return new_series
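# Example (illustrative): categorical_rain(pd.Series([0.0, 0.05, 0.3, 1.5]))
# yields the MOS-style categories [0., 1., 3., 5.] for those precipitation amounts.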
def verification(config, output_file=None, use_cf6=True, use_climo=True, force_rain_quantity=False):
"""
Generates verification data from MesoWest and saves to a file, which is used to train the model and check test
results.
:param config:
:param output_file: str: path to output file
:param use_cf6: bool: if True, uses wind values from CF6 files
:param use_climo: bool: if True, uses wind values from NCDC climatology
:param force_rain_quantity: if True, returns the actual quantity of rain (rather than POP); useful for validation
files
:return:
"""
if output_file is None:
output_file = '%s/%s_verif.pkl' % (config['SITE_ROOT'], config['station_id'])
dates = generate_dates(config)
api_dates = generate_dates(config, api=True, api_add_hour=config['forecast_hour_start'] + 24)
# Read new data for daily values
m = Meso(token=config['meso_token'])
if config['verbose']:
print('verification: MesoPy initialized for station %s' % config['station_id'])
print('verification: retrieving latest obs and metadata')
latest = m.latest(stid=config['station_id'])
obs_list = list(latest['STATION'][0]['SENSOR_VARIABLES'].keys())
# Look for desired variables
vars_request = ['air_temp', 'wind_speed', 'precip_accum_one_hour']
vars_option = ['air_temp_low_6_hour', 'air_temp_high_6_hour', 'precip_accum_six_hour']
# Add variables to the api request if they exist
if config['verbose']:
print('verification: searching for 6-hourly variables...')
for var in vars_option:
if var in obs_list:
if config['verbose']:
print('verification: found variable %s, adding to data' % var)
vars_request += [var]
vars_api = ''
for var in vars_request:
vars_api += var + ','
vars_api = vars_api[:-1]
# Units
units = 'temp|f,precip|in,speed|kts'
# Retrieve data
obspd = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # IRC Behavioral Analysis - Visualization
# ### Imports
# In[1]:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
# ## Loading Data
# In[2]:
import os
log_names_mal = ['03','04','34','39','42','51','56','62']
log_names_benign = ['irc1']
log_names = log_names_mal + log_names_benign
# project_dir = '/Users/preneond/Documents/Work/Stratosphere/IRC-Research/IRC-Behavioral-Analysis/'
project_dir = '/home/prenek/IRC-Behavioral-Analysis/'
log_dir = os.path.join(project_dir, 'zeek/logs/')
out_dir = os.path.join(project_dir, 'python/out/')
plot_dir = os.path.join(project_dir, 'python/plots/')
fn_join_freq = 'join_freq.log'
fn_lev_dist = 'lev_dist.log'
logs_fn_join = [os.path.join(out_dir,l, fn_join_freq) for l in log_names]
logs_fn_join_mal = [os.path.join(out_dir,l, fn_join_freq) for l in log_names_mal]
logs_fn_join_benign = [os.path.join(out_dir,l, fn_join_freq) for l in log_names_benign]
logs_fn_privmsg = [os.path.join(out_dir,l, fn_lev_dist) for l in log_names]
logs_fn_privmsg_mal = [os.path.join(out_dir,l,fn_lev_dist) for l in log_names_mal]
logs_fn_privmsg_benign = [os.path.join(out_dir,l, fn_lev_dist) for l in log_names_benign]
# FIXME: read csv in chunks because the log is too big
df_privmsg_benign = None
chunksize = 10 ** 5
# df_tmp = None
for pcap, log in zip(log_names_benign, logs_fn_privmsg_benign):
print(pcap)
df_tmp = pd.read_csv(log, sep=';', encoding='utf-8', chunksize=chunksize)
df_tmp = pd.concat(df_tmp, ignore_index=True)
df_tmp['pcap'] = pcap
df_tmp['malicious'] = 0
df_privmsg_benign = | pd.concat([df_privmsg_benign, df_tmp], ignore_index=True, sort=True) | pandas.concat |
import torch
import numpy as np
from torch.autograd import Variable
from torch.utils import data
from torchvision.utils import save_image
import itertools
from torchvision import transforms
from torch.utils.data import Dataset
from PIL import Image
import copy
from utils import *
import pandas as pd
from models.cycle_GAN import Generator, Discriminator
import glob
import random
import os
import PIL
def denorm(x):
out = (x + 1) / 2
return out.clamp(0, 1)
def tensor2image(tensor):
image = 127.5*(tensor[0].cpu().float().numpy() + 1.0)
if image.shape[0] == 1:
image = np.tile(image, (3,1,1))
return image.astype(np.uint8)
class ImageDataset(Dataset):
def __init__(self, root, transforms_=None, unaligned=False, mode='train'):
self.transform = transforms.Compose(transforms_)
self.unaligned = unaligned
self.files_A = sorted(glob.glob(os.path.join(root, '%sA' % mode) + '/*.*'))
self.files_B = sorted(glob.glob(os.path.join(root, '%sB' % mode) + '/*.*'))
def __getitem__(self, index):
item_A = self.transform(Image.open(self.files_A[index % len(self.files_A)]))
if self.unaligned:
item_B = self.transform(Image.open(self.files_B[random.randint(0, len(self.files_B) - 1)]))
else:
item_B = self.transform(Image.open(self.files_B[index % len(self.files_B)]))
return {'A': item_A, 'B': item_B}
def __len__(self):
return max(len(self.files_A), len(self.files_B))
class LambdaLR():
def __init__(self, n_epochs, offset, decay_start_epoch):
assert ((n_epochs - decay_start_epoch) > 0), "Decay must start before the training session ends!"
self.n_epochs = n_epochs
self.offset = offset
self.decay_start_epoch = decay_start_epoch
def step(self, epoch):
return 1.0 - max(0, epoch + self.offset - self.decay_start_epoch) / (self.n_epochs - self.decay_start_epoch)
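# Example: with n_epochs=200, offset=0 and decay_start_epoch=100 the factor is 1.0
# through epoch 100, then falls linearly (0.99 at epoch 101, ..., 0.0 at epoch 200),
# i.e. the linear learning-rate decay schedule commonly used for CycleGAN training.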
class Client:
def __init__(self, id, config):
self.id = id
self.local_dir = 'clients/' + str(id) + '/'
dir_setup(self.local_dir)
# Now dataset is a united OBJECT in this version
self.dataset = None
dir_setup(self.local_dir + 'dataset/')
# add something new
# Only used in former vision
# self.label = []
# dir_setup(self.local_dir + 'label/')
# New models
self.generator_A2B = Generator(input_nc=config.input_nc, output_nc=config.output_nc).to(config.device)
self.generator_B2A = Generator(input_nc=config.output_nc, output_nc=config.input_nc).to(config.device)
dir_setup(self.local_dir + 'model/')
self.generator_name_A2B = "generator_A2B.pkl"
self.generator_name_B2A = "generator_B2A.pkl"
self.discriminator_A = Discriminator(input_nc=config.input_nc).to(config.device)
self.discriminator_B = Discriminator(input_nc=config.output_nc).to(config.device)
dir_setup(self.local_dir + 'model/')
self.discriminator_name_A = "discriminator_A.pkl"
self.discriminator_name_B = "discriminator_B.pkl"
# The number of samples the client owns (before the data is actually loaded)
# self.num_data_owned_setup = 0
# No longer used in CycleGAN
dir_setup(self.local_dir + 'dataset/trainA/')
dir_setup(self.local_dir + 'dataset/trainB/')
dir_setup(self.local_dir + 'dataset/testA/')
dir_setup(self.local_dir + 'dataset/testB/')
# self config
self.config = config
# generated samples store
self.store_generated_root = 'results/'
dir_setup(self.local_dir + self.store_generated_root)
self.optimizer_G = torch.optim.Adam(itertools.chain(self.generator_A2B.parameters(), self.generator_B2A.parameters()),
lr=self.config.lr, betas=(0.5, 0.999))
self.optimizer_D_A = torch.optim.Adam(self.discriminator_A.parameters(), self.config.lr, betas=(0.5, 0.999))
self.optimizer_D_B = torch.optim.Adam(self.discriminator_B.parameters(), self.config.lr, betas=(0.5, 0.999))
# Learning scheduler setup
self.lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(self.optimizer_G, lr_lambda=LambdaLR(config.epochs, 0, config.decay_epoch).step)
self.lr_scheduler_D_A = torch.optim.lr_scheduler.LambdaLR(self.optimizer_D_A, lr_lambda=LambdaLR(config.epochs, 0, config.decay_epoch).step)
self.lr_scheduler_D_B = torch.optim.lr_scheduler.LambdaLR(self.optimizer_D_B, lr_lambda=LambdaLR(config.epochs, 0, config.decay_epoch).step)
"""# optimizer for training
# @property
@property
def optimizer_G(self):
return torch.optim.Adam(itertools.chain(self.generator_A2B.parameters(), self.generator_B2A.parameters()),
lr=self.config.lr, betas=(0.5, 0.999))
@property
def optimizer_D_A(self):
return torch.optim.Adam(self.discriminator_A.parameters(), self.config.lr, betas=(0.5, 0.999))
@property
def optimizer_D_B(self):
return torch.optim.Adam(self.discriminator_B.parameters(), self.config.lr, betas=(0.5, 0.999))
"""
# Not gonna be used
# def load_data(self, data_label_list):
# self.dataset.append(data_label_list[0])
# self.label.append(data_label_list[1])
def load_dataset_from_dir(self, dir):
transforms_ = [transforms.Resize(int(self.config.img_size * 1.12), PIL.Image.BICUBIC),
transforms.RandomCrop(self.config.img_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
train_dataset = ImageDataset(dir, transforms_=transforms_, unaligned=True)
self.dataset = data.DataLoader(dataset=train_dataset,
batch_size=self.config.batch_size,
shuffle=self.config.shuffle,
collate_fn=self.config.collate_fn,
batch_sampler=self.config.batch_sampler,
num_workers=self.config.num_workers,
pin_memory=self.config.pin_memory,
drop_last=self.config.drop_last,
timeout=self.config.timeout,
worker_init_fn=self.config.worker_init_fn)
def load_model_from_path(self, model_path):
self.generator_A2B = torch.load(model_path + self.generator_name_A2B)
self.generator_B2A = torch.load(model_path + self.generator_name_B2A)
self.discriminator_A = torch.load(model_path + self.discriminator_name_A)
self.discriminator_B = torch.load(model_path + self.discriminator_name_B)
def load_model(self, generator_A2B, generator_B2A, discriminator_A, discriminator_B):
self.generator_A2B = copy.deepcopy(generator_A2B)
self.generator_B2A = copy.deepcopy(generator_B2A)
self.discriminator_A = copy.deepcopy(discriminator_A)
self.discriminator_B = copy.deepcopy(discriminator_B)
def num_data_owned(self):
return len(self.dataset)
# client writes logs
def log_write(self, epoch, loss_D, loss_G, loss_G_GAN, loss_G_identity, loss_G_cycle):
loss_data_frame = | pd.DataFrame(columns=None, index=[epoch], data=[[loss_D, loss_G, loss_G_GAN, loss_G_identity, loss_G_cycle]]) | pandas.DataFrame |
#!/usr/bin/env python3
'''
Splits dataset into train/test/val
Author: <NAME>
Date: 10/16/2019
'''
import os
import argparse
import pandas as pd
import numpy as np
import csv
import shutil
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import train_test_split
try:
import utilities as utilities
except ImportError:
import utility.utilities as utilities
# For nepsa_all
# nepsa_all = ['B-DATE','I-DATE', 'B-EVENT','I-EVENT', 'B-NUM','I-NUM', 'B-SARCASM','I-SARCASM', 'B-OUTOFSCOPE','I-OUTOFSCOPE']
nepsa_all = ['B-PER','I-PER',
'B-ORG','I-ORG',
'B-LOC','I-LOC',
'B-MISC','I-MISC',
'B-FEEDBACK','I-FEEDBACK',
'B-GENERAL','I-GENERAL',
'B-PROFANITY','I-PROFANITY',
'B-VIOLENCE','I-VIOLENCE']
# For nepsa_target
# nepsa_target= ['B-FEEDBACK','I-FEEDBACK', 'B-DATE','I-DATE', 'B-EVENT','I-EVENT', 'B-NUM','I-NUM', 'B-SARCASM','I-SARCASM', 'B-OUTOFSCOPE','I-OUTOFSCOPE', 'B-GENERAL','I-GENERAL', 'B-PROFANITY','I-PROFANITY', 'B-VIOLENCE','I-VIOLENCE']
nepsa_target= ['B-PER','I-PER',
'B-ORG','I-ORG',
'B-LOC','I-LOC',
'B-MISC','I-MISC']
# For nepsa_aspect
# nepsa_aspect = ['B-DATE','I-DATE', 'B-EVENT','I-EVENT', 'B-NUM','I-NUM', 'B-SARCASM','I-SARCASM', 'B-OUTOFSCOPE','I-OUTOFSCOPE', 'B-PER','I-PER', 'B-ORG','I-ORG', 'B-LOC','I-LOC', 'B-MISC','I-MISC']
nepsa_aspect = ['B-FEEDBACK','I-FEEDBACK',
'B-GENERAL','I-GENERAL',
'B-PROFANITY','I-PROFANITY',
'B-VIOLENCE','I-VIOLENCE']
forbid = {
'nepsa_all' : nepsa_all,
'nepsa_target' : nepsa_target,
'nepsa_aspect' : nepsa_aspect
}
def text_tag_convert(input_file, logger, seq_len, verbose=False):
dir_name = os.path.dirname(input_file)
output_dir = os.path.join(dir_name, 'text_tag_only')
if not os.path.exists(output_dir):
os.mkdir(output_dir)
sent_file = os.path.join(output_dir, 'text_only.txt')
tag_file = os.path.join(output_dir, 'tag_only.txt')
MIN_SEQ_LENGTH = seq_len[0]
MAX_SEQ_LENGTH = seq_len[1]
with open(input_file,'r', encoding='utf-8') as in_file, open(sent_file,'w', encoding='utf-8') as txt_f, open(tag_file,'w', encoding='utf-8') as tag_f:
sentence = []
tag = []
max_length=0
max_sentence=''
max_counter=0
min_counter=0
sent_counter=0
line_num=0
j=0
for i,row in enumerate(in_file):
#To know which line is defunct in file
#print(i+1)
row = row.strip().split()
# Assuming input file has four columns
# token, start_position, end_position, entity_type
# Changed to greater than 1 to
# fit old nepali-ner data as well
if len(row)>1:
sentence.append(row[0])
tag.append(row[-1])
else:
line_num+=1
if len(sentence) > max_length:
max_length = len(sentence)
max_sentence=sentence
j=line_num
if len(sentence) < MAX_SEQ_LENGTH and len(sentence) > MIN_SEQ_LENGTH:
txt_f.write(' '.join(sentence)+'\n')
tag_f.write(' '.join(tag)+'\n')
sent_counter+=1
else:
if len(sentence) > MAX_SEQ_LENGTH:
max_counter+=1
if verbose:
logger.info("Length of longer sentence = {}".format(len(sentence)))
else:
min_counter+=1
if verbose:
logger.info("Length of shorter sentence = {}".format(len(sentence)))
sentence = []
tag = []
logger.info("Max sentence length limit = {}".format(MAX_SEQ_LENGTH))
logger.info("Min sentence length limit = {}".format(MIN_SEQ_LENGTH))
logger.info("Longest sentence length = {}".format(max_length))
logger.info("Longest sentence at line number = {}".format(j))
logger.info("Longest sentence counter = {}".format(max_counter))
logger.info("Shortest sentence counter = {}".format(min_counter))
logger.info("% of sentence removed = {}%".format(max_counter+min_counter/line_num * 100))
logger.info("Total number of sentence before removal= {}".format(line_num))
logger.info("Total number of sentence after removal= {}".format(sent_counter))
in_file.close()
txt_f.close()
tag_f.close()
logger.info("Text and Tag files are stored in {}".format(output_dir))
logger.info("******************************************************")
return sent_file, tag_file
'''
Function to write dataframe into files
'''
def write_df(df, fname, logger, split_type):
invalid_counter = 0
with open(fname, 'w', encoding='utf-8') as f:
for i, r in df.iterrows():
# Splits the TEXT and TAG into chunks
text = r['TEXT'].split()
tag = r['TAG'].split()
tag = ['O' if x not in forbid[split_type] else x for x in tag]
# Remove specific lines having these categories
# if not set(tag).intersection(set(['B-SARCASM','I-SARCASM', 'B-OUTOFSCOPE','I-OUTOFSCOPE'])):
# Remove if it contains only 'O'
if list(set(tag)) != ['O']:
for t1, t2 in zip(text, tag):
f.write(t1+' '+t2+'\n')
f.write('\n')
else:
invalid_counter+=1
logger.info('Number of sentences containing only \'O\': {}'.format(invalid_counter))
logger.info('Created: {}'.format(fname))
f.close()
return invalid_counter
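# Illustrative sketch (not part of the original script) of how write_df uses
# the `forbid` mapping defined above: tags outside the chosen split type are
# collapsed to 'O', and sentences left with only 'O' are skipped. The tokens
# and tags below are hypothetical placeholders.
def _demo_tag_filtering(split_type='nepsa_target'):
    tokens = ['token_1', 'token_2', 'token_3']
    tags = ['B-PER', 'B-FEEDBACK', 'O']
    kept = ['O' if t not in forbid[split_type] else t for t in tags]
    # kept == ['B-PER', 'O', 'O'] -> sentence is written, since
    # list(set(kept)) != ['O']
    return list(zip(tokens, kept))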
'''
Partitions the given data into chunks
Create train/test file accordingly
'''
def split_train_test(source_path, save_path, logger, split_type):
sent_file = os.path.join(source_path, 'text_only.txt')
tag_file = os.path.join(source_path, 'tag_only.txt')
logger.info("Saving path: {}".format(save_path))
# if not os.path.exists(save_path):
# os.mkdir(save_path)
train_fname = os.path.join(save_path,'train.txt')
test_fname = os.path.join(save_path, 'test.txt')
val_fname = os.path.join(save_path, 'dev.txt')
df_txt = pd.read_csv(sent_file, delimiter='\n', encoding='utf-8',
skip_blank_lines=True, header=None,
quoting=csv.QUOTE_NONE, names=['TEXT'])
df_tag = pd.read_csv(tag_file, delimiter='\n', encoding='utf-8',
skip_blank_lines=True, header=None,
quoting=csv.QUOTE_NONE, names=['TAG'])
df = df_txt.join(df_tag).sample(frac=1).reset_index(drop=True)
# To split into train and intermediate 80/20
mask = np.random.rand(len(df)) < 0.8
train_df = df[mask]
intermediate_df = df[~mask]
# To split intermediate into 10/10 into test and dev
val_mask = np.random.rand(len(intermediate_df)) < 0.5
test_df = intermediate_df[val_mask]
val_df = intermediate_df[~val_mask]
# Write those train/test dataframes into files
invalid_train_count = write_df(train_df, train_fname, logger, split_type)
invalid_test_count = write_df(test_df, test_fname, logger, split_type)
invalid_val_count = write_df(val_df, val_fname, logger, split_type)
total_invalid = invalid_train_count + invalid_test_count + invalid_val_count
total_data_length = len(train_df) + len(test_df) + len(val_df)
# Print stat
logger.info("Length of train dataset: {}".format(len(train_df) - invalid_train_count))
logger.info("Length of test dataset: {}".format(len(test_df) - invalid_test_count))
logger.info("Length of val dataset: {}".format(len(val_df) - invalid_val_count))
logger.info("Total dataset reduced by: {:.3f}%".format((total_invalid / total_data_length) * 100))
'''
Partitions the given data into chunks
Create train/test file accordingly
***Obsolete yet for reference***
'''
def split_train_test_csv(source_path, save_path, logger):
logger.info("Saving path: {}".format(save_path))
train_fname = os.path.join(save_path,'train.txt')
test_fname = os.path.join(save_path, 'test.txt')
val_fname = os.path.join(save_path, 'dev.txt')
df_txt = pd.read_csv(source_path, delimiter=',', encoding='utf-8',
skip_blank_lines=True, header=None,
quoting=csv.QUOTE_MINIMAL, names=['ss', 'ac', 'at', 'text'])
df = df_txt.sample(frac=1).reset_index(drop=True)
# To split into train and intermediate 80/20
mask = np.random.rand(len(df)) < 0.8
train_df = df[mask]
intermediate_df = df[~mask]
# To split intermediate into 10/10 into test and dev
val_mask = np.random.rand(len(intermediate_df)) < 0.5
test_df = intermediate_df[val_mask]
val_df = intermediate_df[~val_mask]
train_df.to_csv(train_fname, header=False, index=False, quoting=csv.QUOTE_NONE, quotechar="", escapechar=" ", encoding='utf-8')
test_df.to_csv(test_fname, header=False, index=False, quoting=csv.QUOTE_NONE, quotechar="", escapechar=" ", encoding='utf-8')
val_df.to_csv(val_fname, header=False, index=False, quoting=csv.QUOTE_NONE, quotechar="", escapechar=" ", encoding='utf-8')
# Print stat
logger.info("Length of train dataset: {}".format(len(train_df)))
logger.info("Length of test dataset: {}".format(len(test_df)))
logger.info("Length of val dataset: {}".format(len(val_df)))
def write_csv(df, fname):
df.to_csv(fname, header=False, index=False,
quoting=csv.QUOTE_MINIMAL,
escapechar=" ",
encoding='utf-8')
'''
Partitions the given data using GroupShuffleSplit
This function will split train/test/val for each
aspect category equally
Split 80/10/10 for all the category
** Not based on the whole document
'''
def split_csv(source_path, save_path, logger):
logger.info("Saving path: {}".format(save_path))
train_fname = os.path.join(save_path,'train.txt')
test_fname = os.path.join(save_path, 'test.txt')
val_fname = os.path.join(save_path, 'dev.txt')
df_txt = pd.read_csv(source_path, delimiter=',',
encoding='utf-8',
skip_blank_lines=True,
header=None,
names=['ss', 'ac', 'at', 'text'])
# Split the df based on sentiment strength
# into positive and negative
gss = GroupShuffleSplit(test_size=.20, n_splits=1, random_state = 163).split(df_txt, groups=df_txt['ss'])
# Get positive and negative dataframe
for positive_df, negative_df in gss:
# Get data based on the index
negative = df_txt.iloc[negative_df]
positive = df_txt.iloc[positive_df]
# Split 80/10/10 -> train, test, val
# based on sentiment strength
train_neg, test_val_neg = train_test_split(negative, test_size=0.2)
train_pos, test_val_pos = train_test_split(positive, test_size=0.2)
test_neg, val_neg = train_test_split(test_val_neg, test_size=0.5)
test_pos, val_pos = train_test_split(test_val_pos, test_size=0.5)
# Concat negative and positive dataframe and shuffle
train_df = pd.concat([train_pos, train_neg], ignore_index=True).sample(frac=1).reset_index(drop=True)
test_df = pd.concat([test_pos, test_neg], ignore_index=True).sample(frac=1).reset_index(drop=True)
val_df = | pd.concat([val_pos, val_neg], ignore_index=True) | pandas.concat |
"""Event Study Abnormal Returns
- key developments CAR, BHAR and post-event drift
<NAME>
License: MIT
"""
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import matplotlib.pyplot as plt
import os
import time
from finds.database import SQL
from finds.busday import BusDay
from finds.structured import PSTAT, CRSP, Benchmarks
from finds.backtesting import EventStudy
from settings import settings
LAST_DATE = settings['crsp_date']
ECHO = True
sql = SQL(**settings['sql'], echo=ECHO)
user = SQL(**settings['user'], echo=ECHO)
bd = BusDay(sql)
keydev = PSTAT(sql, bd)
crsp = CRSP(sql, bd, rdb=None)
bench = Benchmarks(sql, bd)
eventstudy = EventStudy(user, bench, LAST_DATE)
outdir = os.path.join(settings['images'], 'events')
# event window parameters
end = 20201201
beg = 19890101 # 20020101
minobs = 250
left, right, post = -1, 1, 21
# str formatter to pretty print event and role description given their id's
eventformat = lambda e, r: "{event} ({eventid}) {role} [{roleid}]".format(
event=keydev.event_[e], eventid=e, role=keydev.role_[r], roleid=r)
events = sorted(keydev.event_.keys()) # list of eventid's
roles = sorted(keydev.role_.keys()) # list of roleid's
## Helpers to merge events and crsp, and screen stock universe
# to lookup prevailing exchange and share codes by permno and date
shrcd = crsp.build_lookup('permno', 'shrcd')
exchcd = crsp.build_lookup('permno', 'exchcd')
def event_pipeline(eventstudy, mincap=300000, **arg):
"""helper to merge keydev events and crsp, and screen stock universe"""
df = keydev.get_linked(
dataset='keydev',
date_field='announcedate',
fields=['keydevid',
'keydeveventtypeid',
'keydevtoobjectroletypeid'],
where=(f"announcedate >= {arg['beg']} and announcedate <= {arg['end']}"
f" and keydeveventtypeid = {arg['eventid']} "
f" and keydevtoobjectroletypeid = {arg['roleid']}"))\
.drop_duplicates(['permno','announcedate'])\
.set_index(['permno','announcedate'], drop=False)
stk = arg['stocks'].get_many(
dataset='daily',
permnos=df['permno'],
date_field='date',
dates=arg['stocks'].bd.offset(df['announcedate'], left-1),
fields=['prc', 'shrout']).fillna(0)
df['cap'] = (stk['prc'].abs() * stk['shrout']).values
df['exchcd'] = [exchcd(r.permno, r.date) for r in stk.itertuples()]
df['shrcd'] = [shrcd(r.permno, r.date) for r in stk.itertuples()]
r = (df['cap'].gt(mincap) & # require cap > $300M
df['exchcd'].isin([1,2,3]) & # primary exchange
df['shrcd'].isin([10,11])).values # domestic common stocks
rows = eventstudy(crsp, df[r], left, right, post, 'announcedate')
return df.loc[rows.to_records(index=False).tolist()] # restrict df to rows
## Compute abnormal returns of all events
# %%capture
tic = time.time()
for i, eventid in enumerate(events):
for roleid in roles:
# retrieve all observations of this eventid, roleid
df = event_pipeline(eventstudy, stocks=crsp, beg=beg, end=end,
eventid=eventid, roleid=roleid,
left=left, right=right, post=post)
if len(df) < minobs: # require min number of events
continue
# retrieve announcement window returns
r = eventstudy(crsp, df, left, right, post, 'announcedate')
if r['date'].nunique() < minobs: # require min number of dates
continue
# compute both BHAR and CAR averages, plot and save
bhar = eventstudy.fit(car=False, name='bhar')
car = eventstudy.fit(car=True, name='car')
eventstudy.write(label=f"{eventid}_{roleid}")
s = pd.concat([bhar, car], axis=1).T
print(eventformat(eventid, roleid))
print(s.to_string(float_format='%.4f', index=False))
print()
fig, axes = plt.subplots(2, 1, clear=True, num=1, figsize=(10,12))
eventstudy.plot(title=eventformat(eventid, roleid),
vline=right, ax=axes[0], name='bhar')
eventstudy.plot(title='', vline=right, ax=axes[1], name='car')
if outdir:
plt.savefig(os.path.join(outdir, f"{eventid}_{roleid}.jpg"))
print('Elapsed:', time.time()-tic, 'secs')
## Summarize BHAR's of all events, by 3-day event window abnormal returns
df = eventstudy.read(name='bhar')\
.set_index('permno').sort_values('window', ascending=False)
dx = DataFrame(df.index.str.split('_').to_list()).astype(int)
df.index = pd.MultiIndex.from_frame(dx).set_names(['eventid','roleid'])
df['event'] = keydev.event_[df.index.get_level_values(0)].values
df['role'] = keydev.role_[df.index.get_level_values(1)].values
mindays = (df['days']>1000).values
print(df[mindays].iloc[:10].drop(columns='name')\
.to_string(formatters={'effective':'{:.0f}'.format}, float_format='%.4f',
index=False))
print(df[mindays].iloc[::-1].iloc[:10].drop(columns='name')\
.to_string(formatters={'effective':'{:.0f}'.format}, float_format='%.4f',
index=False))
print(df[mindays].iloc[:10].drop(columns=['name'])\
.to_latex(index=False, formatters={'effective':'{:.0f}'.format}))
print(df[mindays].iloc[::-1].iloc[:10].drop(columns=['name'])\
.to_latex(index=False, formatters={'effective':'{:.0f}'.format}))
print(df.sort_values('post_t').to_string(float_format='%.4f'))
df.drop(columns='name')
## Show single plots for each of three events
eventid, roleid = 80, 1
eventid, roleid = 26, 1
df = event_pipeline(eventstudy, stocks=crsp, eventid=eventid, roleid=roleid,
beg=beg, end=end, left=left, right=right, post=post)
bhar = eventstudy.fit(car=False)
fig, ax = plt.subplots(clear=True, num=1, figsize=(10,6))
eventstudy.plot(title=eventformat(eventid, roleid), vline=right, ax=ax)
## show single plot by market cap and half-period
midcap = 20000000
for i, (eventid, roleid) in enumerate([[50,1], [83,1]]):
#eventid, roleid = 50, 1
#eventid, roleid = 83, 1
df = event_pipeline(eventstudy, stocks=crsp, eventid=eventid, roleid=roleid,
beg=beg, end=end, left=left, right=right, post=post)
halfperiod = np.median(df['announcedate'])
sample = {'[FirstHalf]': df['announcedate'].ge(halfperiod).values,
'[SecondHalf]': df['announcedate'].lt(halfperiod).values,
'[Large]': df['cap'].ge(midcap).values,
'[Small]': df['cap'].lt(midcap).values,
'': None}
for ifig, (label, rows) in enumerate(sample.items()):
fig, ax = plt.subplots(clear=True, num=1+ifig, figsize=(5,6))
bhar = eventstudy.fit(rows=rows, car=False)
eventstudy.plot(title=eventformat(eventid, roleid) + ' ' + label,
drift=True, ax=ax, c=f"C{i*5+ifig}")
plt.savefig(os.path.join(outdir, label + f"{eventid}_{roleid}.jpg"))
for i, (eventid, roleid) in enumerate([[80,1], [26,1]]):
#eventid, roleid = 50, 1
#eventid, roleid = 83, 1
df = event_pipeline(eventstudy, stocks=crsp, eventid=eventid, roleid=roleid,
beg=beg, end=end, left=left, right=right, post=post)
halfperiod = np.median(df['announcedate'])
sample = {'[FirstHalf]': df['announcedate'].ge(halfperiod).values,
'[SecondHalf]': df['announcedate'].lt(halfperiod).values,
'[Large]': df['cap'].ge(midcap).values,
'[Small]': df['cap'].lt(midcap).values,
'': None}
for ifig, (label, rows) in enumerate(sample.items()):
fig, ax = plt.subplots(clear=True, num=1+ifig, figsize=(5,6))
bhar = eventstudy.fit(rows=rows, car=False)
eventstudy.plot(title=eventformat(eventid, roleid) + ' ' + label,
drift=False, ax=ax, c=f"C{i*5+ifig}")
plt.savefig(os.path.join(outdir, label + f"{eventid}_{roleid}.jpg"))
#plt.show()
## Show Max Order Statistic and Bonferroni Adjustment
import statsmodels.api as sm
import scipy
from pandas.api import types
class MaxStat:
"""Max Order Statistic probability distributions"""
def __init__(self, dist=scipy.stats.norm, n=None, **params):
self.dist_ = dist
self.params_ = params
self.n = n
def cdf(self, z, n=None):
"""cdf for max order statistic"""
return [self.cdf(y, n) for y in z] if types.is_list_like(z)\
else self.dist_.cdf(z, **self.params_)**(n or self.n)
def pdf(self, z, n=None):
"""cdf for max order statistic"""
n = n or self.n
return [self.pdf(y, n) for y in z] if types.is_list_like(z)\
else self.dist_.pdf(z, **self.params_) * n * self.cdf(z, n=n-1)
def ppf(self, z, n=None):
"""inverse cdf for max order statistic"""
return [self.ppf(y, n) for y in z] if types.is_list_like(z)\
else self.dist_.ppf(z**(1 / (n or self.n)), **self.params_)
def pvalue(self, z, n=None):
"""z-value for max order statistic"""
return [self.pvalue(y, n) for y in z] if | types.is_list_like(z) | pandas.api.types.is_list_like |
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import math
from .davgis.functions import (Spatial_Reference, List_Datasets, Clip,
Resample, Raster_to_Array, NetCDF_to_Raster)
import os
import tempfile
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
def run_HANTS(rasters_path_inp, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta,
epsg=4326, fill_val=-9999.0,
rasters_path_out=None, export_hants_only=False):
'''
This function runs the python implementation of the HANTS algorithm. It
takes a folder of geotiff raster data as input, creates a netcdf
file, and optionally exports the data back to geotiffs.
'''
create_netcdf(rasters_path_inp, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg, fill_val)
HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val)
#if rasters_path_out:
#export_tiffs(rasters_path_out, nc_path, name_format, export_hants_only)
return nc_path
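# Hypothetical usage sketch (not part of the original module): the paths,
# spatial extent and HANTS parameters below are illustrative assumptions and
# should be replaced with project-specific values.
def _example_run_hants():
    return run_HANTS(rasters_path_inp='/data/ndvi_tiffs',
                     name_format='NDVI_{0}.tif',
                     start_date='2016-01-01', end_date='2016-12-31',
                     latlim=[-1.0, 1.0], lonlim=[29.0, 31.0], cellsize=0.01,
                     nc_path='/data/ndvi_hants.nc',
                     nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
                     fet=0.05, dod=1, delta=0.25)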
def create_netcdf(rasters_path, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg=4326, fill_val=-9999.0):
'''
This function creates a netcdf file from a folder of geotiff rasters to
be used to run HANTS.
'''
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1] + 0.5*cellsize,
cellsize)
lat_ls = lat_ls[::-1] # ArcGIS numpy
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1] + 0.5*cellsize,
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = pd.date_range(start_date, end_date, freq='D')
dates_ls = [d.strftime('%Y%m%d') for d in dates_dt]
ras_ls = List_Datasets(rasters_path, 'tif')
# Cell code
temp_ll_ls = [pd.np.arange(x, x + lon_n)
for x in range(1, lat_n*lon_n, lon_n)]
code_ls = pd.np.array(temp_ll_ls)
empty_vec = pd.np.empty((lat_n, lon_n))
empty_vec[:] = fill_val
# Create netcdf file
print('Creating netCDF file...')
nc_file = netCDF4.Dataset(nc_path, 'w', format="NETCDF4")
# Create Dimensions
lat_dim = nc_file.createDimension('latitude', lat_n)
lon_dim = nc_file.createDimension('longitude', lon_n)
time_dim = nc_file.createDimension('time', len(dates_ls))
# Create Variables
crs_var = nc_file.createVariable('crs', 'i4')
crs_var.grid_mapping_name = 'latitude_longitude'
crs_var.crs_wkt = spa_ref
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude'),
fill_value=fill_val)
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude'),
fill_value=fill_val)
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
time_var = nc_file.createVariable('time', 'l', ('time'),
fill_value=fill_val)
time_var.standard_name = 'time'
time_var.calendar = 'gregorian'
code_var = nc_file.createVariable('code', 'i4', ('latitude', 'longitude'),
fill_value=fill_val)
outliers_var = nc_file.createVariable('outliers', 'i4',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
outliers_var.long_name = 'outliers'
original_var = nc_file.createVariable('original_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
original_var.long_name = 'original values'
hants_var = nc_file.createVariable('hants_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
hants_var.long_name = 'hants values'
combined_var = nc_file.createVariable('combined_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
combined_var.long_name = 'combined values'
print('\tVariables created')
# Load data
lat_var[:] = lat_ls
lon_var[:] = lon_ls
time_var[:] = dates_ls
code_var[:] = code_ls
# temp folder
temp_dir = tempfile.mkdtemp()
bbox = [lonlim[0], latlim[0], lonlim[1], latlim[1]]
# Raster loop
print('\tExtracting data from rasters...')
for tt in range(len(dates_ls)):
# Raster
ras = name_format.format(dates_ls[tt])
if ras in ras_ls:
# Resample
ras_resampled = os.path.join(temp_dir, 'r_' + ras)
Resample(os.path.join(rasters_path, ras), ras_resampled, cellsize)
# Clip
ras_clipped = os.path.join(temp_dir, 'c_' + ras)
Clip(ras_resampled, ras_clipped, bbox)
# Raster to Array
array = Raster_to_Array(ras_resampled,
ll_corner, lon_n, lat_n,
values_type='float32')
# Store values
original_var[:, :, tt] = array
else:
# Store values
original_var[:, :, tt] = empty_vec
# Close file
nc_file.close()
print('NetCDF file created')
# Return
return nc_path
def HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm. It
takes the input netcdf file and fills the 'hants_values',
'combined_values', and 'outliers' variables.
'''
# Read netcdfs
nc_file = netCDF4.Dataset(nc_path, 'r+')
time_var = nc_file.variables['time'][:]
original_values = nc_file.variables['original_values'][:]
[rows, cols, ztime] = original_values.shape
size_st = cols*rows
values_hants = pd.np.empty((rows, cols, ztime))
outliers_hants = pd.np.empty((rows, cols, ztime))
values_hants[:] = pd.np.nan
outliers_hants[:] = pd.np.nan
# Additional parameters
ni = len(time_var)
ts = range(ni)
# Loop
counter = 1
print('Running HANTS...')
for m in range(rows):
for n in range(cols):
print('\t{0}/{1}'.format(counter, size_st))
y = pd.np.array(original_values[m, n, :])
y[pd.np.isnan(y)] = fill_val
[yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
low, high, fet, dod, delta, fill_val)
values_hants[m, n, :] = yr
outliers_hants[m, n, :] = outliers
counter = counter + 1
nc_file.variables['hants_values'][:] = values_hants
nc_file.variables['outliers'][:] = outliers_hants
nc_file.variables['combined_values'][:] = pd.np.where(outliers_hants,
values_hants,
original_values)
# Close netcdf file
nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
delta, fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm for a
single point (lat, lon). It plots the fit and returns a data frame with
the 'original' and the 'hants' time series.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r')
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
warnings.warn('The point lies outside the extent of the netCDF file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
lat_i = pd.np.where(lat == lat_closest)[0][0]
lon_i = pd.np.where(lon == lon_closest)[0][0]
# Read values
original_values = nc_file.variables['original_values'][lat_i, lon_i, :]
# Additional parameters
ni = len(time)
ts = range(ni)
# HANTS
y = pd.np.array(original_values)
y[pd.np.isnan(y)] = fill_val
[hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
dod, delta, fill_val)
# Plot
top = 1.15*max(pd.np.nanmax(original_values),
pd.np.nanmax(hants_values))
bottom = 1.15*min(pd.np.nanmin(original_values),
pd.np.nanmin(hants_values))
ylim = [bottom, top]
plt.plot(time, hants_values, 'r-', label='HANTS')
plt.plot(time, original_values, 'b.', label='Original data')
plt.ylim(ylim[0], ylim[1])
plt.legend(loc=4)
plt.xlabel('time')
plt.ylabel('values')
plt.gcf().autofmt_xdate()
plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.format(lon_closest,
lat_closest))
plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
plt.show()
# Close netcdf file
nc_file.close()
# Data frame
df = pd.DataFrame({'time': time,
'original': original_values,
'hants': hants_values})
# Return
return df
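# Hypothetical usage sketch (not part of the original module): the netCDF
# path, the [lon, lat] point and the HANTS parameters are illustrative
# assumptions. The returned data frame holds the 'original' and 'hants'
# series for the closest cell.
def _example_singlepoint():
    return HANTS_singlepoint(nc_path='/data/ndvi_hants.nc',
                             point=[30.05, -0.25],
                             nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
                             fet=0.05, dod=1, delta=0.25)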
def HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet, dod, delta, fill_val):
'''
This function applies the Harmonic ANalysis of Time Series (HANTS)
algorithm originally developed by the Netherlands Aerospace Centre (NLR)
(http://www.nlr.org/space/earth-observation/).
This python implementation was based on two previous implementations
available at the following links:
https://codereview.stackexchange.com/questions/71489/harmonic-analysis-of-time-series-applied-to-arrays
http://nl.mathworks.com/matlabcentral/fileexchange/38841-matlab-implementation-of-harmonic-analysis-of-time-series--hants-
'''
# Arrays
mat = pd.np.zeros((min(2*nf+1, ni), ni))
# amp = np.zeros((nf + 1, 1))
# phi = np.zeros((nf+1, 1))
yr = pd.np.zeros((ni, 1))
outliers = pd.np.zeros((1, len(y)))
# Filter
sHiLo = 0
if HiLo == 'Hi':
sHiLo = -1
elif HiLo == 'Lo':
sHiLo = 1
nr = min(2*nf+1, ni)
noutmax = ni - nr - dod
# dg = 180.0/math.pi
mat[0, :] = 1.0
ang = 2*math.pi*pd.np.arange(nb)/nb
cs = pd.np.cos(ang)
sn = | pd.np.sin(ang) | pandas.np.sin |
"""
A simple mean reversion strategy with back testing result. The strategy only works when market is ranging,
which is around 70% of the time.
Rules:
Entry:
1. Place a long limit order if:
1) price breaks 20 days low
2) RSI between 30 to 70
3) with entry price as: 20 days low - 5pips adj
2. Place a short limit order if:
1) price breaks 20 days high
2) RSI between 30 to 70
3) with entry price as: 20 days high + 5pips adj
3. Cancel the order if it cannot be filled in 3 hours
4. Maximum 4 orders allowed in each direction (long or short). Do not want to go against the trend
SL:
long: entry - 14 days ATR
Short: entry + 14 days ATR
TP:
Long: entry + 14 days ATR
Short: entry - 14 days ATR
Position Size:
%2 risk
"""
import logging
from datetime import timedelta
import pandas as pd
from src.backtester import BackTester
from src.orders.order import Order, OrderSide, OrderStatus
logger = logging.getLogger(__name__)
def process_pending(order, ohlc):
# If the order cannot be filled within next 3 hours, cancel it
if pd.to_datetime(ohlc['time']) - pd.to_datetime(order.order_date) <= timedelta(hours=3):
if (order.is_long and ohlc['low'] <= order.entry) or \
(order.is_short and ohlc['high'] >= order.entry):
logger.debug(pd.to_datetime(ohlc['time']) - pd.to_datetime(order.order_date))
logger.info(f"Fill {order.side} order [{order.id}] @ {order.entry} @ {ohlc['time']} [order date: {order.order_date}]")
order.fill(ohlc['time'])
else:
order.cancel(ohlc['time'])
def process_filled(order, ohlc):
if order.is_long:
if ohlc['low'] <= order.sl:
order.close_with_loss(ohlc['time'])
elif ohlc['high'] >= order.tp:
order.close_with_win(ohlc['time'])
else:
if ohlc['high'] >= order.sl:
order.close_with_loss(ohlc['time'])
elif ohlc['low'] <= order.tp:
order.close_with_win(ohlc['time'])
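# Sketch (not part of the original back test): the module docstring specifies
# a 2% risk position size, but run() below does not implement it. A minimal
# version could look like this helper; the 0.0001 pip size and pip_value are
# illustrative assumptions for non-JPY pairs.
def position_size(account_balance, entry, sl, pip_value=10.0, risk_pct=0.02):
    risk_amount = account_balance * risk_pct   # capital risked per trade
    sl_pips = abs(entry - sl) / 0.0001         # stop distance in pips
    return 0.0 if sl_pips == 0 else risk_amount / (sl_pips * pip_value)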
def run(instrument: str, window: int, max_orders: int, entry_adj: float, tp_adj: float, start_date: str = None, end_date: str = None, output_result: bool = False):
"""
Back testing the strategy
:param instrument: ccy pair, eg. EUR_USD
:param window: period of high or low
:param max_orders: max orders allowed in the same direction
:param entry_adj: entry price adj
:param tp_adj: tp adj
:param start_date: str
:param end_date: str
:param output_result: output to csv for investigations
:return:
list of tested orders
"""
price_df = pd.read_csv(f'c:/temp/{instrument.lower()}_h1_enrich.csv')
'''
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 time 65770 non-null object
1 open 65770 non-null float64
2 high 65770 non-null float64
3 low 65770 non-null float64
4 close 65770 non-null float64
5 last_10_high 65770 non-null float64
6 last_20_high 65770 non-null float64
7 last_10_low 65770 non-null float64
8 last_20_low 65770 non-null float64
9 day_close 65770 non-null float64
10 day_atr 65770 non-null float64
11 day_rsi 65770 non-null float64
12 day_ema_55 65770 non-null float64
'''
if start_date and end_date:
price_df = price_df[(price_df['time'] >= start_date) & (price_df['time'] < end_date)]
orders = []
for idx, ohlc in enumerate(price_df.to_dict('records')):
[process_pending(o, ohlc) for o in orders if o.is_pending]
[process_filled(o, ohlc) for o in orders if o.is_filled]
open_long = [o for o in orders if o.is_long and o.is_open]
open_short = [o for o in orders if o.is_short and o.is_open]
atr = ohlc['day_atr']
if ohlc['high'] == ohlc[f'last_{window}_high'] and len(open_short) < max_orders and 30 <= ohlc['day_rsi'] <= 70:
# Place a short limit order
entry = ohlc['high'] + entry_adj
orders.append(
Order(
order_date=ohlc['time'],
side=OrderSide.SHORT,
instrument=instrument,
entry=entry,
sl=entry + atr,
tp=entry - atr - tp_adj,
status=OrderStatus.PENDING
)
)
elif ohlc['low'] == ohlc[f'last_{window}_low'] and len(open_long) < max_orders and 30 <= ohlc['day_rsi'] <= 70:
# Place a long limit order
entry = ohlc['low'] - entry_adj
orders.append(
Order(
order_date=ohlc['time'],
side=OrderSide.LONG,
instrument=instrument,
entry=entry,
sl=entry - atr,
tp=entry + atr + tp_adj,
status=OrderStatus.PENDING
)
)
if output_result:
output_csv(instrument, price_df, orders)
return orders
def output_csv(instrument: str, price_feed: pd.DataFrame, orders: list):
price_feed = price_feed.set_index('time')
order_df = pd.DataFrame([
{
'time': o.order_date,
'create_date': o.order_date,
'side': o.side,
'entry': o.entry,
'sl': o.sl,
'tp': o.tp,
'pnl_in_pips': o.pnl * lot_size,
'outcome': o.outcome,
'close_date': o.last_update,
'is_cancelled': o.is_cancelled,
} for o in orders]).set_index('time')
merged = price_feed.join(order_df, how="left")
print(merged)
merged.to_csv(f'C:/temp/{instrument.lower()}_back_test.csv')
if __name__ == '__main__':
instruments = [
('EUR_USD', 10000), ('USD_CHF', 10000), ('USD_CAD', 10000), ('EUR_GBP', 10000), ('USD_SGD', 10000), ('GBP_NZD', 10000),
('AUD_USD', 10000), ('GBP_AUD', 10000), ('GBP_USD', 10000), ('USD_JPY', 100), ('XAU_USD', 1), ('BCO_USD', 100)
]
dfs = []
back_tester = BackTester(strategy='Mean Reversion')
for ccy_pair, lot_size in instruments:
test_orders = run(instrument=ccy_pair, window=20, max_orders=4, entry_adj=0.0005, tp_adj=0, start_date='2010-01-01', end_date='2020-12-31', output_result=False)
back_tester.lot_size = lot_size
print(f"{'*' * 30} {ccy_pair} {'*' * 30}")
back_tester.print_stats(test_orders)
df = | pd.DataFrame([{'time': o.order_date, 'pnl': o.pnl * lot_size} for o in test_orders]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# <a href="https://colab.research.google.com/github/nikhilparab17/Time-Series-Forecasting-for-CO2-Levels/blob/master/src/co2_forecasting_arima.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# In[ ]:
#get_ipython().system(' pip install pmdarima')
#!pip install pmdarima
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
from pmdarima.arima import auto_arima
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tools.eval_measures import rmse
from statsmodels.tsa.stattools import adfuller
# Dataset
DATASET = "../data/co2_dataset_preprocess.xlsx"
# function to load excel data
def loadExcelData(filepath):
print("Loading .xlsx data...")
excelData = pd.ExcelFile(filepath)
data = excelData.parse()
return data
# function to convert to date-time series
def convert_datetime(data):
data.index = data['YEAR'].apply(lambda x: dt.datetime.strptime(x, '%Y/%m/%d'))
del data['YEAR']
return data
# difference operation
def difference(data,order=1):
data_diff = data.diff(order)
return data_diff[order:]
# stationarity test
def check_stationary(data):
out = adfuller(data)
p_val = out[1]
print("p-val:", p_val)
if p_val < 0.05:
return True
else:
return False
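# Illustrative sketch (not part of the original notebook): combine the two
# helpers above by differencing the series until the ADF test reports
# stationarity; max_order is an assumed safety cap.
def make_stationary(series, max_order=2):
    d = 0
    while not check_stationary(series) and d < max_order:
        series = difference(series)
        d += 1
    return series, d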
# data visualization
def data_visualization(data, feature):
# time series
plt.plot(data[feature].index, data[feature], label = feature)
plt.legend()
plt.xlabel("Years")
plt.ylabel(feature + " (ppm)")
plt.savefig(feature + "_time-series.png")
plt.show()
plt.close()
# box plot
plt.boxplot(data[feature])
plt.ylabel(feature + " (ppm)")
plt.savefig(feature +"_boxplot.png")
plt.show()
plt.close()
# seasonal decomposition
sd = seasonal_decompose(data[feature], model= "additive")
fig, ax = plt.subplots(4,figsize=(12,9))
ax[0].plot(sd.observed, color='red', label = "Observed")
ax[0].legend()
ax[1].plot(sd.seasonal, color = 'green', label = "Seasonal")
ax[1].legend()
ax[2].plot(sd.resid, color = 'black', label = "Residual")
ax[2].legend()
ax[3].plot(sd.trend, color = 'blue', label= "Trend")
ax[3].legend()
fig.savefig(feature + "_seasonal_decompose.png")
plt.show()
plt.close()
# estimate the MA order 'q' using the Auto Correlation Function (ACF)
def acf(x, feature = 'CO2 Levels', l=3):
plot_acf(x[feature], lags=l)
plt.xlabel("Lag")
plt.savefig("CO2_ACF.png")
plt.close()
# estimate the AR order 'p' using the Partial Auto Correlation Function (PACF)
def pacf(x, feature ='CO2 Levels', l=3):
plot_pacf(x[feature], lags=l)
plt.xlabel("Lag")
plt.savefig("CO2_PACF.png")
plt.close()
# train and fit ARIMA model
def train_and_fit_arima(x, test_split = 0.2):
# run auto-arima grid search
stepwise_model= auto_arima(x, exogenous=None, start_p=0, d=1, start_q=0,
max_p=3, max_d=1, max_q=3,
start_P=0, D=1, start_Q=0, max_P=3, max_D=3,
max_Q=3, max_order=10, m=12, seasonal=True,
trace=True,error_action='ignore',
suppress_warnings=True,stepwise=False,
approximation=False)
print(stepwise_model.aic())
print(stepwise_model.summary())
split=len(x) - int(test_split * len(x))
train = x[0:split]
test = x[split:]
stepwise_model.fit(train)
future_forecast = stepwise_model.predict(n_periods=len(test))
future_forecast = | pd.DataFrame(future_forecast, index=test.index, columns=['Prediction']) | pandas.DataFrame |
from json import load
from matplotlib.pyplot import title
from database.database import DbClient
from discord import Embed
import pandas as pd
from util.data import load_data
class Analytics:
def __init__(self, server_id: str, db):
self.server_id = server_id
self.db = db
@staticmethod
def no_data_embed(topic: str) -> Embed:
"""CREATE AN EMBED IF NO DATA WAS COLLECTED"""
embed = Embed(title="SORRY", description=f"Sorry, but there were no `{topic}` data collected on this server!")
return embed
async def analyze_message(self):
"""ANALYZE THE MESSAGE DATA"""
data = await load_data(self.db, self.server_id)
data = data["message"]
if len(data) == 0:
return self.no_data_embed("message")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
channelid_counts = pd.value_counts(df["channelid"])
role_counts = pd.value_counts(df["roles"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message data"),
Embed(title=embed_title, description="Message counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_message_delete(self):
"""ANALYZE MESSAGE DELETE"""
data = await load_data(self.db, self.server_id)
data = data["message_delete"]
if len(data) == 0:
return self.no_data_embed("message delete")
# ANALYZE THE DATA
df = pd.DataFrame(data)
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message delete ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message edit data"),
Embed(title=embed_title, description="Message delete counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message delete from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message delete counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message delete counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_message_edit(self):
"""ANALYZE MESSAGE EDIT"""
data = await load_data(self.db, self.server_id)
data = data["message_edit"]
if len(data) == 0:
return self.no_data_embed("message edit")
# ANALYZE THE DATA
df = pd.DataFrame(data)
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message edit ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message edit data"),
Embed(title=embed_title, description="Message edits counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message edits from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message edits counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message edits counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_reaction(self):
"""ANALYZE THE REACTION DATA"""
data = await load_data(self.db, self.server_id)
data = data["reaction"]
if len(data) == 0:
return self.no_data_embed("reaction")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
name_count = pd.value_counts(df["reactionname"])
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Reaction ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed reaction data"),
Embed(title=embed_title, description="Reaction counted by name:\n"f"```{name_count}```"),
Embed(title=embed_title, description="Reaction counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Reaction send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Reaction counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Reaction counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_botrequests(self):
"""ANALYZE THE BOT-REQUESTS DATA"""
data = await load_data(self.db, self.server_id)
data = data["bot_requests"]
if len(data) == 0:
return self.no_data_embed("bot-requests")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
name_count = pd.value_counts(df["cmdname"])
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
embed_title = "Bot-Requests ~ Analytics"
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed bot-requests data"),
Embed(title=embed_title, description="Executed CMD-names counted:\n"f"```{name_count}```"),
Embed(title=embed_title, description="Bot-Requests messages counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Bot-Requests messages send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Bot-Requests counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Bot-Requests counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_botmsg(self):
"""ANALYZE THE BOT MSG DATA"""
data = await load_data(self.db, self.server_id)
data = data["bot_msg"]
if len(data) == 0:
return self.no_data_embed("bot-message")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
channelid_counts = pd.value_counts(df["channelid"])
role_counts = pd.value_counts(df["roles"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Bot-Message ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed bot-message data"),
Embed(title=embed_title, description=f"Total bot messages: {len(data)}"),
Embed(title=embed_title, description="BotMessages counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="BotMessages send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="BotMessages send in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="BotMessages send on which day:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_users(self): # TODO show last 10 users at timestamp
data = await load_data(self.db, self.server_id)
data = data["users"]
if len(data) == 0:
return self.no_data_embed("users")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
result = df.head(10)
#df["timestamp"] = pd.to_datetime(df["timestamp"])
#df["hours"] = df["timestamp"].dt.hour
#df["weekday"] = df["timestamp"].dt.day_name()
#hours_count = pd.value_counts(df["hours"])
#weekday_count = pd.value_counts(df["weekday"])
embed_title = "Users ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed users data"),
Embed(title=embed_title, description=f"```{result}```")
#Embed(title=embed_title, description="Users counted in which hours:\n"f"```{hours_count}```"),
#Embed(title=embed_title, description="Users counted in which hours:\n"f"```{hours_count}```"),
#Embed(title=embed_title, description="Users counted on which weekdays:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_userjoin(self):
data = await load_data(self.db, self.server_id)
data = data["userjoins"]
if len(data) == 0:
return self.no_data_embed("userjoins")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Userjoin ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message data"),
Embed(title=embed_title, description="Userjoins counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Userjoins counted on which weekdays:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_userleave(self):
data = await load_data(self.db, self.server_id)
data = data["userleave"]
if len(data) == 0:
return self.no_data_embed("userleave")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Userleave ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message data"),
Embed(title=embed_title, description="Userleaves counted in which hour:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Userleaves counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_mentions(self):
data = await load_data(self.db, self.server_id)
data = data["mentions"]
if len(data) == 0:
return self.no_data_embed("mentions")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
ment_roles_counts = pd.value_counts(df["ment_role"])
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Mentions ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed mentions data"),
Embed(title=embed_title, description="Mentions counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Mentions send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Roles mentioned:\n"f"```{ment_roles_counts}```"),
Embed(title=embed_title, description="Mentions counted in which hour:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Mentions counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_status(self): # TODO wrong fields
data = await load_data(self.db, self.server_id)
data = data["status"]
if len(data) == 0:
return self.no_data_embed("status")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
game_counts = pd.value_counts(df["game"])
role_counts = pd.value_counts(df["roles"])
embed_title = "Status/Game ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message data"),
Embed(title=embed_title, description="Which games play the most:\n"f"```{game_counts}```"),
Embed(title=embed_title, description="Game played from which roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Game counted in which hour:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Game counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_user_ban(self):
data = await load_data(self.db, self.server_id)
data = data["user_ban"]
if len(data) == 0:
return self.no_data_embed("user ban")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
df["timestamp"] = | pd.to_datetime(df["timestamp"]) | pandas.to_datetime |
""" Retrieve and process data """
from pathlib import Path
import geojson
import pandas as pd
import requests
import tqdm
import yaml
from loguru import logger
daily = "https://opendata.ecdc.europa.eu/covid19/subnationalcasedaily/csv"
weekly = "https://opendata.ecdc.europa.eu/covid19/subnationalcaseweekly/csv"
data_links = {"daily": daily, "weekly": weekly}
logger.add("logs/main.txt")
if __name__ == "__main__":
# Retrieve
for freq, link in data_links.items():
csv_file = requests.get(link, allow_redirects=True)
with open(f"data/{freq}.csv", "wb") as f:
f.write(csv_file.content)
logger.info("Data retrieved from ECDC")
# Process
files = {"daily": Path("./data/daily.csv"), "weekly": Path("./data/weekly.csv")}
data = pd.DataFrame()
for freq, file in files.items():
df = pd.read_csv(file)
if freq == "weekly":
# Updated every Wed
fmt = "%Y-W%W-%w"
add_day = df.year_week.apply(lambda x: str(x) + "-3")
df["date"] = pd.to_datetime(add_day, format=fmt)
df.drop("year_week", axis=1, inplace=True)
elif freq == "daily":
df["date"] = | pd.to_datetime(df.date) | pandas.to_datetime |
from datetime import datetime
import numpy as np
from pandas import (
DataFrame,
DatetimeIndex,
Series,
date_range,
to_datetime,
)
import pandas._testing as tm
from pandas.tseries import offsets
class TestAsFreq:
def test_asfreq_resample_set_correct_freq(self):
# GH#5613
# we test if .asfreq() and .resample() set the correct value for .freq
df = DataFrame(
{"date": ["2012-01-01", "2012-01-02", "2012-01-03"], "col": [1, 2, 3]}
)
df = df.set_index( | to_datetime(df.date) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 20:37:15 2021
@author: skrem
"""
import pandas as pd
import numpy as np
# import csv
import matplotlib as mpl
import matplotlib.pyplot as plt
import sklearn as sk
import sklearn.preprocessing
from sklearn import metrics
import scipy.stats
import scipy.optimize
import seaborn as sns
import matplotlib.patheffects as path_effects
import os
import copy
scaler = sk.preprocessing.MinMaxScaler()
degree_sign = u'\N{DEGREE SIGN}'
"Get global params and pass them to locals"
import settings_init
import settings_transformations
from Avg_data_getter import Avg_data_getter
if settings_init.storage_location is not None:
file_location = settings_init.file_location
Mode = settings_init.Mode
On_len_s = settings_init.On_len_s
Off_len_s = settings_init.Off_len_s
Cycle_len_s = settings_init.Cycle_len_s
repeats = settings_init.repeats
Stim_width_um = settings_init.Stim_width_um
conds_list = settings_init.conds_list
response_avg_dur = settings_transformations.response_avg_dur
baseline_avg_dur = settings_transformations.baseline_avg_dur
indeces_per_s = settings_transformations.indeces_per_s
total_time = settings_transformations.total_time
vis_ang_list = settings_transformations.vis_ang_list
seconds_list = settings_transformations.seconds_list
avg_df = settings_transformations.avg_df
avg_array = settings_transformations.avg_array
ROI_number = settings_transformations.ROI_number
"Functions____________________________________________________________________"
def Get_event_data(roi = "All", event = "All", normalize = "0", plot = "0", data = file_location):
"""Returns a data for selected events specified (based on Mode), and computes
response and baseline average.
Hint: To select multiple ROIs for a single event or multiple events from a
single ROI, specify as variable eg.g ROI_13_14_15_event_8 =
Get_avg_response((13, 14, 15), (8)). Selecting both multiple ROIs and
multiple events is unstable and will yield unexpected results.
Parameters
----------
roi_select: Tuple or array
ROIs from which data is extracted. Default loops through all ROIs.
Script written to be naive to whether input is tuple (one ROI) or
array (many ROIs)
event_select: Tuple or array
Events from which data is extracted. Default loops through all events.
Naive to tuple (one event) or arrays (many events)
normalize : 0 or 1
Normalize data so range is from 0 to 1 (no/yes)
plot: 0 or 1
Plot sampled data
*data: If given (as string to directory), script loads new, external datafile
Returns
-------
ROI_responses, ROI_baselines, Average_response, Average_baseline
"""
# if data != file_location:
"""
TODO
- This is not the neatest solution... IF I am to do this, then I should
seriously change the label to NOT BE THE SAME AS GLOBAL PARAMS. What I am
doing currently is just a bit nasty...
"""
alt_data = Avg_data_getter(data)
avg_df = alt_data[0] #"""A test"""
avg_array = alt_data[1]
ROI_number = alt_data[2]
# label_list = alt_data[3]
#new improvements
if roi == "All":
roi = np.arange(0, ROI_number)
else:
roi = roi
if isinstance(roi, int) == True:
roi = np.array([roi])
# print("roi was int(), converted to numpy array")
#print("Warning: 'roi_select' takes tuple, but single int was given. Single int was converted to (1,) array.")
if event == "All":
event = np.arange(0, Mode)
else:
event = event
if isinstance(event, int) == True:
event = np.array([event])
# print("event was int(), converted to numpy array")
#print("Warning: 'event_select' takes tuple, but single int was given. Single int was converted to (1,) array.")
ROI_responses = np.empty((0,1))
ROI_baselines = np.empty((0,1))
if normalize == 1:
norm_avg_array = np.copy(avg_array) #create duplicate to avoid overwriting original imported data matrix
for i in roi:
"""
TODO
- Fix the thing below... This is what's giving the IndexError: index 8 is out of bounds for axis 1 with size 8
= what happens is that as loop starts, for some reason, it gets to a certain recording and index is
out of bounds for the ROIs in the recording...
"""
curr_operation = scaler.fit_transform((norm_avg_array[:, i]).reshape(-1, 1)) #"""workaround"""
curr_operation = curr_operation.reshape(len(curr_operation))
norm_avg_array[:, i] = curr_operation
normalized_data_set = pd.DataFrame(data = norm_avg_array, columns = np.arange(0, ROI_number))
data_set = normalized_data_set
else:
data_set = pd.DataFrame.copy(avg_df)
for i in roi: #This script samples and extracts data at given intervals
for j in event:
#Get response values:
start_index_res = (On_len_s - response_avg_dur + (Cycle_len_s * j)) * indeces_per_s #set start position for current sampling
end_index_res = (On_len_s + (Cycle_len_s * j)) * indeces_per_s #end position for current sampling
curr_series_res = ((data_set[i].loc[start_index_res:end_index_res]))
curr_series_res = curr_series_res.to_numpy()
ROI_responses = np.append(curr_series_res, ROI_responses)
#Get baseline values:
start_index_bsl = (Cycle_len_s - baseline_avg_dur + (Cycle_len_s * j)) * indeces_per_s
end_index_bsl = (Cycle_len_s + (Cycle_len_s * j)) * indeces_per_s
curr_series_bsl = ((data_set[i].loc[start_index_bsl:end_index_bsl]))
curr_series_bsl = curr_series_bsl.to_numpy()
ROI_baselines = np.append(curr_series_bsl, ROI_baselines)
Average_response = np.average(ROI_responses)
Average_baseline = np.average(ROI_baselines)
if plot == 1:
if len(roi) == 1:
base_colors = mpl.cm.get_cmap('gist_rainbow')
color_list = base_colors(np.linspace(0, 1, ROI_number))
ROI_color = color_list[int(roi)]
else:
ROI_color = 'b'
fig, (ax1, ax2) = plt.subplots(1, 2, sharey = True, figsize = (10, 5))
plt.subplots_adjust(wspace = 0)
if isinstance(roi, int) == True:
plt.suptitle("Sampled activity for ROI {}, event {}".format(int(roi), int(event)))
else:
plt.suptitle("Sampled activity for ROIs {}, event {}".format((roi), (event)))
# plt.figure(0)
ax1.set_title("Response period")
if normalize == 0:
ax1.set_ylabel("Z-score (raw)")
if normalize == 1:
ax1.set_ylabel("Z-score (normalised)")
ax1.set_xlabel("Sample sequence")
ax1.plot(ROI_responses, c = ROI_color)
# plt.figure(1)
ax2.set_title("Baseline period")
# ax2.set_ylabel("Z-score")
ax2.set_xlabel("Sample sequence")
ax2.plot(ROI_baselines, c = ROI_color)
#plt.vlines(np.linspace(0, len(ROI_resp_array.flatten('F')), Mode), np.amin(ROI_resp_array), np.amax(ROI_resp_array), colors = 'k')
# print("Avg respone: {}, Avg baseline: {}".format(Average_response, Average_baseline))
return ROI_responses, ROI_baselines, Average_response, Average_baseline
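# Hypothetical usage sketch (not part of the original script): the ROI and
# event indices are illustrative and assume the recording contains them.
def _example_event_sampling(roi=3, event=5):
    resp, bsl, avg_resp, avg_bsl = Get_event_data(roi, event,
                                                  normalize=1, plot=0)
    return avg_resp - avg_bsl  # response amplitude relative to baseline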
def Get_interval_data(roi, interval_start_s, interval_end_s, normalize = "0", plot = "0"):
"""Returns data from given ROI within specified time interval (s)
Parameters
-------------
roi: int
Which ROI to sample data from. Only one can be chosen at a time.
interval_start_s: int
Start of sampling interval (in seconds)
interval_end_s: int
End of sampling interval (in seconds)
normalize : 0 or 1
Normalize data so range is from 0 to 1 (no/yes)
plot: 0 or 1
Plot sampled data
Returns
-------
interval_data, interval_data_with_s
"""
if normalize == 1:
norm_avg_array = np.copy(avg_array) #create duplicate to avoid overwriting original imported data matrix
curr_operation = scaler.fit_transform((norm_avg_array[:,roi]).reshape(-1, 1)) #"""workaround"""
curr_operation = curr_operation.reshape(len(curr_operation))
norm_avg_array[:, roi] = curr_operation
normalized_data_set = pd.DataFrame(data = norm_avg_array, columns = np.arange(0, ROI_number)) #np.arange(0, ROI_number)
data_set = normalized_data_set
else:
data_set = pd.DataFrame.copy(avg_df)
interval_data = np.empty((0,1))
start_index = interval_start_s * indeces_per_s #set start position for current sampling
end_index = interval_end_s * indeces_per_s #end position for current sampling
curr_series_res = ((data_set[roi].loc[start_index:end_index]))
curr_series_res = curr_series_res.to_numpy()
interval_data = np.append(curr_series_res, interval_data)
if interval_end_s > total_time:
time_in_s = np.linspace(interval_start_s, total_time, len(interval_data))
else:
time_in_s = np.linspace(interval_start_s, interval_end_s, len(interval_data))
interval_data_with_s = np.column_stack((interval_data, time_in_s))
if plot == 1:
if isinstance(roi, int) is True:
base_colors = mpl.cm.get_cmap('gist_rainbow')
color_list = base_colors(np.linspace(0, 1, ROI_number))
ROI_color = color_list[roi]
else:
ROI_color = 'b'
plt.figure(0, dpi = 800)
if normalize == 0:
plt.ylabel("Z-score (raw)")
if normalize == 1:
plt.ylabel("Z-score (normalised)")
plt.title("Sampled interval data from ROI{}".format(roi))
x_axis = time_in_s
plt.plot(x_axis, interval_data, c=ROI_color)
plt.xlabel("Time (s)")
for m in range(Mode):
plt.axvspan((m * Cycle_len_s), ((m * Cycle_len_s) + On_len_s),
color='r', alpha=0.25, lw=0)
if interval_end_s > total_time:
plt.xlim([interval_start_s, total_time])
else:
plt.xlim([interval_start_s, interval_end_s])
return interval_data, interval_data_with_s
def Plot_activity(ROIs = "All", shade = 1, **kwargs):
"""Plot activity of all or specified ROIs"""
if ROIs == "All":
to_plot = np.arange(0, ROI_number)
else:
to_plot = np.array(ROIs)
#Colormap
base_colors = mpl.cm.get_cmap('gist_rainbow') #hsv(x) for x in range(ROI_number)] <-- legacy solution
color_list = base_colors(np.linspace(0, 1, ROI_number))
#Calculate time interval for x-axis
time_in_s = np.linspace(0, total_time, len(avg_df))
#Build each individual ROI plot
# if ROIs == "All":
fig, ax1 = plt.subplots(len(to_plot), 1, sharex = 'col', sharey = False, dpi = 1200, figsize=(10, 15))
# else:
# fig, ax1 = plt.subplots(len(to_plot), 1, sharex = 'col', sharey = False, dpi = 800, figsize=(10, 15))
for v, i in enumerate(to_plot):
w = v+1
ax1[v] = plt.subplot(len(to_plot), 1, w)
ax1[v].plot(time_in_s, avg_df[i], color = color_list[i], linewidth=1.5)
sns.despine(left = True, right = True, bottom = True)
ax1[v].get_yaxis().set_visible(False)
ax1[v].set_title("ROI{}".format(i), x=-0.01, y=.5, size = 10)
if shade == 1:
for m in range(Mode):
ax1[v].axvspan(
(m * Cycle_len_s), ((m * Cycle_len_s) + On_len_s),
color = '#ffe0f9', lw = 0)#, alpha = 0)
# plt.setp(ax1[i-1].get_xticklabels(), visible=False) #This is a work around. Hides axis
#for every ax1 except last one, as #share-axis did not function properly.
plt.subplots_adjust(hspace = 0)
#Frame for adding titles and such
fig.add_subplot(111, frameon = False)
plt.tick_params(labelcolor='none', which='both', top=False, bottom=False, left=False, right=False)
plt.xlabel("Time (s)")
plt.title("Average ROI activity ({} trials)".format(repeats))
# ax2.spines["top"].set_visible(True)
# ax2.spines["bottom"].set_visible(False)
# ax2.spines["left"].set_visible(False)
# ax2.spines["right"].set_visible(False)
# ax2.axis("off")
if 'saveas' in kwargs:
# plt.figure(dpi = 2000)
plt.savefig(r'C://Users//skrem//OneDrive//Universitet//MSc//Experimental project//Figures//Python generated//{}'.format(kwargs['saveas']), dpi = 2000, bbox_inches='tight')
plt.figure(2, dpi=800)
ROI_overlap, bx = plt.subplots(1, 1, figsize=(15, 10))
bx.set_title("All ROI activity")
plt.locator_params(axis = 'x', tight = None, nbins = 30)
for i in to_plot:
bx.plot(seconds_list, avg_df[i], color = color_list[i], linewidth=0.75)
bx.set_xlabel("Time (s)")
bx.set_ylabel("Z-score")
def Get_RF_matrix(roi = 'All', normalize = 0, data = file_location):
"""Gives the receptive field as a matrix by computing the difference
between the response and baseline for each event, for specified ROIs."""
# avg_bsln = Get_event_data()[3]
if normalize == 0:
norm = 0
if normalize == 1:
norm = 1
x_axis = np.empty(0)
y_axis = np.empty(0)
# for i in reversed(range(int(Mode/2))):
for i in reversed(range(int(Mode/2))):
# x_axis = np.append(Get_event_data(roi, i)[3]-Get_event_data(roi, i)[2] - avg_bsln, x_axis)
x_axis = np.append(Get_event_data(roi, i, norm, data = data)[3]-Get_event_data(roi, i, norm, data = data)[2], x_axis)
# a = np.flip(a)
for j in reversed(range(int(Mode/2), Mode)):
# for j in reversed(range(int(Mode/2), Mode)):
# y_axis = np.append(Get_event_data(roi, j)[3]-Get_event_data(roi, j)[2] - avg_bsln, y_axis)
y_axis = np.append(Get_event_data(roi, j, norm, data = data)[3]-Get_event_data(roi, j, norm, data = data)[2], y_axis)
# b = np.flip(b)
RF_matrix = x_axis.reshape(int(Mode/2), 1) @ y_axis.reshape(1, int(Mode/2))
RF_matrix = np.rot90(RF_matrix, 1)
return RF_matrix, x_axis, y_axis
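# The RF matrix above is simply the outer product of the X- and Y-axis response
# vectors. Minimal self-contained illustration of that step (demo values only,
# not experimental data):
_demo_x = np.array([0.1, 0.8, 0.2]) #per-bar response differences along X
_demo_y = np.array([0.3, 0.9, 0.1]) #per-bar response differences along Y
_demo_RF = _demo_x.reshape(3, 1) @ _demo_y.reshape(1, 3) #3x3 receptive-field estimate
_demo_RF = np.rot90(_demo_RF, 1) #same orientation convention as Get_RF_matrix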
def Plot_RF(roi = 'All', normalize = 0, data = file_location, **kwargs):
if normalize == 0:
RF_matrix = Get_RF_matrix(roi, 0, data = data)[0]
if normalize == 1:
RF_matrix = Get_RF_matrix(roi, 1, data = data)[0]
if 'interpolation' in kwargs:
interpol = kwargs['interpolation']
else:
interpol = None
vis_ang_list_rounded = np.round(vis_ang_list, 1) #axis starts at 0
# vis_ang_list_rounded = np.round(np.absolute(vis_ang_list_alt), 1) #axis centered on 0
fig, ax1 = plt.subplots(1,1, figsize = (10, 10))
RF_plot = ax1.imshow(RF_matrix, cmap = 'bone', interpolation = interpol)
ax1.set_ylabel("Visual angle (°)", labelpad = 15)
ax1.set_yticks(np.arange(-.5, Mode/2))
ax1.set_yticklabels(np.flip(vis_ang_list_rounded))
ax1.yaxis.set_label_position("right")
ax1.yaxis.tick_right()
ax1.set_xlabel("Visual angle (°)", labelpad = 15)
ax1.set_xticks(np.arange(-.5, (Mode/2)))
ax1.set_xticklabels((vis_ang_list_rounded))
ax1.xaxis.set_label_position("top")
ax1.xaxis.tick_top()
ax2 = ax1.secondary_xaxis('bottom')
ax2.set_xticks(np.arange(0, Mode/2))
ax2.set_xticklabels(np.arange(1, round((Mode/2)) + 1))
ax2.set_xlabel("Bar location", labelpad = 15)
ax2 = ax1.secondary_yaxis('left')
ax2.set_yticks(np.arange(0, Mode/2))
ax2.set_yticklabels(reversed(np.arange(1, round((Mode/2)) + 1)))
ax2.set_ylabel("Bar location", labelpad = 15)
plt.grid(True, which = 'major', color = "grey")
plt.colorbar(RF_plot, fraction = 0.04 ,pad = .175, label = "Z-score difference (baseline avg. - response avg.)")
if roi == 'All':
plt.suptitle("Computed receptive field for all sampled ROIs", y = .90)
if "title" in kwargs:
plt.suptitle(kwargs["title"], y = .90)
else:
plt.suptitle("Computed receptive field for ROI {}".format(roi), y = .90)
if 'saveas' in kwargs:
plt.savefig(r'C://Users//skrem//OneDrive//Universitet//MSc//Experimental project//Figures//Python generated//{}'.format(kwargs['saveas']), dpi = 2000, bbox_inches='tight')
"""Consider this 3D RF plot too! https://stackoverflow.com/questions/44895117/colormap-for-3d-bar-plot-in-matplotlib-applied-to-every-bar
or https://www.geeksforgeeks.org/3d-surface-plotting-in-python-using-matplotlib/ or https://stackoverflow.com/questions/38698277/plot-normal-distribution-in-3d """
def gaus(x, a, b, c):
# a Gaussian distribution
return a * np.exp(-(x-b)**2/(2*c**2))
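# Minimal self-contained check of the Gaussian model used for the RF fits below.
# Synthetic data only; the import is kept local so the snippet does not depend on
# how scipy was imported at the top of this script:
from scipy.optimize import curve_fit as _curve_fit
_demo_xdata = np.arange(10)
_demo_ydata = gaus(_demo_xdata, 2.0, 5.0, 1.5) + np.random.normal(0, 0.01, 10)
_demo_popt, _demo_pcov = _curve_fit(gaus, _demo_xdata, _demo_ydata, p0 = (1.0, 4.0, 1.0))
#_demo_popt should recover approximately (2.0, 5.0, 1.5)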
def find_near(input_array, target):
"""Return nearest value to specified target and its index in array"""
arr = np.asarray(input_array)
x = target
difference_array = np.abs(arr-x)
index = difference_array.argmin()
nearest = arr[index]
nearest_loc = index
return nearest, nearest_loc
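# Quick illustration of find_near with demo values:
_demo_arr = np.array([0.0, 2.5, 5.0, 7.5, 10.0])
_demo_val, _demo_idx = find_near(_demo_arr, 6.1) #-> (5.0, 2)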
def RF_profile(roi = 'All', normalize = 0, plot = 1, curvefit = 1, data = file_location, test_fit = True, title = 0, **kwargs):
"""Returns a barchart of X and Y response profiles for specified ROI. Differs
from RF_matrix_slice() in that RF_profile retrieves plot BEFORE matrix
multiplication and subsequent matrix slicing --> E.g. RF_profile draws on
raw"""
if normalize == 0:
norm = 0
if normalize == 1:
norm = 1
if 'example_data' in kwargs:
x_axis = kwargs['example_data'][0]
y_axis = kwargs['example_data'][1]
else:
x_axis = np.empty(0)
y_axis = np.empty(0)
for i in reversed(range(int(Mode/2))):
x_axis = np.append(Get_event_data(roi, i, norm, data = data)[3]-Get_event_data(roi, i, norm, data = data)[2], x_axis)
for j in (range(int(Mode/2), Mode)):
y_axis = np.append(Get_event_data(roi, j, norm, data = data)[3]-Get_event_data(roi, j, norm, data = data)[2], y_axis)
if plot == 1:
plt.figure(dpi = 800)
# plt.subplot(2, 1, 1)
plt.bar(np.arange(0, Mode/2), x_axis.reshape(int(Mode/2),), width=1, label = "X axis scores")
plt.bar(np.arange(0, Mode/2), y_axis.reshape(int(Mode/2),), width=.90, label = "Y axis scores")
axx = plt.gca()
axy = axx.secondary_xaxis('top')
if title == 1:
plt.title("ROI RF response profile (X and Y axes)")
axx.set_xlabel("Visual angle (°)")
axx.set_ylabel("Response (Z-score difference)")
plt.xticks(np.arange(-.5, (Mode/2)))
axx.set_xticklabels(np.round(vis_ang_list, 1))
axy.set_xticks(np.arange(0, Mode/2))
axy.set_xticklabels(np.arange(0, round((Mode/2))))
axy.set_xlabel("Bar position")
handles, labels = axx.get_legend_handles_labels()
plt.legend(reversed(handles), reversed(labels))
if curvefit == 1: #for plotting purposes
xdata = np.arange(0, int(Mode/2))
x_ydata = x_axis.reshape(int(Mode/2),)
y_ydata = y_axis.reshape(int(Mode/2),)
#Get curve params
popt_x, pcov_x = scipy.optimize.curve_fit(gaus, xdata, x_ydata, maxfev=2500, p0 = np.array((max(x_ydata), np.argmax(x_ydata),1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(x_ydata), np.inf, np.inf)))
popt_y, pcov_y = scipy.optimize.curve_fit(gaus, xdata, y_ydata, maxfev=2500, p0 = np.array((max(y_ydata), np.argmax(y_ydata),1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(y_ydata), np.inf, np.inf)))
#Plot curve
resolution = 1000
x=np.linspace(0, Mode/2, resolution)
yx = gaus(x, *popt_x)
yy = gaus(x, *popt_y)
if test_fit == True:
#Compute R^2 --> https://stackoverflow.com/questions/19189362/getting-the-r-squared-value-using-curve-fit
x_residuals = x_ydata - gaus(xdata, *popt_x) #Get residuals
x_ss_res = np.sum(x_residuals**2) #Calculate residual sum of squares
x_ss_tot = np.sum((x_ydata - np.mean(x_ydata))**2) #Total sum of squares
x_r_squared = 1 - (x_ss_res / x_ss_tot) #R^2 value
x_r = np.sqrt(x_r_squared)
y_residuals = y_ydata - gaus(xdata, *popt_y)
y_ss_res = np.sum(y_residuals**2)
y_ss_tot = np.sum((y_ydata - np.mean(y_ydata))**2)
y_r_squared = 1 - (y_ss_res / y_ss_tot)
y_r = np.sqrt(y_r_squared)
#Compute Adjusted R^2 --> https://www.statisticshowto.com/probability-and-statistics/statistics-definitions/adjusted-r2/
regs = len(np.array(gaus.__code__.co_varnames))-1 #Number of regressors (variables in model - constant)
x_n = len(x_ydata) #n number of points in data sample (of curve or data?)
x_r_squared_adj = 1 - ((1-x_r_squared)*(x_n - 1))/(x_n-regs-1)
y_n = x_n
y_r_squared_adj = 1 - ((1-y_r_squared)*(y_n - 1))/(y_n-regs-1)
if plot == 1:
#Put R^2 and Chi^2 values into a little table
table_content = np.array([["R", np.round(x_r, 2), np.round(y_r, 2)], ["R\u00b2", np.round(x_r_squared, 2), np.round(y_r_squared, 2)],["R\u2090\u00b2", np.round(x_r_squared_adj, 2), np.round(y_r_squared_adj, 2)]]) #["X\u00b2", np.round(x_chi_p, 2), np.round(y_chi_p, 2)]]) #placeholder
collabel = ('Fit', 'X', 'Y')
The_table = plt.table(cellText=table_content ,colLabels=collabel, colWidths = [0.05]*3, loc = 'bottom left', bbox = (-.1,-.4,.25,.25))
The_table.scale(1 * 1.5, 1)
if plot == 1:
x_curve_eq = r"$\ f(x) = %.2f e ^ {-\frac{(x - %.2f)^2}{(%.2f)^2}} "\
"$" % (popt_x[0], popt_x[1], 2*popt_x[2])
y_curve_eq = r"$\ f(y) = %.2f e ^ {-\frac{(y - %.2f)^2}{(%.2f)^2}} "\
"$" % (popt_y[0], popt_y[1], 2*popt_y[2])
plt.plot(x, yx, c='b', label="{}".format(x_curve_eq),
path_effects=[path_effects.Stroke(linewidth=4,
foreground = 'black'), path_effects.Normal()])
plt.plot(x, yy, c = 'orange', label = y_curve_eq,
path_effects=[path_effects.Stroke(linewidth = 4,
foreground = 'black'), path_effects.Normal()])
plt.xticks(np.arange(-.5, (Mode/2)))
handles, labels = axx.get_legend_handles_labels()
plt.legend(reversed(handles), (reversed(labels)))
axx.set_xticklabels(np.round(vis_ang_list, 1))
if plot == 1:
plt.show()
if curvefit == 0:
return x_axis, y_axis
if curvefit == 1 and test_fit == True:
return x_axis, y_axis, x_r_squared, y_r_squared
else:
return x_axis, y_axis
def RF_matrix_slice (roi = 'All', normalize = 0, plot = 1, curvefit = 1, data = file_location):
if normalize == 0:
RF_matrix = Get_RF_matrix(roi, 0, data)[0]
if normalize == 1:
RF_matrix = Get_RF_matrix(roi, 1, data)[0]
# RF_peak = np.amax(RF_matrix)
RF_peak_loc = np.where(RF_matrix == np.amax(RF_matrix))
y_axis_vals = RF_matrix[:, RF_peak_loc[1]]
x_axis_vals = RF_matrix[RF_peak_loc[0]]
if plot == 1:
plt.figure(dpi = 800)
plt.bar(np.arange(0, Mode/2), x_axis_vals.reshape(int(Mode/2),), width=1, label = "X axis scores")
plt.bar(np.arange(0, Mode/2), y_axis_vals.reshape(int(Mode/2),), width=.90, label = "Y axis scores")
axx = plt.gca()
axy = axx.secondary_xaxis('top')
plt.title("Slice through centre of RF matrix (X and Y axes)")
axx.set_xticks(np.arange(0, Mode/2))
axx.set_xlabel("Visual angle (°)")
axx.set_ylabel("Response (Z-score difference)")
axy.set_xticks(np.arange(0, Mode/2))
axy.set_xticklabels(np.arange(0, round((Mode/2))))
axy.set_xlabel("Bar position")
handles, labels = axx.get_legend_handles_labels()
plt.legend(reversed(handles), reversed(labels))
if curvefit == 1:
xdata = np.arange(0, int(Mode/2))
x_ydata = x_axis_vals.reshape(int(Mode/2),)
y_ydata = y_axis_vals.reshape(int(Mode/2),)
# popt_x, pcov_x = scipy.optimize.curve_fit(gaus, np.arange(0, int(Mode/2)), x_axis_vals.reshape(int(Mode/2),), maxfev=2500)
# popt_y, pcov_y = scipy.optimize.curve_fit(gaus, np.arange(0, int(Mode/2)), y_axis_vals.reshape(int(Mode/2),), maxfev=2500)
popt_x, pcov_x = scipy.optimize.curve_fit(gaus, xdata, x_ydata, maxfev=2500, p0 = np.array((max(x_ydata), np.argmax(x_ydata), 1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(x_ydata), np.inf, np.inf)))
popt_y, pcov_y = scipy.optimize.curve_fit(gaus, xdata, y_ydata, maxfev=2500, p0 = np.array((max(y_ydata), np.argmax(y_ydata), 1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(y_ydata), np.inf, np.inf)))
x=np.linspace(0, Mode/2, 1000)
yx = gaus(x, *popt_x)
yy = gaus(x, *popt_y)
x_curve_eq = r"$\ f(x) = %.2f e ^ {-\frac{(x - %.2f)^2}{(%.2f)^2}} "\
"$" % (popt_x[0], popt_x[1], 2*popt_x[2])
y_curve_eq = r"$\ f(y) = %.2f e ^ {-\frac{(y - %.2f)^2}{(%.2f)^2}} "\
"$" % (popt_y[0], popt_y[1], 2*popt_y[2])
plt.plot(x, yx, c='b', label="{}".format(x_curve_eq),
path_effects=[path_effects.Stroke(linewidth=4,
foreground = 'black'), path_effects.Normal()])
plt.plot(x, yy, c = 'orange', label = y_curve_eq,
path_effects=[path_effects.Stroke(linewidth = 4,
foreground = 'black'), path_effects.Normal()])
plt.xticks(np.arange(-.5, (Mode/2)))
handles, labels = axx.get_legend_handles_labels()
plt.legend(reversed(handles), reversed(labels))
axx.set_xticklabels(np.round(vis_ang_list, 1))
plt.show()
return x_axis_vals, y_axis_vals
def Compute_RF_size(roi = 'All', normalize = 0, plot = 0, data = file_location, test_fit = True, **kwargs):
"""https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2269911/"""
if 'example_data' in kwargs:
x_vals = kwargs['example_data'][0]
y_vals = kwargs['example_data'][1]
else:
if normalize == 0:
x_vals, y_vals = RF_profile(roi, 0, 0, data = data)[:2]
if normalize == 1:
x_vals, y_vals = RF_profile(roi, 1, 0, data = data)[:2]
xdata = np.arange(0, int(Mode/2))
x_ydata = x_vals.reshape(int(Mode/2),)
y_ydata = y_vals.reshape(int(Mode/2),)
try:
popt_x, pcov_x = scipy.optimize.curve_fit(gaus, xdata, x_ydata, maxfev=2500, p0 = np.array((max(x_ydata), np.argmax(x_ydata), 1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(x_ydata), np.inf, np.inf)))
popt_y, pcov_y = scipy.optimize.curve_fit(gaus, xdata, y_ydata, maxfev=2500, p0 = np.array((max(y_ydata), np.argmax(y_ydata), 1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(y_ydata), np.inf, np.inf)))
except Exception:
popt_x = pcov_x = popt_y = pcov_y = 0
Nofit = 'No fit'
print ("scipy.optimize.curve_fit maxfev reached, returned (None, None) dtype = object")
return Nofit, Nofit
resolution = 10000 #how many points on curve, more is better but computationally slower
index_to_visang = vis_ang_list[-1]/resolution
x=np.linspace(0, Mode/2, resolution)
yx = gaus(x, *popt_x)
yy = gaus(x, *popt_y)
criteria = 0.005
yx_peak = np.where(yx == np.amax(yx))[0][0]
if yx_peak == 0 or yx_peak == resolution:
yx_half_width = "Peak obscured"#None #return None if value falls outside range of data
yx_curve_indeces = np.where(yx > criteria)
yx_left_index = yx_curve_indeces[0][0]
yx_right_index = yx_curve_indeces[0][-1]
yx_half_width = ((yx_right_index - yx_left_index) * index_to_visang) / 2
if yx_left_index == 0 or yx_right_index == resolution:
yx_half_width = "Half-width obscured"
yy_peak = np.where(yy == np.amax(yy))[0][0]
if yy_peak == 0 or yy_peak == resolution:
yy_half_width = "Peak obscured"#None #return None if value falls outside range of data
yy_curve_indeces = np.where(yy > criteria)
yy_left_index = yy_curve_indeces[0][0]
yy_right_index = yy_curve_indeces[0][-1]
yy_half_width = ((yy_right_index - yy_left_index) * index_to_visang) / 2
if yy_left_index == 0: #or yy_right_index == resolution:
yy_half_width = "Half-width obscured"
if test_fit == True:
x_axis = np.empty(0)
y_axis = np.empty(0)
for i in reversed(range(int(Mode/2))):
x_axis = np.append(Get_event_data(roi, i, normalize, data = data)[3]-Get_event_data(roi, i, normalize, data = data)[2], x_axis)
for j in (range(int(Mode/2), Mode)):
y_axis = np.append(Get_event_data(roi, j, normalize, data = data)[3]-Get_event_data(roi, j, normalize, data = data)[2], y_axis)
xdata = np.arange(0, int(Mode/2))
x_ydata = x_axis.reshape(int(Mode/2),)
y_ydata = y_axis.reshape(int(Mode/2),)
x_y = x_ydata
y_y = y_ydata
X = gaus(xdata, *popt_x)
x_pearsons_r = scipy.stats.pearsonr(x_y, gaus(xdata, *popt_x))
x_r_sqrd = metrics.r2_score(x_y, gaus(xdata, *popt_x))
x_r_squared_adjusted = 1 - ((1 - x_r_sqrd)*(len(x_y) - 1)) / ((len(x_y) - len(popt_x) - 1))
spearmans_for_x = scipy.stats.spearmanr(x_y, gaus(xdata, *popt_x))
y_pearsons_r = scipy.stats.pearsonr(y_y, gaus(xdata, *popt_y))
y_r_sqrd = metrics.r2_score(y_y, gaus(xdata, *popt_y)) #use the same metrics import as the x-axis fit above
y_r_squared_adjusted = 1 - ((1 - y_r_sqrd)*(len(y_y) - 1)) / ((len(y_y) - len(popt_y) - 1))
spearmans_for_y = scipy.stats.spearmanr(y_y, gaus(xdata, *popt_y))
if plot == 1:
plt.plot(np.linspace(0, vis_ang_list[-1], resolution), yx)
plt.plot(np.linspace(0, vis_ang_list[-1], resolution), yy)
if isinstance(yx_half_width, str) == False:
plt.hlines(yx[int((yx_left_index + yx_peak) / 2)], yx_left_index * index_to_visang + yx_half_width/2, yx_right_index * index_to_visang - yx_half_width/2)
plt.hlines(yx[int((yx_left_index + yx_peak) / 2)], yx_left_index * index_to_visang, yx_right_index * index_to_visang, linestyle = 'dotted', colors = 'k')
plt.vlines(x = yx_left_index*index_to_visang, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yx_left_index * index_to_visang + yx_half_width/2, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)])
plt.vlines(x = yx_right_index*index_to_visang, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yx_right_index * index_to_visang - yx_half_width/2, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)])
if isinstance(yy_half_width, str) == False:
plt.hlines(yy[int((yy_left_index + yy_peak) / 2)], yy_left_index * index_to_visang + yy_half_width/2, yy_right_index * index_to_visang - yy_half_width/2, colors = '#FF8317')
plt.hlines(yy[int((yy_left_index + yy_peak) / 2)], yy_left_index * index_to_visang, yy_right_index * index_to_visang, linestyle = 'dotted', colors = 'k')
plt.vlines(x = yy_left_index*index_to_visang, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yy_left_index * index_to_visang + yy_half_width/2, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], colors = '#FF8317')
plt.vlines(x = yy_right_index*index_to_visang, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yy_right_index * index_to_visang - yy_half_width/2, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], colors = '#FF8317')
plt.axvline(x = yx_peak*index_to_visang, c = 'g', linestyle = (0, (5, 10)))
plt.axvline(x = yy_peak*index_to_visang, c = 'g', linestyle = (0, (5, 10)))
# plt.xlim(0, 75)
plt.xlabel("Visual angle (°)")
print("Pearsons X: {}, {}".format(x_pearsons_r, y_pearsons_r))
print("R2: {} {}".format(x_r_sqrd, y_r_sqrd))
print("R2adj {}, {}".format(x_r_squared_adjusted, y_r_squared_adjusted))
print("Spearman R: {}, {}".format(spearmans_for_x, spearmans_for_y))
if 'title' in kwargs:
plt.title(kwargs["title"])
plt.show()
# return yx_RF_size, yy_RF_size
return yx_half_width, yy_half_width
def Model_RF_size(roi = 'All', normalize = 0, plot = 0, data = file_location, test_fit = True):
"""https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2269911/"""
if normalize == 0:
x_vals, y_vals = RF_profile(roi, 0, 0, 0, data = data)[:2]
if normalize == 1:
x_vals, y_vals = RF_profile(roi, 1, 0, 0, data = data)[:2]
xdata = np.arange(0, int(Mode/2))
x_ydata = x_vals.reshape(int(Mode/2),)
y_ydata = y_vals.reshape(int(Mode/2),)
try:
popt_x, pcov_x = scipy.optimize.curve_fit(gaus, xdata, x_ydata, maxfev=2500, p0 = np.array((max(x_ydata), np.argmax(x_ydata), 1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(x_ydata), np.inf, np.inf)))
popt_y, pcov_y = scipy.optimize.curve_fit(gaus, xdata, y_ydata, maxfev=2500, p0 = np.array((max(y_ydata), np.argmax(y_ydata), 1)), bounds = ((-np.inf, -np.inf, -np.inf), (max(y_ydata), np.inf, np.inf)))
except Exception:
popt_x = pcov_x = popt_y = pcov_y = 0
Nofit = 'No fit'
print ("scipy.optimize.curve_fit maxfev reached, returned (None, None) dtype = object")
return Nofit, Nofit
resolution = 10000 #how many points on curve, more is better but computationally slower
x=np.linspace(-Mode, Mode, resolution)
index_to_visang = vis_ang_list[-1]*4/resolution #multiply by 4 because x spans -Mode..Mode, i.e. four times the 0..Mode/2 stimulus range
yx = gaus(x, *popt_x)
yy = gaus(x, *popt_y)
buffer_estimate = .05 #If the first index is within x percentage of half-height, count it as half-height
criteria = 0.005
yx_peak = np.where(yx == np.amax(yx))[0][0]
if yx_peak == 0 or yx_peak == resolution:
yx_half_width = "Peak obscured"#None #return None if value falls outside range of data
yx_curve_indeces = np.where(yx > criteria)
yx_left_index = yx_curve_indeces[0][0]
yx_right_index = yx_curve_indeces[0][-1]
yx_half_width = ((yx_right_index - yx_left_index) * index_to_visang) / 2
if yx_left_index == 0 or yx_right_index == resolution:
yx_half_width = "Half-width obscured"
yy_peak = np.where(yy == np.amax(yy))[0][0]
if yy_peak == 0 or yy_peak == resolution:
yy_half_width = "Peak obscured"#None #return None if value falls outside range of data
yy_curve_indeces = np.where(yy > criteria)
yy_left_index = yy_curve_indeces[0][0]
yy_right_index = yy_curve_indeces[0][-1]
yy_half_width = ((yy_right_index - yy_left_index) * index_to_visang) / 2
if yy_left_index == 0: #or yy_right_index == resolution:
yy_half_width = "Half-width obscured"
if test_fit == True:
x_axis = np.empty(0)
y_axis = np.empty(0)
for i in reversed(range(int(Mode/2))):
x_axis = np.append(Get_event_data(roi, i, normalize, data = data)[3]-Get_event_data(roi, i, normalize, data = data)[2], x_axis)
for j in (range(int(Mode/2), Mode)):
y_axis = np.append(Get_event_data(roi, j, normalize, data = data)[3]-Get_event_data(roi, j, normalize, data = data)[2], y_axis)
xdata = np.arange(0, int(Mode/2))
x_ydata = x_axis.reshape(int(Mode/2),)
y_ydata = y_axis.reshape(int(Mode/2),)
spearmans_for_x = scipy.stats.spearmanr(x_ydata, gaus(xdata, *popt_x))
x_r = spearmans_for_x[0]
spearmans_for_y = scipy.stats.spearmanr(y_ydata, gaus(xdata, *popt_y))
y_r = spearmans_for_y[0]
if plot == 1:
plt.plot((np.linspace(-vis_ang_list[-1]*2, vis_ang_list[-1]*2, resolution)), yx)
plt.plot((np.linspace(-vis_ang_list[-1]*2, vis_ang_list[-1]*2, resolution)), yy)
if isinstance(yx_half_width, str) == False:
plt.hlines(yx[int((yx_left_index + yx_peak) / 2)], yx_left_index * index_to_visang - vis_ang_list[-1]*2 + yx_half_width/2, yx_right_index * index_to_visang - vis_ang_list[-1]*2 - yx_half_width/2)
plt.hlines(yx[int((yx_left_index + yx_peak) / 2)], yx_left_index * index_to_visang - vis_ang_list[-1]*2, yx_right_index * index_to_visang - vis_ang_list[-1]*2, linestyle = 'dotted', colors = 'k')
plt.vlines(x = yx_left_index*index_to_visang - vis_ang_list[-1]*2, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yx_left_index * index_to_visang - vis_ang_list[-1]*2 + yx_half_width/2, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)])
plt.vlines(x = yx_right_index*index_to_visang - vis_ang_list[-1]*2, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yx_right_index * index_to_visang - vis_ang_list[-1]*2 - yx_half_width/2, ymin = 0, ymax = yx[int((yx_left_index + yx_peak) / 2)])
if isinstance(yy_half_width, str) == False:
plt.hlines(yy[int((yy_left_index + yy_peak) / 2)], yy_left_index * index_to_visang - vis_ang_list[-1]*2 + yy_half_width/2, yy_right_index * index_to_visang - vis_ang_list[-1]*2 - yy_half_width/2, colors = '#FF8317')
plt.hlines(yy[int((yy_left_index + yy_peak) / 2)], yy_left_index * index_to_visang - vis_ang_list[-1]*2, yy_right_index * index_to_visang - vis_ang_list[-1]*2, linestyle = 'dotted', colors = 'k')
plt.vlines(x = yy_left_index*index_to_visang - vis_ang_list[-1]*2, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yy_left_index * index_to_visang - vis_ang_list[-1]*2 + yy_half_width/2, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], colors = '#FF8317')
plt.vlines(x = yy_right_index*index_to_visang - vis_ang_list[-1]*2, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], linestyle = 'dotted', colors = 'k')
plt.vlines(x = yy_right_index * index_to_visang - vis_ang_list[-1]*2 - yy_half_width/2, ymin = 0, ymax = yy[int((yy_left_index + yy_peak) / 2)], colors = '#FF8317')
plt.axvline(x = yx_peak*index_to_visang - vis_ang_list[-1]*2, c = 'g', linestyle = (0, (5, 10)))
plt.axvline(x = yy_peak*index_to_visang - vis_ang_list[-1]*2, c = 'g', linestyle = (0, (5, 10)))
plt.xlabel("Visual angle (°)")
plt.show()
# return yx_RF_size, yy_RF_size, x_r, y_r #, x_pearsons_r, y_pearsons_r
return yx_half_width, yy_half_width, x_r, y_r
def RF_estimates_list(function, stimfolder, resolutionfolder, rootfolder = 'D:\\Dissertation files\\Further analysis'):
"""Returns a list of RF estimates based on script Compute_RF_size, for each
condition, for each file, for each ROI."""
# stim = rootfolder + '\\' + stimfolder
res = rootfolder + '\\' + stimfolder + '\\' + resolutionfolder
conds = os.listdir(res)
# All_estimates = []
Compare_estimates = []
Total_ROIs = 0
Total_R_eligible = 0
for j in conds: #Conditions to loop through
print(j)
txt_files = []
dir_files = os.listdir(res + '\\' + j)
intermediate_list = []
for file in dir_files: #Build list of files to loop through
if file.endswith('.txt') is True:
txt_files.append(file)
for file in txt_files: #Then loop through those files
print(file)
file_dir = res + '\\' + j + '\\' + file
curr_data = Avg_data_getter(file_dir)
if file == txt_files[len(txt_files)-1]: #on the last file, append intermediate_list (by reference, so estimates added below are still captured)
Compare_estimates.append(intermediate_list)
for roi in curr_data[0].columns:
estimate = function(roi, normalize = 1, plot = 0, data = file_dir)
print(r"Currently on ROI#:{} RF estimate: {} ".format(Total_ROIs, estimate[:2]), flush = True, end = '')
Total_ROIs += 1
# if isinstance(estimate[2], float) and isinstance(estimate[3], float):
if len(estimate) > 2:
if estimate[2] >= 0.5 and estimate[3] >= 0.5:
intermediate_list.append(estimate[:2])
Total_R_eligible += 1
print("R values: {}, {}".format(estimate[2], estimate[3]))
# else:
else:
print("R values: {}, {} REJECTED!".format(estimate[2], estimate[3]))
if roi == len(curr_data[0].columns)-1:
print(" - Number of ROIs in file = {}".format(len(curr_data[0].columns)))
print(" - Total number of ROIS = {}".format(Total_ROIs))
print(" - N ROIs with sufficient R = {}".format(Total_R_eligible))
Compare_estimates.append(conds)
return Compare_estimates
def Discard_junk_data(data_list, conditions = 4):
"""If index contains a string or the value 0, discard those indexes and
return a "cleaned" list. """
data_copy = copy.deepcopy(data_list)
conds = data_list[conditions][:]
cleaned_list = []
for i in range(conditions):
cleaned_list.append([])
for n, i in enumerate(data_copy[0:conditions]):
for j in data_copy[n]:
cleaned_list[n] = [k for k in data_copy[n]
if isinstance(k[0],str) is False
and isinstance(k[1],str) is False
and k[0] != 0 and k[1] != 0]
cleaned_list.append(conds)
return cleaned_list
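# Small worked example of the cleaning step above (one condition plus the trailing
# condition-name list, matching the structure produced by RF_estimates_list):
_demo_estimates = [[(10.0, 12.0), ('Peak obscured', 8.0), (0, 5.0)], ['cond1']]
_demo_cleaned = Discard_junk_data(_demo_estimates, conditions = 1)
#_demo_cleaned -> [[(10.0, 12.0)], ['cond1']]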
def Plot_ellipses(X_width, Y_width, **kwargs):
fig = plt.figure(figsize = (5, 5), dpi = 500)
a = X_width
b = Y_width
if X_width > Y_width:
ecc = np.sqrt(X_width**2 - Y_width**2) / X_width
if X_width < Y_width:
ecc = np.sqrt(Y_width**2 - X_width**2) / Y_width
if X_width == Y_width:
ecc = np.sqrt(X_width**2 - Y_width**2) / X_width
""" -TODO: Implement eccentricity variable so that you can specify ecc"""
if 'ecc' in kwargs:
ecc = kwargs['ecc']
X_width = 1
Y_width = 1
t = np.linspace(0, 2*np.pi, 1000)
x = a * np.cos(t)
y = b * np.sin(t)
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.axis('off')
plt.text(.75, -.01, "Ecc = {}".format(np.round(ecc, 3)), transform = ax.transAxes)
plt.plot(x, y)
plt.show()
def Compute_ellipse(X_width, Y_width, plot = 0):
""" Computes the eccentricity, area, and perimiter of ellipse given X and Y dims.
(x - c₁)² / a² + (y - c₂)² / b² = 1, where....:
- (x, y) are the variables - the coordinates of an arbitrary point on the ellipse;
- (c₁, c₂) are the coordinates of the ellipse's center;
- a is the distance between the center and the ellipse's vertex, lying on the horizontal axis;
- b is the distance between the center and the ellipse's vertex, lying on the vertical axis.
c₁ and c₂ are assumed to be 0, 0, meaning ellipses are centered.
Returns
-------
X_dim: Vis ang (°)
Y_dim: Vis ang (°)
Eccentricity: Scale from 0 = Circle, 1 = basically flat
Area: Divided by stim_width_visang (so, mm) --> not currently true
"""
X_dim = X_width
Y_dim = Y_width
if X_width > Y_width:
ecc = np.sqrt(X_width**2 - Y_width**2) / X_width
if X_width < Y_width:
ecc = np.sqrt(Y_width**2 - X_width**2) / Y_width
if X_width == Y_width:
ecc = np.sqrt(X_width**2 - Y_width**2) / X_width
# area = np.sqrt((np.pi * X_width * Y_width)) #Area of ellipses: Area = Pi * A * B
area = (np.pi * X_dim/2 * Y_dim/2) #Area of ellipses: Area = Pi * A * B
# perim = np.pi * (X_width + Y_width) * (1 + 3 *(X_width - Y_width)**2 / (10 + np.sqrt((4 - 3* X_width - Y_width)**2 / (X_width + Y_width)**2))) #Ramanujan approximation
if plot == 1:
Plot_ellipses(X_width, Y_width)
return X_dim, Y_dim, ecc, area #, area, perim
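# Example: a 20 deg x 10 deg receptive field gives an eccentricity of ~0.87 and an
# area of ~157 deg^2 (plot = 0 keeps this side-effect free):
_demo_ellipse = Compute_ellipse(20.0, 10.0, plot = 0)
#_demo_ellipse -> (20.0, 10.0, 0.866..., 157.08...)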
def RF_ellipses_list(two_dim_RF_list, conditions = 4):
RF_list = two_dim_RF_list
ellipse_list = []
for i in range(conditions):
ellipse_list.append([])
for n, i in enumerate(RF_list[:conditions]):
for j in RF_list[n]:
Ellipse_data = Compute_ellipse(j[0], j[1])
ellipse_list[n].append(Ellipse_data)
conds = two_dim_RF_list[conditions][:]
ellipse_list.append(conds)
return ellipse_list
def List_ellipse_params(ellipse_list, conditions = 4, get_avg = 0):
all_Xs = []
all_Ys = []
all_eccs = []
all_areas = []
for i in ellipse_list[:conditions]:
cond_x = []
cond_y = []
cond_ecc = []
cond_area = []
for n, j in enumerate(i):
cond_x.append(j[0])
cond_y.append(j[1])
cond_ecc.append(j[2])
cond_area.append(j[3])
if j == i[-1]:
all_Xs.append(cond_x)
all_Ys.append(cond_y)
all_eccs.append(cond_ecc)
all_areas.append(cond_area)
if get_avg == 1:
avg_Xs = np.empty((conditions,1))
avg_Ys = np.empty((conditions,1))
avg_eccs = np.empty((conditions,1))
avg_areas = np.empty((conditions,1))
for n, i in enumerate(all_Xs):
avg_Xs[n] = np.average(i)
for m, j in enumerate(all_Ys):
avg_Ys[m] = np.average(j)
for l, k in enumerate(all_eccs):
avg_eccs[l] = np.average(k)
for k, l in enumerate(all_areas):
avg_areas[k] = np.average(l)
return avg_Xs, avg_Ys, avg_eccs, avg_areas #also return the averaged areas, which were otherwise computed but dropped
else:
return all_Xs, all_Ys, all_eccs, all_areas
def ellipse_param_dfs(RF_ellipses):
All_Xs = List_ellipse_params(RF_ellipses)[0]
All_Ys = List_ellipse_params(RF_ellipses)[1]
All_eccs = List_ellipse_params(RF_ellipses)[2]
All_areas = List_ellipse_params(RF_ellipses)[3]
All_Xs_df = pd.DataFrame(All_Xs).transpose()
All_Ys_df = | pd.DataFrame(All_Ys) | pandas.DataFrame |
"""
Classes for representing datasets of images and/or coordinates.
"""
from __future__ import print_function
import json
import copy
import logging
import os.path as op
import numpy as np
import pandas as pd
import nibabel as nib
from .base import NiMAREBase
from .utils import (tal2mni, mni2tal, mm2vox, get_template, listify,
try_prepend, find_stem, get_masker)
LGR = logging.getLogger(__name__)
class Dataset(NiMAREBase):
"""
Storage container for a coordinate- and/or image-based meta-analytic
dataset/database.
Parameters
----------
source : :obj:`str`
JSON file containing dictionary with database information or the dict()
object
target : :obj:`str`
Desired coordinate space for coordinates. Names follow NIDM convention.
mask : `str`, `Nifti1Image`, or any nilearn `Masker`
Mask(er) to use. If None, uses the target space image, with all
non-zero voxels included in the mask.
"""
_id_cols = ['id', 'study_id', 'contrast_id']
def __init__(self, source, target='mni152_2mm', mask=None):
if isinstance(source, str):
with open(source, 'r') as f_obj:
self.data = json.load(f_obj)
elif isinstance(source, dict):
self.data = source
else:
raise Exception("`source` needs to be a file path or a dictionary")
# Datasets are organized by study, then experiment
# To generate unique IDs, we combine study ID with experiment ID
raw_ids = []
for pid in self.data.keys():
for cid in self.data[pid]['contrasts'].keys():
raw_ids.append('{0}-{1}'.format(pid, cid))
self.ids = raw_ids
# Set up Masker
if mask is None:
mask = get_template(target, mask='brain')
self.masker = get_masker(mask)
self.space = target
self._load_coordinates()
self._load_images()
self._load_annotations()
self._load_texts()
self._load_metadata()
def slice(self, ids):
"""
Return a reduced dataset with only requested IDs.
Parameters
----------
ids : array_like
List of study IDs to include in new dataset
Returns
-------
new_dset : :obj:`nimare.dataset.Dataset`
Reduced Dataset containing only requested studies.
"""
new_dset = copy.deepcopy(self)
new_dset.ids = ids
new_dset.coordinates = new_dset.coordinates.loc[new_dset.coordinates['id'].isin(ids)]
new_dset.images = new_dset.images.loc[new_dset.images['id'].isin(ids)]
new_dset.annotations = new_dset.annotations.loc[new_dset.annotations['id'].isin(ids)]
new_dset.texts = new_dset.texts.loc[new_dset.texts['id'].isin(ids)]
temp_data = {}
for id_ in ids:
pid, expid = id_.split('-')
if pid not in temp_data.keys():
temp_data[pid] = self.data[pid].copy() # make sure to copy
temp_data[pid]['contrasts'] = {}
temp_data[pid]['contrasts'][expid] = self.data[pid]['contrasts'][expid]
new_dset.data = temp_data
return new_dset
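# Example usage (hedged sketch -- 'my_studies.json' is a hypothetical NIMADS-style file):
# dset = Dataset('my_studies.json', target='mni152_2mm')
# subset = dset.slice(dset.ids[:10]) # new Dataset restricted to the first ten IDs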
def update_path(self, new_path):
"""
Update paths to images. Prepends new path to the relative path for
files in Dataset.images.
Parameters
----------
new_path : :obj:`str`
Path to prepend to relative paths of files in Dataset.images.
"""
relative_path_cols = [c for c in self.images if c.endswith('__relative')]
for col in relative_path_cols:
abs_col = col.replace('__relative', '')
if abs_col in self.images.columns:
LGR.info('Overwriting images column {}'.format(abs_col))
self.images[abs_col] = self.images[col].apply(try_prepend, prefix=new_path)
def _load_annotations(self):
"""
Load labels in Dataset into DataFrame.
"""
# Required columns
columns = ['id', 'study_id', 'contrast_id']
# build list of ids
all_ids = []
for pid in self.data.keys():
for expid in self.data[pid]['contrasts'].keys():
exp = self.data[pid]['contrasts'][expid]
id_ = '{0}-{1}'.format(pid, expid)
all_ids.append([id_, pid, expid])
id_df = pd.DataFrame(columns=columns, data=all_ids)
id_df = id_df.set_index('id', drop=False)
exp_dict = {}
for pid in self.data.keys():
for expid in self.data[pid]['contrasts'].keys():
exp = self.data[pid]['contrasts'][expid]
id_ = '{0}-{1}'.format(pid, expid)
if 'labels' not in self.data[pid]['contrasts'][expid].keys():
continue
exp_dict[id_] = exp['labels']
temp_df = pd.DataFrame.from_dict(exp_dict, orient='index')
df = pd.merge(id_df, temp_df, left_index=True, right_index=True, how='outer')
df = df.reset_index(drop=True)
df = df.replace(to_replace='None', value=np.nan)
self.annotations = df
def _load_metadata(self):
"""
Load metadata in Dataset into DataFrame.
"""
# Required columns
columns = ['id', 'study_id', 'contrast_id']
# build list of ids
all_ids = []
for pid in self.data.keys():
for expid in self.data[pid]['contrasts'].keys():
exp = self.data[pid]['contrasts'][expid]
id_ = '{0}-{1}'.format(pid, expid)
all_ids.append([id_, pid, expid])
id_df = pd.DataFrame(columns=columns, data=all_ids)
id_df = id_df.set_index('id', drop=False)
exp_dict = {}
for pid in self.data.keys():
for expid in self.data[pid]['contrasts'].keys():
exp = self.data[pid]['contrasts'][expid]
id_ = '{0}-{1}'.format(pid, expid)
if 'metadata' not in self.data[pid]['contrasts'][expid].keys():
continue
exp_dict[id_] = exp['metadata']
temp_df = pd.DataFrame.from_dict(exp_dict, orient='index')
df = pd.merge(id_df, temp_df, left_index=True, right_index=True, how='outer')
df = df.reset_index(drop=True)
df = df.replace(to_replace='None', value=np.nan)
self.metadata = df
def _load_texts(self):
"""
Load texts in Dataset into a DataFrame.
"""
# Required columns
columns = ['id', 'study_id', 'contrast_id']
# build list of ids
all_ids = []
for pid in self.data.keys():
for expid in self.data[pid]['contrasts'].keys():
exp = self.data[pid]['contrasts'][expid]
id_ = '{0}-{1}'.format(pid, expid)
all_ids.append([id_, pid, expid])
id_df = pd.DataFrame(columns=columns, data=all_ids)
id_df = id_df.set_index('id', drop=False)
exp_dict = {}
for pid in self.data.keys():
for expid in self.data[pid]['contrasts'].keys():
exp = self.data[pid]['contrasts'][expid]
id_ = '{0}-{1}'.format(pid, expid)
if 'texts' not in self.data[pid]['contrasts'][expid].keys():
continue
exp_dict[id_] = exp['texts']
temp_df = pd.DataFrame.from_dict(exp_dict, orient='index')
df = | pd.merge(id_df, temp_df, left_index=True, right_index=True, how='outer') | pandas.merge |
# Author: <NAME>
# tomoyuki (at) genemagic.com
import sys
import argparse
import csv
import time
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--source', type=str, help="CSV data url or file.")
parser.add_argument('--fast', type=int, default=12, help="Fast length.")
parser.add_argument('--slow', type=int, default=26, help="Slow length.")
parser.add_argument('--signal', type=int, default=9, help="Signal length.")
parser.add_argument('--ma1', type=int, default=5, help="MA1 length.")
parser.add_argument('--ma2', type=int, default=25, help="MA2 length.")
parser.add_argument('--ma3', type=int, default=75, help="MA3 length.")
parser.add_argument('--bbp', type=int, default=20, help="BB period.")
parser.add_argument('--title', type=str, default="COVID19 Trend analysis.", help="Graph title.")
args = parser.parse_args()
return args
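# Example invocation (file names are hypothetical, for illustration only):
# python covid_trend.py --source daily_cases.csv --fast 12 --slow 26 --signal 9
# cat daily_cases.csv | python covid_trend.py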
def process(args):
if args.source is None:
df = | pd.read_csv(sys.stdin) | pandas.read_csv |
import datetime as dt
import unittest
from unittest.mock import patch
import numpy as np
import numpy.testing as npt
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal, assert_index_equal
import seaice.timeseries.warp as warp
from seaice.timeseries.common import SeaIceTimeseriesInvalidArgument
class Test_filter_failed_qa(unittest.TestCase):
def test_failed_qa_set_to_na(self):
columns = ['Foo', 'Bar', 'failed_qa', 'filename']
actual = pd.DataFrame([[1, 2, True, '/foo'], [1, 2, False, '/foo'], [1, 2, True, '/foo']],
columns=columns)
expected = pd.DataFrame([[np.nan, np.nan, True, ''],
[1, 2, False, '/foo'],
[np.nan, np.nan, True, '']], columns=columns)
actual = warp.filter_failed_qa(actual)
assert_frame_equal(expected, actual)
class Test_climatologyMeans(unittest.TestCase):
def test_means(self):
index = pd.period_range(start='2000-05', end='2016-05', freq='12M')
values = np.array([10, 20, 30, 40, 50, 50, 50, 50, 90, 99,
100, 100, 100, 100, 100, 100, 10])
climatology_years = (2010, 2015)
series = pd.Series(values, index=index)
expected = pd.Series(100, index=[5])
actual = warp.climatology_means(series, climatology_years)
| assert_series_equal(expected, actual) | pandas.util.testing.assert_series_equal |
#!/usr/bin python3
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
from itertools import product
# 3rd party:
from pandas import (
DataFrame, to_datetime, date_range,
unique, MultiIndex, concat
)
# Internal:
try:
from __app__.utilities import func_logger
except ImportError:
from utilities import func_logger
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'homogenise_dates',
'homogenise_demographics_dates'
]
@func_logger("homogenisation")
def homogenise_dates(d: DataFrame):
"""
Parameters
----------
d
    DataFrame with at least ``date``, ``areaType`` and ``areaCode`` columns.
Returns
-------
"""
d.date = to_datetime(d.date, format="%Y-%m-%d")
col_names = d.columns
date = date_range(
start=to_datetime(d.date).min(),
end=to_datetime(d.date).max()
)
dt_time_list = list()
for area_type in unique(d.areaType):
values = product(
[area_type],
| unique(d.loc[d.areaType == area_type, "areaCode"]) | pandas.unique |
from json import load as json_load
from pprint import pprint
from matplotlib.pyplot import subplots, tight_layout, show
from matplotlib.colors import ListedColormap
from numpy import mod
from pandas import DataFrame, set_option
| set_option('display.max_columns', None) | pandas.set_option |
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
return 1/np.float64(1+np.exp(-x))
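#The backprop step below relies on the identity sigmoid'(x) = sigmoid(x)*(1 - sigmoid(x)),
#which is where the outt*(1-outt) factor comes from. Tiny numerical check:
_x0 = 0.3
_analytic = sigmoid(_x0)*(1 - sigmoid(_x0))
_numeric = (sigmoid(_x0 + 1e-6) - sigmoid(_x0 - 1e-6))/2e-6
#_analytic and _numeric agree to ~1e-9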
def createweights(s):
layers=len(s)
layer=0
weights=[]
while layer<layers-1:
w=np.random.normal(0,.05,(s[layer],s[layer+1]))
weights.append(w)
layer=layer+1
return weights
def createbias(s):
layers=len(s)
layer=0
bias=[]
while layer<layers-1:
w=np.random.normal(0,.05,(s[layer+1]))
bias.append(w)
layer=layer+1
return bias
def predict(train,weights,bias,s):
layers=len(s)
layer=0
predict_on=[train]
while layer<layers-1:
pred=sigmoid(predict_on[layer]@weights[layer]+bias[layer])
predict_on.append(pred)
layer=layer+1
return predict_on
def backprop(predict_on,y, weights,bias, s,lr=.01):
layers=len(s)
layer=layers-1
error=predict_on[layer]-y
while layer>0:
inn=predict_on[layer-1]
outt=predict_on[layer]
eoo=error*outt*(1-outt)
gradw=inn.T@eoo
gradb=eoo
weights[layer-1]=weights[layer-1]-lr*gradw.reshape(weights[layer-1].shape)
bias[layer-1]=bias[layer-1]-lr*np.sum(gradb,axis=0)
error=error@weights[layer-1].T #propagate the error back to the previous layer (note: uses the just-updated weights)
layer=layer-1
return weights,bias
x=np.array([[1,0],[0,1],[1,1],[0,0]])
y=np.array([[1],[1],[0],[0]])
s=[2,3,3,1]
weights=createweights(s=s)
bias=createbias(s=s)
errs = []
for i in range(100000):
predict_on=predict(x,weights, bias,s=s)
errs.append(np.sum(abs(predict_on[-1]-y)))
print(np.sum(abs(predict_on[-1]-y)))
weights,bias=backprop(predict_on,y, weights, bias, s=s,lr=1)
plt.plot(errs)
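#Sanity check added for illustration: the trained network should now map the XOR
#inputs in x close to the targets in y.
final_pred = predict(x, weights, bias, s=s)[-1]
print(np.round(final_pred, 2)) #expected roughly [[1],[1],[0],[0]]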
#Apply on digits
import pandas as pd
training_data = pd.read_csv("c:/users/jliv/downloads/mnist_train.csv")
testing_data = pd.read_csv("c:/users/jliv/downloads/mnist_test.csv")
training_labels = training_data['5']
testing_labels = testing_data['7']
training_data.drop(['5'],axis=1,inplace=True)
testing_data.drop(['7'],axis=1,inplace=True)
training_onehot_y = pd.DataFrame()
training_onehot_y['lab'] = training_labels
lr = np.array(list(range(10)))
for i in lr:
training_onehot_y[i]=np.where(training_onehot_y['lab']==i,1,0)
training_onehot_y.drop(['lab'],axis=1,inplace=True)
training_labels.unique()
testing_labels.unique()
testing_map={i:testing_labels.unique()[i] for i in range(len(testing_labels.unique()))}
training_map={i:training_labels.unique()[i] for i in range(len(training_labels.unique()))}
testing_onehot_y = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import amex_cred_card_utils as autil
from xgboost import XGBClassifier
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
# load the train data set and test data set
df_train = pd.read_csv(r'D:\github\DataScience\hacker-earth\amex_credit_card\train.csv')
df_test = pd.read_csv(r'D:\github\DataScience\hacker-earth\amex_credit_card\test.csv')
# drop columns which are not fed into the model
df_train.drop(['name'], axis=1, inplace=True)
df_test.drop(['name'], axis=1, inplace=True)
df_train.drop(['credit_limit'], axis=1, inplace=True)
df_test.drop(['credit_limit'], axis=1, inplace=True)
## Missing values handling
df_train = autil.fill_na_owns_car(df_train)
df_train = autil.fill_na_no_of_children(df_train)
df_train = autil.fill_na_no_of_days_employed(df_train)
df_train = autil.fill_na_total_family_members(df_train)
df_train = autil.fill_na_migrant_worker(df_train)
df_train = autil.fill_na_yearly_debt_payments(df_train)
df_test = autil.fill_na_owns_car(df_test)
df_test = autil.fill_na_no_of_children(df_test)
df_test = autil.fill_na_no_of_days_employed(df_test)
df_test = autil.fill_na_total_family_members(df_test)
df_test = autil.fill_na_migrant_worker(df_test)
df_test = autil.fill_na_yearly_debt_payments(df_test)
## pre-processing - convert age to a range, i.e. a categorical variable
df_train = autil.age_to_range(df_train)
df_test = autil.age_to_range(df_test)
## we can now drop age as continuous variable
df_train.drop(['age'], axis=1, inplace=True)
df_test.drop(['age'], axis=1, inplace=True)
# label-encode the age_group categorical variable
df_train = autil.lb_encode_age_group(df_train)
df_test = autil.lb_encode_age_group(df_test)
# create dummy variables for gender, owns_car, owns_house
# columns = ['gender', 'owns_car', 'owns_house']
df_train = pd.get_dummies(data=df_train, columns=['gender'])
df_train = pd.get_dummies(data=df_train, columns=['owns_car'])
df_train = pd.get_dummies(data=df_train, columns=['owns_house'])
df_test = pd.get_dummies(data=df_test, columns=['gender'])
df_test = pd.get_dummies(data=df_test, columns=['owns_car'])
df_test = | pd.get_dummies(data=df_test, columns=['owns_house']) | pandas.get_dummies |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
            303: pd.Timestamp("2013-03-01 00:00:00"),
#!/usr/bin/env python
"""
A server providing relatedness information for bacterial genomes via a Restful API.
Implemented in python3, it uses
* in-memory data storage backed by MongoDb.
* optionally, a compiled free standing relatedness engine, CatWalk.
It loads configuration from a config file, which must be set in production.
If no config file is provided, it will run in 'testing' mode with the parameters
in default_test_config.json. This expects a mongodb database to be running on
the default port on local host. As a rough guide to the amount of space required in mongodb,
about 0.5MB of database is used per sequence, or about 2,000 sequences per GB.
If no config file is provided, it will run in 'testing' mode with the parameters
in default_test_config.json. This expects a mongodb database to be running on
the default port on local host. As a rough guide to the amount of space required in mongodb,
about 0.5MB of database is used per sequence, or about 2,000 sequences per GB.
All internal modules, and the restful API, are covered by unit testing.
For unit testing details, please see
run_tests.sh
A component of the findNeighbour4 system for bacterial relatedness monitoring
Copyright (C) 2021 <NAME> <EMAIL>
repo: https://github.com/davidhwyllie/findNeighbour4
This program is free software: you can redistribute it and/or modify
it under the terms of the MIT License as published
by the Free Software Foundation. See <https://opensource.org/licenses/MIT>, and the LICENSE file.
"""
# import libraries
import os
import requests
import json
import logging
import logging.handlers
import warnings
import datetime
import io
import pandas as pd
from pathlib import Path
import markdown
import codecs
import sentry_sdk
import matplotlib
import dateutil.parser
import argparse
import networkx as nx
import uuid
from sentry_sdk import capture_message, capture_exception
from sentry_sdk.integrations.flask import FlaskIntegration
# flask
from flask import Flask, make_response, jsonify
from flask import request, abort, send_file
from flask_cors import (
CORS,
) # cross-origin requests are not permitted except for one resource, for testing
# config
from findn import BASE_PATH, DEFAULT_CONFIG_FILE
from findn.common_utils import ConfigManager
# reference based compression, storage and clustering modules
from findn.NucleicAcid import NucleicAcid
from findn.persistence import Persistence
from findn.cw_seqComparer import cw_seqComparer
from findn.py_seqComparer import py_seqComparer
from findn.guidLookup import guidDbSearcher # fast lookup of first part of guids
from snpclusters.ma_linkage import MixtureAwareLinkageResult
from findn.msa import MSAStore
# network visualisation
from findn.visualiseNetwork import snvNetwork
# server status visualisation
from findn.depictStatus import MakeHumanReadable
# only used for unit testing
from Bio import SeqIO
from urllib.parse import urljoin as urljoiner
class SQLiteBackendErrror(Exception):
"""can't use SQLite in a multithreaded environment"""
pass
class FN4InsertInProgressError(Exception):
"""can't insert two sequences at the same time; only synchronous inserts are allowed"""
pass
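# Illustrative sketch only (not part of the original server): FN4InsertInProgressError
# signals that inserts are strictly synchronous. A hypothetical caller-side retry wrapper
# is shown below; the callable passed in, the attempt count and the wait interval are all
# assumptions for illustration, not part of the findNeighbour4 API.
def _retry_synchronous_insert(do_insert, max_attempts=3, wait_seconds=5):
    """Retry a synchronous insert a few times if another insert is already in progress."""
    import time
    last_error = None
    for _ in range(max_attempts):
        try:
            return do_insert()
        except FN4InsertInProgressError as e:
            last_error = e
            time.sleep(wait_seconds)
    raise last_error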
class findNeighbour4:
"""logic for maintaining a record of bacterial relatedness using SNP distances.
The high level arrangement is that
- This class interacts with sequences, partly held in memory
[handled by the cw_seqComparer class]
- cached data is accessed by an fn3persistence object
- methods in findNeighbour4() return native python3 objects.
- a web server, currently flask, handles the inputs and outputs of this class
- in particular, native python3 objects returned by this class are serialised by the Flask web server code.
"""
def __init__(self, CONFIG, PERSIST):
"""Using values in CONFIG, starts a server with CONFIG['NAME'] on port CONFIG['PORT'].
CONFIG contains Configuration parameters relevant to the reference based compression system which lies
at the core of the server.
INPUTREF: the path to fasta format reference file.
EXCLUDEFILE: a file containing the zero-indexed positions in the supplied sequences which should be ignored in all cases.
Typically, this is because the software generating the mapped fasta file has elected not to call these regions,
in any samples, e.g. because of difficulty mapping to these regions.
Such regions can occupy up to 5-20% of the genome and it is important for efficient working of this software
that these regions are supplied for exclusion on sequence loading. Not doing so will slow loading, and markedly increase
memory requirements, but will not alter the results produced.
DEBUGMODE: Controls operation of the server:
DEBUGMODE = 0 1 2
Run server in production mode (errors logged, not returned to client) Y N N
Run server in debug mode (errors reported to client) N Y Y
Create Database if it does not exist Y Y Y
Delete all data on startup N N Y
Enable /restart endpoint, which restarts empty server (for testing) N N Y
SERVERNAME: the name of the server. used as the name of mongodb database which is bound to the server.
PRECOMPARER_PARAMETERS: parameters passed to precomparer. ** DOCS NEEDED ON THIS**. It is important that these values are either set to 'fail always' -route all precompared samples to the py_seqComparer - or are the result of calibration. preComparer_calibration will do the calibration process automatically.
FNPERSISTENCE_CONNSTRING: a valid mongodb connection string. if shard keys are set, the 'guid' field is suitable key.
Note: if a FNPERSISTENCE_CONNSTRING environment variable is present, then the value of this will take precedence over any values in the config file.
This allows 'secret' connstrings involving passwords etc. to be specified without the values going into a configuration file.
MAXN_STORAGE: The maximum number of Ns in the sequence (excluding those defined in EXCLUDEFILE) which should be indexed.
Other files, e.g. those with all Ns, will be tagged as 'invalid'. Although a record of their presence in the database
is kept, they are not compared with other sequences.
        MAXN_PROP_DEFAULT: if the proportion of bases which are not N exceeds this, the sample is analysed; otherwise it is considered invalid.
LOGFILE: the log file used
LOGLEVEL: default logging level used by the server. Valid values are DEBUG INFO WARNING ERROR CRITICAL
SNPCEILING: links between guids > this are not stored in the database
REPACK_FREQUENCY: see /docs/repack_frequency.md
CLUSTERING: a dictionary of parameters used for clustering. In the below example, there are two different
clustering settings defined, one named 'SNV12_ignore' and the other 'SNV12_include.
{'SNV12_ignore' :{'snv_threshold':12, 'mixed_sample_management':'ignore', 'mixture_criterion':'p_value1', 'cutoff':0.001},
'SNV12_include':{'snv_threshold':12, 'mixed_sample_management':'include', 'mixture_criterion':'p_value1', 'cutoff':0.001}
}
Each setting is defined by four parameters:
snv_threshold: clusters are formed if samples are <= snv_threshold from each other
mixed_sample_management: this defines what happens if mixed samples are detected.
Suppose there are three samples, A,B and M. M is a mixture of A and B.
A and B are > snv_threshold apart, but their distance to M is zero.
If mixed_sample_management is
'ignore', one cluster {A,B,M} is returned
'include', two clusters {A,M} and {B,M}
                'exclude', three clusters are returned {A},{B},{M}
mixture_criterion: sensible values include 'p_value1','p_value2','p_value3' but other output from py_seqComparer._msa() is also possible.
these p-values arise from three different tests for mixtures. Please see py_seqComparer._msa() for details.
cutoff: samples are regarded as mixed if the mixture_criterion is less than or equal to this value.
        SENTRY_URL: optional.  If provided, will link Sentry to the flask application using the API key provided.  See https://sentry.io for a description of this service.
Note: if a FN_SENTRY_URL environment variable is present, then the value of this will take precedence over any values in the config file.
            This allows 'secret' connstrings involving passwords etc to be specified without the values going into a configuration file.
LISTEN_TO: optional. If missing, will bind to localhost (only) on 127.0.0.1. If present, will listen to requests from the IP stated. if '0.0.0.0', the server will respond to all external requests.
An example CONFIG is below:
{
"DESCRIPTION":"A test server operating in ../unitTest_tmp, only suitable for testing",
"IP":"127.0.0.1",
"INPUTREF":"reference/TB-ref.fasta",
"EXCLUDEFILE":"reference/TB-exclude.txt",
"DEBUGMODE":0,
"SERVERNAME":"TBSNP",
"FNPERSISTENCE_CONNSTRING":"mongodb://127.0.0.1",
"MAXN_STORAGE":100000,
"MAXN_PROP_DEFAULT":0.70,
"PRECOMPARER_PARAMETERS":{},
"LOGFILE":"../unitTest_tmp/logfile.log",
"LOGLEVEL":"INFO",
"SNPCEILING": 20,
"SERVER_MONITORING_MIN_INTERVAL_MSEC":0,
"SENTRY_URL":"https://c******************@sentry.io/1******",
"CLUSTERING":{'SNV12_ignore' :{'snv_threshold':12, 'mixed_sample_management':'ignore', 'mixture_criterion':'pvalue_1', 'cutoff':0.001},
'SNV12_include':{'snv_threshold':12, 'mixed_sample_management':'include', 'mixture_criterion':'pvalue_1', 'cutoff':0.001}
},
"LISTEN_TO":"127.0.0.1"
}
Some of these settings are read when the server is first-run, stored in a database, and the server will not
change the settings on re-start even if the config file is changed. Examples are:
SNPCEILING
MAXN_PROP_DEFAULT
EXCLUDEFILE
INPUTREF
CLUSTERING
PRECOMPARER_PARAMETERS
These settings cannot be changed because they alter the way that the data is stored; if you want to change
the settings, the data will have to be re-loaded.
However, most other settings can be changed and will take effect on server restart. These include:
server location
IP
SERVERNAME
REST_PORT
LISTEN_TO (optional)
internal logging
LOGFILE
LOGLEVEL
where the database connection binds to
FNPERSISTENCE_CONNSTRING
Note: if a FNPERSISTENCE_CONNSTRING environment variable is present, then the value of this will take precedence over any values in the config file.
This allows 'secret' connstrings involving passwords etc to be specified without the values going into a configuration file.
related to what monitoring the server uses
SERVER_MONITORING_MIN_INTERVAL_MSEC (optional)
related to error handling
SENTRY_URL (optional)
            Note: if a FN_SENTRY_URL environment variable is present, then the value of this will take precedence over any values in the config file.
            This allows 'secret' connstrings involving passwords etc to be specified without the values going into a configuration file.
        PERSIST is a storage object which needs to be supplied.  The fn3Persistence class in mongoStore or rdbmsStore are suitable objects.
"""
# store the persistence object as part of this object
self.PERSIST = PERSIST
self.CONFIG = CONFIG
# set up an MSA store
self.ms = MSAStore(
PERSIST=PERSIST, in_ram_persistence_time=300
) # max time to store msas in seconds
# does not test CONFIG. Assumes it has been checked by read_config()
# set easy to read properties from the config
self.reference = CONFIG["reference"]
self.excludePositions = set(CONFIG["excludePositions"])
self.debugMode = CONFIG["DEBUGMODE"]
self.maxNs = CONFIG["MAXN_STORAGE"]
self.snpCeiling = CONFIG["SNPCEILING"]
self.preComparer_parameters = CONFIG["PRECOMPARER_PARAMETERS"]
self.maxn_prop_default = CONFIG["MAXN_PROP_DEFAULT"]
self.clustering_settings = CONFIG["CLUSTERING"]
## start setup
# initialise nucleic acid analysis object
self.objExaminer = NucleicAcid()
# formatting utility
self.mhr = MakeHumanReadable()
# load catwalk and in-ram searchable data assets
self.gs = guidDbSearcher(PERSIST=PERSIST, recheck_interval_seconds=60)
logging.info("Prepopulating catwalk")
self.prepopulate_catwalk()
logging.info("Prepopulating clustering")
self.prepopulate_clustering()
# log database state
db_summary = self.PERSIST.summarise_stored_items()
self.PERSIST.server_monitoring_store(
what="dbManager", message="OnServerStartup", guid="-", content=db_summary
)
# set up an in-RAM only precomparer for use by pairwise comparisons. There is no SNP ceiling
self.sc = py_seqComparer(
reference=self.reference,
excludePositions=self.excludePositions,
snpCeiling=1e9,
maxNs=self.maxNs,
)
# log startup event to sentry, if configured. This automatically catches version number.
startup_message = "findneighbour4_server {0} started on PID {1}.".format(
CONFIG["SERVERNAME"], os.getpid()
)
capture_message(startup_message)
logging.info(startup_message)
def pairwise_comparison(self, guid1, guid2):
"""compares two sequences which have already been stored
        Parameters:
        guid1: the first sequence identifier
        guid2: the second sequence identifier

        Returns:
        the exact distance; no threshold is applied.  Returns None if either sequence does not exist."""
obj1 = self.PERSIST.refcompressedsequence_read(guid1)
obj2 = self.PERSIST.refcompressedsequence_read(guid2)
# we generate new uuids to identify the sequence, as self.sc can be accessed by different threads
# by providing new uuids for each sequence, one thread can't delete the other's sequences
uuid1 = str(uuid.uuid1())
uuid2 = str(uuid.uuid1())
if obj1 is None or obj2 is None:
return None # no sequence
# store object in the precomparer
self.sc.persist(obj1, uuid1) # store in the preComparer
self.sc.persist(obj2, uuid2) # store in the preComparer
dist = self.sc.compare(uuid1, uuid2)
self.sc.remove(uuid1)
self.sc.remove(uuid2)
return {"guid1": guid1, "guid2": guid2, "dist": dist}
def prepopulate_catwalk(self):
"""initialise cw_seqComparer, which interfaces with catwalk were necessary
If self.debugMode == 2 (a unittesting setting) the catwalk is emptied on startup"""
self.hc = cw_seqComparer(
reference=self.reference,
maxNs=self.maxNs,
snpCeiling=self.snpCeiling,
excludePositions=self.excludePositions,
preComparer_parameters=self.preComparer_parameters,
PERSIST=self.PERSIST,
unittesting=(self.debugMode == 2),
localstore=None
)
def prepopulate_clustering(self):
"""loads in memory data into the cw_seqComparer object (which may include catwalk) from database storage"""
# clustering is performed by findNeighbour4_cluster, and results are stored in the database backend
# the clustering object used here is just a reader for the clustering status.
logging.info("findNeighbour4 is loading clustering data.")
self.clustering = (
{}
) # a dictionary of clustering objects, one per SNV cutoff/mixture management setting
for clustering_name in self.clustering_settings.keys():
self.clustering[clustering_name] = MixtureAwareLinkageResult(
PERSIST=self.PERSIST, name=clustering_name, serialisation=None
)
logging.info("set up clustering access object {0}".format(clustering_name))
def reset(self):
"""restarts the server, deleting any existing data"""
if not self.debugMode == 2:
logging.info("Call to reset ignored as debugMode is not 2")
return # no action taken by calls to this unless debugMode ==2
else:
logging.info("Deleting existing data and restarting")
self.PERSIST._delete_existing_data()
self.hc.restart_empty() # restart catwalk empty
self.prepopulate_clustering()
# check it worked
residual_sample_ids = self.hc.guids()
if not len(residual_sample_ids) == 0:
raise ValueError(
"Catwalk restarted, but with residual samples in it. Residual samples are {0}".format(
self.hc.guids()
)
)
logging.info(
"reset executed. There are no samples in the catWalk application."
)
def server_monitoring_store(self, message="No message supplied", guid=None):
"""reports server memory information to store"""
try:
hc_summary = self.hc.summarise_stored_items()
except AttributeError: # no hc object, occurs during startup
hc_summary = {}
db_summary = self.PERSIST.summarise_stored_items()
mem_summary = self.PERSIST.memory_usage()
self.PERSIST.server_monitoring_store(
message=message,
what="server",
guid=guid,
content={**hc_summary, **db_summary, **mem_summary},
)
def first_run(self, do_not_persist_keys):
"""actions taken on first-run only.
Include caching results from config_file to database, unless they are in do_not_persist_keys"""
logging.info("First run situation: parsing inputs, storing to database. ")
# create a config dictionary
config_settings = {}
# store start time
config_settings["createTime"] = datetime.datetime.now()
# store description
config_settings["description"] = self.CONFIG["DESCRIPTION"]
# store clustering settings
self.clustering_settings = self.CONFIG["CLUSTERING"]
config_settings["clustering_settings"] = self.clustering_settings
# store precomparer settings
# if len(self.CONFIG['PRECOMPARER_PARAMETERS'])>0:
self.PERSIST.config_store("preComparer", self.CONFIG["PRECOMPARER_PARAMETERS"])
# load the excluded bases
excluded = set()
if self.CONFIG["EXCLUDEFILE"] is not None:
with open(self.CONFIG["EXCLUDEFILE"], "rt") as f:
rows = f.readlines()
for row in rows:
excluded.add(int(row))
logging.info("Noted {0} positions to exclude.".format(len(excluded)))
config_settings["excludePositions"] = list(sorted(excluded))
# load reference
with open(self.CONFIG["INPUTREF"], "rt") as f:
for r in SeqIO.parse(f, "fasta"):
config_settings["reference"] = str(r.seq)
# persist other config settings.
for item in self.CONFIG.keys():
if item not in do_not_persist_keys:
config_settings[item] = self.CONFIG[item]
self.PERSIST.config_store("config", config_settings)
# log database sizes
db_summary = self.PERSIST.summarise_stored_items()
self.PERSIST.server_monitoring_store(
what="dbManager", message="OnFirstRun", guid="-", content=db_summary
)
self.server_monitoring_store(message="First run complete.")
logging.info("First run actions complete.")
def insert(self, guid, dna):
"""insert DNA called guid into the server,
persisting it in both RAM and on disc, and updating any clustering.
Returns a status code and an explanatory message
"""
# clean, and provide summary statistics for the sequence
logging.info("Preparing to insert: {0}".format(guid))
# check guid. it has to be less than 60 characters long.
if len(guid) >= 60:
return_status_code = 403
return_text = "Supplied sample identifier is unacceptable. Length = {0} (max = 60)".format(
guid
)
logging.info("{0}-{1}".format(return_status_code, return_text))
return return_status_code, return_text
if not self.exist_sample(guid): # if the guid is not already there
self.server_monitoring_store(message="About to insert", guid=guid)
logging.info("Guid is not present: {0}".format(guid))
# insert sequence into the sequence store.
self.objExaminer.examine(dna) # examine the sequence
cleaned_dna = self.objExaminer.nucleicAcidString.decode()
refcompressedsequence = self.hc.compress(cleaned_dna) # compress it
            # addition should be an atomic operation, in which all the components complete or none do.
# we use a semaphore to do this.
if self.PERSIST.lock(1, guid): # true if an insert lock was acquired
# if failure occurs, errors should be raised
try:
self.hc.persist(
refcompressedsequence,
guid,
{"DNAQuality": self.objExaminer.composition},
)
return_status_code, return_text = 200, "Guid {0} inserted.".format(
guid
)
logging.info("Insert succeeded {0}".format(guid))
except Exception as e:
# the database server may be refusing connections, or busy. This is observed occasionally in real-world use
error_message = "Error raised on persisting {0}".format(guid)
for key in refcompressedsequence:
if isinstance(refcompressedsequence[key], set):
n = len(refcompressedsequence[key])
else:
n = refcompressedsequence[key]
error_message = error_message + "| {0} len={1}".format(key, n)
capture_exception(e) # log what happened in Sentry
logging.exception(error_message)
logging.exception(e)
return_status_code, return_text = 503, error_message
finally:
self.PERSIST.unlock(1) # release the lock if it was acquired
else:
# lock acquisition failed, indicative of another process inserting at present
info_msg = """A lock is in place preventing insertion. This may arise because
(i) a separate process is inserting data;
in this case, retry later or
(ii) if you are only inserting with one load script synchronously, it may reflect the lock being held because of an error or crash TBD.
The findNeighbour4_lockmonitor should unlock it automatically in 90 seconds.
If needed, you can reset the lock as follows: fn4_configure <path to config file> --drop_locks"""
logging.warning("An insert lock prevented insertion {0}".format(guid))
logging.info(info_msg)
return_status_code, return_text = 409, info_msg
else:
logging.info("Already present, no insert needed: {0}".format(guid))
return_status_code, return_text = 201, "Guid {0} is already present".format(
guid
)
logging.info("{0}-{1}".format(return_status_code, return_text))
return return_status_code, return_text
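    # Summary of the status codes returned by insert(), as implemented above:
    #   200  the sequence was compressed and persisted successfully
    #   201  the guid was already present; nothing was inserted
    #   403  the supplied sample identifier is 60 characters or longer
    #   409  an insert lock held by another process prevented insertion
    #   503  an error was raised while persisting (e.g. the database refused connections)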
def exist_sample(self, guid):
"""determine whether the sample exists in the system.
The test requires that the guid is in the database, which occurs on sample addition.
Parameters:
guid: the sequence identifier
Returns:
either True, or False
"""
## this call measures presence on disc
return self.PERSIST.guid_exists(guid)
def validity_sample(self, guid):
"""determine whether the sample exists in RAM
Parameters:
guid: the sequence identifier
Returns:
-1 The guid does not exist
0 The guid exists and the sequence is valid
1 The guid exists and the sequence is invalid
-2 The guid exists, but there is no DNAQuality.valid key
Results < 0 will represent errors if the guid is known to exist
"""
## this call measures presence on disc
return self.PERSIST.guid_valid(guid)
def server_time(self):
"""returns the current server time"""
return {
"server_name": self.CONFIG["SERVERNAME"],
"server_time": datetime.datetime.now().isoformat(),
}
def server_name(self):
"""returns information about the server"""
return {
"server_name": self.CONFIG["SERVERNAME"],
"server_description": self.CONFIG["DESCRIPTION"],
}
def server_config(self):
"""returns the config file with which the server was launched
        Revealing this may be highly undesirable, so it is only available in DEBUG mode,
        as it exposes the internal server architecture including
        backend databases and perhaps connection strings with passwords.
"""
if self.debugMode == 2:
retVal = self.CONFIG.copy()
retVal["excludePositions"] = list(
retVal["excludePositions"]
) # can't serialise a set
return retVal
else:
return None
def server_nucleotides_excluded(self):
"""returns the nucleotides excluded by the server"""
return {
"exclusion_id": self.hc.excluded_hash(),
"excluded_nt": list(self.hc.excluded),
}
def server_memory_usage(self, max_reported=None):
"""reports recent server memory activity"""
# record the current status
self.server_monitoring_store(message="Server memory usage requested", guid=None)
if max_reported is None:
max_reported = 100 # a default
return self.PERSIST.recent_server_monitoring(max_reported=max_reported)
def server_database_usage(self, max_reported=None):
"""reports recent server memory activity"""
# record the current status
if max_reported is None:
max_reported = 100 # a default
return self.PERSIST.recent_database_monitoring(max_reported=max_reported)
def neighbours_within_filter(self, guid, snpDistance, cutoff=0, returned_format=1):
"""returns a list of guids, and their distances, by a sample quality cutoff
returns links either as
format 1 [[otherGuid, distance]]
or as
format 2 [[otherGuid, distance, N_just1, N_just2, N_either]] [LEGACY - disabled - will yield NotImplementedError]
or as
format 3 [otherGuid, otherGuid2, otherGuid3]
or as
format 4 [{'guid':otherGuid, 'snv':distance}, {'guid':otherGuid2, 'snv':distance2}]
"""
# check the query is of good quality
inScore = self.PERSIST.guid_quality_check(guid, float(cutoff))
if inScore is None:
raise KeyError(
"{0} not found".format(guid)
) # that's an error, raise KeyError
elif inScore is False:
return [] # bad sequence; just to report no links
# if it is of good quality, then we look for links
idList = list()
# gets the similar sequences from the database;
retVal = self.PERSIST.guid2neighbours(
guid=guid, cutoff=snpDistance, returned_format=returned_format
)
# run a quality check on the things our sample is like.
# extract the quality guids, independent of the format requested.
sampleList = retVal["neighbours"]
if cutoff == 0 or cutoff is None:
return sampleList
# otherwise, filter by quality
idList = []
for sa in sampleList:
if isinstance(sa, list):
idList.append(sa[0]) # add the guid
elif isinstance(sa, str):
idList.append(sa)
elif isinstance(sa, dict):
idList.append(sa["guid"])
else:
raise TypeError(
"Unknown format returned {0} {1}".format(type(sa), sampleList)
)
guid2qual = self.PERSIST.guid2quality(idList)
# Filter to get good matching guids
goodGuids = set()
cutoff = float(cutoff)
for guid in guid2qual.keys():
if guid2qual[guid] >= cutoff:
goodGuids.add(guid)
else:
pass # print("findNeighbour4.neighbours_within_filter, excluded on quality filter", guid2qual[guid], cutoff, guid)
# note one could put a filter to identify samples based on Ns here: these statistics are return in the sampleList
# assemble output by filtering sampleList
finalOutput = list()
for sa in sampleList:
if isinstance(sa, list):
guid = sa[0]
elif isinstance(sa, str):
guid = sa
elif isinstance(sa, dict):
guid = sa["guid"]
if guid in goodGuids:
finalOutput.append(sa)
return finalOutput
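    # Illustrative sketch of the returned_format options described above, for a
    # hypothetical neighbour 'guid2' at distance 3 (format 2 is legacy and disabled):
    #   format 1: [['guid2', 3]]
    #   format 3: ['guid2']
    #   format 4: [{'guid': 'guid2', 'snv': 3}]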
def get_all_guids(self):
return self.PERSIST.guids()
def get_all_valid_guids(self):
return self.PERSIST.guids_valid()
def get_all_invalid_guids(self):
return self.PERSIST.guids_invalid()
def guids_with_quality_over(self, cutoff=0):
rs = self.PERSIST.guid2propACTG_filtered(float(cutoff))
if rs is None:
return []
else:
return list(rs.keys())
def guids_added_after_sample(self, guid):
res = self.PERSIST.guids_added_after_sample(guid)
return res
def get_all_guids_examination_time(self):
res = self.PERSIST.guid2ExaminationDateTime()
        # isoformat all the values, as datetime values are not json serialisable
retDict = res
for key in retDict:
retDict[key] = retDict[key].isoformat()
return retDict
def get_all_annotations(self):
return self.PERSIST.guid_annotations()
def get_one_annotation(self, guid):
return self.PERSIST.guid_annotation(guid)
def sequence(self, guid):
"""gets masked sequence for the guid, in fasta format"""
if not self.exist_sample(guid):
return None
try:
seq = self.hc.uncompress_guid(guid)
return {
"guid": guid,
"invalid": 0,
"comment": "Masked sequence, as stored",
"masked_dna": seq,
}
except ValueError:
return {
"guid": guid,
"invalid": 1,
"comment": "No sequence is available, as invalid sequences are not stored",
}
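    # Illustrative sketch: for a stored, valid sample the method above returns a dictionary such as
    #   {'guid': 'guid1', 'invalid': 0, 'comment': 'Masked sequence, as stored', 'masked_dna': 'ACGT...'}
    # whereas for an invalid sample 'invalid' is 1 and no sequence is included; None is returned
    # if the sample has never been stored.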
def create_app(config_file=None):
"""creates a findNeighbour4 flask application
if config_file is passed, this should point to a configuration file.
    if environment variable FN4_SERVER_CONFIG_FILE is set, the value of this is used as the path to a config file.
    if config_file is None and FN4_SERVER_CONFIG_FILE is not set, the default (testing) config is used
"""
## construct flask application
if os.environ.get("FN4_SERVER_CONFIG_FILE") is not None:
config_file = os.environ.get("FN4_SERVER_CONFIG_FILE")
if config_file is None:
config_file = DEFAULT_CONFIG_FILE
warnings.warn(
"No config file name supplied ; using a configuration ('default_test_config.json') suitable only for testing, not for production. "
)
cfm = ConfigManager(config_file)
CONFIG = cfm.read_config()
########################### SET UP LOGGING #####################################
# create a log file if it does not exist.
logdir = os.path.dirname(CONFIG["LOGFILE"])
    Path(logdir).mkdir(parents=True, exist_ok=True)
# set up logger
loglevel = logging.INFO
if "LOGLEVEL" in CONFIG.keys():
if CONFIG["LOGLEVEL"] == "WARN":
loglevel = logging.WARN
elif CONFIG["LOGLEVEL"] == "DEBUG":
loglevel = logging.DEBUG
# configure logging object
logfile = os.path.join(
logdir, "server-{0}".format(os.path.basename(CONFIG["LOGFILE"]))
)
file_handler = logging.handlers.RotatingFileHandler(
logfile, mode="a", maxBytes=1e7, backupCount=7
)
formatter = logging.Formatter(
"%(asctime)s | %(pathname)s:%(lineno)d | %(funcName)s | %(levelname)s | %(message)s "
)
file_handler.setFormatter(formatter)
logging.info("Logging to {0}".format(logfile))
########################### prepare to launch server ###############################################################
RESTBASEURL = "http://{0}:{1}".format(CONFIG["IP"], CONFIG["REST_PORT"])
######################### CONFIGURE HELPER APPLICATIONS ######################
# plotting engine
# prevent https://stackoverflow.com/questions/27147300/how-to-clean-images-in-python-django
matplotlib.use("agg")
pm = Persistence()
PERSIST = pm.get_storage_object(
dbname=CONFIG["SERVERNAME"],
connString=CONFIG["FNPERSISTENCE_CONNSTRING"],
debug=CONFIG["DEBUGMODE"],
verbose=True,
)
# check is it not sqlite. We can't run sqlite in a multithreaded environment like flask.
# you need a proper database managing concurrent connections, such as mongo, or a standard rdbms
if PERSIST.using_sqlite:
raise SQLiteBackendErrror(
"""Can't use SQlite as a backend for findNeighbour4_server.
A database handing sessions is required. Some unit tests use SQLite;
however, you can't run integration tests (test_server_rdbms.py) against sqlite.
To test the server, edit the config/default_test_config_rdbms.json file's "FNPERSISTENCE_CONNSTRING": connection string to
point to an suitable database. For more details of how to do this, see installation instructions on github."""
)
# instantiate server class
try:
fn = findNeighbour4(CONFIG, PERSIST)
except Exception as e:
logging.exception("Error raised on instantiating findNeighbour4 object")
logging.exception(e)
raise
######################## START THE SERVER ###################################
# default parameters for unit testing only; will be overwritten if config file provided.
RESTBASEURL = "http://127.0.0.1:5020"
# initialise Flask
app = Flask(__name__)
CORS(app) # allow CORS
app.logger.setLevel(loglevel)
app.logger.addHandler(file_handler)
app.logger.info("Logging to {0}".format(logfile))
# launch sentry if API key provided
    # determine whether a FN_SENTRY_URL environment variable is present;
    # if so, the value of this will take precedence over any values in the config file.
    # This allows 'secret' connstrings involving passwords etc to be specified without the values going into a configuration file.
if os.environ.get("FN_SENTRY_URL") is not None:
CONFIG["SENTRY_URL"] = os.environ.get("FN_SENTRY_URL")
print("Set Sentry connection string from environment variable")
else:
print("Using Sentry connection string from configuration file.")
if "SENTRY_URL" in CONFIG.keys():
app.logger.info(
"Launching communication with Sentry bug-tracking service, with 1% transactional logging"
)
sentry_sdk.init(
CONFIG["SENTRY_URL"],
integrations=[FlaskIntegration()],
traces_sample_rate=0.01,
)
if CONFIG["DEBUGMODE"] > 0:
app.config["PROPAGATE_EXCEPTIONS"] = True
def isjson(content):
"""returns true if content parses as json, otherwise false. used by unit testing."""
try:
json.loads(content.decode("utf-8"))
return True
except json.decoder.JSONDecodeError:
return False
def tojson(content):
"""json dumps, formatting dates as isoformat"""
def converter(o):
if isinstance(o, datetime.datetime):
return o.isoformat()
else:
return json.JSONEncoder.default(o)
return json.dumps(content, default=converter)
# --------------------------------------------------------------------------------------------------
@app.errorhandler(404)
def not_found(error):
json_err = jsonify(
{"error": "Not found (custom error handler for mis-routing)"}
)
return make_response(json_err, 404)
# --------------------------------------------------------------------------------------------------
@app.teardown_appcontext
def shutdown_session(exception=None):
fn.PERSIST.closedown() # close database connection
def do_GET(relpath):
"""makes a GET request to relpath.
Used for unit testing."""
url = urljoiner(RESTBASEURL, relpath)
session = requests.Session()
session.trust_env = False
response = session.get(url=url, timeout=None)
session.close()
return response
def do_POST(relpath, payload):
"""makes a POST request to relpath.
Used for unit testing.
payload should be a dictionary"""
url = urljoiner(RESTBASEURL, relpath)
if not isinstance(payload, dict):
raise TypeError("not a dict {0}".format(payload))
response = requests.post(url=url, data=payload)
return response
def render_markdown(md_file):
"""render markdown as html"""
with codecs.open(md_file, mode="r", encoding="utf-8") as f:
text = f.read()
html = markdown.markdown(text, extensions=["tables"])
return html
@app.route("/", methods=["GET"])
def routes():
"""returns server info page"""
routes_file = BASE_PATH / "doc" / "rest-routes.md"
return make_response(render_markdown(routes_file))
@app.route("/ui/info", methods=["GET"])
def server_info():
"""returns server info page"""
routes_file = BASE_PATH / "doc" / "serverinfo.md"
return make_response(render_markdown(routes_file))
@app.route("/api/v2/raise_error/<string:component>/<string:token>", methods=["GET"])
def raise_error(component, token):
"""* raises an error internally. Can be used to test error logging. Disabled unless in debug mode.
/api/v2/raise_error/*component*/*token*/
Valid values for component are:
main - raise error in main code
persist - raise in PERSIST object
clustering - raise in clustering
py_seqComparer - raise in py_seqComparer.
"""
if not fn.debugMode == 2:
# if we're not in debugMode==2, then this option is not allowed
abort(404, "Calls to /raise_error are only allowed with debugMode == 2")
if component == "main":
raise ZeroDivisionError(token)
elif component == "clustering":
clustering_names = list(fn.clustering_settings.keys())
if len(clustering_names) == 0:
raise ValueError(
"no clustering settings defined; cannot test error generation in clustering"
)
else:
clustering_name = clustering_names[0]
fn.clustering_settings[clustering_name].raise_error(token)
elif component == "py_seqComparer":
fn.hc.raise_error(token)
elif component == "persist":
fn.PERSIST.raise_error(token)
else:
raise KeyError(
"Invalid component called. Allowed: main;persist;clustering;py_seqComparer."
)
def construct_msa(guids, output_format, what):
"""constructs multiple sequence alignment for guids
and returns in one of 'fasta' 'json-fasta', 'html', 'json' or 'json-records','interactive' format.
what is one of 'N','M','N_or_M'
"""
# test whether the MSA is cached
this_token = fn.ms.get_token(what, False, guids) # no outgroup, guids
msa_result = fn.ms.load(this_token) # recover any stored version
if msa_result is None:
logging.info(
"Asked to recover MSA id = {0} but it did not exist. Recomputing this".format(
this_token
)
)
msa_result = fn.hc.multi_sequence_alignment(
guids=guids, uncertain_base_type=what
) # make it
# this may fail if all the samples are invalid
if msa_result is not None:
fn.ms.persist(this_token, msa_result)
else:
abort(406, "no sequences of adequate quality exist")
if output_format == "fasta":
return make_response(msa_result.msa_fasta())
elif output_format == "json-fasta":
return make_response(json.dumps({"fasta": msa_result.msa_fasta()}))
elif output_format == "html":
return make_response(msa_result.msa_html())
elif output_format == "interactive":
return make_response(msa_result.msa_interactive_depiction())
elif output_format == "json":
return make_response(json.dumps(msa_result.serialise()))
elif output_format == "json-records":
df = msa_result.df # a guid column is expected by the web front end.
df["guid"] = df.index.to_list()
return make_response(df.to_json(orient="records"))
else:
raise ValueError("Invalid output format {0}".format(output_format))
@app.route("/api/v2/reset", methods=["POST"])
def reset():
"""deletes any existing data from the server"""
if not fn.debugMode == 2:
# if we're not in debugMode==2, then this option is not allowed
abort(404, "Calls to /reset are only allowed with debugMode == 2")
else:
fn.reset()
return make_response(json.dumps({"message": "reset completed"}))
@app.route("/api/v2/monitor", methods=["GET"])
@app.route("/api/v2/monitor/<string:report_type>", methods=["GET"])
def monitor(report_type="Report"):
"""returns an html/bokeh file, generated by findNeighbour4_monitor,
        and stored in a database. If no report_type is specified, uses 'Report',
which is the name of the default report produced by findNeighbour4_monitor"""
html = fn.PERSIST.monitor_read(report_type)
if html is None:
html = "No report of type {0} is available. Reports are generated by a separate process, findNeighbour4_monitor.py, which may not be running.".format(
report_type
)
return html
@app.route(
"/api/v2/clustering/<string:clustering_algorithm>/<int:cluster_id>/network",
methods=["GET"],
)
@app.route(
"/api/v2/clustering/<string:clustering_algorithm>/<int:cluster_id>/minimum_spanning_tree",
methods=["GET"],
)
def cl2network(clustering_algorithm, cluster_id):
"""produces a cytoscape.js compatible graph from a cluster ,
either from the network (comprising all edges < snp cutoff)
or as a minimal spanning tree.
"""
# validate input
fn.clustering[clustering_algorithm].refresh()
try:
res = fn.clustering[clustering_algorithm].clusters2guidmeta(
after_change_id=None
)
except KeyError:
# no clustering algorithm of this type
return make_response(
tojson("no clustering algorithm {0}".format(clustering_algorithm)), 404
)
# check guids
df = pd.DataFrame.from_records(res)
if len(df.index) == 0:
return make_response(
tojson({"success": 0, "message": "No samples exist for that cluster"})
)
else:
df = df[df["cluster_id"] == cluster_id] # only if there are records
guids = sorted(df["guid"].tolist())
# data validation complete. construct outputs
snv_threshold = fn.clustering[clustering_algorithm].snv_threshold
snvn = snvNetwork(snv_threshold=snv_threshold)
E = []
for guid in guids:
is_mixed = int(fn.clustering[clustering_algorithm].is_mixed(guid))
snvn.G.add_node(guid, is_mixed=is_mixed)
for guid in guids:
res = fn.PERSIST.guid2neighbours(
guid, cutoff=snv_threshold, returned_format=1
)
for (guid2, snv) in res["neighbours"]:
if guid2 in guids: # don't link outside the cluster
E.append((guid, guid2))
snvn.G.add_edge(guid, guid2, weight=snv, snv=snv)
if request.base_url.endswith("/minimum_spanning_tree"):
snvn.G = nx.minimum_spanning_tree(snvn.G)
retVal = snvn.network2cytoscapejs()
retVal[
"message"
] = "{0} cluster #{1}. Minimum spanning tree is shown. Red nodes are mixed.".format(
clustering_algorithm, cluster_id
)
else:
retVal = snvn.network2cytoscapejs()
retVal[
"message"
] = "{0} cluster #{1}. Network of all edges < cutoff shown. Red nodes are mixed.".format(
clustering_algorithm, cluster_id
)
retVal["success"] = 1
return make_response(tojson(retVal))
@app.route("/api/v2/multiple_alignment/guids", methods=["POST"])
def msa_guids():
"""performs a multiple sequence alignment on a series of POSTed guids,
delivered in a dictionary, e.g.
{'guids':'guid1;guid2;guid3',
'output_format':'json'}
Valid values for output_format are:
json
json-records
html
json-fasta
fasta
interactive
Valid values for what are
N
M
N_or_M
"""
# validate input
valid_output_formats = [
"html",
"json",
"fasta",
"json-fasta",
"json-records",
"interactive",
]
request_payload = request.form.to_dict()
if "output_format" not in request_payload or "guids" not in request_payload:
abort(
405,
"output_format and guids are not present in the POSTed data {0}".format(
request_payload.keys()
),
)
guids = request_payload["guids"].split(
";"
) # coerce both guid and seq to strings
output_format = request_payload["output_format"]
if "what" in request_payload.keys():
what = request_payload["what"]
else:
what = "N" # default to N
if what not in ["N", "M", "N_or_M"]:
abort(404, "what must be one of N M N_or_M, not {0}".format(what))
if output_format not in valid_output_formats:
abort(
404,
"output_format must be one of {0} not {1}".format(
valid_output_formats, output_format
),
)
# check guids
try:
valid_guids = fn.get_all_valid_guids()
except Exception as e:
capture_exception(e)
abort(500, e)
guids = set(guids)
missing_guids = guids - valid_guids
if len(missing_guids) > 0:
capture_message(
"asked to perform multiple sequence alignment with the following missing guids: {0}".format(
missing_guids
)
)
abort(
405,
"asked to perform multiple sequence alignment with the following missing or invalid guids: {0}".format(
missing_guids
),
)
# data validation complete. construct outputs
return construct_msa(guids, output_format, what)
@app.route(
"/api/v2/multiple_alignment_cluster/<string:clustering_algorithm>/<int:cluster_id>/<string:output_format>",
methods=["GET"],
)
def msa_guids_by_cluster(clustering_algorithm, cluster_id, output_format):
"""performs a multiple sequence alignment on the contents of a cluster
Valid values for format are:
json
json-records
fasta
html
interactive
"""
# validate input; update clustering
try:
fn.clustering[clustering_algorithm].refresh()
except KeyError:
# no clustering algorithm of this type
return make_response(
tojson("no clustering algorithm {0}".format(clustering_algorithm)), 404
)
if output_format not in [
"html",
"json",
"json-records",
"json-fasta",
"fasta",
"interactive",
]:
abort(
404,
"not available: output_format must be one of html, json, json-records, fasta, interactive not {0}".format(
output_format
),
)
# check guids
res = fn.clustering[clustering_algorithm].clusters2guidmeta(
after_change_id=None
)
        df = pd.DataFrame.from_records(res)
import sys
sys.path.append('../')
from matplotlib import figure
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.collections import PatchCollection
from matplotlib.colors import ListedColormap
import os
from tqdm import tqdm
### Config folders
config_data = pd.read_csv('config.csv', sep=',', header=None, index_col=0)
figures_path = config_data.loc['figures_dir_art'][1]
results_path = config_data.loc['results_dir'][1]
results_test_path = config_data.loc['results_test_dir'][1]
ages_data_path = config_data.loc['bogota_age_data_dir'][1]
houses_data_path = config_data.loc['bogota_houses_data_dir'][1]
### Arguments
import argparse
parser = argparse.ArgumentParser(description='Dynamics visualization.')
parser.add_argument('--population', default=10000, type=int,
                    help='Specify the number of individuals')
parser.add_argument('--type_sim', default='intervention', type=str,
                    help='Specify the type of simulation to plot')
args = parser.parse_args()
number_nodes = args.population
pop = number_nodes
### Read functions
def load_results_ints(type_res,n,int_effec,schl_occup,type_mask,frac_people_mask,ventilation,path=results_path):
read_path = os.path.join(path,'{}_inter_{}_schoolcap_{}_mask_{}_peopleMasked_{}_ventilation_{}_ID_ND_{}.csv'.format(str(n),str(int_effec),
str(schl_occup),type_mask,str(frac_people_mask),str(ventilation),type_res))
read_file = pd.read_csv(read_path)
return read_file
def load_results_ints_test(type_res,n,int_effec,schl_occup,layer,path=results_path):
read_path = os.path.join(path,str(n),'{}_layerInt_{}_inter_{}_schoolcap_{}_{}.csv'.format(str(n),str(layer),str(int_effec),
str(schl_occup),type_res))
read_file = pd.read_csv(read_path)
return read_file
### Read file
results_path = os.path.join(results_path,'intervention',str(pop))
###------------------------------------------------------------------------------------------------------------------------------------------------------
### Plot proportional areas (each mask type) for each level of ventilation
def nested_circles(data, labels=None, c=None, ax=None,
cmap=None, norm=None, textkw={}):
ax = ax or plt.gca()
data = np.array(data)
R = np.sqrt(data/data.max())
p = [plt.Circle((0,r), radius=r) for r in R[::-1]]
arr = data[::-1] if c is None else np.array(c[::-1])
col = PatchCollection(p, cmap=cmap, norm=norm, array=arr)
ax.add_collection(col)
ax.axis("off")
ax.set_aspect("equal")
ax.autoscale()
if labels is not None:
kw = dict(color="k", va="center", ha="center")
kw.update(textkw)
ax.text(0, R[0], labels[0], **kw)
for i in range(1, len(R)):
ax.text(0, R[i]+R[i-1], labels[i], **kw)
return col
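# Illustrative sketch of nested_circles: it draws nested circles whose areas are
# proportional to the supplied values (the values and labels below are made up).
#
#   nested_circles([30.0, 70.0], labels=['30%', '70%'], cmap='copper', textkw=dict(fontsize=14))
#   plt.show()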
# from pylab import *
# cmap = cm.get_cmap('gist_heat_r', 5) # PiYG
# colors_plt = []
# for i in range(cmap.N):
# rgba = cmap(i)
# # rgb2hex accepts rgb or rgba
# colors_plt.append(matplotlib.colors.rgb2hex(rgba))
# colors_plt = colors_plt[1:4]
# def plot_examples(cms):
# """
# helper function to plot two colormaps
# """
# np.random.seed(19680801)
# data = np.random.randn(30, 30)
# fig, axs = plt.subplots(1, 2, figsize=(6, 3), constrained_layout=True)
# for [ax, cmap] in zip(axs, cms):
# psm = ax.pcolormesh(data, cmap=cmap, rasterized=True, vmin=-4, vmax=4)
# fig.colorbar(psm, ax=ax)
# plt.show()
# viridisBig = cm.get_cmap('Reds_r', 512)
# newcmp = ListedColormap(viridisBig(np.linspace(0.95, 0.5, 256)))
# school_cap = 0.35
# fraction_people_masked = 1.0
# ventilation_vals = 0.0
# inter_ = 0.4
# masks = ['cloth','surgical','N95']
# masks_labels = ['Cloth','Surgical','N95']
# masks_labels = dict(zip(masks,masks_labels))
# df_list = []
# for m, mask_ in enumerate(masks):
# res_read = load_results_ints('soln_cum',args.population,inter_,school_cap,mask_,fraction_people_masked,ventilation_vals,path=results_path)
# for itr_ in range(10):
# res_read_i = res_read['iter'] == itr_
# res_read_i = pd.DataFrame(res_read[res_read_i])
# end_cases = res_read_i['E'].iloc[-1]
# df_res_i = pd.DataFrame(columns=['iter','mask','frac_mask','interven_eff','ventilation','end_cases'])
# df_res_i['iter'] = [int(itr_)]
# df_res_i['mask'] = masks_labels[mask_]
# df_res_i['frac_mask'] = r'{}%'.format(int(fraction_people_masked*100))
# df_res_i['interven_eff'] = r'{}%'.format(int(inter_*100))
# df_res_i['ventilation'] = str(ventilation_vals)
# df_res_i['end_cases'] = end_cases*100
# df_list.append(df_res_i)
# df_final_E_lowVent = pd.concat(df_list)
# df_final_E_lowVent_meds = df_final_E_lowVent.groupby('mask').median().reset_index()
# percentagesData_E_lowVent_mends = list(df_final_E_lowVent_meds['end_cases'])
# percentagesLabels_E_lowVent_mends = [r'{:.2f}%'.format(end_cases) for end_cases in df_final_E_lowVent_meds['end_cases']]
# nested_circles(percentagesData_E_lowVent_mends,labels=percentagesLabels_E_lowVent_mends,cmap='copper',textkw=dict(fontsize=14))
# plt.show()
# test_vals = [8.420,100-8.420]
# test_labels = list("AB")
# nested_circles(test_vals, labels=test_labels, cmap="copper", textkw=dict(fontsize=14))
# plt.show()
# fig,ax = plt.subplots(1,1,figsize=(7, 6))
# sns.pointplot(ax=ax, data=df_final_E_v, x='end_cases', y='frac_mask', hue='mask', linestyles='',dodge=0.3,palette='plasma',alpha=0.8)
# ax.legend(bbox_to_anchor=(1.02,1)).set_title('')
# plt.setp(ax.get_legend().get_texts(), fontsize='17') # for legend text
# ax.set_xlabel(r'Infections per 10,000',fontsize=17)
# ax.set_ylabel(r'Individuals wearing masks ($\%$)',fontsize=17)
# ax.set_title(r'Total infections | schools at {}$\%$, low ventilation'.format(str(school_cap*100)),fontsize=17)
# plt.xticks(size=16)
# plt.yticks(size=16)
# #plt.xlim([4850,6000])
# save_path = os.path.join(figures_path,'point_plots','totalInfections_n_{}_schoolcap_{}_ventilation_{}_inter_{}.png'.format(str(pop),str(0.35),str(ventilation_vals[0]),str(inter_)))
# plt.savefig(save_path,dpi=400, transparent=False, bbox_inches='tight', pad_inches=0.1 )
# school_cap = 0.35
# fraction_people_masked = 1.0
# ventilation_vals = 15.0
# inter_ = 0.4
# masks = ['cloth','surgical','N95']
# masks_labels = ['Cloth','Surgical','N95']
# masks_labels = dict(zip(masks,masks_labels))
# df_list = []
# for m, mask_ in enumerate(masks):
# res_read = load_results_ints('soln_cum',args.population,inter_,school_cap,mask_,fraction_people_masked,ventilation_vals,path=results_path)
# for itr_ in range(10):
# res_read_i = res_read['iter'] == itr_
# res_read_i = pd.DataFrame(res_read[res_read_i])
# end_cases = res_read_i['E'].iloc[-1]
# df_res_i = pd.DataFrame(columns=['iter','mask','frac_mask','interven_eff','ventilation','end_cases'])
# df_res_i['iter'] = [int(itr_)]
# df_res_i['mask'] = masks_labels[mask_]
# df_res_i['frac_mask'] = r'{}%'.format(int(fraction_people_masked*100))
# df_res_i['interven_eff'] = r'{}%'.format(int(inter_*100))
# df_res_i['ventilation'] = str(ventilation_vals)
# df_res_i['end_cases'] = end_cases*100
# df_list.append(df_res_i)
# df_final_E_highVent = pd.concat(df_list)
# df_final_E_highVent_meds = df_final_E_highVent.groupby('mask').median().reset_index()
# percentagesData_E_lowVent_mends = list(df_final_E_highVent_meds['end_cases'])
# percentagesLabels_E_lowVent_mends = [r'{:.2f}%'.format(end_cases) for end_cases in df_final_E_highVent_meds['end_cases']]
# nested_circles(percentagesData_E_lowVent_mends,labels=percentagesLabels_E_lowVent_mends,cmap='copper',textkw=dict(fontsize=14))
# plt.show()
# test_vals = [30.035,100-30.035]
# test_labels = list("AB")
# nested_circles(test_vals, labels=test_labels, cmap="copper", textkw=dict(fontsize=14))
# plt.show()
###------------------------------------------------------------------------------------------------------------------------------------------------------
### Bar plot tests
intervention_effcs = [0.0,0.2,0.4]
school_cap = [0.35] #,0.35]
layers_test = ['work','community','all']
layers_labels = ['Workplace intervention','Community intervention','Full intervention']
layers_labels = dict(zip(layers_test,layers_labels))
df_list = []
for l, layer_ in enumerate(layers_test):
for i, inter_ in enumerate(intervention_effcs):
for j, schl_cap_ in enumerate(school_cap):
res_read = load_results_ints_test('soln_cum',args.population,inter_,schl_cap_,layer_,results_test_path)
for itr_ in range(10):
res_read_i = res_read['iter'] == itr_
res_read_i = pd.DataFrame(res_read[res_read_i])
end_cases = res_read_i['E'].iloc[-1]
df_res_i = pd.DataFrame(columns=['iter','Inter.Layer','interven_eff','end_cases'])
df_res_i['iter'] = [int(itr_)]
df_res_i['Inter.Layer'] = layers_labels[layer_]
df_res_i['interven_eff'] = r'{}%'.format(int(inter_*100))
df_res_i['end_cases'] = end_cases*100
df_list.append(df_res_i)
df_final_E = pd.concat(df_list)
fig,ax = plt.subplots(1,1,figsize=(9, 6))
sns.catplot(ax=ax, data=df_final_E, y='interven_eff', x='end_cases', hue='Inter.Layer',kind='bar',palette='Blues',alpha=0.7,legend=False)
#ax.legend(bbox_to_anchor=(1.02,1)).set_title('')
plt.legend(bbox_to_anchor=(1.02,0.6),title='',frameon=False, fontsize=16)
#plt.setp(ax.get_legend().get_texts(), fontsize='17') # for legend text
plt.ylabel(r'Intervention efficiency, ($\%$)',fontsize=17)
plt.xlabel(r'% Infected',fontsize=17)
plt.title(r'Total infections | Schools at {}%'.format(str(int(school_cap[0]*100))),fontsize=17)
plt.xticks(size=16)
plt.yticks(size=16)
save_path = os.path.join(figures_path,'bar_plots','layersInter_totalInfections_n_{}_schoolcap_{}_.png'.format(str(pop),str(school_cap[0])))
plt.savefig(save_path,dpi=400, transparent=False, bbox_inches='tight', pad_inches=0.1 )
###------------------------------------------------------------------------------------------------------------------------------------------------------
### Bar plots
# End infections plotting ventilation and mask
intervention_effcs = [0.0,0.2,0.4] #,0.6]
interv_legend_label = [r'$0\%$ intervention efficiency',r'$20\%$ intervention efficiency',r'$40\%$ intervention efficiency'] #,r'$40\%$ intervention efficiency',r'$60\%$ intervention efficiency'] #,r'No intervention, schools $100\%$ occupation']
school_cap = 0.35
fraction_people_masked = 1.0
ventilation_vals = [0.0,5.0,8.0,15.0]
ventilation_labels = ['Zero','Low','Medium','High']
ventilation_labels = dict(zip(ventilation_vals,ventilation_labels))
masks = ['cloth','surgical','N95']
masks_labels = {'cloth':'Cloth','surgical':'Surgical','N95':'N95'}
states_ = ['S', 'E', 'I1', 'I2', 'I3', 'D', 'R']
df_list = []
inter_ = intervention_effcs[0]
for m, mask_ in enumerate(masks):
for j, vent_ in enumerate(ventilation_vals):
res_read = load_results_ints('soln_cum',args.population,inter_,school_cap,mask_,fraction_people_masked,vent_,path=results_path)
for itr_ in range(10):
res_read_i = res_read['iter'] == itr_
res_read_i = pd.DataFrame(res_read[res_read_i])
end_cases = res_read_i['E'].iloc[-1]
            df_res_i = pd.DataFrame(columns=['iter','Mask','interven_eff','ventilation','end_cases'])
df_res_i['iter'] = [int(itr_)]
            df_res_i['Mask'] = str(masks_labels[mask_])
df_res_i['interven_eff'] = r'{}%'.format(int(inter_*100))
df_res_i['ventilation'] = ventilation_labels[vent_]
df_res_i['end_cases'] = end_cases*100
df_list.append(df_res_i)
df_final_E = pd.concat(df_list)
plt.figure(figsize=(7,6))
sns.catplot(data=df_final_E, x='ventilation', y='end_cases', hue='Mask',kind='bar',palette='Reds_r',alpha=0.8)
#ax.legend(bbox_to_anchor=(1.02,1)).set_title('')
#plt.setp(ax.get_legend().get_texts(), fontsize='17') # for legend text
plt.xlabel('Ventilation',fontsize=17)
plt.ylabel(r'% Infected',fontsize=17)
plt.title(r'Total infections | schools at {}$\%$, intervention {}$\%$'.format(str(int(school_cap*100)),str(int(inter_*100))),fontsize=17)
plt.xticks(size=16)
plt.yticks(size=16)
plt.ylim([0,101])
#plt.show()
save_path = os.path.join(figures_path,'bar_plots','totalInfections_n_{}_inter_{}_schoolcap_{}_.png'.format(str(pop),str(inter_),str(0.35)))
plt.savefig(save_path,dpi=400, transparent=False, bbox_inches='tight', pad_inches=0.1 )
# End deaths plotting ventilation and mask
df_list = []  # reset the accumulator so death counts are not mixed with the infection rows built above
inter_ = intervention_effcs[2]
for m, mask_ in enumerate(masks):
for j, vent_ in enumerate(ventilation_vals):
res_read = load_results_ints('soln_cum',args.population,inter_,school_cap,mask_,fraction_people_masked,vent_,path=results_path)
for itr_ in range(10):
res_read_i = res_read['iter'] == itr_
res_read_i = pd.DataFrame(res_read[res_read_i])
end_dead = res_read_i['D'].iloc[-1]
df_res_i = pd.DataFrame(columns=['iter','Mask','interven_eff','ventilation','end_dead'])
df_res_i['iter'] = [int(itr_)]
df_res_i['Mask'] = str(masks_labels[mask_])
df_res_i['interven_eff'] = r'{}%'.format(int(inter_*100))
df_res_i['ventilation'] = ventilation_labels[vent_]
df_res_i['end_dead'] = end_dead*100
df_list.append(df_res_i)
df_final_D = pd.concat(df_list)
# -*- coding: utf-8 -*-
import re,pandas as pd,numpy as np
from pandas import DataFrame
import os
pathDir=os.listdir(r'C:\Users\aklasim\Desktop\Py6.11 Pdf\t1')
pt=(r'C:\Users\aklasim\Desktop\Py6.11 Pdf\t1')
cols=['工单编号','上级工单编号','项目编号','工单描述','上级工单描述','施工单位','合同号','计划服务费','开工日期','完工日期','作业类型','通知单创建','通知单批准','计划','待审','下达','验收确认','完工确认','完工时间','打印者','打印日期','工序号','工作中心','控制码','工序内容','计划量','签证','物料编码','物料描述','单位计划量','出库量','签证']
l=[]
x=0
l1=[]
dfb = pd.DataFrame(columns=['工单编号', '上级工单编号', '项目编号', '工单描述', '上级工单描述', '施工单位', '合同号', '计划服务费','开工日期', '完工日期', '作业类型', '通知单创建', '通知单批准', '计划', '待审', '下达', '验收确认','完工确认', '完工时间', '打印者', '打印日期', '工序号', '工作中心', '控制码', '工序内容', '计划量',
'签证', '物料编码', '物料描述', '单位计划量', '出库量', '签证', '单位', '数量确认'])
for filename in pathDir:
x=x+1
df = pd.DataFrame(index=range(30), columns=cols)
def gg(rg,n):
e=[]
f = open(pt + '\\' + filename, encoding='gbk')
for line in f:
d=re.search(rg,line)
if d:
d=str(d.group())
e.append(d)
print(e)
df[n]=pd.Series(e)
f.close()
desc=gg('工单描述\s\S+','工单描述')#desc = re.findall('工单描述\s\S+', line)
n=gg('工单编号\s\d+','工单编号')
up_n=gg('上级工单编号\s\d+','上级工单编号') #sup_desc = re.findall('上级工单描述\s\d+', line)
pro_n=gg('项目编号\s\d+','项目编号') #pro_co=re.findall('项目编号\s\d+',line)
unit=gg('施工单位\s\S+','施工单位')#unit= re.findall('施工单位\s\S+', line)
contr_co=gg('合同号\s\d+','合同号') #contr_co = re.findall('合同号\s\d+', line)
cost=gg('计划服务费\s+\d+\,*\d*\.\d+','计划服务费')#cost = re.findall('计划服务费\s+\d+\,*\d*\.\d+', line)
#if len(cost)>0:
# money=cost[0].split()[1]
start_d=gg('开工日期\s\S+','开工日期')#start_d = re.findall('开工日期\s\S+', line)
over_d=gg('完工日期\s\S+','完工日期')#over_d = re.findall('完工日期\s\S+', line)
worktp = gg('作业类型\s\S+', '作业类型')#worktp = re.findall('作业类型\s\S+', line)
#ntc_crt = re.findall('通知单创建\s\S+', line)
#ntc_pmt = re.findall('通知单批准\s\S+', line)
#plan = re.findall('计划\s\S+', line)
#ass= re.findall('待审\s\S+', line)
#order= re.findall('下达\s\S+', line)
#acpt_ck = re.findall('验收确认\s\S+', line)
#fns_ck = re.findall('完工确认\s\S+', line)
#fns_tm = re.findall('完工时间\s\S+', line)
#printer = re.findall('打印者:\S+', line)
#prt_d = re.findall('打印日期:\d+-\d+-\d+', line)
ntc_crt = gg('通知单创建\s\S+', '通知单创建')
ntc_pmt = gg('通知单批准\s\S+', '通知单批准')
plan = gg('计划\s\S+', '计划')
ass= gg('待审\s\S+', '待审')
order= gg('下达\s\S+', '下达')
acpt_ck = gg('验收确认\s\S+', '验收确认')
fns_ck = gg('完工确认\s\S+', '完工确认')
fns_tm = gg('完工时间\s\S+', '完工时间')
printer = gg('打印者:\S+', '打印者')
prt_d = gg('打印日期:\d+-\d+-\d+', '打印日期')
wp_num = []
wk_ctr = []
ctr_code = []
wp_contts = []
cert = []
f = open(pt + '\\' + filename, encoding='gbk')
for line in f:
proc_set = re.findall('(^\d+)\s(\D+\d*)(\D+\d*)\s((\S*\d*\s*\.*)+)(\d+\.*\d*\D+)+\n', line)#426
if proc_set:# 工序号/工作中心/控制码/工序内容/签证
sets=list(proc_set[0])
wp_num.append(sets[0])
wk_ctr.append (sets[1])
ctr_code.append (sets[2])
wp_contts.append (sets[3])
cert.append (sets[5])
df['工序号']=pd.Series(wp_num)
df['工作中心']=pd.Series(wk_ctr)
df['控制码']=pd.Series(ctr_code)
df['工序内容']=pd.Series(wp_contts)
df['签证']=pd.Series(cert)
wp_num = []
mat_code = []
mat_descr = []
msr_unit = []
all_num = []
cert=[]
f.close()
f = open(pt + '\\' + filename, encoding='gbk')
for line in f:
mat_set = re.findall('(^\d+)\s(\d+)\s((\S*\s*)+)\s(\D)\s((\d\.*\d*\s*)+)\n', line) # 140
if mat_set: # 工序号/物料编码/物料描述/单位/数量确认/计划量/出库量/签证
sets = list(mat_set[0])
wp_num.append(sets[0])
mat_code.append(sets[1])
mat_descr.append(sets[2])
msr_unit.append(sets[4])
all_num.append(sets[5])
cert.append(sets[6])
df['工序号']=pd.Series(wp_num)
df['物料编码']=pd.Series(mat_code)
df['物料描述']=pd.Series(mat_descr)
df['单位']=pd.Series(msr_unit)
    df['数量确认']=pd.Series(all_num)
# Copyright (c) 2018-2019, NVIDIA CORPORATION.
from __future__ import print_function, division
import numpy as np
import pandas as pd
import pyarrow as pa
from pandas.api.types import is_integer_dtype
from librmm_cffi import librmm as rmm
from cudf.dataframe import columnops, datetime, string
from cudf.utils import cudautils, utils
from cudf.dataframe.buffer import Buffer
from cudf.comm.serialize import register_distributed_serializer
from cudf.bindings.nvtx import nvtx_range_push, nvtx_range_pop
from cudf.bindings.cudf_cpp import np_to_pa_dtype
from cudf._sort import get_sorted_inds
import cudf.bindings.reduce as cpp_reduce
import cudf.bindings.replace as cpp_replace
import cudf.bindings.binops as cpp_binops
import cudf.bindings.sort as cpp_sort
import cudf.bindings.unaryops as cpp_unaryops
import cudf.bindings.copying as cpp_copying
import cudf.bindings.hash as cpp_hash
from cudf.bindings.cudf_cpp import get_ctype_ptr
class NumericalColumn(columnops.TypedColumnBase):
def __init__(self, **kwargs):
"""
Parameters
----------
data : Buffer
The code values
mask : Buffer; optional
The validity mask
null_count : int; optional
The number of null values in the mask.
dtype : np.dtype
Data type
"""
super(NumericalColumn, self).__init__(**kwargs)
assert self._dtype == self._data.dtype
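    # Illustrative sketch only (not part of the class).  Columns in this module normally wrap
    # device memory; assuming rmm.to_device behaves like numba.cuda.to_device, a column could
    # be built along these lines:
    #
    #   dary = rmm.to_device(np.array([1, 2, 3], dtype=np.int64))
    #   col = NumericalColumn(data=Buffer(dary), dtype=dary.dtype)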
def replace(self, **kwargs):
if 'data' in kwargs and 'dtype' not in kwargs:
kwargs['dtype'] = kwargs['data'].dtype
return super(NumericalColumn, self).replace(**kwargs)
def serialize(self, serialize):
header, frames = super(NumericalColumn, self).serialize(serialize)
assert 'dtype' not in header
header['dtype'] = serialize(self._dtype)
return header, frames
@classmethod
def deserialize(cls, deserialize, header, frames):
data, mask = cls._deserialize_data_mask(deserialize, header, frames)
col = cls(data=data, mask=mask, null_count=header['null_count'],
dtype=deserialize(*header['dtype']))
return col
def binary_operator(self, binop, rhs, reflect=False):
if isinstance(rhs, NumericalColumn) or np.isscalar(rhs):
out_dtype = np.result_type(self.dtype, rhs.dtype)
return numeric_column_binop(
lhs=self,
rhs=rhs,
op=binop,
out_dtype=out_dtype,
reflect=reflect
)
else:
msg = "{!r} operator not supported between {} and {}"
raise TypeError(msg.format(binop, type(self), type(rhs)))
def unary_operator(self, unaryop):
return numeric_column_unaryop(self, op=unaryop,
out_dtype=self.dtype)
def unary_logic_op(self, unaryop):
return numeric_column_unaryop(self, op=unaryop,
out_dtype=np.bool_)
def unordered_compare(self, cmpop, rhs):
return numeric_column_compare(self, rhs, op=cmpop)
def ordered_compare(self, cmpop, rhs):
return numeric_column_compare(self, rhs, op=cmpop)
def _apply_scan_op(self, op):
out_col = columnops.column_empty_like_same_mask(self, dtype=self.dtype)
cpp_reduce.apply_scan(self, out_col, op, inclusive=True)
return out_col
def normalize_binop_value(self, other):
other_dtype = np.min_scalar_type(other)
if other_dtype.kind in 'biuf':
other_dtype = np.promote_types(self.dtype, other_dtype)
if np.isscalar(other):
other = np.dtype(other_dtype).type(other)
return other
else:
ary = utils.scalar_broadcast_to(
other,
shape=len(self),
dtype=other_dtype
)
return self.replace(data=Buffer(ary), dtype=ary.dtype)
else:
raise TypeError('cannot broadcast {}'.format(type(other)))
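    # Illustrative sketch of the method above: for an int32 column, normalize_binop_value(3)
    # promotes the Python scalar to the common dtype (here int32) and returns a numpy scalar,
    # whereas a broadcastable non-scalar is returned as a column of matching length.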
def astype(self, dtype):
if self.dtype == dtype:
return self
elif (dtype == np.dtype('object') or
np.issubdtype(dtype, np.dtype('U').type)):
if len(self) > 0:
if self.dtype in (np.dtype('int8'), np.dtype('int16')):
dev_array = self.astype('int32').data.mem
else:
dev_array = self.data.mem
dev_ptr = get_ctype_ptr(dev_array)
null_ptr = None
if self.mask is not None:
null_ptr = get_ctype_ptr(self.mask.mem)
kwargs = {
'count': len(self),
'nulls': null_ptr,
'bdevmem': True
}
data = string._numeric_to_str_typecast_functions[
np.dtype(dev_array.dtype)
](dev_ptr, **kwargs)
else:
data = []
return string.StringColumn(data=data)
elif np.issubdtype(dtype, np.datetime64):
return self.astype('int64').view(
datetime.DatetimeColumn,
dtype=dtype,
data=self.data.astype(dtype)
)
else:
col = self.replace(data=self.data.astype(dtype),
dtype=np.dtype(dtype))
return col
def sort_by_values(self, ascending=True, na_position="last"):
sort_inds = get_sorted_inds(self, ascending, na_position)
col_keys = cpp_copying.apply_gather_column(self, sort_inds.data.mem)
col_inds = self.replace(data=sort_inds.data,
mask=sort_inds.mask,
dtype=sort_inds.data.dtype)
return col_keys, col_inds
def to_pandas(self, index=None):
if self.null_count > 0 and self.dtype == np.bool:
# Boolean series in Pandas that contains None/NaN is of dtype
# `np.object`, which is not natively supported in GDF.
ret = self.astype(np.int8).to_array(fillna=-1)
ret = pd.Series(ret, index=index)
ret = ret.where(ret >= 0, other=None)
ret.replace(to_replace=1, value=True, inplace=True)
ret.replace(to_replace=0, value=False, inplace=True)
return ret
else:
return pd.Series(self.to_array(fillna='pandas'), index=index)
def to_arrow(self):
mask = None
if self.has_null_mask:
mask = pa.py_buffer(self.nullmask.mem.copy_to_host())
data = pa.py_buffer(self.data.mem.copy_to_host())
pa_dtype = np_to_pa_dtype(self.dtype)
out = pa.Array.from_buffers(
type=pa_dtype,
length=len(self),
buffers=[
mask,
data
],
null_count=self.null_count
)
if self.dtype == np.bool:
return out.cast(pa.bool_())
else:
return out
def _unique_segments(self):
""" Common code for unique, unique_count and value_counts"""
# make dense column
densecol = self.replace(data=self.to_dense_buffer(), mask=None)
# sort the column
sortcol, _ = densecol.sort_by_values(ascending=True)
# find segments
sortedvals = sortcol.to_gpu_array()
segs, begins = cudautils.find_segments(sortedvals)
return segs, sortedvals
def unique(self, method='sort'):
# method variable will indicate what algorithm to use to
# calculate unique, not used right now
if method != 'sort':
msg = 'non sort based unique() not implemented yet'
raise NotImplementedError(msg)
segs, sortedvals = self._unique_segments()
# gather result
out_col = cpp_copying.apply_gather_array(sortedvals, segs)
return out_col
def unique_count(self, method='sort', dropna=True):
if method != 'sort':
msg = 'non sort based unique_count() not implemented yet'
raise NotImplementedError(msg)
segs, _ = self._unique_segments()
if dropna is False and self.null_count > 0:
return len(segs)+1
return len(segs)
def value_counts(self, method='sort'):
if method != 'sort':
msg = 'non sort based value_count() not implemented yet'
raise NotImplementedError(msg)
segs, sortedvals = self._unique_segments()
# Return both values and their counts
out_vals = cpp_copying.apply_gather_array(sortedvals, segs)
out2 = cudautils.value_count(segs, len(sortedvals))
out_counts = NumericalColumn(data=Buffer(out2), dtype=np.intp)
return out_vals, out_counts
def all(self):
return bool(self.min(dtype=np.bool_))
def any(self):
if self.valid_count == 0:
return False
return bool(self.max(dtype=np.bool_))
def min(self, dtype=None):
return cpp_reduce.apply_reduce('min', self, dtype=dtype)
def max(self, dtype=None):
return cpp_reduce.apply_reduce('max', self, dtype=dtype)
def sum(self, dtype=None):
return cpp_reduce.apply_reduce('sum', self, dtype=dtype)
def product(self, dtype=None):
return cpp_reduce.apply_reduce('product', self, dtype=dtype)
def mean(self, dtype=np.float64):
return np.float64(self.sum(dtype=dtype)) / self.valid_count
def mean_var(self, ddof=1, dtype=np.float64):
mu = self.mean(dtype=dtype)
n = self.valid_count
asum = np.float64(self.sum_of_squares(dtype=dtype))
div = n - ddof
var = asum / div - (mu ** 2) * n / div
return mu, var
def sum_of_squares(self, dtype=None):
return cpp_reduce.apply_reduce('sum_of_squares', self, dtype=dtype)
def round(self, decimals=0):
mask = None
if self.has_null_mask:
mask = self.nullmask
rounded = cudautils.apply_round(self.data.mem, decimals)
return NumericalColumn(data=Buffer(rounded), mask=mask,
dtype=self.dtype)
def applymap(self, udf, out_dtype=None):
"""Apply a elemenwise function to transform the values in the Column.
Parameters
----------
udf : function
Wrapped by numba jit for call on the GPU as a device function.
out_dtype : numpy.dtype, optional
The dtype for use in the output.
By default, use the same dtype as *self.dtype*.
Returns
-------
result : Column
The mask is preserved.
"""
if out_dtype is None:
out_dtype = self.dtype
out = columnops.column_applymap(udf=udf, column=self,
out_dtype=out_dtype)
return self.replace(data=out, dtype=out_dtype)
def default_na_value(self):
"""Returns the default NA value for this column
"""
dkind = self.dtype.kind
if dkind == 'f':
return self.dtype.type(np.nan)
elif dkind in 'iu':
return -1
elif dkind == 'b':
return False
else:
raise TypeError(
"numeric column of {} has no NaN value".format(self.dtype))
def find_and_replace(self, to_replace, value):
"""
Return col with *to_replace* replaced with *value*.
"""
to_replace_col = columnops.as_column(to_replace)
value_col = columnops.as_column(value)
replaced = self.copy()
to_replace_col, value_col, replaced = numeric_normalize_types(
to_replace_col, value_col, replaced)
cpp_replace.replace(replaced, to_replace_col, value_col)
return replaced
def fillna(self, fill_value, inplace=False):
"""
Fill null values with *fill_value*
"""
if np.isscalar(fill_value):
# cast safely to the same dtype as self
fill_value_casted = self.dtype.type(fill_value)
if not np.isnan(fill_value) and (fill_value_casted != fill_value):
raise TypeError(
"Cannot safely cast non-equivalent {} to {}".format(
type(fill_value).__name__, self.dtype.name
)
)
fill_value = fill_value_casted
else:
fill_value = columnops.as_column(fill_value, nan_as_null=False)
# cast safely to the same dtype as self
if | is_integer_dtype(self.dtype) | pandas.api.types.is_integer_dtype |
# Just import the numpy missing values ## TODO SEE APPENDIX
from numpy import NaN, NAN, nan
print(NaN == True)
print(NaN == False)
print(NaN == 0)
print(NaN == '')
print(NaN == NaN)
print(NaN == nan)
print(NaN == NAN)
print(nan == NAN)
import pandas as pd
print(pd.isnull(NaN))
print(pd.isnull(nan))
print(pd.isnull(NAN))
print(pd.notnull(NaN))
print(pd.notnull(42))
print( | pd.notnull('missing') | pandas.notnull |
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from csv import reader
class Curve(object):
def __init__(self, numbers, lo_bd, up_bd):
self.numbers = numbers
self.up_bd = up_bd
self.lo_bd = lo_bd
self.steps = (up_bd-lo_bd) // numbers
self.filename_all = './Output_Curve'
self.seg_initial()
self.curve_initial()
self.output_initial_curve()
def seg_initial(self):
segments = []
for i in range(self.lo_bd, self.up_bd + self.steps, self.steps):
curr_step = i // self.steps
if i == self.lo_bd:
value = 50
self.intial_slope_set = value
else:
value = value - 0.02*self.steps #/10
#value = 50 - curr_step *0.4
#value = (100 - 2*i // self.steps)
segments.append([i, value])
self.segments = segments
def seg_update(self, point_1, point_2):
point_1_x = point_1[0]
point_1_y = point_1[1]
point_2_x = point_2[0]
point_2_y = point_2[1]
for i in range(self.numbers + 1):
curr = self.segments[i]
curr_x = curr[0]
curr_y = curr[1]
if curr_x <= point_1_x and curr_y <= point_1_y:
self.segments[i][1] = point_1_y
elif curr_x >= point_2_x and curr_y >= point_2_y:
self.segments[i][1] = point_2_y
self.curve_initial()  # point_X and point_Y need to be updated here
print(self.segments)
def curve_initial(self):
df = | pd.DataFrame(self.segments, columns=['x','y']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import json
import os
import pandas as pd
import sklearn.datasets
def data(dataset="bio_eventrelated_100hz"):
"""Download example datasets.
Download and load available `example datasets <https://github.com/neuropsychology/NeuroKit/tree/master/data#datasets>`_.
Note that an internet connection is necessary.
Parameters
----------
dataset : str
The name of the dataset. The list and description is
available `here <https://neurokit2.readthedocs.io/en/master/datasets.html#>`_.
Returns
-------
DataFrame
The data.
Examples
---------
>>> import neurokit2 as nk
>>>
>>> data = nk.data("bio_eventrelated_100hz")
"""
# TODO: one could further improve this function with selectors like
# 'ecg=True, eda=True, restingstate=True' that would
# find the most appropriate dataset
dataset = dataset.lower()
path = "https://raw.githubusercontent.com/neuropsychology/NeuroKit/master/data/"
# Specific requests
if dataset == "iris":
data = sklearn.datasets.load_iris()
return pd.DataFrame(data.data, columns=data["feature_names"])
if dataset in ["eeg", "eeg.txt"]:
return pd.read_csv(path + "eeg.txt").values[:, 0]
# Add extension
if dataset in ["bio_resting_8min_200hz"]:
dataset += ".json"
# Specific case for json file
if dataset.endswith(".json"):
if "https" not in dataset:
data = pd.read_json(path + dataset, orient="index")
else:
data = pd.read_json(dataset, orient="index")
df = {}
for participant, row in data.iterrows():
for _, data_string in row.items():
data_list = json.loads(data_string)
data_pd = pd.DataFrame(data_list)
df[participant] = data_pd
return df
# General case
file, ext = os.path.splitext(dataset) # pylint: disable=unused-variable
if ext == "":
if dataset not in ["rsp_200hz"]:
df = | pd.read_csv(path + dataset + ".csv") | pandas.read_csv |
'''
This is a Deep Neural Network Model to classify Rock and Mines.
Useful in Naval Mine Detection.
'''
# Importing essential modules
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import shuffle
# Model save path
model_path="saved_model/model.ckpt"
# Preparing and splitting dataset for training and validation
def prep_dataset(dataset):
print("----------DATASET DIMENSION---------")
print("\n")
print(dataset.shape)
print("\n")
print("----------DATASET SAMPLE-----------")
print("\n")
print(dataset.head(5))
print("\n")
x=dataset[dataset.columns[0:60]].values
y=dataset[dataset.columns[60]]
colors=['red','blue']
plt.title("Visualization of Rock(R) and Mine(M) labels in the dataset")
plt.xlabel("Number of detections")
plt.ylabel("Object")
plt.xlim(0,250)
y.value_counts().plot.barh(figsize=(15, 5), grid=True,color=colors)
plt.show()
encoder = LabelEncoder()
encoder.fit(y)
y=encoder.transform(y)
# one-hot encoding the classes
y=pd.get_dummies(y)
# Shuffling the data
x,y=shuffle(x,y,random_state=1)
# Splitting dataset
train_x,test_x,train_y,test_y=train_test_split(x,y,test_size=0.20)
return (train_x,test_x,train_y,test_y)
# Computational Graph
def neural_network_model(data):
hidden_layer_1={'weights':tf.Variable(tf.random_normal([n_cols,hl1_nodes])),'biases':tf.Variable(tf.random_normal([hl1_nodes]))}
hidden_layer_2={'weights':tf.Variable(tf.random_normal([hl1_nodes,hl2_nodes])),'biases':tf.Variable(tf.random_normal([hl2_nodes]))}
hidden_layer_3={'weights':tf.Variable(tf.random_normal([hl2_nodes,hl3_nodes])),'biases':tf.Variable(tf.random_normal([hl3_nodes]))}
hidden_layer_4={'weights':tf.Variable(tf.random_normal([hl3_nodes,hl4_nodes])),'biases':tf.Variable(tf.random_normal([hl4_nodes]))}
output_layer={'weights':tf.Variable(tf.random_normal([hl4_nodes,n_classes])),'biases':tf.Variable(tf.random_normal([n_classes]))}
l1=tf.add(tf.matmul(data,hidden_layer_1['weights']),hidden_layer_1['biases'])
# Activation function
l1=tf.nn.relu(l1)
l2=tf.add(tf.matmul(l1,hidden_layer_2['weights']),hidden_layer_2['biases'])
# Activation function
l2=tf.nn.relu(l2)
l3=tf.add(tf.matmul(l2,hidden_layer_3['weights']),hidden_layer_3['biases'])
# Activation function
l3=tf.nn.relu(l3)
l4=tf.add(tf.matmul(l3,hidden_layer_4['weights']),hidden_layer_4['biases'])
# Activation function
l4=tf.nn.relu(l4)
output=tf.add(tf.matmul(l4,output_layer['weights']),output_layer['biases'])
return output
def train_neural_network(X):
prediction = neural_network_model(X)
# Cost function
cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction,labels=Y))
# Using Adam Optimizer
optimizer=tf.train.AdamOptimizer().minimize(cost)
# Creating a saver object
saver=tf.train.Saver()
# Starting session
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(hm_epochs):
epoch_list.append(epoch)
sess.run(optimizer,feed_dict={X:x_train,Y:y_train})
cost_value=sess.run(cost,feed_dict={X:x_train,Y:y_train})
cost_list.append(cost_value)
correct_prediction=tf.equal(tf.argmax(prediction,1),tf.argmax(Y,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float64))
accuracy_value=sess.run(accuracy,feed_dict={X:x_train,Y:y_train})
train_accuracy_list.append(accuracy_value)
print("Epoch ",epoch," cost ",cost_value," Train Accuracy ",accuracy_value)
correct_prediction=tf.equal(tf.argmax(prediction,1),tf.argmax(Y,1))
test_accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float64))
test_accuracy_value=sess.run(test_accuracy,feed_dict={X:x_test,Y:y_test})
print("Final Test Accuracy = ",test_accuracy_value*100," %")
save_path=saver.save(sess,model_path)
print("\n")
print("MODEL SAVED ALONG WITH WEIGHTS AND BIASES...")
# Visualizing Loss and training Accuracy
def visualize_cost_accuracy_graph():
plt.plot(epoch_list,cost_list,color='blue')
plt.title("Cost Function vs Epoch")
plt.xlabel("Epoch")
plt.ylabel("Cost Function")
plt.show()
plt.plot(epoch_list,train_accuracy_list,color='blue')
plt.title("Training Accuracy vs Epoch")
plt.xlabel("Epoch")
plt.ylabel("Training Accuracy")
plt.show()
# Reading Dataset
sonar_data= | pd.read_csv("sonar.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import os
import tensorflow as tf
import functools
from sklearn.model_selection import train_test_split
####### STUDENTS FILL THIS OUT ######
#Question 3
def reduce_dimension_ndc(df, ndc_df):
'''
df: pandas dataframe, input dataset
ndc_df: pandas dataframe, drug code dataset used for mapping in generic names
return:
df: pandas dataframe, output dataframe with joined generic drug name
'''
df = pd.merge(df, ndc_df, left_on='ndc_code', right_on='NDC_Code', how='inner')
df = df.rename(columns={'Non-proprietary Name':'generic_drug_name'})
return df
#Question 4
def select_first_encounter(df):
'''
df: pandas dataframe, dataframe with all encounters
return:
- first_encounter_df: pandas dataframe, dataframe with only the first encounter for a given patient
'''
df.sort_values(['patient_nbr', 'encounter_id'], inplace=True)
first_encounter_df = df.groupby('patient_nbr').first().reset_index()
return first_encounter_df
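# --- Illustrative sketch (not part of the original exercise) ---
# select_first_encounter() sorts by patient and encounter id and then keeps
# the first row per patient. The toy frame and helper name below are made up
# purely to show that behaviour.
def _first_encounter_demo():
    toy = pd.DataFrame({'patient_nbr': [1, 1, 2],
                        'encounter_id': [20, 10, 30],
                        'ndc_code': ['a', 'b', 'c']})
    # patient 1 keeps encounter 10 ('b'); patient 2 keeps encounter 30 ('c')
    return select_first_encounter(toy)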
#Question 6
def patient_dataset_splitter(df, predictor, patient_key='patient_nbr'):
'''
df: pandas dataframe, input dataset that will be split
patient_key: string, column that is the patient id
return:
- train: pandas dataframe,
- validation: pandas dataframe,
- test: pandas dataframe,
'''
X, y = df.drop(columns=[predictor]), df[predictor]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, stratify=y)
X_valid, X_test, y_valid, y_test = train_test_split(X_test, y_test, test_size=0.5, stratify=y_test)
train = pd.concat([X_train, y_train], axis=1)
validation = | pd.concat([X_valid, y_valid], axis=1) | pandas.concat |
""" Analyze MCMC output - chain length, etc. """
# Built-in libraries
import glob
import os
import pickle
# External libraries
import cartopy
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.ticker import MultipleLocator
#from matplotlib.colors import Normalize
import matplotlib.colors as colors
import numpy as np
import pandas as pd
import pymc
from scipy import stats
from scipy.stats.kde import gaussian_kde
from scipy.stats import norm
from scipy.stats import truncnorm
from scipy.stats import uniform
from scipy.stats import linregress
from scipy.stats import lognorm
#from scipy.optimize import minimize
import xarray as xr
# Local libraries
import class_climate
import class_mbdata
import pygem_input as input
import pygemfxns_gcmbiasadj as gcmbiasadj
import pygemfxns_massbalance as massbalance
import pygemfxns_modelsetup as modelsetup
import run_calibration as calibration
#%%
# Paper figures
option_observation_vs_calibration = 0
option_papermcmc_prior_vs_posterior = 0
option_papermcmc_modelparameter_map_and_postvprior = 0
option_metrics_histogram_all = 0
option_metrics_vs_chainlength = 1
option_correlation_scatter = 0
option_regional_priors = 0
option_glacier_mb_vs_params = 0
option_papermcmc_hh2015_map = 0
# Others
option_glacier_mcmc_plots = 0
option_raw_plotchain = 0
option_convertcal2table = 0
option_plot_era_normalizedchange = 0
# Export option
mcmc_output_netcdf_fp_3chain = input.output_filepath + 'cal_opt2_spc_20190815_3chain/'
mcmc_output_netcdf_fp_all = input.output_filepath + 'cal_opt2_spc_20190806/'
hh2015_output_netcdf_fp_all = input.output_filepath + 'cal_opt3/cal_opt3/'
mcmc_output_figures_fp = input.output_filepath + 'figures/'
regions = [13,14,15]
#regions = [13]
cal_datasets = ['shean']
burn=1000
chainlength = 10000
# Bounds (80% bounds --> 90% above/below given threshold)
low_percentile = 10
high_percentile = 90
variables = ['massbal', 'precfactor', 'tempchange', 'ddfsnow']
vn_title_dict = {'massbal':'Mass Balance',
'precfactor':'$\mathregular{k_{p}}$',
'tempchange':'$\mathregular{T_{bias}}$',
'ddfsnow':'$\mathregular{f_{snow}}$'}
vn_abbreviations_wunits_dict = {
'massbal':'B (m w.e. $\mathregular{a^{-1}}$)',
'precfactor':'$\mathregular{k_{p}}$ (-)',
'tempchange':'$\mathregular{T_{bias}}$ ($\mathregular{^{\circ}C}$)',
'ddfsnow':'$\mathregular{f_{snow}}$ (mm w.e. $\mathregular{d^{-1}}$ $\mathregular{^{\circ}C^{-1}}$)'}
vn_abbreviations_dict = {'massbal':'$\mathregular{B}$',
'precfactor':'$\mathregular{k_{p}}$',
'tempchange':'$\mathregular{T_{bias}}$',
'ddfsnow':'$\mathregular{f_{snow}}$'}
vn_title_wunits_dict = {'massbal':'Mass Balance (m w.e. $\mathregular{a^{-1}}$)',
'dif_masschange':'$\mathregular{B_{obs} - B_{mod}}$\n(m w.e. $\mathregular{a^{-1}}$)',
'precfactor':'$\mathregular{k_{p}}$ (-)',
'tempchange':'$\mathregular{T_{bias}}$ ($\mathregular{^{\circ}C}$)',
'ddfsnow':'$\mathregular{f_{snow}}$ (mm w.e. $\mathregular{d^{-1}}$ $\mathregular{^{\circ}C^{-1}}$)'}
vn_title_noabbreviations_dict = {'massbal':'Mass Balance',
'precfactor':'Precipitation Factor',
'tempchange':'Temperature Bias',
'ddfsnow':'$\mathregular{f_{snow}}$'}
vn_label_dict = {'massbal':'Mass Balance (m w.e. $\mathregular{a^{-1}}$)',
'precfactor':'Precipitation Factor (-)',
'tempchange':'Temperature Bias ($\mathregular{^{\circ}C}$)',
'ddfsnow':'f$_{snow}$ (mm w.e. $\mathregular{d^{-1}}$ $\mathregular{^{\circ}C^{-1}}$)',
'dif_masschange':'Mass Balance (Observation - Model, mwea)'}
vn_label_units_dict = {'massbal':'(m w.e. $\mathregular{a^{-1}}$)',
'precfactor':'(-)',
'tempchange':'($\mathregular{^{\circ}}$C)',
'ddfsnow':'(mm w.e. d$^{-1}$ $^\circ$C$^{-1}$)'}
metric_title_dict = {'Gelman-Rubin':'Gelman-Rubin Statistic',
'MC Error': 'Monte Carlo Error',
'Effective N': 'Effective Sample Size'}
metrics = ['Gelman-Rubin', 'MC Error', 'Effective N']
title_dict = {'Amu_Darya': 'Amu Darya',
'Brahmaputra': 'Brahmaputra',
'Ganges': 'Ganges',
'Ili': 'Ili',
'Indus': 'Indus',
'Inner_Tibetan_Plateau': 'Inner TP',
'Inner_Tibetan_Plateau_extended': 'Inner TP ext',
'Irrawaddy': 'Irrawaddy',
'Mekong': 'Mekong',
'Salween': 'Salween',
'Syr_Darya': 'Syr Darya',
'Tarim': 'Tarim',
'Yangtze': 'Yangtze',
'inner_TP': 'Inner TP',
'Karakoram': 'Karakoram',
'Yigong': 'Yigong',
'Yellow': 'Yellow',
'Bhutan': 'Bhutan',
'Everest': 'Everest',
'West Nepal': 'West Nepal',
'Spiti Lahaul': 'Spiti Lahaul',
'tien_shan': 'Tien Shan',
'Pamir': 'Pamir',
'pamir_alai': 'Pamir Alai',
'Kunlun': 'Kunlun',
'Hindu Kush': 'Hindu Kush',
13: 'Central Asia',
14: 'South Asia West',
15: 'South Asia East',
'all': 'HMA',
'Altun Shan':'Altun Shan',
'Central Himalaya':'C Himalaya',
'Central Tien Shan':'C Tien Shan',
'Dzhungarsky Alatau':'Dzhungarsky Alatau',
'Eastern Himalaya':'E Himalaya',
'Eastern Hindu Kush':'E Hindu Kush',
'Eastern Kunlun Shan':'E Kunlun Shan',
'Eastern Pamir':'E Pamir',
'Eastern Tibetan Mountains':'E Tibetan Mtns',
'Eastern Tien Shan':'E Tien Shan',
'Gangdise Mountains':'Gangdise Mtns',
'Hengduan Shan':'Hengduan Shan',
'Karakoram':'Karakoram',
'Northern/Western Tien Shan':'N/W Tien Shan',
'Nyainqentanglha':'Nyainqentanglha',
'Pamir Alay':'Pamir Alay',
'Qilian Shan':'Qilian Shan',
'Tanggula Shan':'Tanggula Shan',
'Tibetan Interior Mountains':'Tibetan Int Mtns',
'Western Himalaya':'W Himalaya',
'Western Kunlun Shan':'W Kunlun Shan',
'Western Pamir':'W Pamir'
}
#colors = ['#387ea0', '#fcb200', '#d20048']
linestyles = ['-', '--', ':']
# Group dictionaries
watershed_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_watershed.csv'
watershed_csv = pd.read_csv(watershed_dict_fn)
watershed_dict = dict(zip(watershed_csv.RGIId, watershed_csv.watershed))
kaab_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_kaab.csv'
kaab_csv = pd.read_csv(kaab_dict_fn)
kaab_dict = dict(zip(kaab_csv.RGIId, kaab_csv.kaab_name))
himap_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_bolch.csv'
himap_csv = pd.read_csv(himap_dict_fn)
himap_dict = dict(zip(himap_csv.RGIId, himap_csv.bolch_name))
# Shapefiles
rgiO1_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/RGI/rgi60/00_rgi60_regions/00_rgi60_O1Regions.shp'
rgi_glac_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA.shp'
watershed_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/HMA_basins_20181018_4plot.shp'
kaab_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/kaab2015_regions.shp'
bolch_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/himap_regions/boundary_mountain_regions_hma_v3.shp'
srtm_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/SRTM_HMA.tif'
srtm_contour_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/SRTM_HMA_countours_2km_gt3000m_smooth.shp'
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# Note that I'm ignoring clipping and other edge cases here.
result, is_scalar = self.process_value(value)
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.array(np.interp(value, x, y), mask=result.mask, copy=False)
def load_glacierdata_byglacno(glac_no, option_loadhyps_climate=1, option_loadcal_data=1):
""" Load glacier data, climate data, and calibration data for list of glaciers
Parameters
----------
glac_no : list
list of glacier numbers (ex. ['13.0001', 15.00001'])
Returns
-------
main_glac_rgi, main_glac_hyps, main_glac_icethickness, main_glac_width, gcm_temp, gcm_prec, gcm_elev, gcm_lr,
cal_data, dates_table
"""
glac_no_byregion = {}
regions = [int(i.split('.')[0]) for i in glac_no]
regions = list(set(regions))
for region in regions:
glac_no_byregion[region] = []
for i in glac_no:
region = i.split('.')[0]
glac_no_only = i.split('.')[1]
glac_no_byregion[int(region)].append(glac_no_only)
for region in regions:
glac_no_byregion[region] = sorted(glac_no_byregion[region])
# EXCEPTION COULD BE ADDED HERE INSTEAD
# Load data for glaciers
dates_table_nospinup = modelsetup.datesmodelrun(startyear=input.startyear, endyear=input.endyear, spinupyears=0)
dates_table = modelsetup.datesmodelrun(startyear=input.startyear, endyear=input.endyear,
spinupyears=input.spinupyears)
count = 0
for region in regions:
count += 1
# ====== GLACIER data =====
if ((region == 13 and len(glac_no_byregion[region]) == 54429) or
(region == 14 and len(glac_no_byregion[region]) == 27988) or
(region == 15 and len(glac_no_byregion[region]) == 13119) ):
main_glac_rgi_region = modelsetup.selectglaciersrgitable(
rgi_regionsO1=[region], rgi_regionsO2 = 'all', rgi_glac_number='all')
else:
main_glac_rgi_region = modelsetup.selectglaciersrgitable(
rgi_regionsO1=[region], rgi_regionsO2 = 'all', rgi_glac_number=glac_no_byregion[region])
# Glacier hypsometry
main_glac_hyps_region = modelsetup.import_Husstable(
main_glac_rgi_region, input.hyps_filepath,input.hyps_filedict, input.hyps_colsdrop)
if option_loadcal_data == 1:
# ===== CALIBRATION DATA =====
cal_data_region = pd.DataFrame()
for dataset in cal_datasets:
cal_subset = class_mbdata.MBData(name=dataset)
cal_subset_data = cal_subset.retrieve_mb(main_glac_rgi_region, main_glac_hyps_region,
dates_table_nospinup)
cal_data_region = cal_data_region.append(cal_subset_data, ignore_index=True)
cal_data_region = cal_data_region.sort_values(['glacno', 't1_idx'])
cal_data_region.reset_index(drop=True, inplace=True)
# ===== OTHER DATA =====
if option_loadhyps_climate == 1:
# Ice thickness [m], average
main_glac_icethickness_region = modelsetup.import_Husstable(
main_glac_rgi_region, input.thickness_filepath, input.thickness_filedict,
input.thickness_colsdrop)
main_glac_hyps_region[main_glac_icethickness_region == 0] = 0
# Width [km], average
main_glac_width_region = modelsetup.import_Husstable(
main_glac_rgi_region, input.width_filepath, input.width_filedict, input.width_colsdrop)
# ===== CLIMATE DATA =====
gcm = class_climate.GCM(name=input.ref_gcm_name)
# Air temperature [degC], Precipitation [m], Elevation [masl], Lapse rate [K m-1]
gcm_temp_region, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(
gcm.temp_fn, gcm.temp_vn, main_glac_rgi_region, dates_table_nospinup)
if input.option_ablation != 2 or input.ref_gcm_name not in ['ERA5']:
gcm_tempstd_region = np.zeros(gcm_temp_region.shape)
elif input.ref_gcm_name in ['ERA5']:
gcm_tempstd_region, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(
gcm.tempstd_fn, gcm.tempstd_vn, main_glac_rgi_region, dates_table_nospinup)
gcm_prec_region, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(
gcm.prec_fn, gcm.prec_vn, main_glac_rgi_region, dates_table_nospinup)
gcm_elev_region = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, main_glac_rgi_region)
# Lapse rate [K m-1]
gcm_lr_region, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(
gcm.lr_fn, gcm.lr_vn, main_glac_rgi_region, dates_table_nospinup)
# ===== APPEND DATASETS =====
if count == 1:
main_glac_rgi = main_glac_rgi_region
if option_loadcal_data == 1:
cal_data = cal_data_region
if option_loadhyps_climate == 1:
main_glac_hyps = main_glac_hyps_region
main_glac_icethickness = main_glac_icethickness_region
main_glac_width = main_glac_width_region
gcm_temp = gcm_temp_region
gcm_tempstd = gcm_tempstd_region
gcm_prec = gcm_prec_region
gcm_elev = gcm_elev_region
gcm_lr = gcm_lr_region
else:
main_glac_rgi = main_glac_rgi.append(main_glac_rgi_region)
if option_loadcal_data == 1:
cal_data = cal_data.append(cal_data_region)
if option_loadhyps_climate == 1:
# If more columns in region, then need to expand existing dataset
if main_glac_hyps_region.shape[1] > main_glac_hyps.shape[1]:
all_col = list(main_glac_hyps.columns.values)
reg_col = list(main_glac_hyps_region.columns.values)
new_cols = [item for item in reg_col if item not in all_col]
for new_col in new_cols:
main_glac_hyps[new_col] = 0
main_glac_icethickness[new_col] = 0
main_glac_width[new_col] = 0
elif main_glac_hyps_region.shape[1] < main_glac_hyps.shape[1]:
all_col = list(main_glac_hyps.columns.values)
reg_col = list(main_glac_hyps_region.columns.values)
new_cols = [item for item in all_col if item not in reg_col]
for new_col in new_cols:
main_glac_hyps_region[new_col] = 0
main_glac_icethickness_region[new_col] = 0
main_glac_width_region[new_col] = 0
main_glac_hyps = main_glac_hyps.append(main_glac_hyps_region)
main_glac_icethickness = main_glac_icethickness.append(main_glac_icethickness_region)
main_glac_width = main_glac_width.append(main_glac_width_region)
gcm_temp = np.vstack([gcm_temp, gcm_temp_region])
gcm_tempstd = np.vstack([gcm_tempstd, gcm_tempstd_region])
gcm_prec = np.vstack([gcm_prec, gcm_prec_region])
gcm_elev = np.concatenate([gcm_elev, gcm_elev_region])
gcm_lr = np.vstack([gcm_lr, gcm_lr_region])
# reset index
main_glac_rgi.reset_index(inplace=True, drop=True)
if option_loadcal_data == 1:
cal_data.reset_index(inplace=True, drop=True)
if option_loadhyps_climate == 1:
main_glac_hyps.reset_index(inplace=True, drop=True)
main_glac_icethickness.reset_index(inplace=True, drop=True)
main_glac_width.reset_index(inplace=True, drop=True)
if option_loadhyps_climate == 0 and option_loadcal_data == 0:
return main_glac_rgi
if option_loadhyps_climate == 0 and option_loadcal_data == 1:
return main_glac_rgi, cal_data
else:
return (main_glac_rgi, main_glac_hyps, main_glac_icethickness, main_glac_width,
gcm_temp, gcm_tempstd, gcm_prec, gcm_elev, gcm_lr,
cal_data, dates_table)
def select_groups(grouping, main_glac_rgi_all):
"""
Select groups based on grouping
"""
if grouping == 'rgi_region':
groups = main_glac_rgi_all.O1Region.unique().tolist()
group_cn = 'O1Region'
elif grouping == 'watershed':
groups = main_glac_rgi_all.watershed.unique().tolist()
group_cn = 'watershed'
elif grouping == 'kaab':
groups = main_glac_rgi_all.kaab.unique().tolist()
group_cn = 'kaab'
groups = [x for x in groups if str(x) != 'nan']
elif grouping == 'degree':
groups = main_glac_rgi_all.deg_id.unique().tolist()
group_cn = 'deg_id'
elif grouping == 'mascon':
groups = main_glac_rgi_all.mascon_idx.unique().tolist()
groups = [int(x) for x in groups]
group_cn = 'mascon_idx'
else:
groups = ['all']
group_cn = 'all_group'
try:
groups = sorted(groups, key=str.lower)
except:
groups = sorted(groups)
return groups, group_cn
def partition_groups(grouping, vn, main_glac_rgi_all, regional_calc='mean'):
"""Partition variable by each group
Parameters
----------
grouping : str
name of grouping to use
vn : str
variable name
main_glac_rgi_all : pd.DataFrame
glacier table
regional_calc : str
calculation used to compute region value (mean, sum, area_weighted_mean)
Output
------
groups : list
list of group names
ds_group : list of lists
list of [group, value] pairs for the given variable
"""
# Groups
groups, group_cn = select_groups(grouping, main_glac_rgi_all)
ds_group = [[] for group in groups]
# Cycle through groups
for ngroup, group in enumerate(groups):
# Select subset of data
main_glac_rgi = main_glac_rgi_all.loc[main_glac_rgi_all[group_cn] == group]
vn_glac = main_glac_rgi_all[vn].values[main_glac_rgi.index.values.tolist()]
if 'area_weighted' in regional_calc:
vn_glac_area = main_glac_rgi_all['Area'].values[main_glac_rgi.index.values.tolist()]
# Regional calc
if regional_calc == 'mean':
vn_reg = vn_glac.mean(axis=0)
elif regional_calc == 'sum':
vn_reg = vn_glac.sum(axis=0)
elif regional_calc == 'area_weighted_mean':
vn_reg = (vn_glac * vn_glac_area).sum() / vn_glac_area.sum()
# Record data for each group
ds_group[ngroup] = [group, vn_reg]
return groups, ds_group
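# --- Illustrative sketch (not part of the original script) ---
# The area-weighted regional mean used above is sum(v_i * A_i) / sum(A_i)
# within each group. The toy frame below mimics the expected columns
# ('O1Region', 'Area', 'tempchange'); all values and the helper name are
# made up for illustration only.
def _partition_groups_demo():
    toy = pd.DataFrame({'O1Region': [13, 13, 14],
                        'Area': [10.0, 30.0, 5.0],
                        'tempchange': [1.0, 2.0, 0.5]})
    groups, ds_group = partition_groups('rgi_region', 'tempchange', toy,
                                        regional_calc='area_weighted_mean')
    # Region 13: (1.0*10 + 2.0*30) / 40 = 1.75; region 14: 0.5
    return groups, ds_group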
def effective_n(ds, vn, iters, burn, chain=0):
"""
Compute the effective sample size of a trace.
Takes the trace and computes the effective sample size
according to its detrended autocorrelation.
Parameters
----------
ds : xarray.Dataset
dataset containing mcmc traces
vn : str
Parameter variable name
iters : int
number of mcmc iterations to test
burn : int
number of initial iterations to throw away
Returns
-------
effective_n : int
effective sample size
"""
# Effective sample size
x = ds['mp_value'].sel(chain=chain, mp=vn).values[burn:iters]
# detrend trace using mean to be consistent with statistics
# definition of autocorrelation
x = (x - x.mean())
# compute autocorrelation (note: only need second half since
# they are symmetric)
rho = np.correlate(x, x, mode='full')
rho = rho[len(rho)//2:]
# normalize the autocorrelation values
# note: rho[0] is the variance * n_samples, so this is consistent
# with the statistics definition of autocorrelation on wikipedia
# (dividing by n_samples gives you the expected value).
rho_norm = rho / rho[0]
# Iterate until sum of consecutive estimates of autocorrelation is
# negative to avoid issues with the sum being -0.5, which returns an
# effective_n of infinity
negative_autocorr = False
t = 1
n = len(x)
while not negative_autocorr and (t < n):
if not t % 2:
negative_autocorr = sum(rho_norm[t-1:t+1]) < 0
t += 1
return int(n / (1 + 2*rho_norm[1:t].sum()))
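# --- Illustrative sketch (not part of the original script) ---
# effective_n() above needs an xarray Dataset, so as a hedged sanity check the
# same autocorrelation-based estimator is repeated here on a synthetic AR(1)
# trace, whose theoretical effective sample size is n * (1 - phi) / (1 + phi).
# The helper name and the chosen phi/seed are assumptions for illustration.
def _effective_n_demo(n=10000, phi=0.7, seed=0):
    rng = np.random.RandomState(seed)
    x = np.zeros(n)
    for i in range(1, n):
        x[i] = phi * x[i - 1] + rng.normal()
    x = x - x.mean()
    rho = np.correlate(x, x, mode='full')[n - 1:]
    rho_norm = rho / rho[0]
    negative_autocorr = False
    t = 1
    while not negative_autocorr and (t < n):
        if not t % 2:
            negative_autocorr = sum(rho_norm[t - 1:t + 1]) < 0
        t += 1
    ess = int(n / (1 + 2 * rho_norm[1:t].sum()))
    return ess, int(n * (1 - phi) / (1 + phi))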
def gelman_rubin(ds, vn, iters=1000, burn=0, debug=False):
"""
Calculate Gelman-Rubin statistic.
Parameters
----------
ds : xarray.Dataset
Dataset containing MCMC iterations for a single glacier with 3 chains
vn : str
Parameter variable name
iters : int
number of MCMC iterations to test for the gelman-rubin statistic
burn : int
number of MCMC iterations to ignore at start of chain before performing test
Returns
-------
gelman_rubin_stat : float
gelman_rubin statistic (R_hat)
"""
if debug:
if len(ds.chain) != 3:
raise ValueError('Given dataset has an incorrect number of chains')
if iters > len(ds.mp_value):
raise ValueError('iters value too high')
if (burn >= iters):
raise ValueError('Given iters and burn in are incompatible')
# unpack iterations from dataset
for n_chain in ds.chain.values:
if n_chain == 0:
chain = ds['mp_value'].sel(chain=n_chain, mp=vn).values[burn:iters]
chain = np.reshape(chain, (1,len(chain)))
else:
chain2add = ds['mp_value'].sel(chain=n_chain, mp=vn).values[burn:iters]
chain2add = np.reshape(chain2add, (1,chain.shape[1]))
chain = np.append(chain, chain2add, axis=0)
#calculate statistics with pymc in-built function
return pymc.gelman_rubin(chain)
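# --- Illustrative sketch (not part of the original script) ---
# gelman_rubin() above delegates to pymc.gelman_rubin; the helper below shows
# the underlying R-hat calculation (between- vs. within-chain variance) for a
# (n_chains, n_samples) array, assuming the standard Gelman & Rubin (1992)
# formulation. The helper name is hypothetical.
def _rhat_demo(chains):
    n = chains.shape[1]                       # samples per chain
    chain_means = chains.mean(axis=1)
    B = n * chain_means.var(ddof=1)           # between-chain variance
    W = chains.var(axis=1, ddof=1).mean()     # mean within-chain variance
    var_hat = (n - 1) / n * W + B / n         # pooled variance estimate
    return np.sqrt(var_hat / W)               # values near 1 indicate convergence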
def mc_error(ds, vn, iters=None, burn=0, chain=None, method='overlapping'):
""" Calculates Monte Carlo standard error using the batch mean method for each chain
For multiple chains, it outputs a list of the values
Parameters
----------
ds : xarray.Dataset
Dataset containing MCMC iterations for a single glacier with 3 chains
vn : str
Parameter variable name
iters : int
Number of iterations to use
Returns
-------
chains_mcse : list of floats
list of the Monte Carlo standard error for each chain
chains_ci : list of floats
list of the +/- confidence interval value for each chain
"""
if iters is None:
iters = len(ds.mp_value)
trace = [ds['mp_value'].sel(chain=n_chain, mp=vn).values[burn:iters] for n_chain in ds.chain.values]
mcse_output = [mcse_batchmeans(i, method=method) for i in trace]
chains_mcse = [i[0] for i in mcse_output]
chains_ci = [i[1] for i in mcse_output]
return chains_mcse, chains_ci
def mcse_batchmeans(trace, t_quantile=0.95, method='overlapping'):
""" Calculates Monte Carlo standard error for a given trace using batch means method from Flegal and Jones (2010)
Splitting uses all values in trace, so batches can have different lengths (maximum difference is 1)
Parameters
----------
trace: np.ndarray
Array representing MCMC chain
t_quantile : float
student t-test quantile (default = 0.95)
method : str
method used to compute batch means (default = 'overlapping', other option is 'nonoverlapping')
Returns
-------
trace_mcse : float
Monte Carlo standard error for a given trace
trace_ci : float
+/- value for confidence interval
"""
# Number of batches (n**0.5 based on Flegal and Jones (2010))
batches = int(len(trace)**0.5)
batch_size = int(len(trace)/batches)
# Split into batches
if method == 'overlapping':
trace_batches = [trace[i:i+batch_size] for i in range(0,int(len(trace)-batches+1))]
elif method == 'nonoverlapping':
trace_batches = split_array(trace,batches)
# Sample batch means
trace_batches_means = [np.mean(i) for i in trace_batches]
# Batch mean estimator
trace_batches_mean = np.mean(trace_batches_means)
# Sample variance
if method == 'overlapping':
trace_samplevariance = (
(len(trace)/batches) / len(trace) * np.sum([(i - trace_batches_mean)**2 for i in trace_batches_means]))
elif method == 'nonoverlapping':
trace_samplevariance = (
(len(trace)/batches) / (batches-1) * np.sum([(i - trace_batches_mean)**2 for i in trace_batches_means]))
# Monte Carlo standard error
trace_mcse = trace_samplevariance**0.5 / len(trace)**0.5
# Confidence interval value (actual confidence interval is batch_mean_estimator +/- trace_ci)
trace_ci = stats.t.ppf(t_quantile, (len(trace)**0.5)-1) * trace_mcse
return trace_mcse, trace_ci
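# --- Illustrative sketch (not part of the original script) ---
# For independent draws the Monte Carlo standard error should be close to
# sigma / sqrt(n); the hypothetical helper below checks mcse_batchmeans()
# against that reference on a white-noise trace (arbitrary seed and length).
def _mcse_demo(n=10000, seed=0):
    trace = np.random.RandomState(seed).normal(size=n)
    mcse, ci = mcse_batchmeans(trace, method='overlapping')
    return mcse, ci, 1.0 / np.sqrt(n)         # mcse should be near 0.01 here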
def split_array(arr, n=1):
"""
Split array of glaciers into batches for batch means.
Parameters
----------
arr : np.array
array that you want to split into separate batches
n : int
Number of batches to split the array into.
Returns
-------
arr_batches : np.array
list of n arrays that have sequential values in each list
"""
# If there are more batches than array values, use one value per batch
if n > len(arr):
n = len(arr)
# number of values per list rounded down/up
n_perlist_low = int(len(arr)/n)
n_perlist_high = int(np.ceil(len(arr)/n))
# number of lists with higher number per list (uses all values of array, but chains not necessarily equal length)
n_lists_high = len(arr)%n
# loop through and select values
count = 0
arr_batches = []
for x in np.arange(n):
count += 1
if count <= n_lists_high:
arr_subset = arr[0:n_perlist_high]
arr_batches.append(arr_subset)
arr = arr[n_perlist_high:]
else:
arr_subset = arr[0:n_perlist_low]
arr_batches.append(arr_subset)
arr = arr[n_perlist_low:]
return arr_batches
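# --- Illustrative sketch (not part of the original script) ---
# Example of the batching behaviour documented above: ten values split into
# three batches gives lengths [4, 3, 3], since earlier batches absorb the
# remainder. The helper name is hypothetical.
def _split_array_demo():
    batches = split_array(np.arange(10), n=3)
    return [len(b) for b in batches]          # expected: [4, 3, 3]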
def pickle_data(fn, data):
"""Pickle data
Parameters
----------
fn : str
filename including filepath
data : list, etc.
data to be pickled
Returns
-------
.pkl file
saves .pkl file of the data
"""
with open(fn, 'wb') as f:
pickle.dump(data, f)
def plot_hist(df, cn, bins, xlabel=None, ylabel=None, fig_fn='hist.png', fig_fp=input.output_filepath):
"""
Plot histogram for any bin size
"""
data = df[cn].values
hist, bin_edges = np.histogram(data,bins) # make the histogram
fig,ax = plt.subplots()
# Plot the histogram heights against integers on the x axis
ax.bar(range(len(hist)),hist,width=1, edgecolor='k')
# Set the ticks to the middle of the bars
ax.set_xticks([0.5+i for i,j in enumerate(hist)])
# Set the xticklabels to a string that tells us what the bin edges were
ax.set_xticklabels(['{} - {}'.format(bins[i],bins[i+1]) for i,j in enumerate(hist)], rotation=45, ha='right')
ax.set_xlabel(xlabel, fontsize=16)
ax.set_ylabel(ylabel, fontsize=16)
# Save figure
fig.set_size_inches(6,4)
fig.savefig(fig_fp + fig_fn, bbox_inches='tight', dpi=300)
def plot_mb_vs_parameters(tempchange_iters, precfactor_iters, ddfsnow_iters, modelparameters, glacier_rgi_table,
glacier_area_t0, icethickness_t0, width_t0, elev_bins, glacier_gcm_temp, glacier_gcm_tempstd,
glacier_gcm_prec, glacier_gcm_elev, glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table,
observed_massbal, observed_error, tempchange_boundhigh, tempchange_boundlow,
tempchange_opt_init=None, mb_max_acc=None, mb_max_loss=None, option_areaconstant=0,
option_plotsteps=1, fig_fp=input.output_filepath):
"""
Plot the mass balance [mwea] versus all model parameters to see how the parameters affect mass balance
"""
#%%
mb_vs_parameters = pd.DataFrame(np.zeros((len(ddfsnow_iters) * len(precfactor_iters) * len(tempchange_iters), 4)),
columns=['precfactor', 'tempbias', 'ddfsnow', 'massbal'])
count=0
for n, precfactor in enumerate(precfactor_iters):
modelparameters[2] = precfactor
# run mass balance calculation
# if modelparameters[2] == 1:
# option_areaconstant = 0
# else:
# option_areaconstant = 1
option_areaconstant = 0
print('PF:', precfactor, 'option_areaconstant:', option_areaconstant)
for n, tempchange in enumerate(tempchange_iters):
modelparameters[7] = tempchange
for c, ddfsnow in enumerate(ddfsnow_iters):
modelparameters[4] = ddfsnow
modelparameters[5] = modelparameters[4] / input.ddfsnow_iceratio
(glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,
glac_bin_frontalablation, glac_bin_massbalclim, glac_bin_massbalclim_annual, glac_bin_area_annual,
glac_bin_icethickness_annual, glac_bin_width_annual, glac_bin_surfacetype_annual,
glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline, glac_wide_snowpack,
glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual, offglac_wide_prec,
offglac_wide_refreeze, offglac_wide_melt, offglac_wide_snowpack, offglac_wide_runoff) = (
massbalance.runmassbalance(modelparameters[0:8], glacier_rgi_table, glacier_area_t0,
icethickness_t0, width_t0, elev_bins, glacier_gcm_temp,
glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table,
option_areaconstant=option_areaconstant))
# Compute glacier volume change for every time step and use this to compute mass balance
# this will work for any indexing
glac_wide_area = glac_wide_area_annual[:-1].repeat(12)
# Mass change [km3 mwe]
# mb [mwea] * (1 km / 1000 m) * area [km2]
glac_wide_masschange = glac_wide_massbaltotal / 1000 * glac_wide_area
# Mean annual mass balance [mwea]
mb_mwea = (glac_wide_masschange.sum() / glac_wide_area[0] * 1000 /
(glac_wide_masschange.shape[0] / 12))
mb_vs_parameters.loc[count,:] = np.array([precfactor, tempchange, ddfsnow, mb_mwea])
count += 1
# print(modelparameters[2], modelparameters[7], modelparameters[4], np.round(mb_mwea,3))
# Subset data for each precfactor
linestyles = ['-', '--', ':', '-.']
linecolors = ['b', 'k', 'r']
prec_linedict = {precfactor : linestyles[n] for n, precfactor in enumerate(precfactor_iters)}
ddfsnow_colordict = {ddfsnow : linecolors[n] for n, ddfsnow in enumerate(ddfsnow_iters)}
#%%
# Plot the mass balance versus model parameters
fig, ax = plt.subplots(figsize=(6,4))
for precfactor in precfactor_iters:
modelparameters[2] = precfactor
mb_vs_parameters_subset = mb_vs_parameters.loc[mb_vs_parameters.loc[:,'precfactor'] == precfactor]
for ddfsnow in ddfsnow_iters:
mb_vs_parameters_plot = mb_vs_parameters_subset.loc[mb_vs_parameters_subset.loc[:,'ddfsnow'] == ddfsnow]
ax.plot(mb_vs_parameters_plot.loc[:,'tempbias'], mb_vs_parameters_plot.loc[:,'massbal'],
linestyle=prec_linedict[precfactor], color=ddfsnow_colordict[ddfsnow])
# Add horizontal line of mass balance observations
ax.axhline(observed_massbal, color='gray', linewidth=2, zorder=2)
observed_mb_min = observed_massbal - 3*observed_error
observed_mb_max = observed_massbal + 3*observed_error
fillcolor = 'lightgrey'
ax.fill_between([np.min(tempchange_iters), np.max(tempchange_iters)], observed_mb_min, observed_mb_max,
facecolor=fillcolor, label=None, zorder=1)
if option_plotsteps == 1:
# marker='*'
# marker_size = 20
marker='D'
marker_size = 10
markeredge_color = 'black'
marker_color = 'black'
txt_xadj = -0.1
txt_yadj = -0.06
xytxt_list = [(tempchange_boundhigh, mb_max_loss, '1'),
(tempchange_boundlow, mb_max_loss + 0.9*(mb_max_acc - mb_max_loss), '3'),
(tempchange_opt_init, observed_massbal, '4'),
(tempchange_opt_init + 3*tempchange_sigma, observed_mb_min, '6'),
(tempchange_opt_init - 3*tempchange_sigma, observed_mb_max, '6'),
(tempchange_opt_init - tempchange_sigma, observed_mb_max, '7'),
(tempchange_opt_init + tempchange_sigma, observed_mb_min, '7'),
(tempchange_mu, observed_massbal, '9')]
for xytxt in xytxt_list:
x,y,txt = xytxt[0], xytxt[1], xytxt[2]
ax.plot([x], [y], marker=marker, markersize=marker_size,
markeredgecolor=markeredge_color, color=marker_color, zorder=3)
ax.text(x+txt_xadj, y+txt_yadj, txt, zorder=4, color='white', fontsize=10)
ax.set_xlim(np.min(tempchange_iters), np.max(tempchange_iters))
if observed_massbal - 3*observed_error < mb_max_loss:
ylim_lower = observed_massbal - 3*observed_error
else:
ylim_lower = np.floor(mb_max_loss)
ax.set_ylim(int(ylim_lower),np.ceil(mb_vs_parameters['massbal'].max()))
print('\nMANUALLY SET YLIM\n')
ax.set_ylim(-2,2)
# Labels
# ax.set_title('Mass balance versus Parameters ' + glacier_str)
ax.set_xlabel('Temperature Bias ($\mathregular{^{\circ}}$C)', fontsize=12)
ax.set_ylabel('Mass Balance (m w.e. $\mathregular{a^{-1}}$)', fontsize=12)
# Add legend
x_min = mb_vs_parameters.loc[:,'tempbias'].min()
y_min = mb_vs_parameters.loc[:,'massbal'].min()
leg_lines = []
leg_names = []
for precfactor in precfactor_iters:
line = Line2D([x_min,y_min],[x_min,y_min], linestyle=prec_linedict[precfactor], color='gray')
leg_lines.append(line)
leg_names.append(str(precfactor))
leg_pf = ax.legend(leg_lines, leg_names, loc='upper right', title='$\mathit{k_{p}}$', frameon=False,
labelspacing=0.25, bbox_to_anchor=(0.99, 0.99))
leg_lines = []
leg_names = []
for ddfsnow in ddfsnow_iters:
line = Line2D([x_min,y_min],[x_min,y_min], linestyle='-', color=ddfsnow_colordict[ddfsnow])
leg_lines.append(line)
leg_names.append(str(np.round(ddfsnow*10**3,1)))
leg_ddf = ax.legend(leg_lines, leg_names, loc='upper left', title='$\mathit{f_{snow}}$', frameon=False,
labelspacing=0.25, bbox_to_anchor=(0.63, 0.99))
ax.add_artist(leg_pf)
# for precfactor in reversed(precfactor_iters):
# line = Line2D([x_min,y_min],[x_min,y_min], linestyle=prec_linedict[precfactor], color='gray')
# leg_lines.append(line)
# leg_names.append('$\mathregular{k_{p}}$ ' + str(precfactor))
# for ddfsnow in ddfsnow_iters:
# line = Line2D([x_min,y_min],[x_min,y_min], linestyle='-', color=ddfsnow_colordict[ddfsnow])
# leg_lines.append(line)
# leg_names.append('$\mathregular{f_{snow}}$ ' + str(np.round(ddfsnow*10**3,1)))
fig.savefig(fig_fp + glacier_str + '_mb_vs_parameters_areachg.eps',
bbox_inches='tight', dpi=300)
#%%
# ===== PLOT OPTIONS ==================================================================================================
def grid_values(vn, grouping, modelparams_all, midpt_value=np.nan):
""" XYZ of grid values """
# Group data
if vn in ['precfactor', 'tempchange', 'ddfsnow']:
groups, ds_vn_deg = partition_groups(grouping, vn, modelparams_all, regional_calc='area_weighted_mean')
groups, ds_group_area = partition_groups(grouping, 'Area', modelparams_all, regional_calc='sum')
elif vn == 'dif_masschange':
# Group calculations
groups, ds_group_cal = partition_groups(grouping, 'mb_cal_Gta', modelparams_all, regional_calc='sum')
groups, ds_group_era = partition_groups(grouping, 'mb_era_Gta', modelparams_all, regional_calc='sum')
groups, ds_group_area = partition_groups(grouping, 'Area', modelparams_all, regional_calc='sum')
# Group difference [Gt/yr]
dif_cal_era_Gta = (np.array([x[1] for x in ds_group_cal]) - np.array([x[1] for x in ds_group_era])).tolist()
# Group difference [mwea]
area = [x[1] for x in ds_group_area]
ds_group_dif_cal_era_mwea = [[x[0], dif_cal_era_Gta[n] / area[n] * 1000] for n, x in enumerate(ds_group_cal)]
ds_vn_deg = ds_group_dif_cal_era_mwea
z = [ds_vn_deg[ds_idx][1] for ds_idx in range(len(ds_vn_deg))]
x = np.array([x[0] for x in deg_groups])
y = np.array([x[1] for x in deg_groups])
lons = np.arange(x.min(), x.max() + 2 * degree_size, degree_size)
lats = np.arange(y.min(), y.max() + 2 * degree_size, degree_size)
x_adj = np.arange(x.min(), x.max() + 1 * degree_size, degree_size) - x.min()
y_adj = np.arange(y.min(), y.max() + 1 * degree_size, degree_size) - y.min()
z_array = np.zeros((len(y_adj), len(x_adj)))
z_array[z_array==0] = np.nan
for i in range(len(z)):
row_idx = int((y[i] - y.min()) / degree_size)
col_idx = int((x[i] - x.min()) / degree_size)
z_array[row_idx, col_idx] = z[i]
return lons, lats, z_array
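# --- Illustrative sketch (not part of the original script) ---
# grid_values() above relies on the module-level deg_groups/degree_size
# globals; the standalone helper below reproduces just the gridding step,
# dropping (lon, lat, value) points into a 2-D array of degree-sized cells.
# It assumes the points fall on the cell lattice; all names are hypothetical.
def _grid_demo(points, values, cell=1.0):
    x = np.array([p[0] for p in points])
    y = np.array([p[1] for p in points])
    z = np.full((int((y.max() - y.min()) / cell) + 1,
                 int((x.max() - x.min()) / cell) + 1), np.nan)
    for (px, py), v in zip(points, values):
        z[int((py - y.min()) / cell), int((px - x.min()) / cell)] = v
    return z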
def plot_spatialmap_mbdif(vns, grouping, modelparams_all, xlabel, ylabel, figure_fp, fig_fn_prefix='',
option_contour_lines=0, option_rgi_outlines=0, option_group_regions=0):
"""Plot spatial map of model parameters"""
#%%
fig = plt.figure()
# Custom subplots
gs = mpl.gridspec.GridSpec(20, 1)
ax1 = plt.subplot(gs[0:11,0], projection=cartopy.crs.PlateCarree())
ax2 = plt.subplot(gs[12:20,0])
# # Third subplot
# gs = mpl.gridspec.GridSpec(20, 20)
# ax1 = plt.subplot(gs[0:11,0:20], projection=cartopy.crs.PlateCarree())
# ax2 = plt.subplot(gs[12:20,0:7])
# ax2 = plt.subplot(gs[12:20,13:20])
cmap = 'RdYlBu_r'
# cmap = plt.cm.get_cmap(cmap, 5)
norm = plt.Normalize(colorbar_dict['dif_masschange'][0], colorbar_dict['dif_masschange'][1])
vn = 'dif_masschange'
lons, lats, z_array = grid_values(vn, grouping, modelparams_all)
ax1.pcolormesh(lons, lats, z_array, cmap=cmap, norm=norm, zorder=2, alpha=0.8)
# Add country borders for reference
# ax1.add_feature(cartopy.feature.BORDERS, facecolor='none', edgecolor='lightgrey', zorder=10)
# ax1.add_feature(cartopy.feature.COASTLINE, facecolor='none', edgecolor='lightgrey', zorder=10)
# Set the extent
ax1.set_extent([east, west, south, north], cartopy.crs.PlateCarree())
# Label title, x, and y axes
ax1.set_xticks(np.arange(east,west+1,xtick), cartopy.crs.PlateCarree())
ax1.set_yticks(np.arange(south,north+1,ytick), cartopy.crs.PlateCarree())
ax1.set_xlabel(xlabel, size=labelsize, labelpad=0)
ax1.set_ylabel(ylabel, size=labelsize)
# Add colorbar
# sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
# sm._A = []
# cbar = plt.colorbar(sm, ax=ax1, fraction=0.04, pad=0.01)
# cbar.set_ticks(list(np.arange(colorbar_dict[vn][0], colorbar_dict[vn][1] + 0.01, 0.1)))
# fig.text(1.01, 0.6, '$\mathregular{B_{mod} - B_{obs}}$ (m w.e. $\mathregular{a^{-1}}$)', va='center',
# rotation='vertical', size=12)
# Add colorbar
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm._A = []
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.92, 0.5, 0.02, 0.35])
cbar = fig.colorbar(sm, cax=cbar_ax)
cbar.set_ticks(list(np.arange(colorbar_dict['dif_masschange'][0], colorbar_dict['dif_masschange'][1] + 0.01, 0.1)))
fig.text(1.04, 0.67, '$\mathit{B_{mod}} - \mathit{B_{obs}}$ (m w.e. $\mathregular{a^{-1}}$)', va='center',
rotation='vertical', size=12)
# Add contour lines and/or rgi outlines
if option_contour_lines == 1:
srtm_contour_shp = cartopy.io.shapereader.Reader(srtm_contour_fn)
srtm_contour_feature = cartopy.feature.ShapelyFeature(srtm_contour_shp.geometries(), cartopy.crs.PlateCarree(),
edgecolor='lightgrey', facecolor='none', linewidth=0.05)
ax1.add_feature(srtm_contour_feature, zorder=9)
if option_rgi_outlines == 1:
rgi_shp = cartopy.io.shapereader.Reader(rgi_glac_shp_fn)
rgi_feature = cartopy.feature.ShapelyFeature(rgi_shp.geometries(), cartopy.crs.PlateCarree(),
edgecolor='black', facecolor='none', linewidth=0.1)
ax1.add_feature(rgi_feature, zorder=9)
if option_group_regions == 1:
rgi_shp = cartopy.io.shapereader.Reader(bolch_shp_fn)
rgi_feature = cartopy.feature.ShapelyFeature(rgi_shp.geometries(), cartopy.crs.PlateCarree(),
edgecolor='lightgrey', facecolor='none', linewidth=1)
ax1.add_feature(rgi_feature, zorder=9)
ax1.text(101., 28.0, 'Hengduan\nShan', zorder=10, size=8, va='center', ha='center')
ax1.text(99.0, 26.5, 'Nyainqentanglha', zorder=10, size=8, va='center', ha='center')
ax1.plot([98,96], [27,29.3], color='k', linewidth=0.25, zorder=10)
ax1.text(93.0, 27.5, 'Eastern Himalaya', zorder=10, size=8, va='center', ha='center')
ax1.text(80.0, 27.3, 'Central Himalaya', zorder=10, size=8, va='center', ha='center')
ax1.text(72.0, 31.7, 'Western Himalaya', zorder=10, size=8, va='center', ha='center')
ax1.text(70.5, 33.7, 'Eastern\nHindu Kush', zorder=10, size=8, va='center', ha='center')
ax1.text(79.0, 39.7, 'Karakoram', zorder=10, size=8, va='center', ha='center')
ax1.plot([76,78], [36,39], color='k', linewidth=0.25, zorder=10)
ax1.text(80.7, 38.0, 'Western\nKunlun Shan', zorder=10, size=8, va='center', ha='center')
ax1.text(86.0, 33.7, 'Tibetan Interior\nMountains', zorder=10, size=8, va='center', ha='center')
ax1.text(73.0, 29.0, 'Gangdise Mountains', zorder=10, size=8, va='center', ha='center')
ax1.plot([77.5,81.5], [29,31.4], color='k', linewidth=0.25, zorder=10)
# Scatter plot
# # Scatterplot: Model vs. Observed Mass balance colored by Area
# cmap = 'RdYlBu_r'
# norm = colors.LogNorm(vmin=0.1, vmax=10)
# a = ax2.scatter(modelparams_all['mb_mwea'], modelparams_all['mb_mean'], c=modelparams_all['Area'],
# cmap=cmap, norm=norm, s=20, linewidth=0.5)
# a.set_facecolor('none')
# ax2.plot([-2.5,2],[-2.5,2], color='k', linewidth=0.5)
# ax2.set_xlim([-2.5,1.75])
# ax2.set_ylim([-2.5,1.75])
# ax2.set_ylabel('$\mathregular{B_{obs}}$ $\mathregular{(m w.e. a^{-1})}$', size=12)
# ax2.set_xlabel('$\mathregular{B_{mod}}$ $\mathregular{(m w.e. a^{-1})}$', size=12)
## # Add colorbar
## sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
## sm._A = []
## cbar = plt.colorbar(sm, ax=ax2, fraction=0.04, pad=0.01)
## fig.text(1.01, 0.5, 'Area ($\mathregular{km^{2}}$)', va='center', rotation='vertical', size=12)
#
# # Add colorbar
# sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
# sm._A = []
# cbar_ax = fig.add_axes([0.92, 0.13, 0.02, 0.29])
# cbar = fig.colorbar(sm, cax=cbar_ax)
## cbar.set_ticks(list(np.arange(colorbar_dict['massbal'][0], colorbar_dict['massbal'][1] + 0.01, 0.5)))
# fig.text(1.04, 0.28, 'Area ($\mathregular{km^{2}}$)', va='center', rotation='vertical', size=12)
# Z-score
ax2.axhline(y=0, xmin=0, xmax=200, color='black', linewidth=0.5, zorder=1)
# ax2.scatter(modelparams_all['Area'], modelparams_all['mb_mwea'], c=modelparams_all['dif_cal_era_mean'],
# cmap=cmap, norm=norm, s=5)
# ax2.set_xlim([0,200])
# ax2.set_ylim([-2.9,1.25])
# ax2.set_ylabel('$\mathregular{B_{obs}}$ $\mathregular{(m w.e. a^{-1})}$', size=12)
# ax2.set_xlabel('Area ($\mathregular{km^{2}}$)', size=12)
#
# # Inset axis over main axis
# ax_inset = plt.axes([.37, 0.16, .51, .14])
# ax_inset.axhline(y=0, xmin=0, xmax=5, color='black', linewidth=0.5)
# ax_inset.scatter(modelparams_all['Area'], modelparams_all['mb_mwea'], c=modelparams_all['dif_cal_era_mean'],
# cmap=cmap, norm=norm, s=3)
# ax_inset.set_xlim([0,5])
#
# # Add colorbar
# sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
# sm._A = []
# fig.subplots_adjust(right=0.9)
# cbar_ax = fig.add_axes([0.92, 0.16, 0.03, 0.67])
# cbar = fig.colorbar(sm, cax=cbar_ax)
# cbar.set_ticks(list(np.arange(colorbar_dict['dif_masschange'][0], colorbar_dict['dif_masschange'][1] + 0.01, 0.1)))
# fig.text(1.04, 0.5, '$\mathregular{B_{mod} - B_{obs}}$ (m w.e. $\mathregular{a^{-1}}$)', va='center',
# rotation='vertical', size=12)
# Scatterplot
cmap = 'RdYlBu'
# cmap = plt.cm.get_cmap(cmap, 5)
# norm = plt.Normalize(colorbar_dict['massbal'][0], colorbar_dict['massbal'][1])
norm = MidpointNormalize(midpoint=0, vmin=colorbar_dict['massbal'][0], vmax=colorbar_dict['massbal'][1])
a = ax2.scatter(modelparams_all['Area'], modelparams_all['zscore'], c=modelparams_all['mb_mwea'],
cmap=cmap, norm=norm, s=20, linewidth=0.5, zorder=2)
a.set_facecolor('none')
ax2.set_xlim([0,200])
ax2.set_ylim([-3.8,2.5])
# ax2.set_ylabel('z-score ($\\frac{B_{mod} - B_{obs}}{B_{std}}$)', size=12)
ax2.set_ylabel('z-score (-)', size=12)
ax2.set_xlabel('Area ($\mathregular{km^{2}}$)', size=12)
# Inset axis over main axis
ax_inset = plt.axes([.37, 0.16, .51, .12])
b = ax_inset.scatter(modelparams_all['Area'], modelparams_all['zscore'], c=modelparams_all['mb_mwea'],
cmap=cmap, norm=norm, s=10,linewidth=0.5)
b.set_facecolor('none')
ax_inset.set_xlim([0,5])
# Add colorbar
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm._A = []
cbar_ax = fig.add_axes([0.92, 0.13, 0.02, 0.29])
cbar = fig.colorbar(sm, cax=cbar_ax)
cbar.set_ticks(list(np.arange(colorbar_dict['massbal'][0], colorbar_dict['massbal'][1] + 0.01, 0.5)))
fig.text(1.04, 0.28, '$\mathit{B_{obs}}$ $\mathregular{(m w.e. a^{-1})}$', va='center',
rotation='vertical', size=12)
# cbar = plt.colorbar(sm, ax=ax2, fraction=0.04, pad=0.01)
# cbar.set_ticks(list(np.arange(colorbar_dict['massbal'][0], colorbar_dict['massbal'][1] + 0.01, 0.5)))
# fig.text(1.01, 0.3, '$\mathregular{B_{obs}}$ $\mathregular{(m w.e. a^{-1})}$', va='center',
# rotation='vertical', size=12)
# Add subplot labels
fig.text(0.15, 0.83, 'A', zorder=4, color='black', fontsize=12, fontweight='bold')
fig.text(0.15, 0.40, 'B', zorder=4, color='black', fontsize=12, fontweight='bold')
# Save figure
fig.set_size_inches(6,7)
if degree_size < 1:
degsize_name = 'pt' + str(int(degree_size * 100))
else:
degsize_name = str(degree_size)
fig_fn = fig_fn_prefix + 'MB_dif_map_scatter_' + degsize_name + 'deg.png'
fig.savefig(figure_fp + fig_fn, bbox_inches='tight', dpi=300)
#%%
def plot_spatialmap_parameters(vns, grouping, modelparams_all, xlabel, ylabel, midpt_dict, cmap_dict, title_adj,
figure_fp, fig_fn_prefix='', option_contour_lines=0, option_rgi_outlines=0,
option_group_regions=0):
"""Plot spatial map of model parameters"""
fig, ax = plt.subplots(len(vns), 1, subplot_kw={'projection':cartopy.crs.PlateCarree()},
gridspec_kw = {'wspace':0.1, 'hspace':0.03})
for nvar, vn in enumerate(vns):
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# Note that I'm ignoring clipping and other edge cases here.
result, is_scalar = self.process_value(value)
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.array(np.interp(value, x, y), mask=result.mask, copy=False)
cmap = cmap_dict[vn]
norm = MidpointNormalize(midpoint=midpt_dict[vn], vmin=colorbar_dict[vn][0], vmax=colorbar_dict[vn][1])
lons, lats, z_array = grid_values(vn, grouping, modelparams_all)
if len(vns) > 1:
ax[nvar].pcolormesh(lons, lats, z_array, cmap=cmap, norm=norm, zorder=2, alpha=0.8)
else:
ax.pcolormesh(lons, lats, z_array, cmap=cmap, norm=norm, zorder=2, alpha=0.8)
# Set the extent
ax[nvar].set_extent([east, west, south, north], cartopy.crs.PlateCarree())
# Label title, x, and y axes
ax[nvar].set_xticks(np.arange(east,west+1,xtick), cartopy.crs.PlateCarree())
ax[nvar].set_yticks(np.arange(south,north+1,ytick), cartopy.crs.PlateCarree())
if nvar + 1 == len(vns):
ax[nvar].set_xlabel(xlabel, size=labelsize, labelpad=0)
# Add colorbar
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm._A = []
cbar = plt.colorbar(sm, ax=ax[nvar], fraction=0.03, pad=0.01)
# Set tick marks manually
if vn == 'dif_masschange':
cbar.set_ticks(list(np.arange(colorbar_dict[vn][0], colorbar_dict[vn][1] + 0.01, 0.05)))
elif vn == 'tempchange':
cbar.set_ticks(list(np.arange(colorbar_dict[vn][0], colorbar_dict[vn][1] + 0.01, 0.5))[1:-1])
ax[nvar].text(lons.max()+title_adj[vn], lats.mean(), vn_title_wunits_dict[vn], va='center', ha='center',
rotation='vertical', size=labelsize)
if option_group_regions == 1:
rgi_shp = cartopy.io.shapereader.Reader(bolch_shp_fn)
rgi_feature = cartopy.feature.ShapelyFeature(rgi_shp.geometries(), cartopy.crs.PlateCarree(),
edgecolor='lightgrey', facecolor='none', linewidth=1)
ax[nvar].add_feature(rgi_feature, zorder=9)
ax[nvar].text(101., 28.0, 'Hengduan\nShan', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(99.0, 26.5, 'Nyainqentanglha', zorder=10, size=8, va='center', ha='center')
ax[nvar].plot([98,96], [27,29.3], color='k', linewidth=0.25, zorder=10)
ax[nvar].text(93.0, 27.5, 'Eastern Himalaya', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(80.0, 27.3, 'Central Himalaya', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(72.0, 31.7, 'Western Himalaya', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(70.5, 33.7, 'Eastern\nHindu Kush', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(79.0, 39.7, 'Karakoram', zorder=10, size=8, va='center', ha='center')
ax[nvar].plot([76,78], [36,39], color='k', linewidth=0.25, zorder=10)
ax[nvar].text(80.7, 38.0, 'Western\nKunlun Shan', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(86.0, 33.7, 'Tibetan Interior\nMountains', zorder=10, size=8, va='center', ha='center')
ax[nvar].text(73.0, 29.0, 'Gandise Mountains', zorder=10, size=8, va='center', ha='center')
ax[nvar].plot([77.5,81.5], [29,31.4], color='k', linewidth=0.25, zorder=10)
else:
# Add country borders for reference
ax[nvar].add_feature(cartopy.feature.BORDERS, facecolor='none', edgecolor='lightgrey', zorder=10)
ax[nvar].add_feature(cartopy.feature.COASTLINE, facecolor='none', edgecolor='lightgrey', zorder=10)
# Add contour lines and/or rgi outlines
if option_contour_lines == 1:
srtm_contour_shp = cartopy.io.shapereader.Reader(srtm_contour_fn)
srtm_contour_feature = cartopy.feature.ShapelyFeature(srtm_contour_shp.geometries(),
cartopy.crs.PlateCarree(),
edgecolor='lightgrey', facecolor='none',
linewidth=0.05)
ax[nvar].add_feature(srtm_contour_feature, zorder=9)
if option_rgi_outlines == 1:
rgi_shp = cartopy.io.shapereader.Reader(rgi_glac_shp_fn)
rgi_feature = cartopy.feature.ShapelyFeature(rgi_shp.geometries(), cartopy.crs.PlateCarree(),
edgecolor='black', facecolor='none', linewidth=0.1)
ax[nvar].add_feature(rgi_feature, zorder=9)
# Add subplot labels
if len(vns) == 3:
fig.text(0.21, 0.86, 'A', zorder=4, color='black', fontsize=12, fontweight='bold')
fig.text(0.21, 0.605, 'B', zorder=4, color='black', fontsize=12, fontweight='bold')
fig.text(0.21, 0.35, 'C', zorder=4, color='black', fontsize=12, fontweight='bold')
elif len(vns) == 2:
fig.text(0.21, 0.85, 'A', zorder=4, color='black', fontsize=12, fontweight='bold')
fig.text(0.21, 0.46, 'B', zorder=4, color='black', fontsize=12, fontweight='bold')
if len(vns) > 1:
fig.text(0.1, 0.5, ylabel, va='center', rotation='vertical', size=12)
# Save figure
fig.set_size_inches(6,3*len(vns))
if degree_size < 1:
degsize_name = 'pt' + str(int(degree_size * 100))
else:
degsize_name = str(degree_size)
fig_fn = fig_fn_prefix + 'mp_maps_' + degsize_name + 'deg_' + str(len(vns)) + 'params.png'
fig.savefig(figure_fp + fig_fn, bbox_inches='tight', dpi=300)
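# Example usage (hedged sketch): the dictionaries below are illustrative assumptions
# following the midpt_dict/cmap_dict/title_adj pattern used in this script, not values
# taken from the original configuration; the call also relies on module-level settings
# (colorbar_dict, map extent, degree_size, etc.) defined elsewhere in this script.
# plot_spatialmap_parameters(
#     vns=['precfactor', 'tempchange'], grouping='degree', modelparams_all=modelparams_all,
#     xlabel='Longitude [deg E]', ylabel='Latitude [deg N]',
#     midpt_dict={'precfactor': 1, 'tempchange': 0},
#     cmap_dict={'precfactor': 'RdYlBu', 'tempchange': 'RdYlBu_r'},
#     title_adj={'precfactor': 12, 'tempchange': 12},
#     figure_fp=figure_fp, fig_fn_prefix='HMA_')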
#%%
def observation_vs_calibration(regions, netcdf_fp, chainlength=chainlength, burn=0, chain_no=0, netcdf_fn=None):
"""
Compare mass balance observations with model calibration
Parameters
----------
regions : list of strings
list of regions
netcdf_fp : str
filepath to the directory containing the MCMC output netcdf files
chainlength : int
chain length
burn : int
burn-in number
chain_no : int
index of the chain to analyze
netcdf_fn : str
filename of the csv summary of posteriors (loaded if it already exists)
Returns
-------
.png files
saves histogram of differences between observations and calibration
.csv file
saves .csv file of comparison
"""
#%%
#for batman in [0]:
# netcdf_fp = mcmc_output_netcdf_fp_all
# chain_no = 0
csv_fp = netcdf_fp + 'csv/'
fig_fp = netcdf_fp + 'figures/'
# Load mean of all model parameters
if os.path.isfile(csv_fp + netcdf_fn) == False:
filelist = []
for region in regions:
filelist.extend(glob.glob(netcdf_fp + str(region) + '*.nc'))
glac_no = []
reg_no = []
for netcdf in filelist:
glac_str = netcdf.split('/')[-1].split('.nc')[0]
glac_no.append(glac_str)
reg_no.append(glac_str.split('.')[0])
glac_no = sorted(glac_no)
(main_glac_rgi, main_glac_hyps, main_glac_icethickness, main_glac_width,
gcm_temp, gcm_tempstd, gcm_prec, gcm_elev, gcm_lr, cal_data, dates_table) = load_glacierdata_byglacno(glac_no)
posterior_cns = ['glacno', 'mb_mean', 'mb_std', 'pf_mean', 'pf_std', 'tc_mean', 'tc_std', 'ddfsnow_mean',
'ddfsnow_std']
posterior_all = pd.DataFrame(np.zeros((main_glac_rgi.shape[0], len(posterior_cns))), columns=posterior_cns)
print('burn:', burn, 'chain length:', chainlength)
for n, glac_str_wRGI in enumerate(main_glac_rgi['RGIId'].values):
if n%500 == 0:
print(n, glac_str_wRGI)
# Glacier string
glacier_str = glac_str_wRGI.split('-')[1]
# MCMC Analysis
ds = xr.open_dataset(netcdf_fp + glacier_str + '.nc')
df = pd.DataFrame(ds['mp_value'].values[burn:chainlength,:,0], columns=ds.mp.values)
# Posteriors
posterior_row = [glacier_str,
df.massbal.mean(), df.massbal.std(),
df.precfactor.mean(), df.precfactor.std(),
df.tempchange.mean(), df.tempchange.std(),
df.ddfsnow.mean(), df.ddfsnow.std()]
posterior_all.loc[n,:] = posterior_row
ds.close()
modelparams_all = main_glac_rgi[['RGIId', 'CenLon', 'CenLat', 'O1Region', 'Area', 'RefDate', 'glacno',
'RGIId_float']]
modelparams_all = pd.concat([modelparams_all, cal_data[['mb_mwe', 'mb_mwe_err', 't1', 't2', 'area_km2']]],
axis=1)
modelparams_all['mb_mwea'] = cal_data['mb_mwe'] / (cal_data['t2'] - cal_data['t1'])
modelparams_all['mb_mwea_err'] = cal_data['mb_mwe_err'] / (cal_data['t2'] - cal_data['t1'])
modelparams_all = pd.concat([modelparams_all, posterior_all], axis=1)
# Add region and priors
modelparams_all['Region'] = modelparams_all.RGIId.map(input.reg_dict)
# Priors
# precipitation factor
precfactor_alpha_dict = {region: input.precfactor_gamma_region_dict[region][0]
for region in list(input.precfactor_gamma_region_dict.keys())}
precfactor_beta_dict = {region: input.precfactor_gamma_region_dict[region][1]
for region in list(input.precfactor_gamma_region_dict.keys())}
modelparams_all['prior_pf_alpha'] = modelparams_all.Region.map(precfactor_alpha_dict)
modelparams_all['prior_pf_beta'] = modelparams_all.Region.map(precfactor_beta_dict)
modelparams_all['prior_pf_mu'] = modelparams_all['prior_pf_alpha'] / modelparams_all['prior_pf_beta']
modelparams_all['prior_pf_std'] = (modelparams_all['prior_pf_alpha'] / modelparams_all['prior_pf_beta']**2)**0.5
# temperature change
tempchange_mu_dict = {region: input.tempchange_norm_region_dict[region][0]
for region in list(input.tempchange_norm_region_dict.keys())}
tempchange_std_dict = {region: input.tempchange_norm_region_dict[region][1]
for region in list(input.tempchange_norm_region_dict.keys())}
modelparams_all['prior_tc_mu'] = modelparams_all.Region.map(tempchange_mu_dict)
modelparams_all['prior_tc_std'] = modelparams_all.Region.map(tempchange_std_dict)
# degree-day factor of snow
modelparams_all['prior_ddfsnow_mu'] = input.ddfsnow_mu * 1000
modelparams_all['prior_ddfsnow_std'] = input.ddfsnow_sigma * 1000
if os.path.exists(csv_fp) == False:
os.makedirs(csv_fp)
modelparams_all.to_csv(csv_fp + netcdf_fn, index=False)
else:
modelparams_all = pd.read_csv(csv_fp + netcdf_fn)
#%%
# Change column names to enable use of existing scripts
modelparams_all['obs_mwea'] = modelparams_all['mb_mwea']
modelparams_all['obs_mwea_std'] = modelparams_all['mb_mwea_err']
modelparams_all['mod_mwea'] = modelparams_all['mb_mean']
modelparams_all['mod_mwea_std'] = modelparams_all['mb_std']
modelparams_all['Area_km2'] = modelparams_all['Area']
mb_compare = modelparams_all.copy()
# # Mass balance comparison: observations and model
# mb_compare_cols = ['glacno', 'obs_mwea', 'obs_mwea_std', 'mod_mwea', 'mod_mwea_std', 'dif_mwea']
# mb_compare = pd.DataFrame(np.zeros((len(glac_no), len(mb_compare_cols))), columns=mb_compare_cols)
# mb_compare['glacno'] = glac_no
# mb_compare['obs_mwea'] = cal_data['mb_mwe'] / (cal_data['t2'] - cal_data['t1'])
# mb_compare['obs_mwea_std'] = cal_data['mb_mwe_err'] / (cal_data['t2'] - cal_data['t1'])
# for nglac, glac in enumerate(glac_no):
# # open dataset
# if nglac%500 == 0:
# print(nglac, glac)
# ds = xr.open_dataset(netcdf_fp + glac + '.nc')
# mb_all = ds['mp_value'].sel(chain=chain_no, mp='massbal').values[burn:chainlength]
# mb_compare.loc[nglac, 'mod_mwea'] = np.mean(mb_all)
# mb_compare.loc[nglac, 'mod_mwea_std'] = np.std(mb_all)
# # close dataset
# ds.close()
#%%
mb_compare['dif_mwea'] = mb_compare['mod_mwea'] - mb_compare['obs_mwea']
mb_compare['dif_zscore'] = (mb_compare['mod_mwea'] - mb_compare['obs_mwea']) / mb_compare['obs_mwea_std']
# mb_compare['Area_km2'] = main_glac_rgi['Area']
# mb_compare['Zmin'] = main_glac_rgi['Zmin']
# mb_compare['Zmax'] = main_glac_rgi['Zmax']
# mb_compare['Zmed'] = main_glac_rgi['Zmed']
mb_compare['obs_Gta'] = mb_compare['obs_mwea'] / 1000 * mb_compare['Area_km2']
mb_compare['obs_Gta_std'] = mb_compare['obs_mwea_std'] / 1000 * mb_compare['Area_km2']
mb_compare['mod_Gta'] = mb_compare['mod_mwea'] / 1000 * mb_compare['Area_km2']
mb_compare['mod_Gta_std'] = mb_compare['mod_mwea_std'] / 1000 * mb_compare['Area_km2']
print('Observed MB [Gt/yr]:', np.round(mb_compare.obs_Gta.sum(),2),
'(+/-', np.round(mb_compare.obs_Gta_std.sum(),2),')',
'\nModeled MB [Gt/yr]:', np.round(mb_compare.mod_Gta.sum(),2),
'(+/-', np.round(mb_compare.mod_Gta_std.sum(),2),')'
)
#%%
mb_compare['abs_zscore'] = np.absolute(mb_compare['dif_zscore'])
mb_compare_gt1zscore = mb_compare.loc[mb_compare.abs_zscore > 1]
mb_compare_gt5km2 = mb_compare.loc[mb_compare.Area_km2 > 5]
mb_compare_gt5km2_gt1zscore = mb_compare_gt5km2.loc[mb_compare_gt5km2.abs_zscore > 1]
print('Glaciers > 5km2', mb_compare_gt5km2.shape[0], 'w zscore > 1', mb_compare_gt5km2_gt1zscore.shape[0])
mb_compare_lt5km2 = mb_compare.loc[mb_compare.Area_km2 <= 5]
mb_compare_lt5km2_gt1zscore = mb_compare_lt5km2.loc[mb_compare_lt5km2.abs_zscore > 1]
print('Glaciers <= 5km2', mb_compare_lt5km2.shape[0], 'w zscore > 1', mb_compare_lt5km2_gt1zscore.shape[0],
'(' + str(np.round(mb_compare_lt5km2_gt1zscore.shape[0] / mb_compare_lt5km2.shape[0] *100, 0)) + '%)',)
print('Number of glaciers with zscore > 1:', mb_compare_gt1zscore.shape[0], '(' +
str(np.round(mb_compare_gt1zscore.shape[0] / mb_compare.shape[0] *100, 0)) + '%)', '(' +
str(np.round(mb_compare_gt1zscore['Area_km2'].sum() / mb_compare['Area_km2'].sum() *100, 2)) + '% of area)')
# #%%
# # Calculate number of glaciers where observed mass balance < maximum mass loss
# glac_no = [x.split('-')[1] for x in mb_compare['RGIId'].values]
# (main_glac_rgi, main_glac_hyps, main_glac_icethickness, main_glac_width,
# gcm_temp, gcm_tempstd, gcm_prec, gcm_elev, gcm_lr, cal_data, dates_table) = load_glacierdata_byglacno(glac_no)
# #%%
# mb_max_loss = -1 * (main_glac_hyps * main_glac_icethickness).sum(1) / main_glac_hyps.sum(1) / 18
# mb_compare['mb_max_loss'] = mb_max_loss
# mb_compare['cal_mwea'] = cal_data['mb_mwe'] / 18
# mb_compare['mb_obs_vs_maxloss'] = mb_compare['cal_mwea'] - mb_compare['mb_max_loss']
# mb_compare_lt_maxloss = mb_compare.loc[mb_compare['mb_obs_vs_maxloss'] < 0]
# #%%
# ===== HISTOGRAM: mass balance difference ======
dif_bins = [-1.5, -1, -0.5, -0.2, -0.1, -0.05,-0.02, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 1.5]
bin_min = np.floor((mb_compare['dif_mwea'].min() * 100))/100
bin_max = np.ceil((mb_compare['dif_mwea'].max() * 100))/100
if bin_min < dif_bins[0]:
dif_bins[0] = bin_min
if bin_max > dif_bins[-1]:
dif_bins[-1] = bin_max
hist_fn = 'hist_' + str(int(chainlength/1000)) + 'kch_dif_mwea.png'
plot_hist(mb_compare, 'dif_mwea', dif_bins,
xlabel='$\mathregular{B_{mod} - B_{obs}}$ (m w.e. $\mathregular{a^{-1}}$)', ylabel='Count',
fig_fp=fig_fp, fig_fn=hist_fn)
# ===== HISTOGRAM: z-score =====
dif_bins = [-2,-1, -0.75, -0.5, -0.25, -0.1, 0.1, 0.25, 0.5, 0.75, 1, 2]
bin_min = np.floor((mb_compare['dif_zscore'].min() * 100))/100
bin_max = np.ceil((mb_compare['dif_zscore'].max() * 100))/100
if bin_min < dif_bins[0]:
dif_bins[0] = bin_min
if bin_max > dif_bins[-1]:
dif_bins[-1] = bin_max
hist_fn = 'hist_' + str(int(chainlength/1000)) + 'kch_zscore.png'
plot_hist(mb_compare, 'dif_zscore', dif_bins,
xlabel='z-score ($\\frac{B_{mod} - B_{obs}}{B_{std}}$)', ylabel='Count',
fig_fp=fig_fp, fig_fn=hist_fn)
# ===== Scatterplot: Glacier Area [km2] vs. Mass balance, color-coded by mass balance difference =====
fig, ax = plt.subplots()
cmap = 'RdYlBu'
# cmap = plt.cm.get_cmap(cmap, 5)
colorbar_dict = {'precfactor':[0,5],
'tempchange':[-5,5],
'ddfsnow':[2.6,5.6],
'massbal':[-1.5,0.5],
'dif_masschange':[-0.5,0.5],
'dif_zscore':[-1,1]}
norm = plt.Normalize(colorbar_dict['dif_masschange'][0], colorbar_dict['dif_masschange'][1])
a = ax.scatter(mb_compare['Area_km2'], mb_compare['obs_mwea'], c=mb_compare['dif_mwea'],
cmap=cmap, norm=norm, s=20, linewidth=0.5)
a.set_facecolor('none')
ax.set_xlim([0,200])
ax.set_ylim([-2.5,1.25])
ax.set_ylabel('$\mathregular{B_{obs}}$ $\mathregular{(m w.e. a^{-1})}$', size=12)
ax.set_xlabel('Area ($\mathregular{km^{2}}$)', size=12)
# Inset axis over main axis
ax_inset = plt.axes([.35, .19, .48, .35])
b = ax_inset.scatter(mb_compare['Area_km2'], mb_compare['obs_mwea'], c=mb_compare['dif_mwea'],
cmap=cmap, norm=norm, s=20,linewidth=0.5)
b.set_facecolor('none')
ax_inset.set_xlim([0,5])
# Add colorbar
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm._A = []
cbar = plt.colorbar(sm, ax=ax, fraction=0.04, pad=0.01)
cbar.set_ticks(list(np.arange(colorbar_dict['dif_masschange'][0], colorbar_dict['dif_masschange'][1] + 0.01, 0.25)))
fig.text(1.01, 0.5, '$\mathregular{B_{mod} - B_{obs}}$ (m w.e. $\mathregular{a^{-1}}$)', va='center',
rotation='vertical', size=12)
# Save figure
fig.set_size_inches(6,4)
fig_fn = 'MB_vs_area_wdif_scatterplot.png'
fig.savefig(fig_fp + fig_fn, bbox_inches='tight', dpi=300)
# ===== Scatterplot: Glacier Area [km2] vs. Mass balance, color-coded by Z-SCORE difference =====
fig, ax = plt.subplots()
cmap = 'RdYlBu'
# cmap = plt.cm.get_cmap(cmap, 5)
norm = plt.Normalize(colorbar_dict['dif_zscore'][0], colorbar_dict['dif_zscore'][1])
a = ax.scatter(mb_compare['Area_km2'], mb_compare['obs_mwea'], c=mb_compare['dif_zscore'],
cmap=cmap, norm=norm, s=20, linewidth=0.5)
a.set_facecolor('none')
ax.set_xlim([0,200])
ax.set_ylim([-2.5,1.25])
ax.set_ylabel('$\mathregular{B_{obs}}$ $\mathregular{(m w.e. a^{-1})}$', size=12)
ax.set_xlabel('Area ($\mathregular{km^{2}}$)', size=12)
# Inset axis over main axis
ax_inset = plt.axes([.35, .19, .48, .35])
b = ax_inset.scatter(mb_compare['Area_km2'], mb_compare['obs_mwea'], facecolor='None', c=mb_compare['dif_zscore'],
cmap=cmap, norm=norm, s=20,linewidth=0.5)
b.set_facecolor('none')
ax_inset.set_xlim([0,5])
# Add colorbar
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm._A = []
cbar = plt.colorbar(sm, ax=ax, fraction=0.04, pad=0.01)
cbar.set_ticks(list(np.arange(colorbar_dict['dif_zscore'][0], colorbar_dict['dif_zscore'][1] + 0.01, 0.25)))
fig.text(1.01, 0.5, 'z-score ($\\frac{B_{mod} - B_{obs}}{B_{std}}$)', va='center',
rotation='vertical', size=12)
# Save figure
fig.set_size_inches(6,4)
fig_fn = 'MB_vs_area_wdif_scatterplot_zscore.png'
fig.savefig(fig_fp + fig_fn, bbox_inches='tight', dpi=300)
# ===== Scatterplot: Glacier Area [km2] vs. mass balance difference, color-coded by Mass balance =====
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# Note that I'm ignoring clipping and other edge cases here.
result, is_scalar = self.process_value(value)
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.array(np.interp(value, x, y), mask=result.mask, copy=False)
fig, ax = plt.subplots()
cmap = 'RdYlBu'
# cmap = plt.cm.get_cmap(cmap, 5)
# norm = plt.Normalize(colorbar_dict['massbal'][0], colorbar_dict['massbal'][1])
norm = MidpointNormalize(midpoint=0, vmin=colorbar_dict['massbal'][0], vmax=colorbar_dict['massbal'][1])
a = ax.scatter(mb_compare['Area_km2'], mb_compare['dif_mwea'], c=mb_compare['obs_mwea'],
cmap=cmap, norm=norm, s=20, linewidth=0.5)
a.set_facecolor('none')
ax.set_xlim([0,200])
ax.set_ylim([-2.49,1.75])
ax.set_ylabel('$\mathregular{B_{mod} - B_{obs}}$ (m w.e. $\mathregular{a^{-1}}$)', size=12)
ax.set_xlabel('Area ($\mathregular{km^{2}}$)', size=12)
# Inset axis over main axis
ax_inset = plt.axes([.35, .19, .48, .35])
a = ax_inset.scatter(mb_compare['Area_km2'], mb_compare['dif_zscore'], c=mb_compare['obs_mwea'],
cmap=cmap, norm=norm, s=20,linewidth=0.5)
a.set_facecolor('none')
ax_inset.set_xlim([0,5])
# Add colorbar
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm._A = []
cbar = plt.colorbar(sm, ax=ax, fraction=0.04, pad=0.01)
cbar.set_ticks(list(np.arange(colorbar_dict['massbal'][0], colorbar_dict['massbal'][1] + 0.01, 0.25)))
fig.text(1.01, 0.5, '$\mathregular{B_{obs}}$ $\mathregular{(m w.e. a^{-1})}$', va='center',
rotation='vertical', size=12)
# Save figure
fig.set_size_inches(6,4)
fig_fn = 'dif_vs_area_wMB_scatterplot.png'
fig.savefig(fig_fp + fig_fn, bbox_inches='tight', dpi=300)
# ===== Scatterplot: Glacier Area [km2] vs. Z-SCORE DIFFERENCE, color-coded by Mass balance =====
fig, ax = plt.subplots()
cmap = 'RdYlBu'
# cmap = plt.cm.get_cmap(cmap, 5)
# norm = plt.Normalize(colorbar_dict['massbal'][0], colorbar_dict['massbal'][1])
norm = MidpointNormalize(midpoint=0, vmin=colorbar_dict['massbal'][0], vmax=colorbar_dict['massbal'][1])
a = ax.scatter(mb_compare['Area_km2'], mb_compare['dif_zscore'], c=mb_compare['obs_mwea'],
cmap=cmap, norm=norm, s=20, linewidth=0.5)
a.set_facecolor('none')
ax.set_xlim([0,200])
ax.set_ylim([-3.99,2.5])
# ax.set_ylabel('z-score ($\\frac{B_{mod} - B_{obs}}{B_{std}}$)', size=12)
ax.set_ylabel('z-score (-)', size=12)
ax.set_xlabel('Area ($\mathregular{km^{2}}$)', size=12)
# Inset axis over main axis
ax_inset = plt.axes([.35, .19, .48, .35])
a = ax_inset.scatter(mb_compare['Area_km2'], mb_compare['dif_zscore'], c=mb_compare['obs_mwea'],
cmap=cmap, norm=norm, s=10,linewidth=0.5)
a.set_facecolor('none')
ax_inset.set_xlim([0,5])
# Add colorbar
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm._A = []
cbar = plt.colorbar(sm, ax=ax, fraction=0.04, pad=0.01)
cbar.set_ticks(list(np.arange(colorbar_dict['massbal'][0], colorbar_dict['massbal'][1] + 0.01, 0.25)))
fig.text(1.01, 0.5, '$\mathit{B_{obs}}$ $\mathregular{(m w.e. a^{-1})}$', va='center',
rotation='vertical', size=12)
# Save figure
fig.set_size_inches(6,4)
fig_fn = 'dif_vs_area_wMB_scatterplot_zscore.png'
fig.savefig(fig_fp + fig_fn, bbox_inches='tight', dpi=300)
return mb_compare
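# Example usage (hedged sketch): the region list, netcdf filepath, and output filename
# below are placeholders for illustration, not values from the original run script.
# mb_compare = observation_vs_calibration(
#     regions=[13, 14, 15], netcdf_fp=mcmc_output_netcdf_fp_all,
#     chainlength=chainlength, burn=1000, netcdf_fn='posterior_stats_all.csv')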
#%%
if __name__ == '__main__':
#%%
if option_metrics_vs_chainlength == 1:
# metrics_vs_chainlength(mcmc_output_netcdf_fp_3chain, regions, iterations, burn=burn, nchain=3,
# option_subplot_labels=1)
netcdf_fp = mcmc_output_netcdf_fp_3chain
fig_fp = netcdf_fp + 'figures/'
csv_fp = netcdf_fp + 'csv/'
nchain = 3
# burn = 1000
# iterstep = 5000
# itermax = 25000
burn = 0
iterstep = 2000
itermax = 25000
iterations = np.arange(0, itermax, iterstep)
if iterations[0] < 1000:
iterations[0] = burn + 1000
else:
iterations = iterations[1:]
if iterations[-1] != itermax:
iterations = np.append(iterations, itermax)
iters = iterations
option_mcerror_normalize = 1
option_subplot_labels = 0
metrics = ['Gelman-Rubin', 'MC Error', 'Effective N']
if nchain == 1:
metrics.remove('Gelman-Rubin')
low_percentile = 10
high_percentile = 90
print('iterations:', iterations)
# File names
en_fn_pkl = 'effective_n_list.pkl'
mc_fn_pkl = 'mc_error_list.pkl'
gr_fn_pkl = 'gelman_rubin_list.pkl'
postmean_fn_pkl = 'postmean_list.pkl'
poststd_fn_pkl = 'poststd_list.pkl'
glacno_fn_pkl = 'glacno_list.pkl'
iter_ending = '_' + str(iterstep) + 'iterstep_' + str(burn) + 'burn.pkl'
# Check if files exist
if os.path.isfile(csv_fp + en_fn_pkl.replace('.pkl', iter_ending)):
with open(csv_fp + en_fn_pkl.replace('.pkl', iter_ending), 'rb') as f:
en_list = pickle.load(f)
with open(csv_fp + mc_fn_pkl.replace('.pkl', iter_ending), 'rb') as f:
mc_list = pickle.load(f)
if nchain > 1:
with open(csv_fp + gr_fn_pkl.replace('.pkl', iter_ending), 'rb') as f:
gr_list = pickle.load(f)
with open(csv_fp + postmean_fn_pkl.replace('.pkl', iter_ending), 'rb') as f:
postmean_list = pickle.load(f)
with open(csv_fp + poststd_fn_pkl.replace('.pkl', iter_ending), 'rb') as f:
poststd_list = pickle.load(f)
with open(csv_fp + glacno_fn_pkl, 'rb') as f:
glac_no = pickle.load(f)
# Otherwise, process and pickle data
else:
# Lists to record metrics
glac_no = []
en_list = {}
gr_list = {}
mc_list = {}
postmean_list = {}
poststd_list = {}
# Load netcdf filenames
filelist = []
for region in regions:
filelist.extend(glob.glob(netcdf_fp + str(region) + '*.nc'))
filelist = sorted(filelist)
# iterate through each glacier
count = 0
for count, netcdf in enumerate(filelist):
# for count, netcdf in enumerate(filelist[0:100]):
glac_str = netcdf.split('/')[-1].split('.nc')[0]
glac_no.append(glac_str)
# if count%100 == 0:
print(count, glac_str)
en_list[glac_str] = {}
gr_list[glac_str] = {}
mc_list[glac_str] = {}
postmean_list[glac_str] = {}
poststd_list[glac_str] = {}
# open dataset
ds = xr.open_dataset(netcdf)
# Metrics for each parameter
for nvar, vn in enumerate(variables):
# Effective sample size
if 'Effective N' in metrics:
en = [effective_n(ds, vn=vn, iters=i, burn=burn) for i in iters]
en_list[glac_str][vn] = dict(zip(iters, en))
if 'MC Error' in metrics:
# Monte Carlo error
# the first [0] extracts the MC error as opposed to the confidence interval
# the second [0] extracts the first chain
mc = [mc_error(ds, vn=vn, iters=i, burn=burn, method='overlapping')[0][0] for i in iters]
mc_list[glac_str][vn] = dict(zip(iters, mc))
# Gelman-Rubin Statistic
if len(ds.chain) > 1 and 'Gelman-Rubin' in metrics:
gr = [gelman_rubin(ds, vn=vn, iters=i, burn=burn) for i in iters]
gr_list[glac_str][vn] = dict(zip(iters, gr))
# Posteriors
for nvar, vn in enumerate(variables):
postmean_list[glac_str][vn] = {}
poststd_list[glac_str][vn] = {}
for n_iters in iterations:
df = pd.DataFrame(ds['mp_value'].values[burn:n_iters,:,0], columns=ds.mp.values)
postmean_list[glac_str]['massbal'][n_iters] = df.massbal.mean()
postmean_list[glac_str]['precfactor'][n_iters] = df.precfactor.mean()
postmean_list[glac_str]['tempchange'][n_iters] = df.tempchange.mean()
postmean_list[glac_str]['ddfsnow'][n_iters] = df.ddfsnow.mean()
poststd_list[glac_str]['massbal'][n_iters] = df.massbal.std()
poststd_list[glac_str]['precfactor'][n_iters] = df.precfactor.std()
poststd_list[glac_str]['tempchange'][n_iters] = df.tempchange.std()
poststd_list[glac_str]['ddfsnow'][n_iters] = df.ddfsnow.std()
# close dataset
ds.close()
# Pickle lists for next time
if os.path.exists(csv_fp) == False:
os.makedirs(csv_fp)
pickle_data(csv_fp + en_fn_pkl.replace('.pkl', iter_ending), en_list)
pickle_data(csv_fp + mc_fn_pkl.replace('.pkl', iter_ending), mc_list)
if len(ds.chain) > 1:
pickle_data(csv_fp + gr_fn_pkl.replace('.pkl', iter_ending), gr_list)
pickle_data(csv_fp + postmean_fn_pkl.replace('.pkl', iter_ending), postmean_list)
pickle_data(csv_fp + poststd_fn_pkl.replace('.pkl', iter_ending), poststd_list)
pickle_data(csv_fp + glacno_fn_pkl, glac_no)
#%%
# Load netcdf filenames
filelist = []
filelist.extend(glob.glob(netcdf_fp + '*.nc'))
filelist = sorted(filelist)
glacno_all = [x.split('/')[-1].replace('.nc','') for x in filelist]
# Identify glaciers not already processed
glacno_notprocessed = sorted(list(set(glacno_all) - set(glac_no)))
# iterate through each glacier
count = 0
for count, glac_str in enumerate(glacno_notprocessed):
netcdf = netcdf_fp + glac_str + '.nc'
glac_no.append(glac_str)
print(count, glac_str)
en_list[glac_str] = {}
gr_list[glac_str] = {}
mc_list[glac_str] = {}
postmean_list[glac_str] = {}
poststd_list[glac_str] = {}
# open dataset
ds = xr.open_dataset(netcdf)
# Metrics for each parameter
for nvar, vn in enumerate(variables):
# Effective sample size
if 'Effective N' in metrics:
en = [effective_n(ds, vn=vn, iters=i, burn=burn) for i in iters]
en_list[glac_str][vn] = dict(zip(iters, en))
if 'MC Error' in metrics:
# Monte Carlo error
# the first [0] extracts the MC error as opposed to the confidence interval
# the second [0] extracts the first chain
mc = [mc_error(ds, vn=vn, iters=i, burn=burn, method='overlapping')[0][0] for i in iters]
mc_list[glac_str][vn] = dict(zip(iters, mc))
# Gelman-Rubin Statistic
if len(ds.chain) > 1 and 'Gelman-Rubin' in metrics:
gr = [gelman_rubin(ds, vn=vn, iters=i, burn=burn) for i in iters]
gr_list[glac_str][vn] = dict(zip(iters, gr))
# Posteriors
for nvar, vn in enumerate(variables):
postmean_list[glac_str][vn] = {}
poststd_list[glac_str][vn] = {}
for n_iters in iterations:
df = pd.DataFrame(ds['mp_value'].values[burn:n_iters,:,0], columns=ds.mp.values)
import pandas as pd
import streamlit as st
import numpy as np
import folium
import plotly.express as px
from streamlit_folium import folium_static
from folium.plugins import MarkerCluster
from datetime import datetime
# Expand the layout to wide mode
st.set_page_config(layout='wide')
# decorator to read the file from the in-memory cache instead of from disk
@st.cache(allow_output_mutation=True)
def get_data(path):
data = pd.read_csv(path)
return data
def drop_rows(data):
data = data.drop(data[data['bathrooms'] == 0].index)
return data
def reset_index(data):
data = data.reset_index(drop=True)
return data
def conversion_date(data):
# conversion of date
data['date'] = pd.to_datetime(data['date'])
return data
def set_feature(data):
# add new features
data['price_m2'] = (data['price'] / data['sqft_lot']) * 0.093
return data
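# Example usage (hedged sketch): the csv path is an assumption; the app presumably
# points at the King County housing dataset used by House Rocket.
# raw = get_data('datasets/kc_house_data.csv')
# raw = drop_rows(raw)
# raw = reset_index(raw)
# raw = conversion_date(raw)
# raw = set_feature(raw)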
def overview_data(data):
st.title("House Rocket company by <NAME>")
st.title("Data Overview")
st.sidebar.title("Data overview filters")
# column filter
f_attributes = st.sidebar.multiselect('Enter columns', data.columns)
# region (zipcode) filter
f_zipcode = st.sidebar.multiselect('Enter zipcode', data['zipcode'].unique())
# attributes + zipcode = select columns and rows
# attributes = select columns
# zipcode = select rows
# 0 + 0 = return the original dataset
if (f_zipcode != []) & (f_attributes != []):
data = data.loc[data['zipcode'].isin(f_zipcode), f_attributes]
elif (f_zipcode != []) & (f_attributes == []):
data = data.loc[data['zipcode'].isin(f_zipcode), :]
elif (f_zipcode == []) & (f_attributes != []):
data = data.loc[:, f_attributes]
else:
data = data.copy()
st.dataframe(data)
c1, c2 = st.columns((1, 1))
# Average metrics
df1 = data[['id', 'zipcode']].groupby('zipcode').count().reset_index()
df2 = data[['price', 'zipcode']].groupby('zipcode').mean().reset_index()
df3 = data[['sqft_living', 'zipcode']].groupby('zipcode').mean().reset_index()
df4 = data[['price_m2', 'zipcode']].groupby('zipcode').mean().reset_index()
# Merge
m1 = pd.merge(df1, df2, on='zipcode', how='inner')
m2 = pd.merge(m1, df3, on='zipcode', how='inner')
df = pd.merge(m2, df4, on='zipcode', how='inner')
df.columns = ['Zipcode', 'Total Houses', 'Price', 'Sqft Living', 'Price/m2']
c1.header('Values for zipcode')
c1.dataframe(df, height=600, width=400)
# Descriptive statistics
num_attributes = data.select_dtypes(include=['int64', 'float64'])
media = pd.DataFrame(num_attributes.apply(np.mean))
mediana = pd.DataFrame(num_attributes.apply(np.median))
std = pd.DataFrame(num_attributes.apply(np.std))
max_ = pd.DataFrame(num_attributes.apply(np.max))
min_ = pd.DataFrame(num_attributes.apply(np.min))
df1 = pd.concat([max_, min_, media, mediana, std], axis=1).reset_index()
df1.columns = ['attributes', 'max', 'min', 'mean', 'median', 'std']
c2.header('Descriptive analysis')
c2.dataframe(df1, height=600, width=400)
return None
def portfolio_density(data):
st.title('Region Overview')
c1, c2 = st.columns((1, 1))
c1.header('Portfolio density')
df = data.sample(10)
# Base Map - Folium
density_map = folium.Map(location=[data['lat'].mean(),
data['long'].mean()],
default_zoom_start=15)
marker_cluster = MarkerCluster().add_to(density_map)
for name, row in df.iterrows():
folium.Marker([row['lat'], row['long']],
popup='Sold R${0} on: {1}. Features: {2} sqft, {3} bedrooms, {4} bathrooms, year built: {5}'.format(
row['price'],
row['date'],
row['sqft_living'],
row['bedrooms'],
row['bathrooms'],
row['yr_built'])).add_to(marker_cluster)
with c1:
folium_static(density_map)
return None
def commercial_distribution(data):
st.sidebar.title('Commercial attributes filters')
st.title('Commercial Attributes')
# -----Average Price per Year
# filters
min_year_built = int(data['yr_built'].min())
max_year_built = int(data['yr_built'].max())
st.sidebar.subheader('Select max construction year')
f_year_built = st.sidebar.slider('Year Built', min_year_built,
max_year_built,
max_year_built)
st.header('Price per year')
# data selection
df = data.loc[data['yr_built'] < f_year_built]
df = df[['price', 'yr_built']].groupby('yr_built').mean().reset_index()
# plot
fig = px.line(df, x='yr_built', y='price')
fig.update_traces(line=dict(color='black'))
fig.update_layout(title="Average price per construction year")
fig.update_xaxes(title="Construction year (-)")
fig.update_yaxes(title="Price (USD)")
st.plotly_chart(fig, use_container_width=True)
# -----Average Price per Day
st.header('Price per date of purchase')
st.sidebar.subheader('Select max date')
# load data
data['date'] = pd.to_datetime(data['date'])
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
import pandas as pd
import xml.etree.ElementTree as ET
import lxml.etree as etree
most_serious_problem = pd.read_csv(
"../data/processed_data/special_eb/data/3_final/most_serious_problem/special_eb_most_serious_problem_final.csv")
personally_taken_action = pd.read_csv(
"../data/processed_data/special_eb/data/3_final/personally_taken_action/special_eb_personally_taken_action_final.csv")
severity_of_problem = pd.read_csv(
"../data/processed_data/special_eb/data/3_final/severity_of_problem/special_eb_severity_of_problem_final.csv")
who_is_responsible = pd.read_csv(
"../data/processed_data/special_eb/data/3_final/who_is_responsible/special_eb_who_is_responsible_final.csv")
# created 31 May 2017
import os
import re
import unittest
from time import time
import numpy as np
import pandas as pd
def unittest_summary(src_dir,
test_file_prefix='test_',
test_def_prefix='test_',
ignore_modules=None,
ignore_packages=None):
"""
Checks packages for unittests and looks for test files corresponding for each module.
Parameters
----------
src_dir : str
Directory containing packages (src folder).
test_file_prefix: str, defaults to 'test_' so it will look for 'test_{module.py}'.
Prefix for unit test file, default 'test_' - e.g. look for 'test_' + module.py.
test_def_prefix : str
Prefix for unit test def, default 'test_' - e.g. will count lines '^def test_.*'.
ignore_modules : list
Package files to ignore, default None.
ignore_packages : list
Packages to ignore, default None.
Return
-------
df : pd.DataFrame
Summary of unit tests in the library.
"""
print('-' * 40)
print(f"\n\nsrc Library Global Tests\n\n")
print('-' * 40)
# Getting list of packages from src
pkg_dirs = [os.path.join(src_dir, i)
for i in os.listdir(src_dir)
if os.path.isdir(os.path.join(src_dir, i))]
# Check if there is a subpackage called 'tests' in each package
have_test_dir = {os.path.basename(i): 'tests' in os.listdir(i) for i in pkg_dirs}
pkg_and_modules = dict()
for i in pkg_dirs:
pkg_and_modules[os.path.basename(i)] = \
[j for j in os.listdir(i) if bool(re.search('[.]py$', j))]
# package -> modules -> test details
full_dict = dict()
for pkg_name, lst_of_modules in pkg_and_modules.items():
# Packages containing subpackages 'tests'
if have_test_dir[pkg_name]:
ut_dir = os.path.join(src_dir, pkg_name, 'tests')
# Listing test modules within the subpackage
ut_files = os.listdir(ut_dir)
# To nest dictionary
full_dict[pkg_name] = {}
for m in lst_of_modules:
# Concatenate 'test_' + module.py
test_module = test_file_prefix + m
if test_module in ut_files:
# read in the file, count the number of lines begining with def (or 'def test')
# read the number of lines in file
with open(os.path.join(ut_dir, test_module)) as fp:
content = fp.readlines()
test_defs = [c for c in content if bool(re.search(f'^def {test_def_prefix}', c.lstrip()))]
has_unit_tests, num_test_def, num_test_lines = True, len(test_defs), len(content)
else:
has_unit_tests, num_test_def, num_test_lines = False, 0, 0
full_dict[pkg_name][m] = {"has_unit_test": has_unit_tests,
"num_test_def": num_test_def,
"num_test_lines": num_test_lines}
else:
print(f"Excluding package '{pkg_name}' as it does not have subpackage 'tests'.")
df = pd.DataFrame.from_dict({(i, j): full_dict[i][j]
for i in full_dict.keys()
for j in full_dict[i].keys()},
orient="index")
df.index.names = ["Package", "Module"]
df = df.reset_index()
if ignore_modules:
print(f'Ignoring modules: {ignore_modules}')
df = df.loc[~df['Module'].isin(ignore_modules)]
if ignore_packages:
print(f'Ignoring packages: {ignore_packages}')
df = df.loc[~df['Package'].isin(ignore_packages)]
print("\n")
print('-' * 60)
# files with unit tests
fwut = pd.pivot_table(df, index=['Package'], values='has_unit_test', aggfunc='mean')
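# Example usage (hedged sketch): the 'src' path and ignore lists are assumptions.
# summary = unittest_summary('src',
#                            ignore_modules=['__init__.py'],
#                            ignore_packages=['examples'])
# print(summary.head())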
def load_ladcp_data(all_files):
import os
import gsw
import xarray as xr
import pandas as pd
import numpy as np
for ti,f in zip(range(len(all_files)),all_files):
ladcp_cast = load_ladcp_csv(f)
name = os.path.basename(f).split('.')[0]
st_name = np.array(int(name[-3:]))
# create dataset and combine lat,lon into one dimension with stack
prof_as_ds = xr.Dataset({'u': (['lon', 'lat', 'z'], ladcp_cast.u.values[np.newaxis,np.newaxis,:]),
'v': (['lon', 'lat', 'z'], ladcp_cast.v.values[np.newaxis,np.newaxis,:])},
coords={'lon': (ladcp_cast.longitude[...,np.newaxis]),
'lat': (ladcp_cast.latitude[...,np.newaxis]),
'station': (st_name[...,np.newaxis]),
'time': (np.array(ladcp_cast.datetime)[...,np.newaxis]),
'z': -ladcp_cast.z.values}).stack(xy = ('lon','lat','station','time'))
if ti == 0:
ladcp_data = prof_as_ds
else:
ladcp_data = xr.concat([ladcp_data,prof_as_ds],dim=('xy'))
return ladcp_data
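# Example usage (hedged sketch): the glob pattern is an assumption about where the
# processed LADCP csv files live.
# import glob
# ladcp_files = sorted(glob.glob('data/ladcp/*.csv'))
# ladcp_data = load_ladcp_data(ladcp_files)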
def load_ctd_data(data_files):
import os
import gsw
import xarray as xr
import pandas as pd
import numpy as np
for ti,f in zip(range(len(data_files)),data_files):
ctd_cast = load_ctd_csv(f)
name = os.path.basename(f).split('.')[0]
# convert to CT,SA,z
z = gsw.z_from_p(ctd_cast.Pressure,ctd_cast.lat)
p = ctd_cast.Pressure
if p.max() < 50:
continue
#print(p.max(),p.min(),z.max(),z.min())
SA = gsw.SA_from_SP(ctd_cast.Salinity,ctd_cast.Pressure,ctd_cast.lon,ctd_cast.lat)
CT = gsw.CT_from_t(SA,ctd_cast.Temperature,ctd_cast.Pressure)
RHO = gsw.rho(SA,CT,ctd_cast.Pressure)
p_ref = 10.1325; # reference pressure # following scanfish calc
Pot_dens = gsw.rho(SA,CT,p_ref);
sigma_0 = Pot_dens - 1000;
st_name = np.array(int(name[-3:]))
# 'time','Pressure','Temperature','O2','Salinity'
# !!! NOTE: time is off by 1 day, therefore subtracting 1 when converting to datetime format
# create dataset and combine lat,lon into one dimension with stack
prof_as_ds = xr.Dataset({'CT': (['lon', 'lat', 'z'], CT[np.newaxis,np.newaxis,:]),
'SA': (['lon', 'lat', 'z'], SA[np.newaxis,np.newaxis,:]),
'RHO': (['lon', 'lat', 'z'], RHO[np.newaxis,np.newaxis,:]),
'sigma_0': (['lon', 'lat', 'z'], sigma_0[np.newaxis,np.newaxis,:]),
'Temperature': (['lon', 'lat', 'z'], ctd_cast.Temperature.values[np.newaxis,np.newaxis,:]),
'Salinity': (['lon', 'lat', 'z'], ctd_cast.Salinity.values[np.newaxis,np.newaxis,:]),
'O2': (['lon', 'lat', 'z'], ctd_cast.O2.values[np.newaxis,np.newaxis,:]),
'Pressure': (['lon', 'lat', 'z'], ctd_cast.Pressure.values[np.newaxis,np.newaxis,:])},
coords={'lon': (np.array(ctd_cast.longitude)[...,np.newaxis]),
'lat': (np.array(ctd_cast.latitude)[...,np.newaxis]),
'station': (st_name[...,np.newaxis]),
'time': np.array(pd.to_datetime(ctd_cast.time.values[0]-1, unit='D', origin=pd.Timestamp('01-01-2016')))[...,np.newaxis],
'z': z}).stack(xy = ('lon','lat','station','time'))
z_1m = np.arange(np.ceil(z.max()),np.floor(z.min()),-1.0) # seem to need exactly the same z to make concat work
prof_as_ds = prof_as_ds.interp(z=z_1m,method='linear', kwargs={'fill_value':np.nan})
if ti == 0:
ctd_data = prof_as_ds
else:
ctd_data = xr.concat([ctd_data,prof_as_ds],dim=('xy'))
return ctd_data
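# Example usage (hedged sketch): the glob pattern is an assumption about where the
# processed CTD csv files live.
# import glob
# ctd_files = sorted(glob.glob('data/ctd/*.csv'))
# ctd_data = load_ctd_data(ctd_files)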
import pandas as pd
class cast_data_format(pd.DataFrame):
def __init__(self, data=None, index=None, columns=None, name=None,
longitude=None, latitude=None, datetime=None,
config=None, dtype=None, copy=False):
super(cast_data_format, self).__init__(data=data, index=index,
columns=columns, dtype=dtype,
copy=copy)
self.longitude = longitude
self.latitude = latitude
self.datetime = datetime
self.config = config
self.name = name
def __reduce__(self):
return self.__class__, (
pd.DataFrame(self),  # NOTE Since type(data) == DataFrame and the rest
# of the arguments of DataFrame.__init__
# are left at their defaults, the constructor
# acts as a copy constructor.
None,
None,
self.longitude,
self.latitude,
self.datetime,
self.config,
self.name,
None,
False,
)
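# Example (hedged sketch): wrap a parsed cast in the metadata-carrying frame; the
# coordinates, timestamp, and station name below are placeholders, not real metadata.
# cast = cast_data_format(data=raw_df, longitude=-24.5, latitude=63.9,
#                         datetime=pd.Timestamp('2016-06-01 12:00'), name='st001')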
def load_ladcp_csv(filename_in,skiprows=10,headerlines=6):
# modified from ctd module
from io import StringIO
import os
import pandas as pd
import numpy as np
# first get info from file header
read_header = pd.read_csv(filename_in,sep='\s+',iterator=True,header=None)
# %%
from datetime import datetime, timedelta
from pathlib import Path
import random
import pandas as pd
# %%
data = pd.read_csv("../data/base2020.csv", sep=";")
# %%
def report(state, date, last_date, last_state, age, sex):
if last_state is not None:
events.append(dict(
from_state=last_state,
to_state=state,
age=age,
sex=sex,
days=(date - last_date).days if not pd.isna(last_date) else None,
# 5.Perform Operations on Files
# 5.1: From the raw data below create a data frame
# 'first_name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
# 'last_name': ['Miller', 'Jacobson', ".", 'Milner', 'Cooze'],
# 'age': [42, 52, 36, 24, 73],
# 'preTestScore': [4, 24, 31, ".", "."],
# 'postTestScore': ["25,000", "94,000", 57, 62, 70]
# 5.2: Save the dataframe into a csv file as example.csv
# 5.3: Read the example.csv and print the data frame
# 5.4: Read the example.csv without column heading
# 5.5: Read the example.csv and make the index columns 'First Name' and 'Last Name'
# 5.6: Print the data frame in Boolean form as True or False.
# True for Null/NaN values and False for non-null values
# 5.7: Read the dataframe by skipping first 3 rows and print the data frame
# 5.8: Load a csv file while interpreting "," in strings around numbers as thousands separators.
# Check the raw data: the 'postTestScore' column uses "," as a thousands separator.
# The comma should be ignored while reading the data. This is not the default behaviour,
# so you need to pass an argument (thousands=',') to read_csv to make sure commas are ignored.
import pandas as pd
# 5.1
df = pd.DataFrame({'first_name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'last_name': ['Miller', 'Jacobson', ".", 'Milner', 'Cooze'],
'age': [42, 52, 36, 24, 73],
'preTestScore': [4, 24, 31, ".", "."],
'postTestScore': ["25,000", "94,000", 57, 62, 70]})
# 5.2
df.to_csv("example.csv")
print("*"*20)
# 5.3
df = pd.read_csv("example.csv")
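# The original solutions for 5.4-5.8 are not shown here; the lines below are a
# minimal sketch of one way to complete them. Column names follow the saved csv
# ('first_name', 'last_name'), which is an assumption about the intended index columns.
print("*"*20)
# 5.4: read example.csv without treating the first row as column headings
df_no_header = pd.read_csv("example.csv", header=None)
print(df_no_header)
print("*"*20)
# 5.5: read example.csv using the name columns as the index
df_indexed = pd.read_csv("example.csv", index_col=['first_name', 'last_name'])
print(df_indexed)
print("*"*20)
# 5.6: Boolean frame - True for Null/NaN values, False for non-null values
print(df.isnull())
print("*"*20)
# 5.7: skip the first 3 rows while reading
df_skiprows = pd.read_csv("example.csv", skiprows=3)
print(df_skiprows)
print("*"*20)
# 5.8: interpret "," in quoted numbers (e.g. "25,000") as a thousands separator
df_thousands = pd.read_csv("example.csv", thousands=',')
print(df_thousands)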
from typing import List, Tuple, Dict
import codecs
import json
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import numpy as np
import pandas as pd
import seaborn as sns
import plotly
import plotly.graph_objs as go
import plotly.figure_factory as ff
import plotly.express as px
from tom_lib.utils import save_topic_number_metrics_data
sns.set(rc={"lines.linewidth": 2})
sns.set_style("whitegrid")
mpl.use("Agg") # To be able to create figures on a headless server (no DISPLAY variable)
def split_string_sep(string: str, sep: str = None):
"""Split a string on spaces and put together with newlines
"""
if sep is None:
sep = ' '
string_new = sep.join(string.split(sep=sep)[:2])
if len(string.split()) > 2:
string_new = '\n'.join([string_new, sep.join(string.split()[2:4])])
if len(string.split()) > 4:
string_new = '\n'.join([string_new, sep.join(string.split()[4:7])])
if len(string.split()) > 7:
string_new = '\n'.join([string_new, sep.join(string.split()[7:])])
return string_new
def split_string_nchar(string: str, nchar: int = None):
"""Split a string into a given number of chunks based on number of characters
"""
if nchar is None:
nchar = 25
return '\n'.join([string[(i * nchar):(i + 1) * nchar] for i in range(int(np.ceil(len(string) / nchar)))])
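# Example (hedged sketch): how the label-wrapping helpers might be used on a long
# topic description before placing it on a plot axis; the sample label is made up.
# split_string_sep('global climate change adaptation policy analysis')
# -> words regrouped onto new lines, a few words per line
# split_string_nchar('global climate change adaptation policy analysis', nchar=20)
# -> the label broken into 20-character chunks joined by newlines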
class Visualization:
def __init__(self, topic_model, output_dir=None):
self.topic_model = topic_model
if output_dir is None:
if self.topic_model.trained:
self.output_dir = Path(f'output_{self.topic_model.model_type}_{self.topic_model.nb_topics}_topics')
else:
self.output_dir = Path(f'output_{self.topic_model.model_type}')
else:
if isinstance(output_dir, str):
self.output_dir = Path(output_dir)
elif isinstance(output_dir, Path):
self.output_dir = output_dir
else:
raise TypeError(f"'output_dir' of type {type(output_dir)} not a valid type")
if not self.output_dir.exists():
self.output_dir.mkdir(parents=True, exist_ok=True)
def plot_topic_distribution(self, doc_id, file_name='topic_distribution.png'):
file_path = self.output_dir / file_name
distribution = self.topic_model.topic_distribution_for_document(doc_id)
data_x = range(0, len(distribution))
plt.clf()
plt.xticks(np.arange(0, len(distribution), 1.0))
plt.bar(data_x, distribution, align='center')
plt.title('Topic distribution')
plt.ylabel('probability')
plt.xlabel('topic')
plt.savefig(file_path)
def plot_word_distribution(self, topic_id, nb_words=10, file_name='word_distribution.png'):
file_path = self.output_dir / file_name
data_x = []
data_y = []
distribution = self.topic_model.top_words(topic_id, nb_words)
for weighted_word in distribution:
data_x.append(weighted_word[0])
data_y.append(weighted_word[1])
plt.clf()
plt.bar(range(len(data_x)), data_y, align='center')
plt.xticks(range(len(data_x)), data_x, size='small', rotation='vertical')
plt.title('Word distribution')
plt.ylabel('probability')
plt.xlabel('word')
plt.savefig(file_path)
def plot_greene_metric(
self,
min_num_topics: int = 10,
max_num_topics: int = 20,
tao: int = 10,
step: int = 5,
top_n_words: int = 10,
sample=0.8,
verbose: int = 0,
nmf_init: str = None,
nmf_solver: str = None,
nmf_beta_loss: str = None,
nmf_max_iter: int = None,
nmf_alpha: float = None,
nmf_l1_ratio: float = None,
nmf_shuffle: bool = None,
lda_algorithm: str = None,
lda_alpha: float = None,
lda_eta: float = None,
lda_learning_method: str = None,
lda_n_jobs: int = None,
lda_n_iter: int = None,
random_state=None,
):
greene_stability = self.topic_model.greene_metric(
min_num_topics=min_num_topics,
max_num_topics=max_num_topics,
step=step,
top_n_words=top_n_words,
tao=tao,
sample=sample,
verbose=verbose,
nmf_init=nmf_init,
nmf_solver=nmf_solver,
nmf_beta_loss=nmf_beta_loss,
nmf_max_iter=nmf_max_iter,
nmf_alpha=nmf_alpha,
nmf_l1_ratio=nmf_l1_ratio,
nmf_shuffle=nmf_shuffle,
lda_algorithm=lda_algorithm,
lda_alpha=lda_alpha,
lda_eta=lda_eta,
lda_learning_method=lda_learning_method,
lda_n_jobs=lda_n_jobs,
lda_n_iter=lda_n_iter,
random_state=random_state,
)
num_topics_infer = range(min_num_topics, max_num_topics + 1, step)
plt.clf()
plt.plot(num_topics_infer, greene_stability, 'o-')
plt.xticks(num_topics_infer)
plt.title('Greene et al. metric (higher is better)')
plt.xlabel('Number of Topics')
plt.ylabel('Stability')
# find and annotate the maximum point on the plot
ymax = max(greene_stability)
xpos = greene_stability.index(ymax)
best_k = num_topics_infer[xpos]
plt.annotate(f'k={best_k}', xy=(best_k, ymax), xytext=(best_k, ymax), textcoords='offset points', fontsize=16)
file_path_fig = self.output_dir / 'greene.png'
file_path_data = self.output_dir / 'greene.tsv'
plt.savefig(file_path_fig)
save_topic_number_metrics_data(
file_path_data,
range_=(min_num_topics, max_num_topics),
data=greene_stability, step=step, metric_type='greene')
def plot_arun_metric(
self,
min_num_topics: int = 10,
max_num_topics: int = 20,
step: int = 5,
iterations: int = 10,
verbose: int = 0,
nmf_init: str = None,
nmf_solver: str = None,
nmf_beta_loss: str = None,
nmf_max_iter: int = None,
nmf_alpha: float = None,
nmf_l1_ratio: float = None,
nmf_shuffle: bool = None,
lda_algorithm: str = None,
lda_alpha: float = None,
lda_eta: float = None,
lda_learning_method: str = None,
lda_n_jobs: int = None,
lda_n_iter: int = None,
random_state=None,
):
symmetric_kl_divergence = self.topic_model.arun_metric(
min_num_topics=min_num_topics,
max_num_topics=max_num_topics,
step=step,
iterations=iterations,
verbose=verbose,
nmf_init=nmf_init,
nmf_solver=nmf_solver,
nmf_beta_loss=nmf_beta_loss,
nmf_max_iter=nmf_max_iter,
nmf_alpha=nmf_alpha,
nmf_l1_ratio=nmf_l1_ratio,
nmf_shuffle=nmf_shuffle,
lda_algorithm=lda_algorithm,
lda_alpha=lda_alpha,
lda_eta=lda_eta,
lda_learning_method=lda_learning_method,
lda_n_jobs=lda_n_jobs,
lda_n_iter=lda_n_iter,
random_state=random_state,
)
num_topics_infer = range(min_num_topics, max_num_topics + 1, step)
plt.clf()
plt.plot(num_topics_infer, symmetric_kl_divergence, 'o-')
plt.xticks(num_topics_infer)
plt.title('Arun et al. metric (lower is better)')
plt.xlabel('Number of Topics')
plt.ylabel('Symmetric KL Divergence')
# find and annotate the maximum point on the plot
ymin = min(symmetric_kl_divergence)
xpos = symmetric_kl_divergence.index(ymin)
best_k = num_topics_infer[xpos]
plt.annotate(f'k={best_k}', xy=(best_k, ymin), xytext=(best_k, ymin), textcoords='offset points', fontsize=16)
file_path_fig = self.output_dir / 'arun.png'
file_path_data = self.output_dir / 'arun.tsv'
plt.savefig(file_path_fig)
save_topic_number_metrics_data(
file_path_data,
range_=(min_num_topics, max_num_topics),
data=symmetric_kl_divergence, step=step, metric_type='arun')
def plot_brunet_metric(
self,
min_num_topics: int = 10,
max_num_topics: int = 20,
step: int = 5,
iterations: int = 10,
verbose: int = 0,
nmf_init: str = None,
nmf_solver: str = None,
nmf_beta_loss: str = None,
nmf_max_iter: int = None,
nmf_alpha: float = None,
nmf_l1_ratio: float = None,
nmf_shuffle: bool = None,
lda_algorithm: str = None,
lda_alpha: float = None,
lda_eta: float = None,
lda_learning_method: str = None,
lda_n_jobs: int = None,
lda_n_iter: int = None,
random_state=None,
):
cophenetic_correlation = self.topic_model.brunet_metric(
min_num_topics=min_num_topics,
max_num_topics=max_num_topics,
step=step,
iterations=iterations,
verbose=verbose,
nmf_init=nmf_init,
nmf_solver=nmf_solver,
nmf_beta_loss=nmf_beta_loss,
nmf_max_iter=nmf_max_iter,
nmf_alpha=nmf_alpha,
nmf_l1_ratio=nmf_l1_ratio,
nmf_shuffle=nmf_shuffle,
lda_algorithm=lda_algorithm,
lda_alpha=lda_alpha,
lda_eta=lda_eta,
lda_learning_method=lda_learning_method,
lda_n_jobs=lda_n_jobs,
lda_n_iter=lda_n_iter,
random_state=random_state,
)
num_topics_infer = range(min_num_topics, max_num_topics + 1, step)
plt.clf()
plt.plot(num_topics_infer, cophenetic_correlation, 'o-')
plt.xticks(num_topics_infer)
plt.title('Brunet et al. metric (higher is better)')
plt.xlabel('Number of Topics')
plt.ylabel('Cophenetic correlation coefficient')
# find and annotate the maximum point on the plot
ymax = max(cophenetic_correlation)
xpos = cophenetic_correlation.index(ymax)
best_k = num_topics_infer[xpos]
plt.annotate(f'k={best_k}', xy=(best_k, ymax), xytext=(best_k, ymax), textcoords='offset points', fontsize=16)
file_path_fig = self.output_dir / 'brunet.png'
file_path_data = self.output_dir / 'brunet.tsv'
plt.savefig(file_path_fig)
save_topic_number_metrics_data(
file_path_data,
range_=(min_num_topics, max_num_topics),
data=cophenetic_correlation, step=step, metric_type='brunet')
def plot_coherence_w2v_metric(
self,
min_num_topics: int = 10,
max_num_topics: int = 20,
step: int = 5,
top_n_words: int = 10,
w2v_size: int = None,
w2v_min_count: int = None,
# w2v_max_vocab_size: int = None,
w2v_max_final_vocab: int = None,
w2v_sg: int = None,
w2v_workers: int = None,
verbose: int = 0,
nmf_init: str = None,
nmf_solver: str = None,
nmf_beta_loss: str = None,
nmf_max_iter: int = None,
nmf_alpha: float = None,
nmf_l1_ratio: float = None,
nmf_shuffle: bool = None,
lda_algorithm: str = None,
lda_alpha: float = None,
lda_eta: float = None,
lda_learning_method: str = None,
lda_n_jobs: int = None,
lda_n_iter: int = None,
random_state=None,
):
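        """Plot the Word2Vec-based mean topic coherence for a range of topic counts
        (higher is better), annotate the best k, and save the figure and data to the output directory
        """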
coherence = self.topic_model.coherence_w2v_metric(
min_num_topics=min_num_topics,
max_num_topics=max_num_topics,
step=step,
top_n_words=top_n_words,
w2v_size=w2v_size,
w2v_min_count=w2v_min_count,
# w2v_max_vocab_size=w2v_max_vocab_size,
w2v_max_final_vocab=w2v_max_final_vocab,
w2v_sg=w2v_sg,
w2v_workers=w2v_workers,
verbose=verbose,
nmf_init=nmf_init,
nmf_solver=nmf_solver,
nmf_beta_loss=nmf_beta_loss,
nmf_max_iter=nmf_max_iter,
nmf_alpha=nmf_alpha,
nmf_l1_ratio=nmf_l1_ratio,
nmf_shuffle=nmf_shuffle,
lda_algorithm=lda_algorithm,
lda_alpha=lda_alpha,
lda_eta=lda_eta,
lda_learning_method=lda_learning_method,
lda_n_jobs=lda_n_jobs,
lda_n_iter=lda_n_iter,
random_state=random_state,
)
num_topics_infer = range(min_num_topics, max_num_topics + 1, step)
plt.clf()
# create the line plot
plt.plot(num_topics_infer, coherence, 'o-')
plt.xticks(num_topics_infer)
plt.title('Coherence-Word2Vec metric (higher is better)')
plt.xlabel('Number of Topics')
plt.ylabel('Mean Coherence')
# find and annotate the maximum point on the plot
ymax = max(coherence)
xpos = coherence.index(ymax)
best_k = num_topics_infer[xpos]
plt.annotate(f'k={best_k}', xy=(best_k, ymax), xytext=(best_k, ymax), textcoords='offset points', fontsize=16)
file_path_fig = self.output_dir / 'coherence_w2v.png'
file_path_data = self.output_dir / 'coherence_w2v.tsv'
plt.savefig(file_path_fig)
save_topic_number_metrics_data(
file_path_data,
range_=(min_num_topics, max_num_topics),
data=coherence, step=step, metric_type='coherence_w2v')
def plot_perplexity_metric(
self,
min_num_topics: int = 10,
max_num_topics: int = 20,
step: int = 5,
train_size: float = 0.7,
verbose: int = 0,
lda_algorithm: str = None,
lda_alpha: float = None,
lda_eta: float = None,
lda_learning_method: str = None,
lda_n_jobs: int = None,
lda_n_iter: int = None,
random_state=None,
):
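        """Plot LDA train and test perplexity for a range of topic counts
        (lower is better) and save the figure and data to the output directory
        """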
train_perplexities, test_perplexities = self.topic_model.perplexity_metric(
min_num_topics=min_num_topics,
max_num_topics=max_num_topics,
step=step,
train_size=train_size,
verbose=verbose,
lda_algorithm=lda_algorithm,
lda_alpha=lda_alpha,
lda_eta=lda_eta,
lda_learning_method=lda_learning_method,
lda_n_jobs=lda_n_jobs,
lda_n_iter=lda_n_iter,
random_state=random_state,
)
num_topics_infer = range(min_num_topics, max_num_topics + 1, step)
if (len(train_perplexities) > 0) and (len(test_perplexities) > 0):
plt.clf()
plt.plot(num_topics_infer, train_perplexities, 'o-', label='Train')
plt.plot(num_topics_infer, test_perplexities, 'o-', label='Test')
plt.xticks(num_topics_infer)
plt.title('Perplexity metric (lower is better)')
plt.xlabel('Number of Topics')
plt.ylabel('Perplexity')
plt.legend(loc='best')
file_path_fig = self.output_dir / 'perplexity.png'
file_path_data_train = self.output_dir / 'perplexity_train.tsv'
file_path_data_test = self.output_dir / 'perplexity_test.tsv'
plt.savefig(file_path_fig)
save_topic_number_metrics_data(
file_path_data_train,
range_=(min_num_topics, max_num_topics),
data=train_perplexities, step=step, metric_type='perplexity')
save_topic_number_metrics_data(
file_path_data_test,
range_=(min_num_topics, max_num_topics),
data=test_perplexities, step=step, metric_type='perplexity')
def topic_cloud(self, file_name='topic_cloud.json'):
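        """Write a topic-cloud JSON file (one node per topic with its frequency and top five words)
        to the output directory
        """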
file_path = self.output_dir / file_name
json_graph = {}
json_nodes = []
json_links = []
for i in range(self.topic_model.nb_topics):
description = []
for weighted_word in self.topic_model.top_words(i, 5):
description.append(weighted_word[1])
json_nodes.append({'name': f'topic{i}',
'frequency': self.topic_model.topic_frequency(i),
'description': f"Topic {i}: {', '.join(description)}",
'group': i})
json_graph['nodes'] = json_nodes
json_graph['links'] = json_links
with codecs.open(file_path, 'w', encoding='utf-8') as fp:
json.dump(json_graph, fp, indent=4, separators=(',', ': '))
def plot_docs_over_time(
self,
freq: str = '1YS',
count=True,
by_affil: bool = False,
ma_window: int = None,
figsize: Tuple[int, int] = (12, 8),
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Plot count of documents per frequency window, optionally by affiliation
"""
if by_affil:
groupby = [pd.Grouper(freq=freq), self.topic_model.corpus._affiliation_col]
else:
groupby = [pd.Grouper(freq=freq)]
result_count = self.topic_model.corpus.data_frame.reset_index().set_index(
self.topic_model.corpus._date_col).groupby(
by=groupby).size()
if by_affil:
result_count = result_count.unstack().fillna(0)
if not count:
total_count = self.topic_model.corpus.data_frame.reset_index().set_index(
self.topic_model.corpus._date_col).groupby(
by=[pd.Grouper(freq=freq)]).size()
result_count = result_count.div(total_count, axis=0)
if ma_window:
result_count = result_count.rolling(window=ma_window, min_periods=1, center=True).mean()
fig, ax = plt.subplots(figsize=figsize)
result_count.plot(ax=ax, kind='line')
if count:
title_str = 'Document Counts'
else:
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:.0%}'))
title_str = 'Percent of Documents'
if by_affil:
ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
title_str += ' Per Affiliation'
title_str += ' Per Year'
ax.set_title(title_str)
fig.autofmt_xdate(bottom=0.2, rotation=30, ha='center')
fig.tight_layout()
if savefig:
if count:
plot_string = 'doc_count'
else:
plot_string = 'doc_percent'
if by_affil:
affil_string = 'affil'
else:
affil_string = 'overall'
if ma_window:
ma_string = f'_{ma_window}_MA'
else:
ma_string = ''
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{affil_string}{ma_string}.{figformat}'
# save image to disk
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, ax, filename_out
def plot_docs_above_thresh(
self,
topic_cols: List[str] = None,
normalized: bool = True,
thresh: float = 0.5,
kind: str = 'count',
n_words: int = 10,
figsize: Tuple[int, int] = (12, 8),
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Plot the number of documents associated with each topic, above some threshold
kind = 'count' or 'percent'
"""
fig, ax = plt.subplots(figsize=figsize)
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
if normalized:
norm_string = 'normalized'
else:
norm_string = ''
result = np.array(
(self.topic_model.topic_distribution_for_document(normalized=normalized) >= thresh).sum(axis=0)
)[0]
if kind == 'count':
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:,.0f}'))
ax.set_ylabel('Count of Documents')
elif kind == 'percent':
result = result / self.topic_model.corpus.size
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:.1%}'))
ax.set_ylabel('Percent of Documents')
result = result[[tc in topic_cols for tc in topic_cols_all]]
sns.barplot(x=topic_cols, y=result, ax=ax)
# result = pd.DataFrame(data=result, columns=topic_cols_all)[topic_cols]
# sns.barplot(ax=ax, data=result)
title_str = f'Documents above {thresh} topic loading'
if normalized:
title_str = f'{title_str} ({norm_string})'
title_str = f'{title_str}; {self.topic_model.corpus.size:,} total docs'
ax.set_title(title_str)
fig.autofmt_xdate()
if savefig:
plot_string = 'hist_above_thresh'
topics_string = f'{len(topic_cols)}_topics'
if normalized:
norm_string = f'_{norm_string}'
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{topics_string}{norm_string}_{kind}.{figformat}'
# save image to disk
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, ax, filename_out
def plot_heatmap(
self,
topic_cols: List[str] = None,
rename: Dict = None,
normalized: bool = True,
mask_thresh: float = None,
cmap=None,
vmax: float = None,
vmin: float = None,
fmt: str = '.2f',
annot_fontsize: int = 13,
n_words: int = 10,
figsize: Tuple[int, int] = None,
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Plot a heatmap of topic-topic Pearson correlation coefficient values
"""
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
if normalized:
norm_string = 'normalized'
else:
norm_string = ''
corr = pd.DataFrame(
data=np.corrcoef(self.topic_model.topic_distribution_for_document(normalized=normalized).T),
columns=topic_cols_all,
index=topic_cols_all,
)
corr = corr.loc[topic_cols, topic_cols]
if rename:
corr = corr.rename(columns=rename, index=rename)
topic_cols = list(rename.values())
if mask_thresh is None:
mask_thresh = 0
if figsize is None:
figsize = (max(25, min(len(topic_cols) // 1.1, 25)), max(15, min(len(topic_cols) // 1.2, 15)))
if cmap is None:
cmap = sns.diverging_palette(220, 10, as_cmap=True)
if vmax is None:
vmax = corr.max().max()
# vmax=0.25
# vmin=-vmax
if vmin is None:
vmin = corr.min().min()
fig, ax = plt.subplots(figsize=figsize)
ax = sns.heatmap(corr, ax=ax, center=0, annot=True, fmt=fmt, annot_kws={'fontsize': annot_fontsize},
vmin=vmin, vmax=vmax,
mask=((corr > -mask_thresh) & (corr < mask_thresh)),
cmap=cmap,
cbar_kws={'label': 'Pearson Correlation Coefficient'},
# square=True,
)
ax.hlines(range(1, corr.shape[0]), *ax.get_xlim(), lw=0.5)
ax.vlines(range(1, corr.shape[1]), *ax.get_ylim(), lw=0.5)
ax.set_xticklabels(ax.get_xticklabels(), rotation=30, ha='right', fontsize=18)
ax.set_yticklabels(ax.get_yticklabels(), fontsize=18)
if savefig:
plot_string = 'topic-topic_corr'
topics_string = f'{len(topic_cols)}_topics'
if normalized:
norm_string = f'_{norm_string}'
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{topics_string}{norm_string}.{figformat}'
# save image to disk
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, ax, filename_out
def plot_clustermap(
self,
topic_cols: List[str] = None,
rename: Dict = None,
normalized: bool = True,
mask_thresh: float = None,
cmap=None,
vmax: float = None,
vmin: float = None,
fmt: str = '.2f',
annot_fontsize: int = 13,
n_words: int = 10,
figsize: Tuple[int, int] = None,
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
metric: str = None,
method: str = None,
):
"""Plot a hierarchical clustermap of topic-topic Pearson correlation coefficient values
(computed with np.corrcoef). Plot is made with Seaborn's clustermap.
"""
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
if normalized:
norm_string = 'normalized'
else:
norm_string = ''
corr = pd.DataFrame(
data=np.corrcoef(self.topic_model.topic_distribution_for_document(normalized=normalized).T),
columns=topic_cols_all,
index=topic_cols_all,
)
corr = corr.loc[topic_cols, topic_cols]
if rename:
corr = corr.rename(columns=rename, index=rename)
topic_cols = list(rename.values())
if mask_thresh is None:
mask_thresh = 0
if figsize is None:
figsize = (max(25, min(len(topic_cols) // 1.1, 25)), max(15, min(len(topic_cols) // 1.2, 15)))
if cmap is None:
cmap = sns.diverging_palette(220, 10, as_cmap=True)
if vmax is None:
vmax = corr.max().max()
# vmax=0.25
# vmin=-vmax
if vmin is None:
vmin = corr.min().min()
if metric is None:
metric = 'euclidean'
# metric = 'correlation'
if method is None:
# method = 'complete'
method = 'average'
# method = 'ward'
g = sns.clustermap(
corr,
center=0, annot=True, fmt=fmt, annot_kws={'fontsize': annot_fontsize},
metric=metric,
method=method,
vmin=vmin, vmax=vmax,
mask=((corr > -mask_thresh) & (corr < mask_thresh)),
cmap=cmap,
figsize=figsize,
cbar_kws={'label': '\n'.join('Pearson Correlation Coefficient'.split())},
)
g.ax_heatmap.hlines(range(1, corr.shape[0]), *g.ax_heatmap.get_xlim(), lw=0.5)
g.ax_heatmap.vlines(range(1, corr.shape[1]), *g.ax_heatmap.get_ylim(), lw=0.5)
g.ax_heatmap.set_xticklabels(g.ax_heatmap.get_xticklabels(), rotation=30, ha='right', fontsize=18)
g.ax_heatmap.set_yticklabels(g.ax_heatmap.get_yticklabels(), fontsize=18)
if savefig:
plot_string = 'topic-topic_corr_grouped'
topics_string = f'{len(topic_cols)}_topics'
if normalized:
norm_string = f'_{norm_string}'
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{topics_string}{norm_string}'
filename_out_img = f'{filename_out}.{figformat}'
filename_out_data = f'{filename_out}.csv'
# save image to disk
g.savefig(self.output_dir / filename_out_img, dpi=dpi, transparent=False, bbox_inches='tight')
# save values to csv
corr.iloc[g.dendrogram_row.reordered_ind, g.dendrogram_col.reordered_ind].to_csv(self.output_dir / filename_out_data)
plt.close('all')
else:
filename_out_img = None
plt.show()
return g, filename_out_img
def plot_topic_loading_hist(
self,
topic_cols: List[str] = None,
rename: Dict = None,
normalized: bool = True,
bins=None,
ncols: int = None,
n_words: int = 10,
nchar_title: int = None,
figsize_scale: int = None,
figsize: Tuple[int, int] = None,
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Plot histogram of document loading distributions per topic
"""
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
if normalized:
norm_string = 'normalized'
if bins is None:
bins = np.arange(0, 1.05, 0.05)
else:
norm_string = ''
if bins is None:
bins = 10
_df = pd.DataFrame(
data=self.topic_model.topic_distribution_for_document(normalized=normalized),
columns=topic_cols_all,
)
_df = _df[topic_cols]
if rename:
_df = _df.rename(columns=rename)
topic_cols = list(rename.values())
if ncols is None:
ncols = 5
if ncols > len(topic_cols):
ncols = len(topic_cols)
nrows = int(np.ceil(len(topic_cols) / ncols))
if figsize_scale is None:
figsize_scale = 3
if figsize is None:
figsize = (ncols * figsize_scale, nrows * figsize_scale)
fig, axes = plt.subplots(
figsize=figsize,
nrows=nrows, ncols=ncols,
sharey=True,
sharex=True,
)
for topic_col, ax in zip(topic_cols, axes.ravel()):
_df[topic_col].plot(ax=ax, kind='hist', bins=bins)
title = split_string_nchar(topic_col, nchar=nchar_title)
ax.set_title(title)
xlabel = 'Topic Loading'
if normalized:
ax.set_xlabel(f'{xlabel}\n({norm_string})')
ax.set_xlim((0, 1))
else:
ax.set_xlabel(xlabel)
# ax.yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:,.0f}'))
# ax.set_yticklabels([f'{int(x):,}' for x in ax.get_yticks().tolist()]);
# show xticklabels on all axes
for topic_col, ax in zip(topic_cols, axes.ravel()):
plt.setp(ax.get_xticklabels(), visible=True)
        # remove unused axes
for i in range(len(topic_cols), nrows * ncols):
axes.ravel()[i].axis('off')
fig.tight_layout()
if savefig:
plot_string = 'topic_loading_hist'
topics_string = f'{len(topic_cols)}_topics'
if normalized:
norm_string = f'_{norm_string}'
else:
norm_string = ''
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{topics_string}{norm_string}.{figformat}'
# save image to disk
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, axes, filename_out
def plot_topic_loading_boxplot(
self,
topic_cols: List[str] = None,
rename: Dict = None,
normalized: bool = True,
n_words: int = 10,
ylim: Tuple[float, float] = None,
figsize: Tuple[int, int] = (12, 8),
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Marginal distributions of topic loadings
Plot Boxplot of document loading distributions per topic
"""
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
fig, ax = plt.subplots(figsize=figsize)
if normalized:
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:.1%}'))
norm_string = 'normalized'
ax.set_ylabel(f'Topic Loading ({norm_string})')
else:
norm_string = ''
ax.set_ylabel('Topic Loading (absolute)')
_df = pd.DataFrame(
data=self.topic_model.topic_distribution_for_document(normalized=normalized),
columns=topic_cols_all,
)
_df = _df[topic_cols]
if rename:
_df = _df.rename(columns=rename)
topic_cols = list(rename.values())
ax = sns.boxplot(ax=ax, data=_df)
ax.set_title('Topic Loading Distribution (boxplot)')
if ylim:
ax.set_ylim(ylim)
fig.autofmt_xdate()
if savefig:
plot_string = 'topic_loading_boxplot'
topics_string = f'{len(topic_cols)}_topics'
if normalized:
norm_string = f'_{norm_string}'
else:
norm_string = ''
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{topics_string}{norm_string}.{figformat}'
# save image to disk
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, ax, filename_out
def plot_topic_loading_barplot(
self,
topic_cols: List[str] = None,
rename: Dict = None,
normalized: bool = True,
n_words: int = 10,
ylim: Tuple[float, float] = None,
figsize: Tuple[int, int] = (12, 8),
savefig: bool = False,
dpi: int = 72,
figformat: str = 'png',
):
"""Marginal distributions of topic loadings
Plot Barplot of document loading distributions per topic
"""
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
if not topic_cols:
topic_cols = topic_cols_all
fig, ax = plt.subplots(figsize=figsize)
if normalized:
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:.1%}'))
norm_string = 'normalized'
ax.set_ylabel(f'Average Topic Loading ({norm_string})')
else:
norm_string = ''
ax.set_ylabel('Average Topic Loading (absolute)')
_df = pd.DataFrame(
data=self.topic_model.topic_distribution_for_document(normalized=normalized),
columns=topic_cols_all,
)
_df = _df[topic_cols]
if rename:
_df = _df.rename(columns=rename)
topic_cols = list(rename.values())
ax = sns.barplot(ax=ax, data=_df, estimator=np.mean)
ax.set_title('Topic Loading Distribution (barplot; 95% CI of the mean)')
if ylim:
ax.set_ylim(ylim)
fig.autofmt_xdate()
if savefig:
plot_string = 'topic_loading_barplot'
topics_string = f'{len(topic_cols)}_topics'
if normalized:
norm_string = f'_{norm_string}'
else:
norm_string = ''
filename_out = f'{self.topic_model.corpus.name}_{plot_string}_{topics_string}{norm_string}.{figformat}'
# save image to disk
fig.savefig(self.output_dir / filename_out, dpi=dpi, transparent=False, bbox_inches='tight')
plt.close('all')
else:
filename_out = None
plt.show()
return fig, ax, filename_out
def plot_one_topic_over_time_count(
self,
topic_col: str,
rename: Dict = None,
normalized: bool = True,
thresh: float = 0.1,
freq: str = '1YS',
n_words: int = 10,
):
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
idx = topic_cols_all.index(topic_col)
addtl_cols = [self.topic_model.corpus._date_col]
if normalized:
norm_string = 'normalized'
else:
norm_string = ''
_df = pd.DataFrame(
data=self.topic_model.topic_distribution_for_document(normalized=normalized)[:, idx],
columns=[topic_col],
)
if rename:
_df = _df.rename(columns=rename)
            topic_col = rename.get(topic_col, topic_col)  # keep a single (possibly renamed) column name
_df = pd.merge(_df, self.topic_model.corpus.data_frame[addtl_cols], left_index=True, right_index=True)
_df = _df.reset_index().set_index(self.topic_model.corpus._date_col)
result = _df[_df[topic_col] >= thresh].groupby(
pd.Grouper(freq=freq))[topic_col].size()
if result.empty:
print(f"No documents >= {thresh}")
fig = None
ax = None
else:
fig, ax = plt.subplots()
result.plot(ax=ax, kind='line', marker='o')
ax.set_title(topic_col)
ylabel = f"# of year's documents >= {thresh}"
if normalized:
# ax.set_ylim((-0.05, 1.05))
ylabel = f"{ylabel}\n({norm_string})"
ax.set_ylabel(ylabel)
ax.set_xlabel("Publication year")
plt.show()
return fig, ax
def plot_one_topic_over_time_percent(
self,
topic_col: str,
rename: Dict = None,
normalized: bool = True,
thresh: float = 0.1,
freq: str = '1YS',
n_words: int = 10,
):
topic_cols_all = [' '.join(tw) for tw in self.topic_model.top_words_topics(num_words=n_words)]
idx = topic_cols_all.index(topic_col)
addtl_cols = [self.topic_model.corpus._date_col]
if normalized:
norm_string = 'normalized'
else:
norm_string = ''
_df = pd.DataFrame(
data=self.topic_model.topic_distribution_for_document(normalized=normalized)[:, idx],
columns=[topic_col],
)
if rename:
_df = _df.rename(columns=rename)
            topic_col = rename.get(topic_col, topic_col)  # keep a single (possibly renamed) column name
        _df = pd.merge(_df, self.topic_model.corpus.data_frame[addtl_cols], left_index=True, right_index=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 15:16:47 2017
@author: wasifaahmed
"""
from flask import Flask, flash, render_template, request, Response, redirect, url_for, send_from_directory, jsonify, session
import json
from datetime import datetime, timedelta, date
from sklearn.cluster import KMeans
import numpy as np
from PIL import Image
from flask_sqlalchemy import SQLAlchemy
import matplotlib.image as mpimg
from io import StringIO
from skimage import data, exposure, img_as_float, io, color
import scipy
from scipy import ndimage
import time
import tensorflow as tf
import os, sys
import shutil
import pandas as pd
from model import *
from sqlalchemy.sql import text
from sqlalchemy import *
from forms import *
import math
import csv
from sqlalchemy.orm import load_only, sessionmaker, scoped_session
from numpy import genfromtxt
from sqlalchemy.ext.serializer import loads, dumps
from flask_bootstrap import Bootstrap
graph = tf.Graph()
with graph.as_default():
sess = tf.Session(graph=graph)
init_op = tf.global_variables_initializer()
pointsarray=[]
def load_model():
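    """Restore the trained TensorFlow graph and weights from the saved checkpoint into the shared session."""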
sess.run(init_op)
saver = tf.train.import_meta_graph('E:/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727.meta')
#saver = tf.train.import_meta_graph('/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727.meta')
print('The model is loading...')
#saver.restore(sess, "/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727")
saver.restore(sess, 'E:/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727')
print('loaded...')
pass
engine = create_engine('postgresql://postgres:user@localhost/postgres')
Session = scoped_session(sessionmaker(bind=engine))
mysession = Session()
app = Flask(__name__)
app.config.update(
DEBUG=True,
SECRET_KEY='\<KEY>')
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:user@localhost/fras_production'
db.init_app(app)
Bootstrap(app)
@app.after_request
def add_header(response):
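    """Disable client-side caching on every response (assumed intent: always serve fresh pages and target images)."""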
response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
response.headers['Cache-Control'] = 'public, max-age=0'
return response
@app.route('/',methods=['GET', 'POST'])
def login():
form = LoginForm()
return render_template('forms/login.html', form=form)
@app.route('/home',methods=['GET', 'POST'])
def index():
return render_template('pages/home.html')
@app.route('/detail_setup/')
def Detail_Setup():
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
firer_1 = [row.service_id for row in Shooter.query.all()]
return render_template('pages/detail_setup.html',
data=selection,
firer_1=firer_1)
@app.route('/auto_setup/')
def auto_setup():
drop=[]
curdate=time.strftime("%Y-%m-%d")
form=BulkRegistrationForm()
selection_2=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
selection=TGroup.query.distinct(TGroup.group_no).filter(TGroup.date==curdate).all()
return render_template('pages/auto_setup.html',
data=selection, data_2=selection_2,form=form)
@app.route('/auto_setup_1/')
def auto_setup_1():
drop=[]
curdate=time.strftime("%Y-%m-%d")
form=BulkRegistrationForm()
selection_2=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
selection=TGroup.query.distinct(TGroup.group_no).all()
return render_template('pages/auto_setup_1.html',
data=selection, data_2=selection_2,form=form)
@app.route('/group_gen/',methods=['GET', 'POST'])
def group_gen():
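    """Return the eight target service numbers configured for the selected group as JSON."""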
da_1=None
da_2=None
da_3=None
da_4=None
da_5=None
da_6=None
da_7=None
da_8=None
if request.method == "POST":
data = request.get_json()
group=data['data']
session['group']=group
data=TGroup.query.filter(TGroup.group_no==group).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
return jsonify(data1=da_1,
data2=da_2,
data3=da_3,
data4=da_4,
data5=da_5,
data6=da_6,
data7=da_7,
data8=da_8
)
@app.route('/detail_exitence_1/',methods=['GET', 'POST'])
def detail_exitence_1():
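    """Copy an existing detail into the temporary shooting table and return target-1 firer info,
    session/paper data and recent results, tendencies and grouping lengths as JSON."""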
ra_1=None
da_1=None
detail=None
service_id_1=None
session=None
paper=None
set_no=None
cant=None
if request.method == "POST":
data = request.get_json()
detail=data['data']
dt=time.strftime("%Y-%m-%d")
data=db.session.query(Session_Detail).filter(Session_Detail.detail_no==detail).scalar()
db.session.query(TShooting).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=data.session_id,
detail_no=data.detail_no,
target_1_id=data.target_1_id,
target_2_id=data.target_2_id,
target_3_id=data.target_3_id,
target_4_id=data.target_4_id,
target_5_id=data.target_5_id,
target_6_id=data.target_6_id,
target_7_id=data.target_7_id,
target_8_id=data.target_8_id,
paper_ref=data.paper_ref,
set_no=data.set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
res=[]
ten=[]
gp_len=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==data.target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==data.target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==data.target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(ele6)
da_1=db.session.query(Shooter.name).filter(Shooter.id==data.target_1_id).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.id==data.target_1_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
ra_1_id=db.session.query(Shooter.rank_id).filter(Shooter.id==data.target_1_id).scalar()
ra_1 = db.session.query(Rank.name).filter(Rank.id==ra_1_id).scalar()
session=db.session.query(TShooting.session_id).scalar()
paper=db.session.query(TShooting.paper_ref).scalar()
set_no=db.session.query(TShooting.set_no).scalar()
service_id_1 = db.session.query(Shooter.service_id).filter(Shooter.id==data.target_1_id).scalar()
return jsonify(
data1=da_1,
ra_1=ra_1,
detail=detail,
service_id_1=service_id_1,
session=session,
paper=paper,
set_no=set_no,
cant=cant,
res=res,
ten=ten,
gp_len=gp_len
)
@app.route('/generate_ref/' ,methods=['GET', 'POST'])
def generate_ref():
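    """Return 0 when a new target paper is requested, otherwise the currently stored paper reference."""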
g=None
if request.method == "POST":
data = request.get_json()
paper_ref =data['data']
if (paper_ref == 'New'):
g=0
else:
obj=TPaper_ref.query.scalar()
g= obj.paper_ref
return jsonify(gen=int(g))
@app.route('/create_detail_target_2/', methods=['GET', 'POST'])
def create_detail_target_2():
curdate=time.strftime("%Y-%m-%d")
firer_1 = [row.service_id for row in Shooter.query.all()]
detail_data=TShooting.query.scalar()
return render_template('pages/create_detail_target_2.html',
detail_data=detail_data,
firer_1=firer_1
)
@app.route('/save_target_2/', methods=['GET', 'POST'])
def save_target_2():
r=request.form['tag']
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
ses=Session_Detail.query.first()
ses.target_2_id=r_id
db.session.commit()
temp =TShooting.query.first()
temp.target_2_id=r_id
db.session.commit()
return redirect(url_for('individual_score_target_2'))
@app.route('/create_detail_target_1/', methods=['GET', 'POST'])
def create_detail_target_1():
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date==curdate).all()
firer_1 = [row.service_id for row in Shooter.query.all()]
return render_template('pages/create_detail_target_1.html',
data=selection,
firer_1=firer_1
)
@app.route('/create_session/', methods=['GET', 'POST'])
def create_session():
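    """Create a shooting session from the submitted form (range, firearm, ammunition, distance, etc.)
    and continue to detail creation for target 1."""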
try:
data = Shooter.query.all()
rang= Range.query.all()
firearms = Firearms.query.all()
ammunation = Ammunation.query.all()
rang_name = request.form.get('comp_select_4')
fire_name = request.form.get('comp_select_5')
ammu_name = request.form.get('comp_select_6')
form=SessionForm()
if(rang_name is None):
range_id=999
fire_id=999
ammu_id=999
else:
range_id = db.session.query(Range.id).filter(Range.name==rang_name).scalar()
fire_id = db.session.query(Firearms.id).filter(Firearms.name==fire_name).scalar()
ammu_id = db.session.query(Ammunation.id).filter(Ammunation.name==ammu_name).scalar()
if form.validate_on_submit():
shooting=Shooting_Session(
date=form.date.data.strftime('%Y-%m-%d'),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=fire_id,
ammunation_id=ammu_id,
target_distance = form.target_distance.data,
weather_notes = form.weather_notes.data,
comments = form.comments.data,
session_no=form.session_no.data,
occasion=form.occ.data
)
db.session.add(shooting)
db.session.commit()
return redirect(url_for('create_detail_target_1'))
except Exception as e:
return redirect(url_for('error5_505.html'))
return render_template('forms/shooting_form.html', form=form, data =data ,rang=rang , firearmns=firearms, ammunation = ammunation)
@app.route('/monthly_report/',methods=['GET','POST'])
def monthly_report():
year=None
month=None
date_start=None
try:
        if request.method == 'POST':
            month = request.form.get('comp_select')
            year = datetime.now().year
            # first and last day of each month (February taken as 28 days); unknown values fall back to December
            month_ranges = {
                'January': ('-01-01', '-01-31'),
                'February': ('-02-01', '-02-28'),
                'March': ('-03-01', '-03-31'),
                'April': ('-04-01', '-04-30'),
                'May': ('-05-01', '-05-31'),
                'June': ('-06-01', '-06-30'),
                'July': ('-07-01', '-07-31'),
                'August': ('-08-01', '-08-31'),
                'September': ('-09-01', '-09-30'),
                'October': ('-10-01', '-10-31'),
                'November': ('-11-01', '-11-30'),
                'December': ('-12-01', '-12-31'),
            }
            dt_start, dt_end = month_ranges.get(month, ('-12-01', '-12-31'))
            date_start = datetime.strptime(str(year) + dt_start, "%Y-%m-%d")
            date_end = datetime.strptime(str(year) + dt_end, "%Y-%m-%d")
            # grouping results joined with the firer, rank and matching MPI record for the selected month
            dat1 = db.session.query(
                Grouping.date, Shooter.service_id, Rank.name, Shooter.name.label('firer'),
                Shooter.unit, Shooter.brigade, Grouping.detail_no, Grouping.result,
                Grouping.grouping_length_f, MPI.tendency_text
            ).filter(
                Grouping.date.between(date_start, date_end),
                Grouping.firer_id == Shooter.id,
                Shooter.rank_id == Rank.id,
                Grouping.date == MPI.date,
                Grouping.session_id == MPI.session_id,
                Grouping.firer_id == MPI.firer_id,
                Grouping.detail_no == MPI.detail_no,
                Grouping.target_no == MPI.target_no,
                Grouping.spell_no == MPI.spell_no,
                Grouping.paper_ref == MPI.paper_ref
            ).all()
return render_template('pages/monthly_report.html', dat1=dat1 ,month=month)
except Exception as e:
return render_template('errors/month_session.html')
return render_template('pages/monthly_report.html')
@app.route('/save_target_1/', methods=['GET', 'POST'])
def save_target_1():
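    """Create a detail with a single firer on target 1 and refresh the temporary shooting
    and paper-reference tables."""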
ref_1=None
try:
if request.method == 'POST':
detail_no = request.form['game_id_1']
r=request.form['tag']
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r2_id=999
r3_id=999
r4_id=999
r5_id=999
r6_id=999
r7_id=999
r8_id=999
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
ref_1 = None
paper=db.session.query(TPaper_ref).scalar()
if(ref == ""):
ref_1=paper.paper_ref
else:
ref_1=ref
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).delete()
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
except Exception as e:
return redirect(url_for('error_target_1'))
return redirect(url_for('individual_score_target_1'))
@app.route('/FRAS/', methods=['GET', 'POST'])
def load ():
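    """Create a detail for up to eight targets from the manual setup form, reject duplicate firers,
    and mirror it into the temporary shooting table before image processing."""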
try:
ref_1=None
if request.method == 'POST':
detail_no = request.form['game_id_1']
tmp_list = []
duplicate = False
r=request.form['tag']
if (r== ""):
r_id = 999
else:
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r1=request.form['tag_1']
if(r1== ""):
r1_id=999
else:
r1_object=Shooter.query.filter(Shooter.service_id==r1).scalar()
r1_id=r1_object.id
r2=request.form['tag_2']
if (r2==""):
r2_id=999
else:
r2_object=Shooter.query.filter(Shooter.service_id==r2).scalar()
r2_id=r2_object.id
r3=request.form['tag_3']
if(r3==""):
r3_id=999
else:
r3_object=Shooter.query.filter(Shooter.service_id==r3).scalar()
r3_id=r3_object.id
r4=request.form['tag_4']
if(r4==""):
r4_id=999
else:
r4_object=Shooter.query.filter(Shooter.service_id==r4).scalar()
r4_id=r4_object.id
r5=request.form['tag_5']
if(r5==""):
r5_id=999
else:
r5_object=Shooter.query.filter(Shooter.service_id==r5).scalar()
r5_id=r5_object.id
r6=request.form['tag_6']
if(r6==""):
r6_id=999
else:
r6_object=Shooter.query.filter(Shooter.service_id==r6).scalar()
r6_id=r6_object.id
r7=request.form['tag_7']
if(r7== ""):
r7_id=999
else:
r7_object=Shooter.query.filter(Shooter.service_id==r7).scalar()
r7_id=r7_object.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
if ref == None or ref =="":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
print("Inside ref _4 else")
ref_1=ref
print(ref_1)
print("Inside ref _4 else 1")
if(int(set_no)>5):
print("Inside ref _5 else")
return redirect(url_for('paper_duplicate_error'))
else:
print("Inside TPaper_ref")
db.session.query(TPaper_ref).delete()
print("Inside TPaper_ref")
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
print("Inside load 3")
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
print("temp1")
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
print("temp")
temp=db.session.query(TShooting.save_flag).scalar()
print(temp)
if(temp is None):
print("Inside the temp if")
print(sess)
print(detail_no)
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
print(Tdetail_shots)
print("Tdetail_shots")
db.session.add(Tdetail_shots)
db.session.commit()
print(""
)
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
print(e)
return redirect(url_for('error_2'))
return redirect(url_for('image_process'))
@app.route('/FRAS_1/', methods=['GET', 'POST'])
def load_1 ():
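    """Create a detail from a pre-defined target group (auto setup), reject duplicate firers,
    and mirror it into the temporary shooting table before showing the detail view."""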
ref_1=None
try:
if request.method == 'POST':
print("This is inside Post")
detail_no = request.form['game_id_1']
print("this is detail_no")
print(detail_no)
tmp_list = []
duplicate = False
gr=session.get('group',None)
data=TGroup.query.filter(TGroup.group_no==gr).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
if(da_1==""):
r_id=999
else:
r=Shooter.query.filter(Shooter.service_id==da_1).scalar()
r_id=r.id
if(da_2==""):
r1_id=999
else:
r1=Shooter.query.filter(Shooter.service_id==da_2).scalar()
r1_id=r1.id
if(da_3==""):
r2_id=999
else:
r2=Shooter.query.filter(Shooter.service_id==da_3).scalar()
r2_id=r2.id
if(da_4==""):
r3_id=999
else:
r3=Shooter.query.filter(Shooter.service_id==da_4).scalar()
r3_id=r3.id
if(da_5==""):
r4_id=999
else:
r4=Shooter.query.filter(Shooter.service_id==da_5).scalar()
r4_id=r4.id
if(da_6==""):
r5_id=999
else:
r5=Shooter.query.filter(Shooter.service_id==da_6).scalar()
r5_id=r5.id
if(da_7==""):
r6_id=999
else:
r6=Shooter.query.filter(Shooter.service_id==da_7).scalar()
r6_id=r6.id
if(da_8==""):
r7_id=999
else:
r7=Shooter.query.filter(Shooter.service_id==da_8).scalar()
r7_id=r7.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
print(tmp_list)
if ref == None or ref =="":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
ref_1=ref
check=TPaper_ref.query.scalar()
cses=check.session_no
det=check.detail_no
if(int(set_no)>5):
return redirect(url_for('paper_duplicate_error'))
else:
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
return redirect(url_for('error_102'))
return redirect(url_for('detail_view'))
@app.route('/FRAS_2/', methods=['GET', 'POST'])
def load_2 ():
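    """Create a detail from a pre-defined target group (auto setup), reject duplicate firers,
    and mirror it into the temporary shooting table before image processing."""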
ref_1=None
try:
if request.method == 'POST':
print("This is inside Post")
detail_no = request.form['game_id_1']
print("this is detail_no")
print(detail_no)
tmp_list = []
duplicate = False
gr=session.get('group',None)
data=TGroup.query.filter(TGroup.group_no==gr).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
if(da_1==""):
r_id=999
else:
r=Shooter.query.filter(Shooter.service_id==da_1).scalar()
r_id=r.id
if(da_2==""):
r1_id=999
else:
r1=Shooter.query.filter(Shooter.service_id==da_2).scalar()
r1_id=r1.id
if(da_3==""):
r2_id=999
else:
r2=Shooter.query.filter(Shooter.service_id==da_3).scalar()
r2_id=r2.id
if(da_4==""):
r3_id=999
else:
r3=Shooter.query.filter(Shooter.service_id==da_4).scalar()
r3_id=r3.id
if(da_5==""):
r4_id=999
else:
r4=Shooter.query.filter(Shooter.service_id==da_5).scalar()
r4_id=r4.id
if(da_6==""):
r5_id=999
else:
r5=Shooter.query.filter(Shooter.service_id==da_6).scalar()
r5_id=r5.id
if(da_7==""):
r6_id=999
else:
r6=Shooter.query.filter(Shooter.service_id==da_7).scalar()
r6_id=r6.id
if(da_8==""):
r7_id=999
else:
r7=Shooter.query.filter(Shooter.service_id==da_8).scalar()
r7_id=r7.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
print(tmp_list)
if ref == None or ref =="":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
ref_1=ref
check=TPaper_ref.query.scalar()
cses=check.session_no
det=check.detail_no
if(int(set_no)>5):
return redirect(url_for('paper_duplicate_error'))
else:
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
print(e)
return redirect(url_for('error'))
return redirect(url_for('image_process'))
@app.route('/detail_view/', methods=['GET', 'POST'])
def detail_view():
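    """List all session details with the shooter resolved for each of the eight targets."""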
detail = Session_Detail.query.all()
for details in detail:
details.target_1=Shooter.query.filter(Shooter.id==details.target_1_id).scalar()
details.target_2=Shooter.query.filter(Shooter.id==details.target_2_id).scalar()
details.target_3=Shooter.query.filter(Shooter.id==details.target_3_id).scalar()
details.target_4=Shooter.query.filter(Shooter.id==details.target_4_id).scalar()
details.target_5=Shooter.query.filter(Shooter.id==details.target_5_id).scalar()
details.target_6=Shooter.query.filter(Shooter.id==details.target_6_id).scalar()
details.target_7=Shooter.query.filter(Shooter.id==details.target_7_id).scalar()
details.target_8=Shooter.query.filter(Shooter.id==details.target_8_id).scalar()
return render_template('pages/detail_view.html',detail=detail)
@app.route('/detail_view/detail/<id>', methods=['GET', 'POST'])
def view_detail(id):
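    """Show a single session detail with the shooter resolved for each of the eight targets."""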
detail=Session_Detail.query.filter(Session_Detail.id == id)
for details in detail:
details.target_1=Shooter.query.filter(Shooter.id==details.target_1_id).scalar()
details.target_2=Shooter.query.filter(Shooter.id==details.target_2_id).scalar()
details.target_3=Shooter.query.filter(Shooter.id==details.target_3_id).scalar()
details.target_4=Shooter.query.filter(Shooter.id==details.target_4_id).scalar()
details.target_5=Shooter.query.filter(Shooter.id==details.target_5_id).scalar()
details.target_6=Shooter.query.filter(Shooter.id==details.target_6_id).scalar()
details.target_7=Shooter.query.filter(Shooter.id==details.target_7_id).scalar()
details.target_8=Shooter.query.filter(Shooter.id==details.target_8_id).scalar()
return render_template('pages/detail_view_id.html',data=detail)
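# Edit a saved session detail: checks the edited lanes for duplicate firers, then refreshes TPaper_ref
# and, unless the current detail is already saved, the live TShooting row.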
@app.route('/detail_view/edit/<id>', methods=['GET', 'POST'])
def view_detail_edit(id):
try:
detail=Session_Detail.query.filter(Session_Detail.id == id).first()
form=DetailEditForm(obj=detail)
if form.validate_on_submit():
tmp_list = []
target_1=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
tmp_list.append(target_1.id)
target_2=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
tmp_list.append(target_2.id)
target_3=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
tmp_list.append(target_3.id)
target_4=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
tmp_list.append(target_4.id)
target_5=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
tmp_list.append(target_5.id)
target_6=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
tmp_list.append(target_6.id)
target_7=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
tmp_list.append(target_7.id)
target_8=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
tmp_list.append(target_8.id)
duplicate = False
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
                        pass  # lanes sharing the placeholder id 999 are empty; do not reset the duplicate flag
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
detail.date=form.date.data
detail.session_id=form.session_id.data
detail.detail_no=form.detail_no.data
detail.paper_ref=form.paper_ref.data
detail.set_no=form.set_no.data
target_1_obj=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
detail.target_1_id=target_1_obj.id
target_2_obj=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
detail.target_2_id=target_2_obj.id
target_3_obj=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
detail.target_3_id=target_3_obj.id
target_4_obj=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
detail.target_4_id=target_4_obj.id
target_5_obj=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
detail.target_5_id=target_5_obj.id
target_6_obj=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
detail.target_6_id=target_6_obj.id
target_7_obj=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
detail.target_7_id=target_7_obj.id
target_8_obj=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
detail.target_8_id=target_8_obj.id
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_edit = TPaper_ref(
paper_ref=form.paper_ref.data,
detail_no=form.detail_no.data,
session_no=form.session_id.data
)
db.session.add(ref_edit)
db.session.commit()
target_1_obj=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
target_2_obj=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
target_3_obj=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
target_4_obj=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
target_5_obj=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
target_6_obj=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
target_7_obj=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
target_8_obj=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting.save_flag==1):
return redirect(url_for('data_save'))
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_edit =TShooting(
date=form.date.data,
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=form.session_id.data,
detail_no=form.detail_no.data,
target_1_id=target_1_obj.id,
target_2_id=target_2_obj.id,
target_3_id=target_3_obj.id,
target_4_id=target_4_obj.id,
target_5_id=target_5_obj.id,
target_6_id=target_6_obj.id,
target_7_id=target_7_obj.id,
target_8_id=target_8_obj.id,
paper_ref=form.paper_ref.data,
set_no=form.set_no.data,
save_flag=0
)
db.session.add(Tdetail_edit)
db.session.commit()
return redirect(url_for('detail_view'))
form.date.data=detail.date
form.session_id.data=detail.session_id
form.detail_no.data=detail.detail_no
form.paper_ref.data=detail.paper_ref
form.set_no.data=detail.set_no
        name_1= Shooter.query.filter(Shooter.id==detail.target_1_id).scalar()
        form.target_1_service.data=name_1.service_id
        name_2= Shooter.query.filter(Shooter.id==detail.target_2_id).scalar()
        form.target_2_service.data=name_2.service_id
        name_3= Shooter.query.filter(Shooter.id==detail.target_3_id).scalar()
        form.target_3_service.data=name_3.service_id
        name_4= Shooter.query.filter(Shooter.id==detail.target_4_id).scalar()
        form.target_4_service.data=name_4.service_id
        name_5=Shooter.query.filter(Shooter.id==detail.target_5_id).scalar()
        form.target_5_service.data=name_5.service_id
        name_6=Shooter.query.filter(Shooter.id==detail.target_6_id).scalar()
        form.target_6_service.data=name_6.service_id
        name_7=Shooter.query.filter(Shooter.id==detail.target_7_id).scalar()
        form.target_7_service.data=name_7.service_id
        name_8=Shooter.query.filter(Shooter.id==detail.target_8_id).scalar()
        form.target_8_service.data=name_8.service_id
except Exception as e:
return render_template('errors/detail_view.html')
return render_template('pages/detail_view_edit.html' , detail=detail,form=form)
@app.route('/data_save', methods=['GET', 'POST'])
def data_save():
return render_template('pages/data_save.html')
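# JSON registration endpoint used by the target registration page: resolves gender/rank/cantonment
# names to ids and saves a new Shooter record.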
@app.route('/target_registration/', methods=['GET', 'POST'])
def target_registration():
result=None
if request.method=="POST":
data1 = request.get_json()
print(data1)
cant=data1['cant']
div=data1['div']
rank=data1['rank']
gen=data1['gender']
dt=data1['date']
name=data1['name']
army_no=data1['service']
unit=data1['unit']
brigade=data1['brig']
gender_id=db.session.query(Gender.id).filter(Gender.name==gen).scalar()
rank_id=db.session.query(Rank.id).filter(Rank.name==rank).scalar()
cant_id=db.session.query(Cantonment.id).filter(Cantonment.cantonment==cant ,Cantonment.division==div).scalar()
print("cant_id")
print(cant_id)
shooter = Shooter(
name=name,
service_id = army_no,
registration_date = dt,
gender_id=gender_id,
cantonment_id = cant_id,
rank_id =rank_id,
unit=unit,
brigade=brigade
)
db.session.add(shooter)
db.session.commit()
result="Data Saved Sucessfully"
return jsonify(result=result)
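# Manual shooter registration form; the selected rank, cantonment/division and gender names are
# resolved to ids before the Shooter row is saved.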
@app.route('/shooter_registration/', methods=['GET', 'POST'])
def registration():
try:
cantonment=Cantonment.query.distinct(Cantonment.cantonment)
gender =Gender.query.all()
rank = Rank.query.all()
ran = request.form.get('comp_select4')
cant = request.form.get('comp_select')
gen = request.form.get('comp_select5')
brig = request.form.get('comp_select1')
form = RegistrationForm(request.form)
if(ran is None):
pass
else:
ran_object=Rank.query.filter(Rank.name==ran).scalar()
rank_id = ran_object.id
cant_object = Cantonment.query.filter(Cantonment.cantonment==cant,Cantonment.division==brig).scalar()
cant_id = cant_object.id
gen_obj=Gender.query.filter(Gender.name==gen).scalar()
gender_id = gen_obj.id
if form.validate_on_submit():
shooter = Shooter(
name=form.name.data,
service_id = form.service_id.data,
registration_date = form.dt.data.strftime('%Y-%m-%d'),
gender_id=gender_id,
cantonment_id = cant_id,
rank_id =rank_id,
unit=form.unit.data,
brigade=form.brig.data
)
db.session.add(shooter)
db.session.commit()
new_form = RegistrationForm(request.form)
return redirect(url_for('firer_details'))
except Exception as e:
return redirect(url_for('error_4'))
return render_template('forms/registration.html',
cantonment = cantonment ,
form=form ,
rank = rank,
gender=gender)
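# AJAX helper: returns the divisions available for the selected cantonment as JSON.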
@app.route('/get_brigade/')
def get_brigade():
cant = request.args.get('customer')
    da = Cantonment.query.filter(Cantonment.cantonment==cant).distinct(Cantonment.division)
data = [{"name": x.division} for x in da]
return jsonify(data)
@app.route('/firer_details/', methods=['GET', 'POST'])
def firer_details():
firer = Shooter.query.all()
for firers in firer:
firers.cantonment_name= Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.division = Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.rank = Rank.query.filter(Rank.id==firers.rank_id).scalar()
firers.gender_name = Gender.query.filter(Gender.id==firers.gender_id).scalar()
return render_template('pages/firer_details.html' , firer = firer)
@app.route('/bulk_registration_group')
def bulk_registration_group():
form=BulkRegistrationForm(request.form)
return render_template('pages/bulk_registration_group.html',form=form)
@app.route('/bulk_registration')
def bulk_registration():
cantonment=db.session.query(Cantonment).distinct(Cantonment.cantonment)
form=RegistrationForm(request.form)
return render_template('pages/bulk_registration.html',cantonment=cantonment,form=form)
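# Bulk shooter registration: each CSV row is expected as (name, rank, gender, service id); rows are
# inserted as Shooter records under the selected cantonment, unit and brigade.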
@app.route('/upload', methods=['POST'])
def upload():
try:
f = request.files['data_file']
cant = request.form.get('comp_select')
div = request.form.get('comp_select1')
form=RegistrationForm(request.form)
unit = request.form['game_id_1']
brig = request.form['game_id_2']
cant_id = db.session.query(Cantonment.id).filter(Cantonment.cantonment==cant,
Cantonment.division==div
).scalar()
if form.is_submitted():
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
shooters = Shooter(
name = lis[i][0],
service_id=lis[i][3],
registration_date=datetime.now(),
gender_id=db.session.query(Gender.id).filter(Gender.name==lis[i][2]).scalar(),
cantonment_id = cant_id,
rank_id = db.session.query(Rank.id).filter(Rank.name==lis[i][1]).scalar(),
unit=unit,
brigade=brig
)
db.session.add(shooters)
db.session.commit()
except Exception as e:
return redirect(url_for('error_3'))
return redirect(url_for('firer_details'))
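# Bulk group upload: clears stale TGroup rows from previous days, then inserts one TGroup row per
# line of the uploaded CSV.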
@app.route('/uploadgroup', methods=['POST'])
def uploadgroup():
try:
f = request.files['data_file']
form=BulkRegistrationForm(request.form)
if form.is_submitted():
curdate_p=(date.today())- timedelta(1)
if(db.session.query(db.exists().where(TGroup.date <= curdate_p)).scalar()):
db.session.query(TGroup).delete()
db.session.commit()
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
group = TGroup(
date=datetime.now(),
group_no=lis[i][0],
target_1_no=lis[i][1],
target_2_no=lis[i][2],
target_3_no=lis[i][3],
target_4_no=lis[i][4],
target_5_no=lis[i][5],
target_6_no=lis[i][6],
target_7_no=lis[i][7],
target_8_no=lis[i][8]
)
db.session.add(group)
db.session.commit()
else:
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
group = TGroup(
date=datetime.now(),
group_no=lis[i][0],
target_1_no=lis[i][1],
target_2_no=lis[i][2],
target_3_no=lis[i][3],
target_4_no=lis[i][4],
target_5_no=lis[i][5],
target_6_no=lis[i][6],
target_7_no=lis[i][7],
target_8_no=lis[i][8]
)
db.session.add(group)
db.session.commit()
except Exception as e:
return redirect(url_for('error_duplicate'))
return redirect(url_for('group_view'))
@app.route('/new_group')
def new_group():
firer = [row.service_id for row in Shooter.query.all()]
return render_template('pages/new_group.html',firer_1=firer)
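# Create a single firing group from the form; empty lanes are treated as the placeholder id 999 for
# the duplicate check, and groups with duplicate firers are rejected.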
@app.route('/individual_group/', methods=['GET', 'POST'])
def individual_group():
try:
curdate_p=(date.today())- timedelta(1)
#check=mysession.query(TGroup).filter(date==curdate_p).all()
if request.method=="POST":
grp = request.form['game_id_1']
tmp_list = []
duplicate = False
r=request.form['tag']
if (r== ""):
r_id = 999
else:
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r1=request.form['tag_1']
if(r1== ""):
r1_id=999
else:
r1_object=Shooter.query.filter(Shooter.service_id==r1).scalar()
r1_id=r1_object.id
r2=request.form['tag_2']
if (r2==""):
r2_id=999
else:
r2_object=Shooter.query.filter(Shooter.service_id==r2).scalar()
r2_id=r2_object.id
r3=request.form['tag_3']
if(r3==""):
r3_id=999
else:
r3_object=Shooter.query.filter(Shooter.service_id==r3).scalar()
r3_id=r3_object.id
r4=request.form['tag_4']
if(r4==""):
r4_id=999
else:
r4_object=Shooter.query.filter(Shooter.service_id==r4).scalar()
r4_id=r4_object.id
r5=request.form['tag_5']
if(r5==""):
r5_id=999
else:
r5_object=Shooter.query.filter(Shooter.service_id==r5).scalar()
r5_id=r5_object.id
r6=request.form['tag_6']
if(r6==""):
r6_id=999
else:
r6_object=Shooter.query.filter(Shooter.service_id==r6).scalar()
r6_id=r6_object.id
r7=request.form['tag_7']
if(r7== ""):
r7_id=999
else:
r7_object=Shooter.query.filter(Shooter.service_id==r7).scalar()
r7_id=r7_object.id
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
                        pass  # lanes sharing the placeholder id 999 are empty; do not reset the duplicate flag
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
if(db.session.query(db.exists().where(TGroup.date == curdate_p)).scalar()):
db.session.query(TGroup).delete()
db.session.commit()
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
gr=TGroup(
date=datetime.now(),
group_no=grp,
target_1_no=r,
target_2_no=r1,
target_3_no=r2,
target_4_no=r3,
target_5_no=r4,
target_6_no=r5,
target_7_no=r6,
target_8_no=r7
)
db.session.add(gr)
db.session.commit()
else:
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
gr=TGroup(
date=datetime.now(),
group_no=grp,
target_1_no=r,
target_2_no=r1,
target_3_no=r2,
target_4_no=r3,
target_5_no=r4,
target_6_no=r5,
target_7_no=r6,
target_8_no=r7
)
db.session.add(gr)
db.session.commit()
except Exception as e:
return render_template('errors/group_view_error.html')
return redirect(url_for('group_view'))
@app.route('/group_view/', methods=['GET', 'POST'])
def group_view():
detail = TGroup.query.all()
return render_template('pages/group_detail_view.html',detail=detail)
@app.route('/group_view/detail/<id>', methods=['GET', 'POST'])
def group_detail_view(id):
view = TGroup.query.filter(TGroup.group_no == id)
return render_template('pages/group_detail_view_id.html' , data = view)
@app.route('/group_details/edit/<id>', methods=['GET', 'POST'])
def group_detail_edit(id):
firer = TGroup.query.filter(TGroup.group_no == id).first()
form=GroupEditForm(obj=firer)
if form.validate_on_submit():
firer.date=form.date.data
firer.target_1_no=form.target_1_army.data
firer.target_2_no=form.target_2_army.data
firer.target_3_no=form.target_3_army.data
firer.target_4_no=form.target_4_army.data
firer.target_5_no=form.target_5_army.data
firer.target_6_no=form.target_6_army.data
firer.target_7_no=form.target_7_army.data
firer.target_8_no=form.target_8_army.data
firer.group_no=form.group_no.data
db.session.commit()
return redirect(url_for('group_view'))
form.group_no.data=firer.group_no
form.target_1_army.data=firer.target_1_no
form.target_2_army.data=firer.target_2_no
form.target_3_army.data=firer.target_3_no
form.target_4_army.data=firer.target_4_no
form.target_5_army.data=firer.target_5_no
form.target_6_army.data=firer.target_6_no
form.target_7_army.data=firer.target_7_no
form.target_8_army.data=firer.target_8_no
return render_template('pages/group_edit.html' , firer = firer , form=form)
@app.route('/firer_details/detail/<id>', methods=['GET', 'POST'])
def firer_detail_view(id):
firer = Shooter.query.filter(Shooter.service_id == id)
for firers in firer:
firers.cantonment_name= Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.division = Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.rank = Rank.query.filter(Rank.id==firers.rank_id).scalar()
firers.gender_name = Gender.query.filter(Gender.id==firers.gender_id).scalar()
return render_template('pages/firer_detail_view.html' , data = firer)
@app.route('/firer_details/edit/<id>', methods=['GET', 'POST'])
def firer_detail_edit(id):
firer = Shooter.query.filter(Shooter.service_id == id).first()
form=RegistrationEditForm(obj=firer)
try:
if form.validate_on_submit():
firer.name = form.name.data
firer.service_id=form.service_id.data
firer.registration_date=form.date.data
gender_obj=Gender.query.filter(Gender.name==form.gender.data).scalar()
firer.gender_id=gender_obj.id
cantonment_obj=Cantonment.query.filter(Cantonment.cantonment==form.cantonment.data ,Cantonment.division==form.div.data).scalar()
firer.cantonment_id=cantonment_obj.id
            rank_obj=Rank.query.filter(Rank.name==form.rank.data).distinct(Rank.id).scalar()
firer.rank_id=rank_obj.id
firer.unit=form.unit.data
firer.brigade=form.brigade.data
db.session.commit()
return redirect(url_for('firer_details'))
form.name.data=firer.name
form.service_id.data=firer.service_id
form.date.data=firer.registration_date
gender_name=Gender.query.filter(Gender.id==firer.gender_id).scalar()
form.gender.data=gender_name.name
cantonment_name=Cantonment.query.filter(Cantonment.id==firer.cantonment_id).scalar()
form.cantonment.data=cantonment_name.cantonment
form.div.data=cantonment_name.division
unit_data=Shooter.query.filter(Shooter.service_id==firer.service_id).scalar()
form.unit.data=unit_data.unit
form.brigade.data=unit_data.brigade
rank_name=Rank.query.filter(Rank.id==firer.rank_id).distinct(Rank.name).scalar()
form.rank.data=rank_name.name
except Exception as e:
return redirect(url_for('error_7'))
return render_template('pages/firer_detail_edit.html' , firer = firer , form=form)
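# Live view: resolves the names, service ids and ranks of the eight targets currently held in TShooting.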
@app.route('/live/')
def live():
T1_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_rank = mysession.query(Rank.name).filter(Rank.id==T1_r_id).scalar()
T2_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_rank = mysession.query(Rank.name).filter(Rank.id==T2_r_id).scalar()
T3_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_rank = mysession.query(Rank.name).filter(Rank.id==T3_r_id).scalar()
T4_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_rank = mysession.query(Rank.name).filter(Rank.id==T4_r_id).scalar()
T5_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_rank = mysession.query(Rank.name).filter(Rank.id==T5_r_id).scalar()
T6_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_rank = mysession.query(Rank.name).filter(Rank.id==T6_r_id).scalar()
T7_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_rank = mysession.query(Rank.name).filter(Rank.id==T7_r_id).scalar()
T8_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_rank = mysession.query(Rank.name).filter(Rank.id==T8_r_id).scalar()
return render_template('pages/live.html' ,
T1_name=T1_name,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
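# Camera detail pages. The route suffixes and the rendered template numbers are cross-mapped
# (e.g. /cam_detail_2/ renders cam_detail_1.html); this mapping is kept exactly as in the original code.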
@app.route('/cam_detail_2/', methods=['GET', 'POST'])
def cam_detail_2():
return render_template('pages/cam_detail_1.html')
@app.route('/cam_detail_4/', methods=['GET', 'POST'])
def cam_detail_4():
return render_template('pages/cam_detail_2.html')
@app.route('/cam_detail_1/', methods=['GET', 'POST'])
def cam_detail_1():
return render_template('pages/cam_detail_3.html')
@app.route('/cam_detail_3/', methods=['GET', 'POST'])
def cam_detail_3():
return render_template('pages/cam_detail_4.html')
@app.route('/cam_detail_6/', methods=['GET', 'POST'])
def cam_detail_6():
return render_template('pages/cam_detail_5.html')
@app.route('/cam_detail_8/', methods=['GET', 'POST'])
def cam_detail_8():
return render_template('pages/cam_detail_6.html')
@app.route('/cam_detail_7/', methods=['GET', 'POST'])
def cam_detail_7():
return render_template('pages/cam_detail_7.html')
@app.route('/cam_detail_5/', methods=['GET', 'POST'])
def cam_detail_5():
return render_template('pages/cam_detail_8.html')
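# Shooting session setup form: resolves the selected range, firearm and ammunition names to ids and
# saves a new Shooting_Session.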
@app.route('/session_setup/', methods=['GET', 'POST'])
def session_setup():
try:
data = Shooter.query.all()
rang= Range.query.all()
firearms = Firearms.query.all()
ammunation = Ammunation.query.all()
rang_name = request.form.get('comp_select_4')
fire_name = request.form.get('comp_select_5')
ammu_name = request.form.get('comp_select_6')
form=SessionForm()
if(rang_name is None):
range_id=999
fire_id=999
ammu_id=999
else:
range_id = db.session.query(Range.id).filter(Range.name==rang_name).scalar()
fire_id = db.session.query(Firearms.id).filter(Firearms.name==fire_name).scalar()
ammu_id = db.session.query(Ammunation.id).filter(Ammunation.name==ammu_name).scalar()
if form.validate_on_submit():
shooting=Shooting_Session(
date=form.date.data.strftime('%Y-%m-%d'),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=fire_id,
ammunation_id=ammu_id,
target_distance = form.target_distance.data,
weather_notes = form.weather_notes.data,
comments = form.comments.data,
session_no=form.session_no.data,
occasion=form.occ.data
)
db.session.add(shooting)
db.session.commit()
return redirect(url_for('session_config'))
except Exception as e:
        # NOTE: 'error5_505.html' is a template filename, not a registered endpoint, so url_for() would
        # raise here; redirecting to the generic 'error' endpoint used elsewhere is an assumed fix.
        return redirect(url_for('error'))
return render_template('forms/shooting_form.html', form=form, data =data ,rang=rang , firearmns=firearms, ammunation = ammunation)
@app.route('/configuration/', methods=['GET', 'POST'])
def session_config():
config = Shooting_Session.query.all()
for con in config:
con.range_name = Range.query.filter(Range.id==con.shooting_range_id).scalar()
con.firerarms_name = Firearms.query.filter(Firearms.id==con.firearms_id).scalar()
con.ammunation_name = Ammunation.query.filter(Ammunation.id==con.ammunation_id).scalar()
return render_template('pages/shooting_configuration_detail.html',con=config)
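# Image processing dashboard: lists today's unsaved details and the current TShooting assignment,
# using "NA" placeholders for empty lanes or when the previous detail has already been saved.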
@app.route('/image_process/')
def image_process():
dt=time.strftime("%Y-%m-%d")
detail_data=db.session.query(Session_Detail).filter(Session_Detail.date==dt,Session_Detail.save_flag==0).all()
data =TShooting.query.scalar()
if(data is None):
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
elif(data.save_flag == 1 ):
db.session.query(TShooting).delete()
db.session.commit()
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
else:
T1=Shooter.query.filter(Shooter.id==TShooting.target_1_id).scalar()
if(T1 is None):
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
else:
T1_name = T1.name
T1_service = T1.service_id
T1_r_id = T1.rank_id
T1_rank_id = Rank.query.filter(Rank.id==T1_r_id).scalar()
T1_rank=T1_rank_id.name
T2=Shooter.query.filter(Shooter.id==TShooting.target_2_id).scalar()
if(T2 is None):
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
else:
T2_name = T2.name
T2_service = T2.service_id
T2_r_id = T2.rank_id
T2_rank_id = Rank.query.filter(Rank.id==T2_r_id).scalar()
T2_rank=T2_rank_id.name
T3=Shooter.query.filter(Shooter.id==TShooting.target_3_id,TShooting.target_3_id!=999).scalar()
if(T3 is None):
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
else:
T3_name = T3.name
T3_service = T3.service_id
T3_r_id = T3.rank_id
T3_rank_id = Rank.query.filter(Rank.id==T3_r_id).scalar()
T3_rank=T3_rank_id.name
T4=Shooter.query.filter(Shooter.id==TShooting.target_4_id,TShooting.target_4_id!=999).scalar()
if(T4 is None):
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
else:
T4_name = T4.name
T4_service = T4.service_id
T4_r_id = T4.rank_id
T4_rank_id = Rank.query.filter(Rank.id==T4_r_id).scalar()
T4_rank=T4_rank_id.name
T5=Shooter.query.filter(Shooter.id==TShooting.target_5_id).scalar()
if(T5 is None):
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
else:
T5_name = T5.name
T5_service = T5.service_id
T5_r_id = T5.rank_id
T5_rank_id = Rank.query.filter(Rank.id==T5_r_id).scalar()
T5_rank=T5_rank_id.name
T6=Shooter.query.filter(Shooter.id==TShooting.target_6_id).scalar()
if(T6 is None):
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
else:
T6_name = T6.name
T6_service = T6.service_id
T6_r_id = T6.rank_id
T6_rank_id = Rank.query.filter(Rank.id==T6_r_id).scalar()
T6_rank=T6_rank_id.name
T7=Shooter.query.filter(Shooter.id==TShooting.target_7_id).scalar()
if(T7 is None):
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
else:
T7_name = T7.name
T7_service = T7.service_id
T7_r_id = T7.rank_id
T7_rank_id = Rank.query.filter(Rank.id==T7_r_id).scalar()
T7_rank=T7_rank_id.name
T8=Shooter.query.filter(Shooter.id==TShooting.target_8_id).scalar()
if(T8 is None):
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
else:
T8_name = T8.name
T8_service = T8.service_id
T8_r_id = T8.rank_id
T8_rank_id = Rank.query.filter(Rank.id==T8_r_id).scalar()
T8_rank=T8_rank_id.name
return render_template('pages/image_process.html' ,
T1_name=T1_name,
detail_data=detail_data,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
@app.route('/image_edit_1/', methods=['GET', 'POST'])
def image_edit_1():
return render_template('pages/image_edit_1.html')
@app.route('/image_edit_2/', methods=['GET', 'POST'])
def image_edit_2():
return render_template('pages/image_edit_2.html')
@app.route('/image_edit_3/', methods=['GET', 'POST'])
def image_edit_3():
return render_template('pages/image_edit_3.html')
@app.route('/image_edit_4/', methods=['GET', 'POST'])
def image_edit_4():
return render_template('pages/image_edit_4.html')
@app.route('/image_edit_5/', methods=['GET', 'POST'])
def image_edit_5():
return render_template('pages/image_edit_5.html')
@app.route('/image_edit_6/', methods=['GET', 'POST'])
def image_edit_6():
return render_template('pages/image_edit_6.html')
@app.route('/image_edit_7/', methods=['GET', 'POST'])
def image_edit_7():
return render_template('pages/image_edit_7.html')
@app.route('/image_edit_8/', methods=['GET', 'POST'])
def image_edit_8():
return render_template('pages/image_edit_8.html')
@app.route('/configuration/detail/<id>', methods=['GET', 'POST'])
def session_config_detail(id):
config = Shooting_Session.query.filter(Shooting_Session.id == id)
for con in config:
con.range_name = Range.query.filter(Range.id==con.shooting_range_id).scalar()
con.firerarms_name = Firearms.query.filter(Firearms.id==con.firearms_id).scalar()
con.ammunation_name = Ammunation.query.filter(Ammunation.id==con.ammunation_id).scalar()
return render_template('pages/shooting_configuration_detail_view.html',con=config)
@app.route('/configuration/edit/<id>', methods=['GET', 'POST'])
def shooting_config_edit(id):
edit = Shooting_Session.query.get_or_404(id)
form = SessionEditForm(obj=edit)
if form.validate_on_submit():
edit.session_no = form.session_no.data
edit.date = form.date.data
edit.occasion=form.occ.data
edit.target_distance = form.target_distance.data
ammunation_id=Ammunation.query.filter(Ammunation.name==form.ammunation_name.data).scalar()
edit.ammunation_id=ammunation_id.id
firearms_id=Firearms.query.filter(Firearms.name==form.firerarms_name.data).scalar()
edit.firearms_id=firearms_id.id
range_id=Range.query.filter(Range.name==form.range_name.data).scalar()
edit.shooting_range_id=range_id.id
edit.weather_notes=form.weather_notes.data
edit.comments=form.comments.data
db.session.commit()
return redirect(url_for('session_config'))
form.session_no.data=edit.session_no
form.date.data=edit.date
form.occ.data=edit.occasion
ammunation_name=Ammunation.query.filter(Ammunation.id==edit.ammunation_id).scalar()
form.ammunation_name.data=ammunation_name.name
firerarms_name=Firearms.query.filter(Firearms.id==edit.firearms_id).scalar()
form.firerarms_name.data=firerarms_name.name
range_name=Range.query.filter(Range.id==edit.shooting_range_id).scalar()
form.range_name.data=range_name.name
form.weather_notes.data=edit.weather_notes
form.comments.data=edit.comments
return render_template('pages/shooting_configuration_edit.html',form=form,edit=edit)
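# Detail dashboard: resolves the names, service ids and ranks of all eight TShooting targets, or shows
# "NA" placeholders when no temporary detail is loaded.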
@app.route('/detail_dashboard/')
def detail_dashboard():
tshoot=db.session.query(TShooting).scalar()
if(tshoot is None):
T1_name = "NA"
T1_service="NA"
T1_rank ="NA"
T2_name = "NA"
T2_service="NA"
T2_rank ="NA"
T3_name = "NA"
T3_service="NA"
T3_rank ="NA"
T4_name = "NA"
T4_service="NA"
T4_rank ="NA"
T5_name = "NA"
T5_service="NA"
T5_rank ="NA"
T6_name = "NA"
T6_service="NA"
T6_rank ="NA"
T7_name = "NA"
T7_service="NA"
T7_rank ="NA"
T8_name = "NA"
T8_service="NA"
T8_rank ="NA"
else:
T1=Shooter.query.filter(Shooter.id==TShooting.target_1_id).scalar()
T1_name = T1.name
T1_service = T1.service_id
T1_r_id = T1.rank_id
T1_rank_id = Rank.query.filter(Rank.id==T1_r_id).scalar()
T1_rank=T1_rank_id.name
T2=Shooter.query.filter(Shooter.id==TShooting.target_2_id).scalar()
T2_name = T2.name
T2_service = T2.service_id
T2_r_id = T2.rank_id
T2_rank_id = Rank.query.filter(Rank.id==T2_r_id).scalar()
T2_rank=T2_rank_id.name
T3=Shooter.query.filter(Shooter.id==TShooting.target_3_id).scalar()
T3_name = T3.name
T3_service = T3.service_id
T3_r_id = T3.rank_id
T3_rank_id = Rank.query.filter(Rank.id==T3_r_id).scalar()
T3_rank=T3_rank_id.name
T4=Shooter.query.filter(Shooter.id==TShooting.target_4_id).scalar()
T4_name = T4.name
T4_service = T4.service_id
T4_r_id = T4.rank_id
T4_rank_id = Rank.query.filter(Rank.id==T4_r_id).scalar()
T4_rank=T4_rank_id.name
T5=Shooter.query.filter(Shooter.id==TShooting.target_5_id).scalar()
T5_name = T5.name
T5_service = T5.service_id
T5_r_id = T5.rank_id
T5_rank_id = Rank.query.filter(Rank.id==T5_r_id).scalar()
T5_rank=T5_rank_id.name
T6=Shooter.query.filter(Shooter.id==TShooting.target_6_id).scalar()
T6_name = T6.name
T6_service = T6.service_id
T6_r_id = T6.rank_id
T6_rank_id = Rank.query.filter(Rank.id==T6_r_id).scalar()
T6_rank=T6_rank_id.name
T7=Shooter.query.filter(Shooter.id==TShooting.target_7_id).scalar()
T7_name = T7.name
T7_service = T7.service_id
T7_r_id = T7.rank_id
T7_rank_id = Rank.query.filter(Rank.id==T7_r_id).scalar()
T7_rank=T7_rank_id.name
T8=Shooter.query.filter(Shooter.id==TShooting.target_8_id).scalar()
T8_name = T8.name
T8_service = T8.service_id
T8_r_id = T8.rank_id
T8_rank_id = Rank.query.filter(Rank.id==T8_r_id).scalar()
T8_rank=T8_rank_id.name
return render_template('pages/detail_dashboard.html' ,
T1_name=T1_name,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
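# Ad-hoc JSON lookup: given an army/service number, returns the firer's name, rank and cantonment,
# their last five results, tendencies and grouping lengths, and today's set 1-4 assignments.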
@app.route('/adhoc_detail_1/', methods=['GET', 'POST'])
def adhoc_detail_1():
name_1=None
army=None
rank=None
cant=None
set_1_name=None
set_1_army=None
set_2_name=None
set_2_army=None
set_3_name=None
set_3_army=None
set_4_name=None
set_4_army=None
res=[]
ten=[]
gp_len=[]
if request.method == "POST":
data1 = request.get_json()
army=data1['usr']
curdate=time.strftime("%Y-%m-%d")
name_1=db.session.query(Shooter.name).filter(Shooter.service_id==army).scalar()
target_1_id=db.session.query(Shooter.id).filter(Shooter.service_id==army).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.service_id==army).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.service_id==army).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(ele6)
set_1_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(Shooter.id==set_1_id).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==2,
Firer_Details.set_no==2
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(Shooter.id==set_2_id).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==3,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(Shooter.id==set_3_id).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==4,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(Shooter.id==set_4_id).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
return jsonify(name_1=name_1,army=army,rank=rank,cant=cant,
set_1_name=set_1_name,
set_2_name=set_2_name,
set_3_name=set_3_name,
set_4_name=set_4_name,
set_1_army=set_1_army,
set_2_army=set_2_army,
set_3_army=set_3_army,
set_4_army=set_4_army,
gp_len=gp_len,
res=res,
ten=ten
)
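# Target 1 scoring page: clears Firer_Details for target 1 once its fifth set has been recorded, then
# loads the dropdown data (sessions, firearms, ranges, ammunition, registered firers) and renders the
# prediction view with placeholder values.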
@app.route('/individual_score/target_1', methods=['GET', 'POST'])
def individual_score_target_1():
session.clear()
data=TShooting.query.scalar()
firing_set_arr=[]
cantonment=Cantonment.query.distinct(Cantonment.cantonment)
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
gender =Gender.query.all()
rank_s = Rank.query.all()
firing_set=db.session.query(Firer_Details.set_no).filter(Firer_Details.target_no==1).distinct().all()
for ele in firing_set:
for ele2 in ele:
firing_set_arr.append(ele2)
if(len(firing_set_arr)<1):
pass
else:
i=len(firing_set_arr)-1
if(firing_set_arr[i]==5):
db.session.query(Firer_Details).filter(Firer_Details.target_no==1).delete()
db.session.commit()
else:
pass
dt=time.strftime("%Y-%m-%d")
curdatetime=datetime.now()
firer_1 = [row.service_id for row in Shooter.query.all()]
detail_data=db.session.query(Session_Detail).filter(Session_Detail.date==dt,Session_Detail.save_flag==0).all()
name = "NA"
detail_no ="NA"
rank ="NA"
target_no = 1
service_id ="NA"
ten = []
res = []
selection=Shooting_Session.query.filter(Shooting_Session.date>=dt).order_by(Shooting_Session.datetimestamp.desc()).all()
firearms = Firearms.query.all()
rang= Range.query.all()
ammunation = Ammunation.query.all()
return render_template('pages/prediction_target_1.html',
curdatetime=curdatetime,
name = name,
firer_1=firer_1,
rank=rank,
detail_data=detail_data,
detail_no=detail_no,
target_no=target_no,
service_id=service_id,
firearms=firearms,
ammunation=ammunation,
data=selection,
rang=rang,
res=res,
date=dt,
ten=ten,
cantonment=cantonment,
gender=gender,
rank_s=rank_s)
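# AJAX endpoint: creates a Shooting_Session from the JSON payload posted by the target 1 page and
# echoes the session number back.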
@app.route('/session_target_1/', methods=['GET', 'POST'])
def session_target_1():
if request.method == "POST":
data1 = request.get_json()
session=data1["session"]
ran=data1["range"]
arms=data1["arms"]
distance=data1["dis"]
occ=data1["occ"]
ammu=data1["ammu"]
weather=data1["weather"]
comment=data1["comment"]
range_id=db.session.query(Range.id).filter(Range.name==ran).scalar()
arms_id=db.session.query(Firearms.id).filter(Firearms.name==arms).scalar()
ammu_id=db.session.query(Ammunation.id).filter(Ammunation.name==ammu).scalar()
shooting=Shooting_Session(
date=time.strftime("%Y-%m-%d"),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=arms_id,
ammunation_id=ammu_id,
target_distance=distance,
weather_notes =weather,
comments =comment,
session_no=session,
occasion=occ
)
db.session.add(shooting)
db.session.commit()
result="This is Successfully Saved"
return jsonify(result=result ,session=session)
@app.route('/target_1_populate/', methods=['GET', 'POST'])
def target_1_populate():
if request.method == 'POST':
session_id=db.session.query(TShooting.session_id).scalar()
return jsonify(session_id=session_id)
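# AJAX endpoint for loading a new detail from the target 1 page: resolves service ids to Shooter ids
# (999 for empty lanes), rejects duplicate firers, refreshes TPaper_ref/TShooting/Session_Detail and
# returns the lane 1 firer's history for display.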
@app.route('/load_detail_1/', methods=['GET', 'POST'])
def load_detail_1():
result_1="Done"
if request.method == 'POST':
curdate=time.strftime("%Y-%m-%d")
r8=None
data=request.get_json()
tmp_list = []
duplicate = False
detail =data["detail"]
sess=data["session"]
paper=data["paper"]
shot=data["shot"]
set=data["set"]
if(data["r1"]==""):
r1_id=999
else:
r1=data["r1"]
r1_id=db.session.query(Shooter.id).filter(Shooter.service_id==r1).scalar()
if(data["r2"]==""):
r2_id=999
else:
r2=data["r2"]
r2_id=db.session.query(Shooter.id).filter(Shooter.service_id==r2).scalar()
if(data["r3"]==""):
r3_id=999
else:
r3=data["r3"]
r3_id=db.session.query(Shooter.id).filter(Shooter.service_id==r3).scalar()
if(data["r4"]==""):
r4_id=999
else:
r4=data["r4"]
r4_id=db.session.query(Shooter.id).filter(Shooter.service_id==r4).scalar()
if(data["r5"]==""):
r5_id=999
else:
r5=data["r5"]
r5_id=db.session.query(Shooter.id).filter(Shooter.service_id==r5).scalar()
if(data["r6"]==""):
r6_id=999
else:
r6=data["r6"]
r6_id=db.session.query(Shooter.id).filter(Shooter.service_id==r6).scalar()
if(data["r7"]==""):
r7_id=999
else:
r7=data["r7"]
r7_id=db.session.query(Shooter.id).filter(Shooter.service_id==r7).scalar()
if(data["r8"]==""):
r8_id=999
else:
r8=data["r8"]
r8_id=db.session.query(Shooter.id).filter(Shooter.service_id==r8).scalar()
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
tmp_list.append(r8_id)
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
date=time.strftime("%Y-%m-%d"),
paper_ref=paper,
detail_no=detail,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
        # Flag a duplicate only when two different lanes share a real firer id;
        # the placeholder id 999 marks an empty lane and never counts as a duplicate.
        for i in range(len(tmp_list)):
            for j in range(len(tmp_list)):
                if(i!=j and tmp_list[i]==tmp_list[j] and tmp_list[i]!=999):
                    duplicate = True
if(duplicate):
print("inside dup")
error="dup"
else:
db.session.query(TShooting).delete()
db.session.commit()
tshoot=TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail,
target_1_id=r1_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=paper,
set_no=set,
save_flag=0
)
db.session.add(tshoot)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail,
target_1_id=r1_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=paper,
set_no=set,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
error="ok"
firer_name,cant,rank,service_id,res,tenden,gp_len,set_4_name,set_4_army,set_4_session_no,set_4_detail_no,set_3_name,set_3_army,set_3_session_no,set_3_detail_no,set_2_name,set_2_army,set_2_session_no,set_2_detail_no,set_1_name,set_1_army,set_1_session_no,set_1_detail_no,current_firer_name,current_army_no,current_session_no,current_detail_no=get_information(r1_id,sess,paper)
result="The Detail is Saved Successfully"
return jsonify(result=result,data1=firer_name,ra_1=rank,detail=detail,
service_id_1=service_id,
session=sess,
paper=paper,
set_no=set,
cant=cant,
gp_len=gp_len,
res=res,
ten=tenden,
set_4_name=set_4_name,
set_3_name=set_3_name,
set_2_name=set_2_name,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_4_army=set_4_army,
set_3_army=set_3_army,
set_2_army=set_2_army,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_4_session_no=set_4_session_no,
set_3_session_no=set_3_session_no,
set_2_session_no=set_2_session_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_4_detail_no=set_4_detail_no,
set_3_detail_no=set_3_detail_no,
set_2_detail_no=set_2_detail_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no
)
return jsonify(result_1=result_1)
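# Helper shared by the target 1 AJAX endpoints: returns the firer's rank/cantonment, their last five
# results, tendencies and grouping lengths, and the set 1-4 assignments for target 1 as one flat tuple.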
def get_information(target_1_id,sess,paper_ref):
res=[]
ten=[]
gp_len=[]
curdate=time.strftime("%Y-%m-%d")
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(int(ele6))
da_1=db.session.query(Shooter.name).filter(Shooter.id==target_1_id).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.id==target_1_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
ra_1_id=db.session.query(Shooter.rank_id).filter(Shooter.id==target_1_id).scalar()
ra_1 = db.session.query(Rank.name).filter(Rank.id==ra_1_id).scalar()
service_id_1 = db.session.query(Shooter.service_id).filter(Shooter.id==target_1_id).scalar()
set_1_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==target_1_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==target_1_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==target_1_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==target_1_id).scalar()
return(da_1,cant,ra_1,service_id_1,res,ten,gp_len,
set_4_name,set_4_army,set_4_session_no,set_4_detail_no,
set_3_name,set_3_army,set_3_session_no,set_3_detail_no,
set_2_name,set_2_army,set_2_session_no,set_2_detail_no,
set_1_name,set_1_army,set_1_session_no,set_1_detail_no,
current_firer_name,current_army_no,current_session_no,current_detail_no
)
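# The scoring pages for targets 2-8 below follow the same pattern: look up the firer currently assigned
# to that lane in TShooting and show their last five results and tendencies.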
@app.route('/individual_score/target_2', methods=['GET', 'POST'])
def individual_score_target_2():
firer_id =db.session.query(TShooting.target_2_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 2
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres,)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_2()
if request.method == 'POST':
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print("paper_ref")
print(paper_ref)
return render_template('pages/prediction_target_2.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_3', methods=['GET', 'POST'])
def individual_score_target_3():
firer_id =db.session.query(TShooting.target_3_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 3
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_3.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_4', methods=['GET', 'POST'])
def individual_score_target_4():
firer_id =db.session.query(TShooting.target_4_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 4
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_4.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_5', methods=['GET', 'POST'])
def individual_score_target_5():
firer_id =db.session.query(TShooting.target_5_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 5
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_5.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_6', methods=['GET', 'POST'])
def individual_score_target_6():
firer_id =db.session.query(TShooting.target_6_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 6
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_6.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_7', methods=['GET', 'POST'])
def individual_score_target_7():
firer_id =db.session.query(TShooting.target_7_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 7
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_7.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_8', methods=['GET', 'POST'])
def individual_score_target_8():
firer_id =db.session.query(TShooting.target_8_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
    target_no = 8
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_8.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
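# prediction_target_1 assembles the live view for target 1: the current firer's shot coordinates and
# MPI from prediction_calculation_1, plus the names, army numbers, session/detail numbers and final
# (x, y) points recorded today for sets 1-4 on that target.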
@app.route('/prediction_target_1/', methods=['GET', 'POST'])
def prediction_target_1():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,detail,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_1()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 ,Firer_Details.set_no==2 , Firer_Details.session_id==sess).all()
set_2_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==2 , Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==3 , Firer_Details.session_id==sess).all()
set_3_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==3 , Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
print(set_3_x_arr)
set_4_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==4 , Firer_Details.session_id==sess).all()
set_4_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==4 , Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
print("set_2_detail_no")
print(set_2_detail_no)
print(set_2_detail_no)
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
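# prediction_target_2 repeats the flow of prediction_target_1 but reads from T_Firer_Details and
# additionally filters every set query by the active paper_ref.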
@app.route('/prediction_target_2/', methods=['GET', 'POST'])
def prediction_target_2():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_2()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
        Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
import argparse
import sys
parser=argparse.ArgumentParser(
description=''' ''')
__file__ = "add_metadata.py"
__author__ = '<NAME> (<EMAIL>)'
__version__ = '0.1'
__date__ = 'March 2nd, 2021'
parser.add_argument('inputDirectory',
help='Full path to the input directory where all files are')
parser.add_argument('metadata')
# Execute parse_args()
args = parser.parse_args()
# argparse has already parsed these; read them from args rather than raw sys.argv
inputDirectory = args.inputDirectory
metadata = args.metadata
###############################################################################
import os
import pandas as pd
# From the script location, find results directory
script_location = sys.path[0]
out_dir = os.path.dirname(os.path.dirname(script_location))
out_dir = os.path.join(out_dir, "results/Annotation_results")
curdir = out_dir
os.chdir(curdir)
print("Input directory: " + curdir)
entries = list()
for (dirpath, dirnames, filenames) in os.walk(curdir):
entries += [os.path.join(dirpath, file) for file in filenames]
# list tblout files
anno_files =[]
for filename in entries:
if "counts" in filename or "PA" in filename:
anno_files.append(filename)
metadata = pd.read_csv(os.path.join(script_location, "data/metadata.csv"),
names =["Genome","Metadata_field"], index_col=None)
for file in anno_files:
filename = os.path.basename(file)
name = filename.replace(".csv", "") + "_metadata.csv"
    df = pd.read_csv(file)
def Moder_merger(params : dict):
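    # Each nested Solo_* helper re-derives candidate adduct m/z values from the precursor mass of one
    # MGF spectrum and counts how many of them are matched by fragment peaks within prec_mass_error
    # (prec_mass_error is expected to be defined in the enclosing scope, e.g. from params).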
def Solo_M1mHpC4H11N(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 72.081324
mz_Cl = 34.968853 + mz - 72.081324
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl])
def Solo_M1mHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 44.997654
mz_Cl = 34.968853 + mz - 44.997654
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl])
def Solo_M1m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 66.979600
mz_Cl = 34.968853 + mz - 66.979600
mz_m2HpNa = 20.97412 + mz - 66.979600
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M1m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 66.979600
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H])
def Solo_M1m2HpK(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 36.948058
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H])
def Solo_M2mHpC4H11N(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 72.081324)/2
mz_Cl = 34.968853 + (mz - 72.081324)/2
mz_m2HpNa = 20.97412 + (mz - 72.081324)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M2mHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 44.997654)/2
mz_Cl = 34.968853 + (mz - 44.997654)/2
mz_m2HpNa = 20.97412 + (mz - 44.997654)/2
mz_mHpHCOOH = 44.997654 + (mz - 44.997654)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_mHpHCOOH = peaks.between(mz_mHpHCOOH - prec_mass_error, mz_mHpHCOOH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_mHpHCOOH])
def Solo_M2mH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz + 1.007825)/2
mz_Cl = 34.968853 + (mz + 1.007825)/2
mz_m2HpNa = 20.97412 + (mz + 1.007825)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M2pCl(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 34.968853)/2
mz_Cl = 34.968853 + (mz - 34.968853)/2
mz_m2HpNa = 20.97412 + (mz - 34.968853)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M2m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 66.979600)/2
mz_Cl = 34.968853 + (mz - 66.979600)/2
mz_m2HpNa = 20.97412 + (mz - 66.979600)/2
mz_m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH])
def Solo_M2m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 20.97412)/2
mz_Cl = 34.968853 + (mz - 20.97412)/2
mz_m2HpNa = 20.97412 + (mz - 20.97412)/2
mz_m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH])
def Solo_M2m2HpK(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 36.948058)/2
mz_Cl = 34.968853 + (mz - 36.948058)/2
mz_m2HpNa = 20.97412 + (mz - 36.948058)/2
mz_m2HpNapHCOOH = 66.9796 + (mz - 36.948058)/2
mz_m2HpK = 36.948058 + (mz - 36.948058)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK])
def Solo_M3mH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz + 1.007825)/3
mz_Cl = 34.968853 + (mz + 1.007825)/3
mz_m2HpNa = 20.97412 + (mz + 1.007825)/3
mz_m2HpNapHCOOH = 66.9796 + (mz + 1.007825)/3
mz_m2HpK = 36.948058 + (mz + 1.007825)/3
mz_M2mH = -1.007825 + (mz + 1.007825)*(2/3)
mz_M2pCl = 34.968853 + (mz + 1.007825)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz + 1.007825)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz + 1.007825)*(2/3)
mz_M2m2HpK = 36.948058 + (mz + 1.007825)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M3pCl(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 34.968853)/3
mz_Cl = 34.968853 + (mz - 34.968853)/3
mz_m2HpNa = 20.97412 + (mz - 34.968853)/3
mz_m2HpNapHCOOH = 66.9796 + (mz - 34.968853)/3
mz_m2HpK = 36.948058 + (mz - 34.968853)/3
mz_M2mH = -1.007825 + (mz - 34.968853)*(2/3)
mz_M2pCl = 34.968853 + (mz - 34.968853)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz - 34.968853)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 34.968853)*(2/3)
mz_M2m2HpK = 36.948058 + (mz - 34.968853)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M3m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 66.979600)/3
mz_Cl = 34.968853 + (mz - 66.979600)/3
mz_m2HpNa = 20.97412 + (mz - 66.979600)/3
mz_m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/3
mz_m2HpK = 36.948058 + (mz - 66.979600)/3
mz_M2mH = -1.007825 + (mz - 66.979600)*(2/3)
mz_M2pCl = 34.968853 + (mz - 66.979600)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz - 66.979600)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 66.979600)*(2/3)
mz_M2m2HpK = 36.948058 + (mz - 66.979600)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M3m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 20.97412)/3
mz_Cl = 34.968853 + (mz - 20.97412)/3
mz_m2HpNa = 20.97412 + (mz - 20.97412)/3
mz_m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/3
mz_m2HpK = 36.948058 + (mz - 20.97412)/3
mz_M2mH = -1.007825 + (mz - 20.97412)*(2/3)
mz_M2pCl = 34.968853 + (mz - 20.97412)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz - 20.97412)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 20.97412)*(2/3)
mz_M2m2HpK = 36.948058 + (mz - 20.97412)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M4mH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz + 1.007825)/4
mz_Cl = 34.968853 + (mz + 1.007825)/4
mz_m2HpNa = 20.97412 + (mz + 1.007825)/4
mz_m2HpNapHCOOH = 66.9796 + (mz + 1.007825)/4
mz_m2HpK = 36.948058 + (mz + 1.007825)/4
mz_M2mH = -1.007825 + (mz + 1.007825)/2
mz_M2pCl = 34.968853 + (mz + 1.007825)/2
mz_M2m2HpNa = 20.97412 + (mz + 1.007825)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz + 1.007825)/2
mz_M2m2HpK = 36.948058 + (mz + 1.007825)/2
mz_M3mH = -1.007825 + (mz + 1.007825)*(3/4)
mz_M3pCl = 34.968853 + (mz + 1.007825)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz + 1.007825)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz + 1.007825)*(3/4)
mz_M3m2HpK = 36.948058 + (mz + 1.007825)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M4pCl(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 34.968853)/4
mz_Cl = 34.968853 + (mz - 34.968853)/4
mz_m2HpNa = 20.97412 + (mz - 34.968853)/4
mz_m2HpNapHCOOH = 66.9796 + (mz - 34.968853)/4
mz_m2HpK = 36.948058 + (mz - 34.968853)/4
mz_M2mH = -1.007825 + (mz - 34.968853)/2
mz_M2pCl = 34.968853 + (mz - 34.968853)/2
mz_M2m2HpNa = 20.97412 + (mz - 34.968853)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 34.968853)/2
mz_M2m2HpK = 36.948058 + (mz - 34.968853)/2
mz_M3mH = -1.007825 + (mz - 34.968853)*(3/4)
mz_M3pCl = 34.968853 + (mz - 34.968853)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz - 34.968853)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz - 34.968853)*(3/4)
mz_M3m2HpK = 36.948058 + (mz - 34.968853)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M4m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 66.979600)/4
mz_Cl = 34.968853 + (mz - 66.979600)/4
mz_m2HpNa = 20.97412 + (mz - 66.979600)/4
mz_m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/4
mz_m2HpK = 36.948058 + (mz - 66.979600)/4
mz_M2mH = -1.007825 + (mz - 66.979600)/2
mz_M2pCl = 34.968853 + (mz - 66.979600)/2
mz_M2m2HpNa = 20.97412 + (mz - 66.979600)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/2
mz_M2m2HpK = 36.948058 + (mz - 66.979600)/2
mz_M3mH = -1.007825 + (mz - 66.979600)*(3/4)
mz_M3pCl = 34.968853 + (mz - 66.979600)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz - 66.979600)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz - 66.979600)*(3/4)
mz_M3m2HpK = 36.948058 + (mz - 66.979600)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M4m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 20.97412)/4
mz_Cl = 34.968853 + (mz - 20.97412)/4
mz_m2HpNa = 20.97412 + (mz - 20.97412)/4
mz_m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/4
mz_m2HpK = 36.948058 + (mz - 20.97412)/4
mz_M2mH = -1.007825 + (mz - 20.97412)/2
mz_M2pCl = 34.968853 + (mz - 20.97412)/2
mz_M2m2HpNa = 20.97412 + (mz - 20.97412)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/2
mz_M2m2HpK = 36.948058 + (mz - 20.97412)/2
mz_M3mH = -1.007825 + (mz - 20.97412)*(3/4)
mz_M3pCl = 34.968853 + (mz - 20.97412)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz - 20.97412)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz - 20.97412)*(3/4)
mz_M3m2HpK = 36.948058 + (mz - 20.97412)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
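    # The helpers below cover positive-mode dimer precursors ([2M+H]+, [2M+NH4]+, [2M+Na]+, ...),
    # checking for the corresponding monomer adduct and CH3CN/CH3OH solvent-cluster peaks.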
def Solo_M2pH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 1.007825)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H])
def Solo_M2pHpCH3CN(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 42.034374)/2
mz_Na = 22.98977 + (mz - 42.034374)/2
mz_K = 38.963708 + (mz - 42.034374)/2
mz_HpCH3CN = 42.034374 + (mz - 42.034374)/2
mz_HpCH3OH = 33.034040 + (mz - 42.034374)/2
mz_NapCH3CN = 64.016319 + (mz - 42.034374)/2
mz_NapCH3OH = 55.015985 + (mz - 42.034374)/2
mz_KpCH3CN = 79.990257 + (mz - 42.034374)/2
mz_KpCH3OH = 70.989923 + (mz - 42.034374)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pHpCH3OH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 33.034040)/2
mz_Na = 22.98977 + (mz - 33.034040)/2
mz_K = 38.963708 + (mz - 33.034040)/2
mz_HpCH3CN = 42.034374 + (mz - 33.034040)/2
mz_HpCH3OH = 33.034040 + (mz - 33.034040)/2
mz_NapCH3CN = 64.016319 + (mz - 33.034040)/2
mz_NapCH3OH = 55.015985 + (mz - 33.034040)/2
mz_KpCH3CN = 79.990257 + (mz - 33.034040)/2
mz_KpCH3OH = 70.989923 + (mz - 33.034040)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 47.013304)/2
mz_Na = 22.98977 + (mz - 47.013304)/2
mz_K = 38.963708 + (mz - 47.013304)/2
        mz_HpCH3CN = 42.034374 + (mz - 47.013304)/2
mz_HpCH3OH = 33.034040 + (mz - 47.013304)/2
mz_NapCH3CN = 64.016319 + (mz - 47.013304)/2
mz_NapCH3OH = 55.015985 + (mz - 47.013304)/2
mz_KpCH3CN = 79.990257 + (mz - 47.013304)/2
mz_KpCH3OH = 70.989923 + (mz - 47.013304)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pNH4(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 18.034374)/2
mz_NH4 = 18.034374 + (mz - 18.034374)/2
mz_Na = 22.98977 + (mz - 18.034374)/2
mz_K = 38.963708 + (mz - 18.034374)/2
mz_HpCH3CN = 42.034374 + (mz - 18.034374)/2
mz_HpCH3OH = 33.034040 + (mz - 18.034374)/2
mz_NapCH3CN = 64.016319 + (mz - 18.034374)/2
mz_NapCH3OH = 55.015985 + (mz - 18.034374)/2
mz_KpCH3CN = 79.990257 + (mz - 18.034374)/2
mz_KpCH3OH = 70.989923 + (mz - 18.034374)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_NH4 = peaks.between(mz_NH4 - prec_mass_error, mz_NH4 + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_NH4, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 22.98977)/2
mz_Na = 22.98977 + (mz - 22.98977)/2
mz_NapCH3CN = 64.016319 + (mz - 22.98977)/2
mz_NapCH3OH = 55.015985 + (mz - 22.98977)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_NapCH3CN, valid_NapCH3OH])
def Solo_M2pNapCH3OH(ion_idx, mgf_file) :
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 55.015985)/2
mz_Na = 22.98977 + (mz - 55.015985)/2
mz_NapCH3CN = 64.016319 + (mz - 55.015985)/2
mz_NapCH3OH = 55.015985 + (mz - 55.015985)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_NapCH3CN, valid_NapCH3OH])
def Solo_M2pNapCH3CN(ion_idx, mgf_file) :
mz = mgf_file[ion_idx].get('pepmass')[0]
        peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from datetime import datetime
import pytest
from pandas import read_sql_query, DataFrame
from edfi_google_classroom_extractor.api.teachers import _sync_without_cleanup
from edfi_lms_extractor_lib.api.resource_sync import (
SYNC_COLUMNS_SQL,
SYNC_COLUMNS,
add_hash_and_json_to,
add_sourceid_to,
)
from tests.api.api_helper import prep_expected_sync_df, prep_from_sync_db_df
IDENTITY_COLUMNS = ["courseId", "userId"]
COLUMNS = [
"courseId",
"userId",
"profile.id",
"profile.name.givenName",
"profile.name.familyName",
"profile.name.fullName",
"profile.emailAddress",
"profile.permissions",
"profile.photoUrl",
"profile.verifiedTeacher",
]
CHANGED_TEACHER_BEFORE = [
"1",
"11",
"111",
"givenName1",
"familyName1",
"fullName1",
"<EMAIL>",
"1111",
"http://111",
"False",
]
CHANGED_TEACHER_AFTER = [
"1",
"11",
"111",
"*CHANGED*",
"familyName1",
"fullName1",
"<EMAIL>",
"1111",
"http://111",
"False",
]
UNCHANGED_TEACHER = [
"2",
"22",
"222",
"givenName2",
"familyName2",
"fullName2",
"<EMAIL>",
"2222",
"http://222",
"False",
]
OMITTED_FROM_SYNC_TEACHER = [
"3",
"33",
"333",
"givenName3",
"familyName3",
"fullName3",
"<EMAIL>",
"3333",
"http://333",
"False",
]
NEW_TEACHER = [
"4",
"44",
"444",
"givenName4",
"familyName4",
"fullName4",
"<EMAIL>",
"4444",
"http://444",
"False",
]
SYNC_DATA = [CHANGED_TEACHER_AFTER, UNCHANGED_TEACHER, NEW_TEACHER]
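# The sync payload contains one changed row, one unchanged row and one new row; the teacher that is
# only present in the initial DB load (OMITTED_FROM_SYNC_TEACHER) is deliberately left out.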
def describe_when_testing_sync_with_new_and_missing_and_updated_rows():
@pytest.fixture
def test_db_after_sync(test_db_fixture):
# arrange
INITIAL_TEACHER_DATA = [
CHANGED_TEACHER_BEFORE,
UNCHANGED_TEACHER,
OMITTED_FROM_SYNC_TEACHER,
]
        teachers_initial_df = DataFrame(INITIAL_TEACHER_DATA, columns=COLUMNS)
"""
Usage:
aggregate-makespan.py -i FOLDER [--output FOLDER] [--start-run INT] [--end-run INT]
Required Options:
-i FOLDER --input FOLDER where the experiments are
Options:
-o FOLDER --output FOLDER where the output should go
[default: input]
--start-run INT only include runs starting at start-run
--end-run INT only include runs ending at and including end-run
"""
from docopt import docopt
import pandas as pd
import os
import sys
import re
from datetime import datetime,timedelta
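# Natural-sort helpers so run folders order numerically (Run_2 before Run_10).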
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [ atoi(c) for c in re.split(r'(\d+)', text) ]
args=docopt(__doc__,help=True,options_first=False)
path = args["--input"].rstrip("/")
outPath = args["--input"] if args["--output"] == "input" else args["--output"]
basePath = outPath
rawOutPath = outPath.rstrip("/") + "/raw_total_makespan.csv"
outPath = outPath.rstrip("/") + "/total_makespan.csv"
startRun=int(args["--start-run"]) if args["--start-run"] else 1
endRun=int(args["--end-run"]) if args["--end-run"] else False
df = pd.DataFrame()
df4 = pd.DataFrame()
runs = 1
neCount=0
eCount=0
neCountJob=0
eCountJob=0
expNeCount={}
expECount={}
jobNeCount={}
jobECount={}
totNeCount=0
totECount=0
experiments=[i for i in os.listdir(path) if os.path.isdir(path+"/"+i)]
with open(basePath+"/errors_total_makespan.txt","w") as OutFile:
for exp in experiments:
jobs = [i for i in os.listdir(path+"/"+exp+"/")]
jobs.sort(key=natural_keys)
print(exp,flush=True)
OutFile.write(str(exp)+"\n")
for job in jobs:
print(job,flush=True)
OutFile.write(str(job)+"\n")
runs=len([i for i in os.listdir(path+"/"+exp+"/"+job) if os.path.isdir(path+"/"+exp+"/"+job + "/" + i)])
if endRun:
runs=endRun
if runs > 1:
df1 = pd.DataFrame()
df3 = pd.DataFrame()
for number in range(startRun,runs+1,1):
run = "Run_"+ str(number)
makespanPath = path+"/"+exp+"/"+job + "/" + run + "/output/expe-out/makespan.csv"
fileExists=os.path.exists(makespanPath)
if not fileExists:
print("Doesn't Exist: "+makespanPath,flush=True)
OutFile.write("Doesn't Exist: "+makespanPath+"\n")
neCount+=1
neCountJob+=1
if fileExists:
try:
dfTmp = pd.read_csv(makespanPath,sep=",",header=0)
except:
print("error with file: "+makespanPath,flush=True)
OutFile.write("error with file: "+makespanPath+"\n")
eCount+=1
eCountJob+=1
continue
dfTmp2 = dfTmp.copy()
dfTmp2["run"]=["Run_"+str(number)]
dfTmp2["job"]=job
dfTmp2["exp"]=exp
df1 = pd.concat([df1,dfTmp],axis=0)
                        df3 = pd.concat([df3,dfTmp2],axis=0)
import pandas as pd
import numpy as np
import sys
import traceback
from tqdm.auto import tqdm
import os
import csv
import git
import sys
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
def get_date(x):
return '-'.join(x.split('-')[:3])
def get_fips(x):
return x.split('-')[-1]
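# Pinball (quantile) loss: under-predictions are weighted by the quantile and over-predictions by
# (1 - quantile); at quantile 0.5 it reduces to half the mean absolute error.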
def pinball_loss(y_true, y_pred, quantile = 0.5):
delta = y_true - y_pred
loss_above = np.sum(delta[delta > 0])*(quantile)
loss_below = np.sum(-1*delta[delta < 0])*(1-quantile)
return (loss_above + loss_below) / len(y_true)
def pinball_loss2(y_true, y_pred, size, quantile = 0.5):
delta = y_true - y_pred
if delta > 0:
loss = delta*quantile
else:
loss = -1*delta*(1-quantile)
return loss / size
def evaluate(test_df, user_df):
join_df = test_df.join(user_df, how = 'inner')
if(len(join_df) != len(test_df)):
sys.stderr.write("Submission not right length. \n")
raise Exception("Submission not right length")
if(user_df.isna().sum().sum() > 0 ):
sys.stderr.write("Submission contains NaN. \n")
raise Exception("Submission Contains NaN.")
if(join_df.index.equals(test_df.index) == False):
sys.stderr.write("Incorrect ID format in Submission. \n")
raise Exception("Incorrect ID format.")
total_loss = 0
for column in ['10','20','30','40','50', '60', '70', '80', '90']:
quantile = int(column) / 100.0
loss = pinball_loss(join_df['deaths'].values, join_df[column].values, quantile) / 9.0
total_loss += loss
return total_loss
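# Sketch of a call to evaluate (hypothetical frames, assuming both are indexed by the
# 'date-fips' id string and that user_df carries the quantile columns '10'..'90'):
# score = evaluate(test_df.set_index('id'), user_df.set_index('id'))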
def evaluate2(test_df, user_df):
county_losses = {}
join_df = test_df.join(user_df, how = 'inner')
if(len(join_df) != len(test_df)):
sys.stderr.write("Submission not right length. \n")
raise Exception("Submission not right length")
if(user_df.isna().sum().sum() > 0 ):
sys.stderr.write("Submission contains NaN. \n")
raise Exception("Submission Contains NaN.")
if(join_df.index.equals(test_df.index) == False):
sys.stderr.write("Incorrect ID format in Submission. \n")
raise Exception("Incorrect ID format.")
total_loss = 0
size = len(join_df['deaths'].values)
for index, row in join_df.iterrows():
county = index.split('-')[-1]
county_loss = 0
for column in ['10','20','30','40','50', '60', '70', '80', '90']:
quantile = int(column) / 100.0
# if county == '36061':
# print(f"{row[column]} versus {row['deaths']}")
loss = pinball_loss2(row['deaths'], row[column], size, quantile) / 9.0
county_loss += loss
total_loss += loss
if county in county_losses.keys():
county_losses[county] += county_loss
else:
county_losses[county] = county_loss
return total_loss, county_losses
def evaluator(submission, start_date):
print(f"scoring {submission}")
daily_df = pd.read_csv(f"{homedir}" + '/data/us/covid/nyt_us_counties_daily.csv')
# daily_df = pd.read_csv(f"{homedir}" + '/data/us/covid/nyt_us_counties.csv')
daily_df.loc[daily_df["county"]=='New York City', "fips"]=36061
daily_df.dropna(subset=['fips'], inplace=True)
daily_df['fips'] = daily_df['fips'].astype(int)
end_date = daily_df['date'].max()
daily_df['id'] = daily_df['date'] +'-'+ daily_df['fips'].astype(str)
preperiod_df = daily_df[(daily_df['date'] < start_date)]
daily_df = daily_df[(daily_df['date'] <= end_date) & (daily_df['date'] >= start_date)]
sample_submission = pd.read_csv(f"{homedir}"+ '/sample_submission.csv')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import datetime
from seir.wrapper import MultiPopWrapper
from seir.utils import plot_solution
# read calibration data
actual_hospitalisations = pd.read_excel('data/calibration.xlsx', sheet_name='Hospitalisations')
actual_hospitalisations['Date'] = [pd.to_datetime(x, ).date() for x in actual_hospitalisations['Date']]
# TODO: should check if file is downloaded: if not, download, then use the downloaded file
actual_infections = pd.read_csv(
'https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_provincial_cumulative_timeline_confirmed.csv')
actual_infections.rename(columns={'date': 'Date', 'total': 'Cum. Confirmed'}, inplace=True)
actual_infections.index = pd.to_datetime(actual_infections['Date'], dayfirst=True)
actual_infections = actual_infections.resample('D').mean().ffill().reset_index()
actual_infections['Date'] = [pd.to_datetime(x, dayfirst=True).date() for x in actual_infections['Date']]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 12:17:18 2019
@author: ortutay
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
drug_wd = '/home/ortutay/Dropbox/Misc/HiDucator/DataScienceAcademy/Courses/Advanced_Python_for_datascience/AdvancedPythonPractice/data'
os.chdir(drug_wd)
# Read in data from CSV file
drug_original = pd.read_csv("drug_consumption_original.csv",
index_col = 0)
# Convert category to numeric
# pd.Categorical() == as.factor() in R
a = pd.Categorical(drug_original['age']).codes
print(pd.Categorical(drug_original['age'])[0:10])
print(a[0:10])
drug_original['age'].value_counts().plot.bar()
plt.hist(a)
# center around 0 and spread to 1 standard deviation: technically this is standardization
b = (a - a.mean())/a.std()
plt.hist(b)
b.mean()
b.std()
pd.Series(b).describe()
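# Note on the checks above: a.std() on the NumPy codes defaults to ddof=0, while
# pd.Series(b).describe() reports the sample std (ddof=1), so the printed std is
# close to, but not exactly, 1; np.isclose(b.std(), 1.0) still holds.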
# Lets convert all categorial data to numeric
categories = ['age','sex','edu','country','ethnicity']
drugs = ['Alcohol', 'Amphet', 'Amyl',
'Benzos', 'Caff', 'Cannabis', 'Choc', 'Coke', 'Crack', 'Ecstasy',
'Heroin', 'Ketamine', 'Legalh', 'LSD', 'Meth', 'Mushrooms',
'Nicotine','Semeron', 'VSA']
newdf = pd.DataFrame()
from collections import OrderedDict
import matplotlib
matplotlib.use("Agg", warn=False)
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from .diff_gaussian import laplace_of_gaussian
from .graph_cut import perform_binary_cut
from .helpers import collapse_labels
from .helpers import collapse_small_area
from .binary_segmentation import poisson_deconvolve
from .binary_segmentation import gmm_thresholding
import numpy as np
import scipy as sp
from skimage.color import label2rgb, rgb2gray
from skimage.measure import regionprops
from ..normalization import MacenkoNormalization
from ..normalization import ReinhardNormalization
from ..normalization import VahadaneNormalization
from ..normalization import XuNormalization
from .max_clustering import max_clustering
from .fractal_dimension import fractal_dimension
import pandas as pd
import warnings
__all__ = ("max_clustering",)
def label_nuclei(
nuclei_stain_rgb,
thresholding="gmm",
foreground_threshold=None,
min_radius=3,
max_radius=5,
local_max_search_radius=3,
min_nucleus_area=80,
normalization=None,
draw=True,
savetopng=None,
):
"""Perform segmentation labelling on nuclei.
Better defaults seem to be:
local_max_search_radius=3,
min_radius=3,
max_radius=5,
min_nucleus_area=80,
Parameters
----------
nuclei_stain_rgb: array_like
RGB image matrix of the nuclei stain
thresholding: string
One of gmm/poisson-graph/poisson-hard/custom
foreground_threshold: float
Manual threshold on the gray image, used only when thresholding is "custom"
min_radius, max_radius: int
Minimum/maximum radius of detectable bob by difference of gaussian
local_max_search_radius: int
How many nearby regions should be considered for collapsing into one
min_nucleus_area: float
Anything below this is not a viable Nuclei
normalization: string
None or one of macenko/vahadane/xu
draw: bool
Should draw the segmentation?
savetopng: string
Path to png to store labelled image
"""
assert thresholding in [
"gmm",
"poisson-graph",
"poisson-hard",
"custom",
], "Unsupported thresholding method {}".format(thresholding)
assert normalization in [None, "macenko", "vahadane", "xu"]
if normalization == "macenko":
macenko_fit = MacenkoNormalization()
macenko_fit.fit(np.asarray(nuclei_stain_rgb).astype(np.uint8))
H_channel_v = macenko_fit.get_hematoxylin_channel(nuclei_stain_rgb)
nuclei_stain_rgb = H_channel_v / 255.0
elif normalization == "vahadane":
vahadane_fit = VahadaneNormalization()
vahadane_fit.fit(np.asarray(nuclei_stain_rgb).astype(np.uint8))
H_channel_v = vahadane_fit.get_hematoxylin_channel(nuclei_stain_rgb)
nuclei_stain_rgb = H_channel_v / 255.0
elif normalization == "xu":
xu_fit = XuNormalization()
xu_fit.fit(np.asarray(nuclei_stain_rgb).astype(np.uint8))
H_channel_v = xu_fit.get_hematoxylin_channel(nuclei_stain_rgb)
nuclei_stain_rgb = H_channel_v / 255.0
nuclei_stain_bw = rgb2gray(nuclei_stain_rgb)
if thresholding == "custom":
assert (
foreground_threshold > 0
), "foreground_threshold should be > 0 for custom thresholding"
foreground_mask = sp.ndimage.morphology.binary_fill_holes(
nuclei_stain_bw < foreground_threshold
)
elif thresholding == "gmm":
foreground_threshold, _ = gmm_thresholding(nuclei_stain_bw)
foreground_mask = sp.ndimage.morphology.binary_fill_holes(
nuclei_stain_bw < foreground_threshold
)
elif thresholding == "poisson-graph":
bg, fg, threshold = poisson_deconvolve(nuclei_stain_bw.astype(np.uint8))
foreground_mask = perform_binary_cut(background=bg, foreground=fg)
elif thresholding == "poisson-hard":
bg, fg, threshold = poisson_deconvolve(nuclei_stain_bw.astype(np.uint8))
foreground_mask = sp.ndimage.morphology.binary_fill_holes(
nuclei_stain_bw < threshold
)
log_max, sigma_max = laplace_of_gaussian(
nuclei_stain_bw,
foreground_mask,
sigma_min=min_radius * np.sqrt(2),
sigma_max=max_radius * np.sqrt(2),
)
nuclei_seg_mask, seeds, maxima = max_clustering(
log_max, foreground_mask, local_max_search_radius
)
# Remove small objects
nuclei_seg_mask = collapse_small_area(nuclei_seg_mask, min_nucleus_area).astype(
np.int
)
region_properties = regionprops(
nuclei_seg_mask, intensity_image=nuclei_stain_bw, coordinates="rc"
)
title = "Number of nuclei = {}".format(len(region_properties))
# Display results
if not draw:
return region_properties, foreground_mask
fig = plt.figure(figsize=(20, 10))
ax = plt.subplot(1, 2, 1)
ax.set_axis_off()
ax.imshow(label2rgb(nuclei_seg_mask, nuclei_stain_bw, bg_label=0), origin="lower")
ax.set_title("Nuclei segmentation mask overlay \n {}".format(title))
ax = plt.subplot(1, 2, 2)
ax.imshow(nuclei_stain_rgb)
ax.set_axis_off()
ax.set_xlim([0, nuclei_stain_rgb.shape[1]])
ax.set_ylim([0, nuclei_stain_rgb.shape[0]])
ax.set_title("Nuclei bounding boxes")
for region_property in region_properties:
c = [region_property.centroid[1], region_property.centroid[0], 0]
width = region_property.bbox[3] - region_property.bbox[1] + 1
height = region_property.bbox[2] - region_property.bbox[0] + 1
cur_bbox = {"type": "rectangle", "center": c, "width": width, "height": height}
ax.plot(c[0], c[1], "g+")
mrect = mpatches.Rectangle(
[c[0] - 0.5 * width, c[1] - 0.5 * height],
width,
height,
fill=False,
ec="g",
linewidth=2,
)
ax.add_patch(mrect)
fig.tight_layout()
plt.axis("off")
if savetopng:
fig.savefig(savetopng)
return region_properties, foreground_mask
def extract_features(region_property):
"""Given a region_property extract important features.
Parameters
----------
region_property: skimage.measure.RegionProperties
Returns
-------
features: dict
Dictionary with feature id as key
"""
features_to_keep = [
"area",
"bbox_area",
"convex_area",
"eccentricity",
"equivalent_diameter",
"extent",
"inertia_tensor_eigvals",
"major_axis_length",
"minor_axis_length",
"max_intensity",
"mean_intensity",
"moments_central",
"moments_hu",
"orientation",
"perimeter",
"solidity",
]
features = OrderedDict()
for feature in features_to_keep:
if feature in ["inertia_tensor_eigvals", "moments_hu", "moments_central"]:
moments = region_property[feature]
moments = np.asarray(moments)
for index, moment in enumerate(moments.ravel()):
features["{}_{}".format(feature, index + 1)] = moment
else:
try:
features[feature] = region_property[feature]
except KeyError:
features[feature] = np.nan
# Custom features
# compactness = perimeter^2/area
features["compactness"] = features["perimeter"] ** 2 / features["area"]
# Texture
intensity = region_property["intensity_image"]
features["texture"] = np.var(intensity)
features["fractal_dimension"] = fractal_dimension(intensity)
return features
def summarize_region_properties(region_properties, image):
"""Summarize RegionProperties over an entire image.
Parameters
----------
region_properties: list
List of region properties
Returns
-------
summary_stats: dict
Summarized info
"""
feature_df = []
for region_property in region_properties:
feature_df.append(extract_features(region_property))
feature_df = pd.DataFrame.from_dict(feature_df)
import itertools
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from solarforecastarbiter.reference_forecasts import forecast
def assert_none_or_series(out, expected):
assert len(out) == len(expected)
for o, e in zip(out, expected):
if e is None:
assert o is None
else:
assert_series_equal(o, e)
def test_resample():
index = pd.date_range(start='20190101', freq='15min', periods=5)
arg = pd.Series([1, 0, 0, 0, 2], index=index)
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run transform function twice: first attempting ``table_data`` and
``column``, then falling back to ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run reverse transform function twice: first attempting ``table_data`` and
``column``, then falling back to ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run is valid function twice: first attempting ``table_data`` and
``column``, then falling back to ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance, receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._colums == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return the table data with the constraint columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return the table data with the constraint columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs if and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs if and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
This method raises an error when the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
assert low == ['a']
assert high == 3
assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
assert low == 3
assert high == ['b']
assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == [3]
assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == ['b', 'c']
assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
- Raise error because ``drop`` cannot equal ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
- Raise error because ``drop`` cannot equal ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
Make sure the method succeeds when ``drop`` and ``scalar``
refer to different ends and ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a')
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a '#'
token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a '#'
token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should combine the corresponding column names
with a '#' token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should combine the corresponding column names
with a '#' token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should combine the corresponding column names
with a '#' token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance_drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance_drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_columns`` == ['a#']
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
by a token if there both columns are in that set.
Input:
- Table with two column.
Side Effect:
- ``_diff_columns`` == ['b#a']
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
the values in those columns should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
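# With strict=False, both 'a' and 'b' must be <= 2 for a row to be valid,
# which only holds for the middle row.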
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
the values in those columns should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is ``None``, and high is multi column, then
the values in those columns should all be higher than
the values in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
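# Both 'a' and 'c' must be >= 'b' (strict=False); the first row fails
# because a=1 is below b=4.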
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two columns two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
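# b - a equals 3 in every row, so the diff column stores np.log(3 + 1).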
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two columns two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two columns two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
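# The one second gap equals 1_000_000_000 nanoseconds, so the diff column
# stores np.log(1_000_000_000 + 1).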
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar and each constrained column and store the
logarithm of the distance + 1 in the diff columns.
Input:
- Table with given data.
Output:
- Same table with additional diff columns containing the logarithms of
the distances + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
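# The expected diff values follow high - column + 1: [3, 2, 1] for 'a'
# and [0, -1, -2] for 'b', before taking the logarithm.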
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the distance
between each constrained column and the low scalar and store the
logarithm of the distance + 1 in the diff columns.
Input:
- Table with given data.
Output:
- Same table with additional diff columns containing the logarithms of
the distances + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high column and each low column and store the logarithm of
the distance + 1 in the diff columns.
Input:
- Table with given data.
Output:
- Same table with additional diff columns containing the logarithms of
the distances + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
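# np.exp(np.log(4)) - 1 = 3 is added to column 'a' to rebuild the dropped
# column 'b' as [4, 5, 6].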
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
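# np.exp(np.log(1_000_000_001)) - 1 = 1_000_000_000 nanoseconds, i.e. a one
# second timedelta, is added to 'a' to rebuild the dropped datetime column 'b'.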
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
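# Only the middle row is invalid (b=1 is not greater than a=2), so only that
# value is rebuilt as a + 3 = 5; the valid rows keep their original 'b' values.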
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
- ``_low`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [0, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 0, 0],
'b': [0, -1, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
- ``_high`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/+4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [6, 6, 4],
'b': [7, 7, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_low`` = ['a', 'c'].
- ``_high`` = ['b'].
Input:
- Table with diff columns that contain the constant np.log(1).
Output:
- Same table with the diff columns dropped and the original column
values preserved.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high=['b'], strict=True)
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'c#']
instance._columns_to_reconstruct = ['a', 'c']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(1)] * 3,
'c#': [np.log(1)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_positive(self):
"""Test the ``GreaterThan.reverse_transform`` method for positive constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped diff columns.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(4)],
'b#': [np.log(5), np.log(6), np.log(0)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 0],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_negative(self):
"""Test the ``GreaterThan.reverse_transform`` method for negative constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped diff columns.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [-1, -2, 1],
'b': [-4, -5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(0)],
'b#': [np.log(5), np.log(6), np.log(2)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [-1, -2, 0],
'b': [-4, -5, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_scalar`` variable to ``'low'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop = None
"""
# Run
instance = Positive(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Positive.__init__`` method when ``drop`` is ``True``.
Check that ``_drop`` is set to 'high' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop = 'high'
"""
# Run
instance = Positive(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_scalar`` variable to ``'high'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar = 'high'
- instance._drop = None
"""
# Run
instance = Negative(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Negative.__init__`` method when ``drop`` is ``True``.
Check that ``_drop`` is set to 'low' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar = 'high'
- instance._drop = 'low'
"""
# Run
instance = Negative(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
if data['a'] is None or data['b'] is None:
return None
return data['a'] + data['b']
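# With the DataFrame inputs used in these tests, data['a'] is a pandas Series
# and is never None, so the guard above does not trigger; NaN values simply
# propagate through the sum.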
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance,
import the formula to use for the computation, and
set the specified constraint column.
Input:
- column = 'col'
- formula = new_column
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
assert instance.constraint_columns == ('col', )
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column,
handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_with_nans(self):
"""Test the ``ColumnFormula.is_valid`` method for with a formula that produces nans.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, None],
'c': [5, 7, None]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test__transform(self):
"""Test the ``ColumnFormula._transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_without_dropping_column(self):
"""Test the ``ColumnFormula._transform`` method without dropping the column.
If `drop_column` is false, expect to not drop the constraint column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column, drop_column=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_missing_column(self):
"""Test the ``ColumnFormula._transform`` method when the constraint column is missing.
When ``_transform`` is called with data that does not contain the constraint column,
expect to return the data as-is.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, unchanged (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
- Table data where some values match the desired decimal places and
some do not (pandas.DataFrame)
Output:
- Series of ``True`` and ``False`` values (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
- Table data where some values match the desired decimal places and
some do not (pandas.DataFrame)
Output:
- Series of ``True`` and ``False`` values (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
- Table data where some values are whole numbers and some are not
(pandas.DataFrame)
Output:
- Series of ``True`` and ``False`` values (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded according to the specified ``digits`` value.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded according to the specified ``digits`` value.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded according to the specified ``digits`` value.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
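# At the bounds, data == low maps to logit(0.025) (about -3.66) and
# data == high maps to logit(0.975) (about +3.66), so the transformed values
# stay finite.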
class TestBetween():
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_fit_only_one_datetime_arg(self):
"""Test the ``Between.fit`` method by passing in only one arg as datetime.
If only one of the bound parameters is a datetime type, expect a ValueError.
Input:
- low is a numeric scalar
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
column = 'a'
low = 0.0
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high)
# Run and assert
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
with pytest.raises(ValueError):
instance.fit(table_data)
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
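# The original column 'a' is dropped and replaced by 'a#0.0#1.0', which holds
# its logit-scaled values; 'b' passes through unchanged.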
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_scalar_column(self):
"""Test the ``Between._transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_scalar(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_datetime(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as datetimes.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
- High and Low as datetimes
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_column(self):
"""Test the ``Between._transform`` method with ``low`` as datetime and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as datetime.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` and ``high`` as datetime columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
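# Note on `reverse_transform`: per the docstrings it presumably inverts the logit with a
# sigmoid and then rescales back into the original (low, high) range, roughly:
#     sigmoid = 1.0 / (1.0 + np.exp(-transformed))   # assumed inverse of the logit
#     out = sigmoid * (high - low) + low             # assumed rescaling step
# Again, only a sketch of the assumed behaviour rather than the actual implementation.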
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_datetime_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
- High and low as datetimes
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_series_equal(expected_out['b'], out['b'])
pd.testing.assert_series_equal(expected_out['a'], out['a'].astype('datetime64[ms]'))
def test_reverse_transform_datetime_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as datetime and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-02'),
pd.to_datetime('2020-08-03'),
]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-03'),
pd.to_datetime('2020-08-04'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_series_equal(expected_out['b'], out['b'])
pd.testing.assert_series_equal(expected_out['a'], out['a'].astype('datetime64[ms]'))
def test_reverse_transform_column_column_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as datetime columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``Between.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the valid row and False
for the other two. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=True, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
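# The `strict` flag presumably just switches the comparison operators, e.g. something like
#     valid = (low < data) & (data < high)     # strict=True: boundary values are invalid
#     valid = (low <= data) & (data <= high)   # strict=False: boundary values are valid
# which is why the row equal to ``high`` (1.0) is invalid here but valid in the
# non-strict test that follows.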
def test_is_valid_strict_false(self):
"""Test the ``Between.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the first two rows, and False
for the last one (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=False, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_scalar_column(self):
"""Test the ``Between.is_valid`` method with ``low`` as scalar and ``high`` as a column.
It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the two first rows, False
for the last one. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_scalar(self):
"""Test the ``Between.is_valid`` method with ``low`` as a column and ``high`` as scalar.
It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the second value is smaller than ``low`` and
last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the first row, False
for the last two. (pandas.Series)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 1.9],
'b': [-0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_column(self):
"""Test the ``Between.is_valid`` method with ``low`` and ``high`` as columns.
It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the two first rows, False
for the last one. (pandas.Series)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 0.6]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_high_nans(self):
"""Test the ``Between.is_valid`` method with nan values in low and high columns.
If one of `low` or `high` is NaN, expect it to be ignored in the comparison.
If both are NaN or the constraint column is NaN, return True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9, 1.0],
'b': [0, None, None, 0.4],
'c': [0.5, None, 0.6, None]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True])
pd.testing.assert_series_equal(expected_out, out)
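# The NaN behaviour asserted above could be implemented by OR-ing each comparison with a
# null check on the corresponding bound (a hypothetical sketch, not the real code):
#     satisfies_low = (data > low) | low.isna() | data.isna()
#     satisfies_high = (data < high) | high.isna() | data.isna()
#     valid = satisfies_low & satisfies_high
# so a missing bound is ignored and a missing constraint value counts as valid.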
def test_is_valid_column_nans(self):
"""Test the ``Between.is_valid`` method with nan values in constraint column.
If the constraint column is Nan, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, None],
'b': [0, 0.1, 0.5],
'c': [0.5, 1.5, 0.6]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_scalar_low_nans(self):
"""Test the ``Between.is_valid`` method with ``high`` as scalar and ``low`` containing NaNs.
The NaNs in ``low`` should be ignored.
Input:
- Table with a NaN row
Output:
- The NaN values should be ignored when making comparisons.
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 1.9],
'b': [-0.5, None, None],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_high_nans_datetime(self):
"""Test the ``Between.is_valid`` method with nan values in low and high datetime columns.
If one of `low` or `high` is NaN, expect it to be ignored in the comparison.
If both are NaN or the constraint column is NaN, return True.
Input:
- Table with a row containing NaNs.
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-13'),
pd.to_datetime('2020-08-12'),
pd.to_datetime('2020-08-13'),
| pd.to_datetime('2020-08-14') | pandas.to_datetime |
import numpy as np
import pandas as pd
from scipy.stats import mode
from tqdm import tqdm
from geopy.geocoders import Nominatim
from datetime import datetime
def handle_bornIn(x):
skip_vals = ['16-Mar', '23-May', 'None']
if x not in skip_vals:
return datetime(2012, 1, 1).year - datetime(int(x), 1, 1).year
else:
return 23
def handle_gender(x):
if x == 'male':
return 1
else:
return 0
def handle_memberSince(x):
skip_vals = ['--None']
if pd.isna(x):
return datetime(2012, 1, 1)
elif x not in skip_vals:
return datetime.strptime(x, '%d-%m-%Y')
else:
return datetime(2012, 1, 1)
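# These handlers are presumably applied column-wise with pandas, e.g. (the DataFrame and
# column names below are assumptions inferred from the function names, not taken from
# the dataset itself):
#     bikers_df['age'] = bikers_df['bornIn'].apply(handle_bornIn)
#     bikers_df['gender'] = bikers_df['gender'].apply(handle_gender)
#     bikers_df['member_since'] = bikers_df['member_since'].apply(handle_memberSince)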
def process_tours_df(data_content):
dtype = {}
cols = data_content.tours_df.columns[9:]
for d in cols:
dtype[d] = np.int16
data_content.tours_df = data_content.tours_df.astype(dtype)
data_content.tours_df['area'] = data_content.tours_df['city'] + ' ' + data_content.tours_df['state'] + ' ' + \
data_content.tours_df['pincode'] + ' ' + data_content.tours_df['country']
data_content.tours_df['area'] = data_content.tours_df['area'].apply(lambda x: x.lstrip() if type(x) == str else x)
data_content.tours_df['area'] = data_content.tours_df['area'].apply(lambda x: x.rstrip() if type(x) == str else x)
data_content.tours_df.drop(['city', 'state', 'pincode', 'country'], axis=1, inplace=True)
data_content.tours_df['tour_date'] = data_content.tours_df['tour_date'].apply(
lambda x: datetime(int(x.split('-')[2]), int(x.split('-')[1]), int(x.split('-')[0]), 23, 59))
def process_tour_convoy_df(data_content):
print('Initializing tour_convoy_df...', flush=True)
data_content.tour_convoy_df['total_going'] = 0
data_content.tour_convoy_df['total_not_going'] = 0
data_content.tour_convoy_df['total_maybe'] = 0
data_content.tour_convoy_df['total_invited'] = 0
data_content.tour_convoy_df['fraction_going'] = 0
data_content.tour_convoy_df['fraction_not_going'] = 0
data_content.tour_convoy_df['fraction_maybe'] = 0
known_bikers = set()
lis = ['going', 'not_going', 'maybe', 'invited']
pbar = tqdm(total=data_content.tour_convoy_df.shape[0],
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description("Step 1 of 2")
for idx, _ in data_content.tour_convoy_df.iterrows():
s = [0, 0, 0]
for j, l in enumerate(lis):
if not pd.isna(data_content.tour_convoy_df.loc[idx, l]):
biker = data_content.tour_convoy_df.loc[idx, l].split()
data_content.tour_convoy_df.loc[idx, 'total_' + l] = len(biker)
if j != 3:
s[j] = len(biker)
for bik in biker:
known_bikers.add(bik)
if sum(s) != 0:
for j in range(3):
data_content.tour_convoy_df.loc[idx, 'fraction_' + lis[j]] = s[j] / sum(s)
pbar.update(1)
pbar.close()
mean = data_content.tour_convoy_df['total_invited'].mean()
std = data_content.tour_convoy_df['total_invited'].std()
data_content.tour_convoy_df['fraction_invited'] = data_content.tour_convoy_df['total_invited'].apply(
lambda x: (x - mean) / std)
biker_tour_convoy_df = dict()
for biker in list(known_bikers):
biker_tour_convoy_df[biker] = [[], [], [], []]
pbar = tqdm(total=data_content.tour_convoy_df.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description("Step 2 of 2")
for idx, _ in data_content.tour_convoy_df.iterrows():
for l in lis:
if not pd.isna(data_content.tour_convoy_df.loc[idx, l]):
biker = data_content.tour_convoy_df.loc[idx, l].split()
for bik in biker:
biker_tour_convoy_df[bik][lis.index(l)] += \
[data_content.tour_convoy_df.loc[idx, 'tour_id']]
pbar.update(1)
pbar.close()
for key, _ in biker_tour_convoy_df.items():
for i in range(4):
biker_tour_convoy_df[key][i] = ' '.join(list(set(biker_tour_convoy_df[key][i])))
biker_tour_convoy_df = pd.DataFrame.from_dict(biker_tour_convoy_df, orient='index')
biker_tour_convoy_df.reset_index(inplace=True)
biker_tour_convoy_df.columns = ['biker_id'] + lis
print('tour_convoy_df ready...', flush=True)
return biker_tour_convoy_df
def get_coordinates(locations, data_content):
geolocation_map = {}
locator = Nominatim(user_agent="Kolibri")
for i in tqdm(range(len(locations)),
disable=False,
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}'):
# noinspection PyBroadException
try:
location = locator.geocode(locations[i])
geolocation_map[locations[i]] = [location.latitude, location.longitude]
except:
# Called when there is presumably some noise in the Address location
# noinspection PyBroadException
data_content.noise += [locations[i]]
geolocation_map[locations[i]] = [np.nan, np.nan]
location_df = pd.DataFrame({'location': list(locations),
'latitude': np.array(list(geolocation_map.values()))[:, 0],
'longitude': np.array(list(geolocation_map.values()))[:, 1]})
return geolocation_map, location_df
def initialize_locations(data_content):
# noinspection PyBroadException
try:
location_df = pd.read_csv(data_content.base_dir + 'temp/location.csv')
location_from_csv = True
except:
location_df = None
location_from_csv = False
if location_from_csv:
geolocation = {}
print('Initializing Locations from DataFrame...', flush=True)
for i, l in enumerate(location_df['location'].tolist()):
geolocation[l] = [location_df.loc[i, 'latitude'], location_df.loc[i, 'longitude']]
else:
print('Initializing Locations from Nominatim...', flush=True)
biker_location = data_content.bikers_df['area'].dropna().drop_duplicates().tolist()
geolocation, location_df = get_coordinates(biker_location, data_content)
return geolocation, location_df
def impute_location_from_tour_convoy(data_content):
# From tour_convoy
unk_loc = data_content.bikers_df[pd.isna(data_content.bikers_df['latitude'])]
org_bik = list(set(data_content.convoy_df['biker_id'].drop_duplicates().tolist()).intersection(
data_content.bikers_df['biker_id'].tolist()))
groups = ['going', 'not_going', 'maybe', 'invited']
rest_trs = data_content.tours_df[data_content.tours_df['tour_id'].isin(
data_content.tour_convoy_df['tour_id'])]
rest_con = data_content.convoy_df[data_content.convoy_df['biker_id'].isin(org_bik)]
pbar = tqdm(total=unk_loc.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step ' + str(data_content.current_step) + ' of ' + str(data_content.total_steps))
for idx, _ in unk_loc.iterrows():
if unk_loc.loc[idx, 'biker_id'] in org_bik:
cdf = rest_con[rest_con['biker_id'] == unk_loc.loc[idx, 'biker_id']]
if cdf.shape[0] > 0:
tours = []
for g in groups:
tours += cdf[g].tolist()[0].split()
tours = (' '.join(tours)).split()
trs = rest_trs[rest_trs['tour_id'].isin(tours)]
if trs.shape[0] > 0:
m, _ = mode(trs[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = trs[trs['latitude'] == m[0, 0]].index.tolist()[0]
lat, long, = trs.loc[index, 'latitude'], trs.loc[index, 'longitude']
data_content.bikers_df.loc[idx, 'latitude'] = lat
data_content.bikers_df.loc[idx, 'longitude'] = long
pbar.update(1)
pbar.close()
data_content.current_step += 1
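# Note on the mode-based imputation used here and in the next helper: with this (older)
# scipy API, scipy.stats.mode keeps the reduced axis, so its first return value is a
# 2-D array, e.g.
#     m, _ = mode(np.array([[1.0], [1.0], [2.0]]), axis=0)   # m == array([[1.]])
# which is why the code indexes m[0, 0] to pick the most common latitude.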
def impute_location_from_tours(data_content):
# From tours_df
unk_loc = data_content.bikers_df[pd.isna(data_content.bikers_df['latitude'])]
org_bik = list(set(data_content.tours_df['biker_id'].drop_duplicates().tolist()).intersection(
data_content.bikers_df['biker_id'].tolist()))
pbar = tqdm(total=unk_loc.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step ' + str(data_content.current_step) + ' of ' + str(data_content.total_steps))
for idx, _ in unk_loc.iterrows():
if unk_loc.loc[idx, 'biker_id'] in org_bik:
tours = data_content.tours_df[data_content.tours_df['biker_id'] == unk_loc.loc[idx, 'biker_id']]
if tours.shape[0] > 0:
m, _ = mode(tours[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = tours[tours['latitude'] == m[0, 0]].index.tolist()[0]
lat, long, = tours.loc[index, 'latitude'], tours.loc[index, 'longitude']
if not np.isnan(lat):
data_content.bikers_df.loc[idx, 'latitude'] = lat
data_content.bikers_df.loc[idx, 'longitude'] = long
pbar.update(1)
pbar.close()
data_content.current_step += 1
def impute_lcoation_from_friends(data_content):
biker_df = pd.merge(data_content.bikers_df,
data_content.bikers_network_df, on='biker_id', how='left').copy()
bikers_df_ids = set(data_content.bikers_df['biker_id'].tolist())
# From friends
for i in range(data_content.location_recursion):
pbar = tqdm(total=biker_df.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step ' + str(data_content.current_step) + ' of ' + str(data_content.total_steps))
for idx, rows in biker_df.iterrows():
if not | pd.isna(biker_df.loc[idx, 'friends']) | pandas.isna |
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn import preprocessing
df = pd.read_csv('rating.csv', sep=',', names=['user', 'item', 'rating', 'timestamp'], header=None)
df = df.drop('timestamp', axis=1)
num_items = df.item.nunique()
num_users = df.user.nunique()
print("USERS: {} ITEMS: {}".format(num_users, num_items))
# Pandas loads the training set into a DataFrame with three columns: user, item and rating.
# Normalize in [0, 1]
r = df['rating'].values.astype(float)
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(r.reshape(-1,1))
df_normalized = pd.DataFrame(x_scaled)
df['rating'] = df_normalized
# Pandas DataFrames cannot be fed to a model directly, so the best option is to convert the DataFrame into a matrix:
# Convert DataFrame into a user-item matrix
matrix = df.pivot(index='user', columns='item', values='rating')
matrix.fillna(0, inplace=True)
# Rows in the matrix correspond to users and columns to items;
# therefore entries correspond to ratings given by users to items.
# Our matrix is still a DataFrame, so we need to convert it to a numpy array.
# Users and items ordered as they are in matrix
users = matrix.index.tolist()
items = matrix.columns.tolist()
matrix = matrix.to_numpy()  # DataFrame.as_matrix() was removed in newer pandas; to_numpy() is the replacement
# Network Parameters
num_input = num_items
num_hidden_1 = 10
num_hidden_2 = 5
X = tf.placeholder(tf.float64, [None, num_input])
weights = {
'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_1], dtype=tf.float64)),
'encoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2], dtype=tf.float64)),
'decoder_h1': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_1], dtype=tf.float64)),
'decoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_input], dtype=tf.float64)),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([num_hidden_1], dtype=tf.float64)),
'encoder_b2': tf.Variable(tf.random_normal([num_hidden_2], dtype=tf.float64)),
'decoder_b1': tf.Variable(tf.random_normal([num_hidden_1], dtype=tf.float64)),
'decoder_b2': tf.Variable(tf.random_normal([num_input], dtype=tf.float64)),
}
# Building the encoder
def encoder(x):
# Encoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
# Encoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))
return layer_2
# Building the decoder
def decoder(x):
# Decoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
# Decoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
return layer_2
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Targets are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
loss = tf.losses.mean_squared_error(y_true, y_pred)
optimizer = tf.train.RMSPropOptimizer(0.03).minimize(loss)
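# The training loop is not shown in this snippet; with this TF1-style graph it would
# presumably look something like the sketch below (epoch count and batch size are
# assumptions, not values from the original script):
#     init = tf.global_variables_initializer()
#     with tf.Session() as sess:
#         sess.run(init)
#         for epoch in range(100):
#             for start in range(0, matrix.shape[0], 256):
#                 batch = matrix[start:start + 256]
#                 _, batch_loss = sess.run([optimizer, loss], feed_dict={X: batch})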
predictions = | pd.DataFrame() | pandas.DataFrame |
import math
import pandas as pd
import numpy as np
import pickle
from datetime import datetime
# Library for generating the random numbers
import random
from operator import itemgetter
import sys, os
sys.path.insert(0, os.path.abspath(".."))
from plugin.seg import remove_outliers, helper_object_points, max_min
from plugin.encode import input_nn
from plugin.load_model import object_names_func
from plugin.projections import prespective_project
# Fixed Outliers
def get_outliers(file_path):
# Take the outliers
outliers, others = helper_outliers("{}/Atm/in.csv".format(file_path))
# get the min and max values for each outlier range
for i in outliers:
i["radiusSquare"] = i["X"] ** 2 + i["Y"] ** 2 + i["Z"] ** 2
i["radius"] = np.sqrt(i["radiusSquare"]).round(1)
i = i[i["radius"] > 0]
i["max"] = i.groupby(["lz"])["radius"].transform("max")
i["min"] = i.groupby(["lz"])["radius"].transform("min")
i = i[["lz", "max", "min"]]
i.drop_duplicates(subset=["lz", "max", "min"], inplace=True)
# Save the values
max_rad = []
min_rad = []
for j in range(64):
max_rad.append(i[i["lz"] == j]["max"].tolist()[0])
min_rad.append(i[i["lz"] == j]["min"].tolist()[0])
total = (np.array(max_rad*1125),np.array(min_rad*1125))
out_file = open("../data/outliers.pkl", 'wb')
pickle.dump(total,out_file)
out_file.close()
# Remove the outliers
def helper_outliers(file_path):
# return the list of dataframes
dataframe_lists = []
outliers = []
# Creating the dataframe and selecting the required columns
for i in range(1):
temp_out = | pd.DataFrame() | pandas.DataFrame |
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools runs PCA for the user and '
'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
'for '
'their dataset and whether they can tolerate an X% drop in explained variance to '
'have fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
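# For variables, cos2 is commonly computed as the squared loading, and the contribution
# (%) as cos2 divided by the per-component sum of cos2 across variables, times 100.
# That is the assumption behind the wording above; the exact computation is not shown here.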
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
        elif 'xls' in filename:
            # Assume that the user uploaded an Excel file
            df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            # Assume that the user uploaded a whitespace-delimited text file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
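# Cache the parsed upload as JSON in the 'csv-data' store so every other callback can reuse it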
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
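# Scree plot of the cumulative proportion of explained variance, for the chosen outlier handling and matrix type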
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_cumulative_variance_plot(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Cumulative Scree Plot Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
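# Report the smallest number of PCs whose cumulative explained variance reaches 70% (interpolated, rounded up)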
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_eigenvalue_plot(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
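# Helpers to round a value up/down to a given number of decimals (used for the loading colour range below)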
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
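# Heatmap of loadings (feature vs. principal component), plus the numeric range shown in 'color-range-container'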
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_pc_feature_heatmap(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
    # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
    # explained variance of the two principal components
# print(pca.explained_variance_ratio_)
# Explained variance tells us how much information (variance) can be attributed to each of the principal components
    # loading of each feature in principal components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
    # combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
    size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
z=data, x=features_outlier2, y=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])],
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is a loading: the correlation between a feature and a principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
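# Heatmap of pairwise feature correlations (R²), with or without outliers removed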
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_feature_correlation_heatmap(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the coefficient of determination (R²) between a pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
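# The custom feature dropdown lists the numeric columns only when 'Custom' is selected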
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
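# Variables chosen in 'feature-input' (used as targets rather than PCA features in the custom biplot) offered for colouring the score markers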
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
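# The same target variables offered for scaling the marker size of the biplot scores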
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_size_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# Biplot / loadings plot: PCA scores and loading vectors for every outlier and matrix-type combination
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
        # Select the loading line segments and explained variance matching the chosen outlier/matrix options
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["cos2"] = (loading_outlier_scale_df["PC1"] ** 2) + (
loading_outlier_scale_df["PC2"] ** 2)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
        loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
"""integration test for loanpy.sanity.py (2.0 BETA) for pytest 7.1.1"""
from ast import literal_eval
from datetime import datetime
from os import remove
from pathlib import Path
from pandas import read_csv, DataFrame, RangeIndex
from pandas.testing import assert_frame_equal, assert_series_equal
from pytest import raises
from unittest.mock import call, patch
from loanpy.adrc import Adrc
from loanpy.sanity import (
ArgumentsAlreadyTested,
cache,
check_cache,
eval_adapt,
eval_recon,
eval_all,
eval_one,
get_crossval_data,
get_dist,
get_nse4df,
get_noncrossval_sc,
get_tpr_fpr_opt,
loop_thru_data,
make_stat,
plot_roc,
postprocess,
postprocess2,
phonotactics_predicted,
write_to_cache)
PATH2FORMS = Path(__file__).parent / "input_files" / "forms_3cogs_wot.csv"
PATH2SC_AD = Path(__file__).parent / "input_files" / "sc_ad_3cogs.txt"
PATH2SC_RC = Path(__file__).parent / "input_files" / "sc_rc_3cogs.txt"
MOCK_CACHE_PATH = Path(__file__).parent / "mock_cache.csv"
def test_check_cache():
"""test if DIY cache is initiated correctly and args checked in it"""
# make sure this file does not exist (e.g. from previous tests)
try:
remove(MOCK_CACHE_PATH)
except FileNotFoundError:
pass
# set up first expected outcome, a pandas data frame
exp1 = DataFrame(columns=["arg1", "arg2", "arg3", "opt_tpr",
"optimal_howmany", "opt_tp", "timing", "date"])
# assert first break works: cache not found
check_cache(MOCK_CACHE_PATH, {"arg1": "x", "arg2": "y", "arg3": "z"})
assert_frame_equal(read_csv(MOCK_CACHE_PATH), exp1)
# check if nothing happens if arguments were NOT tested already
# assert that the function runs, does nothing, and returns None
assert check_cache(MOCK_CACHE_PATH,
{"arg1": "a", "arg2": "b", "arg3": "c"}) is None
# tear down
remove(MOCK_CACHE_PATH)
# check if exception is rased if these params were tested already
# set up mock cache with stored args
DataFrame({"arg1": ["x"], "arg2": ["y"], "arg3": ["z"]}).to_csv(
MOCK_CACHE_PATH, encoding="utf-8", index=False)
# assert exception is raised bc args exist in cache already
with raises(ArgumentsAlreadyTested) as aat_mock:
check_cache(MOCK_CACHE_PATH,
{"arg1": "x", "arg2": "y", "arg3": "z"})
assert str(aat_mock.value) == f"These arguments were tested \
already, see {MOCK_CACHE_PATH} line 1! (start counting at 1 in 1st row)"
# tear down
remove(MOCK_CACHE_PATH)
def test_write_to_cache():
"""Test if the writing-part of cache functions."""
init_args_mock = {"forms_csv": "forms.csv", "tgt_lg": "EAH",
"src_lg": "WOT", "crossval": True,
"path2cache": MOCK_CACHE_PATH,
"guesslist": [[2, 4, 6, 8]],
"max_phonotactics": 1, "max_paths": 1, "writesc": False,
"writesc_phonotactics": False, "vowelharmony": False,
"only_documented_clusters": False, "sort_by_nse": False,
"phonotactics_filter": False, "show_workflow": False,
"write": False,
"outname": "viz", "plot_to": None, "plotldnld": False}
DataFrame(
columns=list(init_args_mock) + [
"optimal_howmany",
"opt_tp",
"opt_tpr",
"timing",
"date"]).to_csv(
MOCK_CACHE_PATH,
index=False,
encoding="utf-8") # empty cache
df_exp = DataFrame(
{"forms_csv": "forms.csv", "tgt_lg": "EAH",
"src_lg": "WOT", "crossval": True,
"path2cache": str(MOCK_CACHE_PATH), "guesslist": str([[2, 4, 6, 8]]),
"max_phonotactics": 1, "max_paths": 1, "writesc": False,
"writesc_phonotactics": False, "vowelharmony": False,
"only_documented_clusters": False, "sort_by_nse": False,
"phonotactics_filter": False, "show_workflow": False, "write": False,
"outname": "viz", "plot_to": "None", "plotldnld": False,
"optimal_howmany": 0.501, "opt_tp": 0.6,
"opt_tpr": 0.099, "timing": "00:00:01",
"date": datetime.now().strftime("%x %X")},
        index=RangeIndex(start=0, stop=1, step=1))
import numpy as np
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Series
import pandas._testing as tm
class TestCategoricalConcat:
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = pd.concat([a, b, c], axis=1)
exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
exp = DataFrame(
{
0: [1, 1, np.nan, np.nan],
1: [np.nan, 2, 2, np.nan],
2: [np.nan, np.nan, 3, 3],
},
columns=[0, 1, 2],
index=exp_idx,
)
tm.assert_frame_equal(result, exp)
def test_categorical_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list("abc"), dtype="category")
s2 = Series(list("abd"), dtype="category")
exp = Series(list("abcabd"))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), dtype="category")
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
res = pd.concat([df2, df2])
exp = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame(
{"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
).set_index("B")
result = pd.concat([df2, df2])
expected = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
tm.assert_frame_equal(result, expected)
# wrong categories -> uses concat_compat, which casts to object
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
result = pd.concat([df2, df3])
expected = pd.concat(
[
df2.set_axis(df2.index.astype(object), 0),
df3.set_axis(df3.index.astype(object), 0),
]
)
tm.assert_frame_equal(result, expected)
def test_concat_categorical_tz(self):
# GH-23816
a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
b = Series(["a", "b"], dtype="category")
result = pd.concat([a, b], ignore_index=True)
expected = Series(
[
pd.Timestamp("2017-01-01", tz="US/Pacific"),
pd.Timestamp("2017-01-02", tz="US/Pacific"),
"a",
"b",
]
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_unchanged(self):
# GH-12007
# test fix for when concat on categorical and float
# coerces dtype categorical -> float
df = DataFrame(Series(["a", "b", "c"], dtype="category", name="A"))
ser = Series([0, 1, 2], index=[0, 1, 3], name="B")
result = pd.concat([df, ser], axis=1)
expected = DataFrame(
{
"A": Series(["a", "b", "c", np.nan], dtype="category"),
"B": Series([0, 1, np.nan, 2], dtype="float"),
}
)
tm.assert_equal(result, expected)
def test_categorical_concat_gh7864(self):
# GH 7864
# make sure ordering is preserved
df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
df["grade"] = Categorical(df["raw_grade"])
df["grade"].cat.set_categories(["e", "a", "b"])
df1 = df[0:3]
df2 = df[3:]
tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
dfx = pd.concat([df1, df2])
tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
dfa = df1.append(df2)
        tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
import pandas as pd
import numpy as np
from sys import argv
import csv
# read merged cuffcompare reports of lncRNA transcripts
report = argv[1]
inputgtf = argv[2]
data = pd.read_table(report)
# annotate each transcript from lncRNA-expressing genes, mark overlapped genes
cs = [i for i in data.columns if i.find('ref_gene_id')!=-1]
lncgenes = sorted(list(set(data['cuff_gene_id'])))
anno = np.zeros((len(lncgenes), len(cs)))
agene = np.empty([len(lncgenes), len(cs)], dtype=object)
for i in range(len(lncgenes)):
select = data[data['cuff_gene_id']==lncgenes[i]]
for j in range(len(cs)):
if list(set(select.loc[:,cs[j]]))==["-"]:
agene[i,j]="NA"
else:
            agene[i,j] = ",".join([s for s in set(select.loc[:, cs[j]]) if s != "-"])
anno[i,j] = 1
df1 = pd.DataFrame(agene, columns=["GeneName_ref"+str(i+1) for i in range(len(cs))])
df2 = pd.DataFrame(anno, columns=["Overlap_ref"+str(i+1) for i in range(len(cs))])
df2['novelLoci'] = 0
for i in range(df2.shape[0]):
if sum(df2.iloc[i,:-1])==0:
df2.iloc[i,-1] = "Novel lncRNA"
else:
df2.iloc[i,-1] = "Overlap with annotated genes"
# generate a report of novel lncRNAs and lncRNAs overlapped with annotations
df = pd.concat([df1, df2], axis=1)
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 8 13:54:55 2020
@author: akurnizk
"""
import csv
import math
import time
import sys,os
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import pylab
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
map_dir = r'E:\Maps' # retrieved files from https://viewer.nationalmap.gov/basic/
data_dir = os.path.join('E:\Data')
#%% Interpolate nans in arrays
def nan_helper(y):
"""Helper to handle indices and logical indices of NaNs.
Input:
- y, 1d numpy array with possible NaNs
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
Example:
>>> # linear interpolation of NaNs
>>> nans, x= nan_helper(y)
>>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
"""
return np.isnan(y), lambda z: z.nonzero()[0]
#%% Loading Information from HR Dike Sensors (Make sure times are in EDT)
with open(os.path.join(data_dir,"General Dike Data","USGS 011058798 Herring R at Chequessett Neck Rd.txt")) as f:
reader = csv.reader(f, delimiter="\t")
HR_dike_all_info = list(reader)
HR_dike_lev_disch_cond = HR_dike_all_info[32:]
HR_dike_all_df = pd.DataFrame(HR_dike_lev_disch_cond[2:], columns=HR_dike_lev_disch_cond[0])
HR_dike_all_df.drop(HR_dike_all_df.columns[[0,1,3,5,7,9,11,13]],axis=1,inplace=True)
HR_dike_all_df.columns = ["datetime","Gage height, ft, Ocean side","Discharge, cfs","Gage height, ft, HR side",
"Spec Con, microsiemens/cm, HR side","Spec Con, microsiemens/cm, Ocean side"]
# Make strings numeric
HR_dike_all_df = HR_dike_all_df.replace("Eqp", '', regex=True)
HR_dike_all_df["datetime"] = pd.to_datetime(HR_dike_all_df["datetime"])
HR_dike_all_df["Gage height, ft, Ocean side"] = pd.to_numeric(HR_dike_all_df["Gage height, ft, Ocean side"])
HR_dike_all_df["Discharge, cfs"] = pd.to_numeric(HR_dike_all_df["Discharge, cfs"])
HR_dike_all_df["Gage height, ft, HR side"] = pd.to_numeric(HR_dike_all_df["Gage height, ft, HR side"])
HR_dike_all_df["Spec Con, microsiemens/cm, HR side"] = pd.to_numeric(HR_dike_all_df["Spec Con, microsiemens/cm, HR side"])
HR_dike_all_df["Spec Con, microsiemens/cm, Ocean side"] = pd.to_numeric(HR_dike_all_df["Spec Con, microsiemens/cm, Ocean side"])
# Merging Duplicate Entries
HR_dike_all_df.set_index('datetime',inplace=True)
HR_dike_all_df = HR_dike_all_df.mean(level=0)
HR_dike_all_df.reset_index(inplace=True)
# Remove conductivity columns, convert to metric system
HR_dike_lev_disch_ft = HR_dike_all_df[["datetime","Gage height, ft, Ocean side","Gage height, ft, HR side","Discharge, cfs"]]
HR_dike_lev_disch_m = HR_dike_lev_disch_ft.copy()
HR_dike_lev_disch_m.columns = ["datetime","Gage height, m, Ocean side","Gage height, m, HR side","Discharge, cms"]
HR_dike_lev_disch_m["Gage height, m, Ocean side"] = HR_dike_lev_disch_ft["Gage height, ft, Ocean side"]*0.3048
HR_dike_lev_disch_m["Gage height, m, HR side"] = HR_dike_lev_disch_ft["Gage height, ft, HR side"]*0.3048
HR_dike_lev_disch_m["Discharge, cms"] = HR_dike_lev_disch_ft["Discharge, cfs"]*0.02832
#%% Load HR Geometry and CTD data
out_x_stacked = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_xcoords.csv'), delimiter=',')
out_y_stacked = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_ycoords.csv'), delimiter=',')
elevs_interp = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_elevs.csv'), delimiter=',')
intersect_newxy = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_inscts.csv'), delimiter=',')
min_dist_dx = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_dx.csv'), delimiter=',')
# make top of array the upstream-most section?
out_x_stacked = np.flip(out_x_stacked,axis=0)
out_y_stacked = np.flip(out_y_stacked,axis=0)
elevs_interp = np.flip(elevs_interp,axis=0)
intersect_newxy = np.flip(intersect_newxy,axis=0)
min_dist_dx = np.flip(min_dist_dx,axis=0)
"""
Loading Information from HR CTD Sensors (Update to USGS filtered CNR U/S data)
"""
with open(os.path.join(data_dir,"General Dike Data","Water_Elevation,_NAVD88-File_Import-01-22-2020_15-04.txt")) as f:
reader = csv.reader(f, delimiter="\t")
HR_CTD_all_info = list(reader)
HR_CTD_lev = HR_CTD_all_info[1:]
HR_CTD_all_df = pd.DataFrame(HR_CTD_lev[2:], columns=HR_CTD_lev[0])
# If time needs adjustment
HR_CTD_all_df.drop(HR_CTD_all_df.columns[[0,2,4]],axis=1,inplace=True)
HR_CTD_all_df = HR_CTD_all_df.rename(columns={"Time (MDT to EDT)":"datetime"})
# If time is just mislabled
# HR_CTD_all_df.drop(HR_CTD_all_df.columns[[1,2,4]],axis=1,inplace=True)
# HR_CTD_all_df = HR_CTD_all_df.rename(columns={"Time (America/Denver)":"datetime"})
HR_CTD_all_df["datetime"] = pd.to_datetime(HR_CTD_all_df["datetime"])
HR_CTD_all_df["High Toss Water Level, NAVD88"] = pd.to_numeric(HR_CTD_all_df["High Toss Water Level, NAVD88"])
HR_CTD_all_df["CNR U/S Water Level, NAVD88"] = pd.to_numeric(HR_CTD_all_df["CNR U/S Water Level, NAVD88"])
HR_CTD_all_df["Dog Leg Water Level, NAVD88"] = pd.to_numeric(HR_CTD_all_df["Dog Leg Water Level, NAVD88"])
HR_CTD_all_df["Old Saw Water Level, NAVD88"] = pd.to_numeric(HR_CTD_all_df["Old Saw Water Level, NAVD88"])
# Merging Duplicate Entries
HR_CTD_all_df.set_index('datetime',inplace=True)
HR_CTD_all_df = HR_CTD_all_df.mean(level=0)
HR_CTD_all_df.reset_index(inplace=True)
# Filtering
HR_CTD_all_df["High Toss Water Level, NAVD88"].loc[HR_CTD_all_df["High Toss Water Level, NAVD88"] > 1.00] = np.nan
HR_CTD_all_df["High Toss Water Level, NAVD88"].loc[HR_CTD_all_df["High Toss Water Level, NAVD88"] < -0.67] = np.nan
HR_CTD_all_df["CNR U/S Water Level, NAVD88"].loc[HR_CTD_all_df["CNR U/S Water Level, NAVD88"] < -0.90] = np.nan
HR_CTD_all_df["CNR U/S Water Level, NAVD88"].loc[HR_CTD_all_df["CNR U/S Water Level, NAVD88"] > 0.55] = np.nan
HR_CTD_all_df["Old Saw Water Level, NAVD88"].loc[HR_CTD_all_df["Old Saw Water Level, NAVD88"] < -2.14] = np.nan
#%% CNR U/S Updates from USGS
with open(os.path.join(data_dir,"General Dike Data","CNR_WL_USGS_Fixed.txt")) as f:
reader = csv.reader(f, delimiter="\t")
CNRUS_CTD_elevs = list(reader)
CNRUS_CTD_df = pd.DataFrame(CNRUS_CTD_elevs[3:], columns=["datetime", "CNR U/S Water Level, NAVD88"])
CNRUS_CTD_df["datetime"] = pd.to_datetime(CNRUS_CTD_df["datetime"])
CNRUS_CTD_df["CNR U/S Water Level, NAVD88"] = pd.to_numeric(CNRUS_CTD_df["CNR U/S Water Level, NAVD88"])
# Plot comparison of old and updates
ax = HR_CTD_all_df.plot.scatter(x="datetime", y="CNR U/S Water Level, NAVD88", color = 'Red', label = 'CNR U/S Levels, Original')
CNRUS_CTD_df.plot.scatter(x="datetime", y="CNR U/S Water Level, NAVD88", color = 'Blue', label = 'CNR U/S Levels, USGS Fix', ax=ax)
# Replace old with new in dataframe
HR_CTD_all_df = pd.merge(HR_CTD_all_df, CNRUS_CTD_df, how="left", left_on="datetime", right_on="datetime")
df_cols = list(HR_CTD_all_df)
HR_CTD_all_df[[df_cols[2], df_cols[5]]] = HR_CTD_all_df[[df_cols[5], df_cols[2]]]
HR_CTD_all_df = HR_CTD_all_df.drop(columns=HR_CTD_all_df[[df_cols[-1]]])
HR_CTD_all_df = HR_CTD_all_df.rename(columns={"CNR U/S Water Level, NAVD88_x":"CNR U/S Water Level, NAVD88"})
#%% Load Calculated Dike Discharge Data
with open(os.path.join(data_dir,"Discharge Data","Q_total_dikecalc.csv")) as f:
reader = csv.reader(f, delimiter=",")
Q_total_dikecalc = list(reader)
Q_total_dikecalc_df = pd.DataFrame(Q_total_dikecalc, columns=["datetime", "Discharge, Dike Calc, cms"])
"""Plot diagrams for a single hole."""
import gc
import io
import logging
from datetime import datetime
import matplotlib.dates as mdates
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
__all__ = ["plot_hole"]
BBOX = dict(facecolor="white", alpha=0.75, edgecolor="none", boxstyle="round,pad=0.1") # text boxes
logger = logging.getLogger("pyinfraformat")
def strip_date(x):
    """Parse a date string (ddmmyy, ddmmyyyy, or anything pandas can parse) into a datetime."""
x = str(x)
try:
if len(x) == 6:
date = datetime.strptime(x, "%d%m%y")
elif len(x) == 8:
date = datetime.strptime(x, "%d%m%Y")
else:
date = pd.to_datetime(x)
except ValueError:
date = pd.NaT
return date
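# Illustrative behaviour of strip_date (example values, not taken from the library's tests):
#   strip_date("240515")   -> datetime(2015, 5, 24)   # ddmmyy
#   strip_date("24052015") -> datetime(2015, 5, 24)   # ddmmyyyy
#   strip_date("garbage")  -> pd.NaT                  # unparseable input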
def fig_to_hmtl(fig, clear_memory=True):
    """Serialize a matplotlib figure to SVG markup for embedding in HTML.
    Parameters
    ----------
    fig: matplotlib figure
    clear_memory: bool
    Returns
    -------
    str
        The figure serialized as SVG text.
    """
str_io = io.StringIO()
fig.savefig(str_io, format="svg")
str_io.seek(0)
if clear_memory:
fig.clear()
plt.close()
gc.collect()
return str_io.read()
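# Illustrative usage (assumes `fig` is a matplotlib figure produced by one of the
# plot_* helpers below; the output file name is only an example):
#   svg_markup = fig_to_hmtl(fig)
#   with open("hole.svg", "w") as handle:
#       handle.write(svg_markup)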
def plot_po(one_survey):
"""Plot a diagram of PO (Porakonekairaus) with matplotlib.
Parameters
----------
one_survey : hole object
Returns
-------
figure : matplotlib figure
"""
df = pd.DataFrame(one_survey.survey.data)
if "Soil type" in df.columns: # pylint: disable=unsupported-membership-test
soils = df.dropna(subset=["Soil type"])
else:
soils = None
fig, (ax_left, ax_right) = plt.subplots(
1, 2, sharey=True, figsize=(4, 4), gridspec_kw={"wspace": 0, "width_ratios": [2, 2]}
)
fig.set_figwidth(4)
ax_left.step(df["Time (s)"], df["Depth (m)"], where="post", c="k")
ax_left.invert_yaxis()
ax_left.spines["top"].set_visible(False)
ax_left.spines["left"].set_visible(False)
ax_left.get_yaxis().set_visible(False)
ax_left.set_xlim([110, 0])
plt.setp(ax_left.get_yticklabels(), visible=False)
ax_right.yaxis.set_tick_params(which="both", labelbottom=True)
ax_right.spines["top"].set_visible(False)
ax_right.spines["right"].set_visible(False)
ax_right.spines["bottom"].set_visible(False)
ax_right.set_xticks([])
ax_right.set_title(one_survey.header.date.isoformat().split("T")[0])
ax_left.set_title("{:+.2f}".format(float(one_survey.header["XY"]["Z-start"])))
    ymax_atleast = 5  # hard lower limit for aesthetics
ymax = max(ymax_atleast, ax_right.get_ylim()[0])
ax_right.set_ylim(ymax, 0)
if soils is not None:
for _, row in soils.iterrows():
ax_right.text(0.03, row["Depth (m)"], s=row["Soil type"], bbox=BBOX)
last = df["Depth (m)"].iloc[-1]
# ax_right.plot(0, last, marker="_", zorder=10, clip_on=False, ms=20, c="k")
if hasattr(one_survey.header, "-1") and "Ending" in one_survey.header["-1"]:
ax_right.text(0.03, last, s=one_survey.header["-1"]["Ending"], va="top", bbox=BBOX)
return fig
def plot_pa(one_survey):
"""Plot a diagram of PA (Painokairaus) with matplotlib.
Parameters
----------
one_survey : hole object
Returns
-------
figure : matplotlib figure
"""
df = pd.DataFrame(one_survey.survey.data)
df.loc[df["Load (kN)"] >= 100, "Load (kN)"] = 0
fig, (ax_left, ax_right) = plt.subplots(
1, 2, sharey=True, figsize=(4, 4), gridspec_kw={"wspace": 0, "width_ratios": [1, 3]}
)
fig.set_figwidth(4)
ax_left.step(df["Load (kN)"], df["Depth (m)"], where="post", c="k")
ax_left.invert_yaxis()
ax_left.spines["top"].set_visible(False)
ax_left.spines["left"].set_visible(False)
ax_left.get_yaxis().set_visible(False)
plt.setp(ax_left.get_yticklabels(), visible=False)
ax_left.set_xlim([100, 0])
ax_right.step(df["Rotation of half turns (-)"], df["Depth (m)"], where="post", c="k")
ax_right.yaxis.set_tick_params(which="both", labelbottom=True)
ax_right.spines["top"].set_visible(False)
ax_right.spines["right"].set_visible(False)
ax_right.set_xlim([0, 110])
ax_right.set_title(one_survey.header.date.isoformat().split("T")[0])
ax_left.set_title("{:+.2f}".format(float(one_survey.header["XY"]["Z-start"])))
    ymax_atleast = 5  # hard lower limit for aesthetics
ymax = max(ymax_atleast, ax_right.get_ylim()[0])
ax_right.set_ylim(ymax, 0)
last = df["Depth (m)"].iloc[-1]
ax_right.plot(0, last, marker="_", zorder=10, clip_on=False, ms=20, c="k")
if hasattr(one_survey.header, "-1") and "Ending" in one_survey.header["-1"]:
ax_right.text(8, last, s=one_survey.header["-1"]["Ending"], va="top", bbox=BBOX)
return fig
def plot_hp(one_survey):
"""Plot a diagram of HP (Puristinheijarikairaus) with matplotlib.
Parameters
----------
one_survey : hole object
Returns
-------
figure : matplotlib figure
"""
df = pd.DataFrame(one_survey.survey.data)
fig, (ax_left, ax_right) = plt.subplots(
1, 2, sharey=True, figsize=(4, 4), gridspec_kw={"wspace": 0, "width_ratios": [1, 3]}
)
fig.set_figwidth(4)
ax_left.plot(df["Torque (Nm)"], df["Depth (m)"], c="k")
ax_left.invert_yaxis()
ax_left.spines["top"].set_visible(False)
ax_left.spines["left"].set_visible(False)
ax_left.get_yaxis().set_visible(False)
plt.setp(ax_left.get_yticklabels(), visible=False)
ax_left.set_xlim([200, 0])
ax_left.set_xticks([200, 100, 0])
ax_right.barh(
[0] + list(df["Depth (m)"])[:-1],
df["Blows"],
align="edge",
fill=False,
height=df["Depth (m)"].diff(periods=1),
linewidth=1.5,
)
if "Pressure (MN/m^2)" in df.columns:
ax_right.plot(df["Pressure (MN/m^2)"] * 5, df["Depth (m)"], c="k")
ax_right.yaxis.set_tick_params(which="both", labelbottom=True)
ax_right.set_xlim([0, 110])
ax_right.set_xticks(list(range(0, 120, 20)))
    ymax_atleast = 5  # hard lower limit for aesthetics
ymax = max(ymax_atleast, ax_right.get_ylim()[0])
ax_right.set_ylim(ymax, 0)
locs = ax_right.get_xticks()
y_min, y_max = ax_right.get_ylim()
y = y_min + (y_max - y_min) * 0.005
for x in locs[1:]:
ax_right.text(x, y, s="{:.0f}".format(x / 5), ha="center", va="bottom")
ax_right.spines["top"].set_visible(False)
ax_right.spines["right"].set_visible(False)
ax_right.set_title(one_survey.header.date.isoformat().split("T")[0])
ax_left.set_title("{:+.2f}".format(float(one_survey.header["XY"]["Z-start"])))
last = df["Depth (m)"].iloc[-1]
ax_right.plot(0, last, marker="_", zorder=10, clip_on=False, ms=20, c="k")
if hasattr(one_survey.header, "-1") and "Ending" in one_survey.header["-1"]:
ax_right.text(3, last, s=one_survey.header["-1"]["Ending"], va="top", bbox=BBOX)
return fig
def plot_si(one_survey):
"""Plot a diagram of SI (Siipikairaus) with matplotlib.
Parameters
----------
one_survey : hole object
Returns
-------
figure : matplotlib figure
"""
    df = pd.DataFrame(one_survey.survey.data)
import imageio
import cv2 as cv
import pandas as pd
from musket_core import datasources as datasources, dsconfig as dsconfig
from musket_core.datasets import PredictionItem, ImageKFoldedDataSet, DataSetLoader, NullTerminatable,DataSet
from musket_core import context
import os
import numpy as np
import random
import scipy
import tqdm
import imgaug
import math
from musket_core.coders import classes_from_vals,rle2mask_relative,mask2rle_relative,rle_decode,rle_encode,\
classes_from_vals_with_sep
class NegativeDataSet:
def __init__(self, path):
self.path = path
ldir = os.listdir(path)
ldir.remove(".DS_Store")
self.ids = [x[0:x.index('.')] for x in ldir]
self.exts = [x[x.index('.') + 1:] for x in ldir]
def __getitem__(self, item):
in_ext = self.exts[item]
image = imageio.imread(os.path.join(self.path, self.ids[item] + "." + in_ext))
out = np.zeros(image.shape)
if len(out.shape) < 3:
out = np.expand_dims(out, axis=2)
out = out.astype(np.float32)
out = np.sum(out, axis=2)
out = np.expand_dims(out, axis=2)
#out = out / np.max(out)
return PredictionItem(self.ids[item] + str(), image, out)
class BlendedDataSet:
def __init__(self, child, blendwith, size=(320, 320)):
self.child = child
self.blend = blendwith
self.bids = list(range(len(blendwith)))
self.size = size
self.rnd = random.Random(23232)
def item(self,item,isTrain):
if not isTrain:
return self.child[item]
return self[item]
def __getitem__(self, item):
child_item = self.child[item]
return PredictionItem(child_item.id, self.get_new_image(child_item.x), child_item.y)
def __len__(self):
return len(self.child)
def get_new_image(self, image):
new_image = cv.resize(image, self.size)
if self.rnd.choice([True, False]):
return new_image
bid = self.rnd.choice(self.bids)
bland_image = cv.resize(self.blend[bid].x, self.size)
return cv.addWeighted(new_image, 0.6, bland_image, 0.4, 0)
class TextMaskGenerator:
def __init__(self, textures, band = False):
self.fonts = [x for x in dir(cv) if x.startswith('FONT_')]
self.letters = list(" abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
weights = np.ones(len(self.letters))
weights[0] = 15
weights = weights / np.sum(weights)
self.weights = weights
self.textures = textures
self.band = band
def getFont(self):
return getattr(cv, random.choice(self.fonts))
def generateText(self, lines, lineLength):
text = ""
for lineNum in range(lines):
line = np.random.choice(self.letters, size=lineLength, p=self.weights)
text += "".join(line)
if lineNum == lines - 1:
continue
text += "\n"
return text
def getLineSize(self, text, font, scale, thickness):
lines = text.split("\n")
width = -1
heights = []
baselines = []
for line in lines:
size = cv.getTextSize(text=line, fontFace=font, fontScale=scale, thickness=thickness)
if width < size[0][0]:
width = size[0][0]
heights.append(size[0][1])
baselines.append(size[1])
return width, heights, baselines, lines
def getInitialMask(self):
lines = random.randint(1, 2)
length = random.randint(5, 10)
thickness = 5
scale = 3
text = self.generateText(lines, length)
font = self.getFont()
lineWidth, lineHeights, baselines, lines = self.getLineSize(text, font, scale, thickness)
image = np.zeros((sum(lineHeights) + sum(baselines), lineWidth, 3), np.uint8)
count = 0
linePos = 0
for line in lines:
lineHeight = lineHeights[count]
baseLine = baselines[count]
linePos += lineHeight
cv.putText(image, line, org=(0, linePos), fontFace=font, fontScale=scale, color=(255,255,255), lineType=cv.LINE_8, thickness=thickness)
linePos += baseLine
count += 1
return image
def getImageAndMask(self):
initialMask = []
if self.band:
initialMask = np.ones(( random.randint(100, 200),random.randint(500, 1000), 3), np.uint8)
else:
initialMask = self.getInitialMask()
texture = random.choice(self.textures).x.astype(np.uint8)
maskTexture = initialMask * 0
baseWidth, baseHeight = self.getTextureBaseSize(texture, initialMask)
texture = cv.resize(texture, (baseWidth, baseHeight))
ids = np.indices((initialMask.shape[0], initialMask.shape[1]))
maskTexture[ids[0], ids[1]] = texture[np.mod(ids[0], baseHeight), np.mod(ids[1], baseWidth)]
angle = random.randint(-30, 30)
mask = scipy.ndimage.rotate(initialMask, angle)
maskTexture = scipy.ndimage.rotate(maskTexture, angle)
return maskTexture, mask[:, :, 0]
def getTextureBaseSize(self, texture, mask):
width = mask.shape[1]
height = mask.shape[0]
textureWidth = texture.shape[1]
textureHeight = texture.shape[0]
textureAspectRatio = textureWidth / textureHeight
maskAspectRatio = width / height
multiplier = 0
if textureAspectRatio > maskAspectRatio:
height = width * textureHeight / textureWidth
else:
width = height * textureWidth / textureHeight
return int(width), int(height)
def __len__(self):
return 10
def __getitem__(self, item):
image, mask = self.getImageAndMask()
return PredictionItem(str(item), image, mask)
class DropItemsDataset:
def __init__(self, child, drop_items,times=5):
self.child = child
self.drop_items = drop_items
self.rnd = random.Random(23232)
self.drop_size = 1
self.times = times
def __len__(self):
return len(self.child)
def item(self,item,isTrain):
if not isTrain:
return self.child[item]
return self[item]
def __getitem__(self, item_):
original_item = self.child[item_]
input = original_item.x
mask = self.rescale_mask_to_input(input, original_item.y)
for time in range(self.times):
drop_item, drop_mask = self.get_drop_item()
rescaled_drop_item, rescaled_drop_mask = self.rescale_drop_item(input, drop_item, drop_mask, self.drop_size)
self.apply_drop_item(input, mask, rescaled_drop_item, rescaled_drop_mask, original_item.id + "_" + str(time))
return PredictionItem(original_item.id, input, mask.astype(np.bool))
def apply_drop_item(self, item, mask, drop_item, drop_mask, id=""):
x = self.rnd.randrange(0, item.shape[1])
y = self.rnd.randrange(0, item.shape[0])
self.draw_drop(item, mask, drop_item, drop_mask, x, y, self.rnd.choice(["behind", "above"]), id)
def draw_drop(self, item, mask, drop_item, drop_mask, x, y, mode="above", id=""):
half_width = drop_item.shape[1] // 2
half_height = drop_item.shape[0] // 2
left = x - half_width
right = x + half_width
down = y - half_height
up = y + half_height
if left < 0: left = 0
if down < 0: down = 0
if up > item.shape[0]: up = item.shape[0]
if right > item.shape[1]: right = item.shape[1]
drop_left = left - x + half_width
drop_right = right - x + half_width
drop_down = down - y + half_height
drop_up = up - y + half_height
temp_mask = mask * 0
temp_item = item * 0
temp_mask[down:up, left:right] = drop_mask[drop_down:drop_up,drop_left:drop_right]
temp_item[down:up, left:right]= drop_item[drop_down:drop_up,drop_left:drop_right]
temp_mask = np.where(np.sum(temp_mask, 2))
if mode == "above":
item[temp_mask] = temp_item[temp_mask]
mask[temp_mask] = 0
else:
old_mask = np.where(np.sum(mask, 2))
old_item = item * 0
old_item[old_mask] = item[old_mask] + 0
item[temp_mask] = temp_item[temp_mask]
item[old_mask] = old_item[old_mask]
def rescale_drop_item(self, item, drop_item, drop_mask, scale):
input_area = item.shape[0] * item.shape[1]
target_area = scale * input_area
drop_area = drop_item.shape[0] * drop_item.shape[1]
sqrt = np.sqrt([target_area / drop_area])[0]
new_size = (int(sqrt * drop_item.shape[1]), int(sqrt * drop_item.shape[0]))
new_drop_item = (cv.resize(drop_item / 255, new_size) * 255).astype(np.int32)
return new_drop_item, self.rescale_mask_to_input(new_drop_item, drop_mask)
def mask_box_size(self, mask_):
mask = np.sum(mask_, 2)
hp = np.sum(mask, 0) > 0
vp = np.sum(mask, 1) > 0
return (np.sum(hp), np.sum(vp))
def rescale_mask_to_input(self, input, mask):
rescaled_mask = (cv.resize(mask.astype(np.float32), (input.shape[1], input.shape[0])) > 0.5).astype(np.int32)
rescaled_mask = np.expand_dims(rescaled_mask, 2)
return rescaled_mask
def get_drop_item(self):
drop_item = self.rnd.choice(self.drop_items)
drop_item_id = drop_item.id
drop_mask = (cv.resize(drop_item.y, (drop_item.x.shape[1], drop_item.x.shape[0])) > 0.5).astype(np.int32)
hp = np.sum(drop_mask, 0) > 0
vp = np.sum(drop_mask, 1) > 0
hp = np.where(hp)[0]
vp = np.where(vp)[0]
drop_mask = np.expand_dims(drop_mask, 2)
drop_item = drop_item.x * drop_mask
drop_item = drop_item[vp[0] : vp[-1] + 1, hp[0] : hp[-1] + 1]
drop_mask = drop_mask[vp[0] : vp[-1] + 1, hp[0] : hp[-1] + 1]
return drop_item, drop_mask
class Backgrounds:
def __init__(self,path,erosion=0,augmenters:imgaug.augmenters.Augmenter=None):
self.path=path;
self.rate=0.5
self.augs=augmenters
self.erosion=erosion
self.options=[os.path.join(path,x) for x in os.listdir(self.path)]
def next(self,i,i2):
fl=random.choice(self.options)
im=imageio.imread(fl)
r=cv.resize(im,(i.shape[1],i.shape[0]))
if isinstance(self.erosion,list):
er=random.randint(self.erosion[0],self.erosion[1])
kernel = np.ones((er, er), np.uint8)
i2 = cv.erode(i2, kernel)
elif self.erosion>0:
kernel = np.ones((self.erosion, self.erosion), np.uint8)
i2=cv.erode(i2,kernel)
i2=i2!=0
i2=np.squeeze(i2)
if i.shape[2]!=3:
zr=np.copy(i)
zr[:,:,0:3]=r
zr[i2] = i[i2]
return zr
else:
r[i2] = i[i2]
return r;
def augment_item(self,i):
if self.augs!=None:
b=imgaug.Batch(images=[i.x],
segmentation_maps=[imgaug.SegmentationMapOnImage(i.y, shape=i.y.shape)])
for v in self.augs.augment_batches([b]):
bsa:imgaug.Batch=v
break
xa=bsa.images_aug[0]
xa=cv.resize(xa,(i.x.shape[1],i.x.shape[0]))
ya=bsa.segmentation_maps_aug[0].arr
ya = cv.resize(ya, (i.x.shape[1], i.x.shape[0]))
r = self.next(xa, ya)
return PredictionItem(i.id, r, ya>0.5)
else:
r=self.next(i.x,i.y)
return PredictionItem(i.id,r,i.y)
class WithBackgrounds:
def __init__(self, ds,bg):
self.ds=ds
self.bg=bg
self.rate=bg.rate
def __len__(self):
return len(self.ds)
def item(self,item,isTrain):
if not isTrain:
return self.ds[item]
return self[item]
def __getitem__(self, item):
i=self.ds[item]
if random.random()>self.rate:
return self.bg.augment_item(i)
return i
class CropAndSplit:
def __init__(self,orig,n):
self.ds=orig
self.parts=n
self.lastPos=None
def isPositive(self, item):
pos = item // (self.parts * self.parts);
return self.ds.isPositive(pos)
def __getitem__(self, item):
pos=item//(self.parts*self.parts);
off=item%(self.parts*self.parts)
if pos==self.lastPos:
dm=self.lastImage
else:
dm=self.ds[pos]
self.lastPos=pos
self.lastImage=dm
row=off//self.parts
col=off%self.parts
x,y=dm.x,dm.y
x1,y1= self.crop(row,col,x),self.crop(row,col,y)
vs=PredictionItem(dm.id,x1,y1)
if hasattr(dm, "prediction" ) and dm.prediction is not None:
pred=self.crop(row,col,dm.prediction)
vs.prediction=pred
vs.imageId=dm.id
vs.row=row
vs.col=col
return vs
def crop(self,y,x,image):
h=image.shape[0]//self.parts
w = image.shape[1] // self.parts
return image[h*y:h*(y+1),w*x:w*(x+1), :]
def __len__(self):
return len(self.ds)*self.parts*self.parts
def get_train_item(self,item):
return self[item]
class AspectRatioDataSet:
def __init__(self, child, target_ratio=(1, 1), strategy="center"):
self.child = child
self.target_size = target_ratio
self.strategy = strategy
def __getitem__(self, item):
child_item = self.child[item]
new_size_in = self.get_new_size((child_item.x.shape[0], child_item.x.shape[1]))
new_size_out = self.get_new_size((child_item.y.shape[0], child_item.y.shape[1]))
rnd = 0.5;
if self.strategy == "random":
rnd = random.random();
return PredictionItem(child_item.id, self.get_new_image(new_size_in, child_item.x, rnd), self.get_new_image(new_size_out, child_item.y, rnd))
def __len__(self):
return len(self.child)
def get_new_size(self, input_size):
input_x = input_size[0]
input_y = input_size[1]
target_x = self.target_size[1]
target_y = self.target_size[0]
input_ratio = input_x / input_y
output_ratio = target_x / target_y
if input_ratio > output_ratio:
input_x = round(input_y * output_ratio)
elif input_ratio < output_ratio:
input_y = round(input_x / output_ratio)
return (input_x, input_y)
def get_new_image(self, new_size, image, rnd):
shift_x = 0
shift_y = 0
shift = 0
if new_size[0] != image.shape[0]:
shift = image.shape[0] - new_size[0]
elif new_size[1] != image.shape[1]:
shift = image.shape[1] - new_size[1]
shift = round(rnd * shift)
if new_size[0] != image.shape[0]:
shift_x = shift
elif new_size[1] != image.shape[1]:
shift_y = shift
return image[shift_x:new_size[0] + shift_x, shift_y:new_size[1] + shift_y, :]
class DS_Wrapper:
def __init__(self, name, datasource_cfg, from_directory):
abs_path = os.path.abspath(from_directory)
dirname = os.path.dirname(abs_path)
self.datasource = datasources.GenericDataSource(dsconfig.unpack_config(name, datasource_cfg, dirname))
def __len__(self):
return len(self.datasource)
def __getitem__(self, item):
ds_item = self.datasource[item]
return PredictionItem(ds_item.id, ds_item.inputs[0], ds_item.outputs[0])
def item_by_id(self, id):
item = self.datasource.ids.index(id)
return self[item]
def isPositive(self, item):
return True
class NoChangeDataSetImageClassificationImage(ImageKFoldedDataSet):
def generator_from_indexes(self, indexes,isTrain=True,returnBatch=False):
m = DataSetLoader(self.ds, indexes, self.batchSize,isTrain=isTrain).generator
#aug = self.augmentor(isTrain)
def r():
num = 0;
while True:
for v in m():
r = v;
x,y= np.array([x for x in r.images]), np.array([x for x in r.data[1]])
num=num+1
if returnBatch:
yield x,y,r
else: yield x,y
return NullTerminatable(),NullTerminatable(),r
class AbstractImagePathDataSet(DataSet):
def __init__(self,imagePath):
self.images={}
if imagePath is None:
return;
if isinstance(imagePath, list):
for v in imagePath:
self.addPath(v)
else:
self.addPath(imagePath)
self.dim=3
def addPath(self, imagePath):
current_project_data_path = context.get_current_project_data_path()
print("addPath context path: " + current_project_data_path)
print("addPath image_path: " + imagePath)
p0 = os.path.join(current_project_data_path, imagePath)
print("p0: " + p0)
if not os.path.exists(p0):
p0 = imagePath
ld0 = os.listdir(p0)
for x in ld0:
fp = os.path.join(p0, x)
self.images[x] = fp
self.images[x[:-4]] = fp
def get_value(self,im_id):
im=imageio.imread(self.images[im_id])
if len(im.shape)!=3:
im=np.expand_dims(im, -1)
if im.shape[2]!=self.dim:
if self.dim==3:
im=np.concatenate([im,im,im],axis=2)
elif self.dim==1:
im=np.mean(im,axis=2)
else:
raise ValueError("Unsupported conversion")
return im
def __getitem__(self, item)->PredictionItem:
raise ValueError()
class CSVReferencedDataSet(AbstractImagePathDataSet):
def readCSV(self,csvPath):
try:
self.data=pd.read_csv(os.path.join(context.get_current_project_data_path(), csvPath))
except:
try:
self.data=pd.read_csv(os.path.join(context.get_current_project_data_path(), csvPath),encoding="cp1251")
except:
self.data=pd.read_csv(csvPath)
def ordered_vals(self, imColumn):
return sorted(list(set(self.get_values(imColumn))))
def __init__(self,imagePath,csvPath,imColumn):
super().__init__(imagePath)
self.imColumn=imColumn
if isinstance(csvPath, str):
self.readCSV(csvPath)
else:
self.data=csvPath
self.splitColumns={}
for m in self.data.columns:
parts=m.split("_")
ind=0
for col in parts:
if not col in self.data.columns:
try:
vl=[x[ind] for x in self.data[m].str.split("_")]
self.data.insert(0,col,value=vl)
self.splitColumns[col]=m
except:
pass
ind=ind+1
self.imageIds=self.ordered_vals(imColumn)
def _id(self,item):
imageId=self.imageIds[item]
return imageId
def get_values(self,col):
return self.data[col]
def __len__(self):
return len(self.imageIds)
def get_all_about(self,item):
return self.data[self.data[self.imColumn]==item]
def __getitem__(self, item)->PredictionItem:
raise ValueError()
def _encode_template(self,template_id,template,val):
rs=[]
for q in template:
v=val[q]
rs.append(v)
del val[q]
val[template_id]="_".join(rs)
return val
def _recode(self,seq):
templates={}
for q in self.splitColumns:
r=self.splitColumns[q]
templates[r]=r.split("_")
for item in seq:
for t in templates:
self._encode_template(t,templates[t],item)
return seq
class BinarySegmentationDataSet(CSVReferencedDataSet):
def __init__(self,imagePath,csvPath,imColumn,rleColumn=None,maskShape=None,rMask=True,isRel=False):
super().__init__(imagePath,csvPath,imColumn)
self.rleColumn=rleColumn
self.maskShape=maskShape
self.rMask=rMask
self.rle_decode=rle_decode
self.rle_encode=rle_encode
if isRel:
self.rle_decode=rle2mask_relative
self.rle_encode=mask2rle_relative
def get_target(self,item):
imageId=self.imageIds[item]
vl = self.get_all_about(imageId)
rleString = vl[self.rleColumn].values[0]
if isinstance(rleString, str):
if rleString.strip() != "-1" and len(rleString.strip())>0:
return 1
return 0
def isPositive(self,item):
return self.get_target(item)==True
def get_rleString(self, item):
imageId=item.id
vl = self.get_all_about(imageId)
rleString = vl[self.rleColumn].values[0]
if isinstance(rleString,float):
if math.isnan(rleString):
return ""
return rleString
def get_mask(self, image,imShape):
prediction = None
vl = self.get_all_about(image)
rleString = vl[self.rleColumn].values[0]
if isinstance(rleString, str):
if rleString.strip() != "-1":
shape = (imShape[0], imShape[1])
if self.maskShape is not None:
shape = self.maskShape
if self.rMask:
prediction = self.rle_decode(rleString, (shape[1],shape[0]))
else:
prediction = self.rle_decode(rleString, shape)
prediction=np.rot90(prediction)
prediction=np.flipud(prediction)
prediction = np.expand_dims(prediction,2).astype(np.bool)
if prediction is None:
prediction = np.zeros((imShape[0], imShape[1], 1), dtype=np.bool)
return prediction
def __getitem__(self, item)->PredictionItem:
imageId=self.imageIds[item]
image=self.get_value(imageId)
prediction = self.get_mask(imageId,image.shape)
return PredictionItem(self._id(item),image,prediction)
def _to_rle(self, o):
o = np.flipud(o)
o = np.rot90(o, -1)
rle = self.rle_encode(o)
return rle
def encode(self,item:PredictionItem,encode_y=False,treshold=0.5):
if isinstance(item, PredictionItem):
imageId=item.id
if encode_y:
o=item.y
else:
o=item.prediction
if (o.dtype!=np.bool):
o=o>treshold
rle = self._to_rle(o)
return { self.imColumn:imageId,self.rleColumn:rle}
if isinstance(item, DataSet):
res=[]
for i in tqdm.tqdm(range(len(item)),"Encoding dataset"):
q=item[i]
res.append(self.encode(q,encode_y,treshold))
return pd.DataFrame(res,columns=[self.imColumn,self.rleColumn])
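# Illustrative usage of BinarySegmentationDataSet (paths and column names below are
# assumptions for the sketch, not values required by this module):
#   ds = BinarySegmentationDataSet("train_images", "train.csv", "ImageId", rleColumn="EncodedPixels")
#   item = ds[0]                                        # PredictionItem with a boolean mask as item.y
#   submission = ds.encode(predicted_ds, treshold=0.5)  # DataFrame with ImageId / EncodedPixels columns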
class MultiClassSegmentationDataSet(BinarySegmentationDataSet):
def __init__(self,imagePath,csvPath,imColumn,rleColumn,clazzColumn,maskShape=None,rMask=True,isRel=False):
super().__init__(imagePath,csvPath,imColumn,rleColumn,maskShape,rMask,isRel)
self.clazzColumn=clazzColumn
self.classes=sorted(list(set(self.data[clazzColumn].values)))
self.class2Num={}
self.num2class={}
num=0
for c in self.classes:
self.class2Num[c]=num
self.num2class[num]=c
num=num+1
def encode(self,item:PredictionItem,encode_y=False,treshold=0.5):
if isinstance(item, PredictionItem):
raise NotImplementedError("Multiclass segmentation is only capable to encode datasets")
if isinstance(item, DataSet):
res=[]
for i in tqdm.tqdm(range(len(item)),"Encoding dataset"):
q=item[i]
imageId=q.id
for j in range(len(self.classes)):
if encode_y:
vl=q.y[:,:,j:j+1]>treshold
else:
vl=q.prediction[:,:,j:j+1]>treshold
rle=self._to_rle(vl)
res.append({ self.imColumn:imageId,self.rleColumn:rle,self.clazzColumn:self.num2class[j]})
res=self._recode(res)
clns=[]
for c in self.splitColumns:
if not self.splitColumns[c] in clns:
clns.append(self.splitColumns[c])
r=[self.imColumn,self.clazzColumn,self.rleColumn]
for c in r:
if not c in self.splitColumns:
clns.append(c)
return pd.DataFrame(res,columns=clns)
def get_target(self,item):
imageId=self.imageIds[item]
vl = self.get_all_about(imageId)
for i in range(len(vl)):
rleString = vl[self.rleColumn].values[i]
if isinstance(rleString, str):
if rleString.strip() != "-1":
return 1
return 0
def get_mask(self, image,imShape):
prediction = np.zeros((imShape[0], imShape[1], len(self.classes)), dtype=np.bool)
vl = self.get_all_about(image)
rle=vl[self.rleColumn].values
classes=vl[self.clazzColumn].values
for i in range(len(vl)):
rleString = rle[i]
clazz=classes[i]
if isinstance(rleString, str):
if rleString.strip() != "-1":
shape = (imShape[0], imShape[1])
if self.maskShape is not None:
shape = self.maskShape
if self.rMask:
lp = self.rle_decode(rleString, (shape[1],shape[0]))
else:
lp = self.rle_decode(rleString, shape)
lp=np.rot90(lp)
lp=np.flipud(lp)
prediction[:,:,self.class2Num[clazz]]=lp
return prediction
def __getitem__(self, item)->PredictionItem:
imageId=self.imageIds[item]
image=self.get_value(imageId)
prediction = self.get_mask(imageId,image.shape)
return PredictionItem(imageId,image,prediction)
class InstanceSegmentationDataSet(MultiClassSegmentationDataSet):
def __init__(self, imagePath, csvPath, imColumn, rleColumn, clazzColumn, maskShape=None, rMask=True, isRel=False):
super().__init__(imagePath,csvPath,imColumn,rleColumn,clazzColumn,maskShape,rMask,isRel)
rawClasses = self.data[clazzColumn].values
def refineClass(x):
if "_" in str(x):
return x[:x.index("_")]
else:
return x
self.classes = sorted(list(set([ refineClass(x) for x in rawClasses ])))
self.class2Num = {}
self.num2class = {}
num = 0
for c in self.classes:
self.class2Num[c] = num
self.num2class[num] = c
num = num + 1
def meta(self):
return { 'CLASSES': self.classes }
def get_mask(self, image, imShape):
prediction = []
vl = self.get_all_about(image)
rle = vl[self.rleColumn].values
classes = vl[self.clazzColumn].values
for i in range(len(vl)):
rleString = rle[i]
clazz = classes[i]
if "_" in str(clazz):
clazz = clazz[:clazz.index("_")]
if isinstance(rleString, str):
if rleString.strip() != "-1":
shape = (imShape[0], imShape[1])
if self.maskShape is not None:
shape = self.maskShape
if self.rMask:
lp = self.rle_decode(rleString, (shape[1], shape[0]))
else:
lp = self.rle_decode(rleString, shape)
lp = np.rot90(lp)
lp = np.flipud(lp)
prediction.append((lp, int(clazz)))
return prediction
def encode(self, item, encode_y=False, treshold=0.5):
if isinstance(item, PredictionItem):
raise NotImplementedError("Instance segmentation is only capable to encode datasets")
if isinstance(item, DataSet):
res = []
for i in tqdm.tqdm(range(len(item)), "Encoding dataset"):
q = item[i]
imageId = q.id
for j in range(len(self.classes)):
if encode_y:
vl = q.y[:, :, j:j + 1] > treshold
else:
vl = q.prediction[:, :, j:j + 1] > treshold
labels = vl[0]
masks = vl[2]
if len(labels) != len(masks):
raise Exception(f"{imageId} does not have same ammount of masks and labels")
for i in range(len(masks)):
mask = masks[i]
label = labels[i]
rle = self._to_rle(mask)
res.append({self.imColumn: imageId, self.rleColumn: rle, self.clazzColumn: label})
res = self._recode(res)
clns = []
for c in self.splitColumns:
if not self.splitColumns[c] in clns:
clns.append(self.splitColumns[c])
r = [self.imColumn, self.clazzColumn, self.rleColumn]
for c in r:
if not c in self.splitColumns:
clns.append(c)
            return pd.DataFrame(res, columns=clns)
import pandas as pd
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')
def read_csv_chunks_simple(file, passed_df=None, join_how='inner', sep=',', chunksize=10000, dtype=None, index_col=None):
temp_df = pd.DataFrame()
    if passed_df is not None:
        if join_how == 'inner':
            # Only keep rows whose index already appears in passed_df, then inner-join.
            for c in pd.read_csv(file, sep=sep, chunksize=chunksize, index_col=index_col, dtype=dtype):
                temp_df = temp_df.append(c[c.index.isin(passed_df.index)])
        else:
            for c in pd.read_csv(file, sep=sep, chunksize=chunksize, index_col=index_col, dtype=dtype):
                temp_df = temp_df.append(c)
        return passed_df.join(temp_df, how=join_how)
    else:
        # No reference frame to filter or join on: read the whole file in chunks and return it.
        for c in pd.read_csv(file, sep=sep, chunksize=chunksize, index_col=index_col, dtype=dtype):
            temp_df = temp_df.append(c)
        return temp_df
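# Illustrative usage of read_csv_chunks_simple (file paths below are assumptions):
#   ratings = read_csv_chunks_simple('../input/title_ratings.tsv', sep='\t', index_col='tconst')
#   movies = read_csv_chunks_simple('../input/title_basics.tsv', passed_df=ratings,
#                                   join_how='inner', sep='\t', index_col='tconst')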
# Title Ratings
def clean_ratings(thresh=1000, passed_df=None, join_how='inner'):
file = '../input/title_ratings.tsv'
logging.debug('Cleaning {}'.format(file))
ratings = pd.read_csv(file, delimiter='\t', index_col='tconst')
ratings = ratings[ratings['numVotes'] >= thresh]
if passed_df is not None:
return passed_df.join(ratings, how=join_how)
else:
return ratings
# Title Basics
def clean_basics(passed_df=None, join_how='inner'):
file = '../input/title_basics.tsv'
logging.debug('Cleaning {}'.format(file))
data_types = {'titleType': str, 'primaryTitle': str, 'originalTitle': str, 'isAdult': str, 'startYear': str,
'endYear': str, 'runtimeMinutes': str, 'genres': str}
title_basics = pd.DataFrame(columns=data_types.keys())
if passed_df is not None:
for c in pd.read_csv(file, delimiter='\t', dtype=data_types, chunksize=10000,
index_col='tconst'):
title_basics = title_basics.append(c[(c['titleType'] == 'movie') & c.index.isin(passed_df.index)])
return passed_df.join(title_basics, how=join_how)
else:
for c in pd.read_csv(file, delimiter='\t', dtype=data_types, chunksize=10000,
index_col='tconst'):
title_basics = title_basics.append(c[(c['titleType'] == 'movie')])
return title_basics
# Title AKAS
def clean_akas(passed_df=None, join_how='inner'):
file = '../input/title_akas.tsv'
logging.debug('Cleaning {}'.format(file))
data_types = {'ordering': str, 'title': str, 'region': str, 'language': str,
'types': str, 'attributes': str, 'isOriginalTitle': str}
title_akas = pd.DataFrame()
if passed_df is not None:
        for c in pd.read_csv(file, sep='\t', chunksize=10000, index_col='titleId', dtype=data_types):
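            # NOTE: the remainder of this function is truncated in the source dump.
            # The lines below are a sketch that mirrors clean_basics() above; the exact
            # logic used by the original author is an assumption.
            title_akas = title_akas.append(c[c.index.isin(passed_df.index)])
        return passed_df.join(title_akas, how=join_how)
    else:
        for c in pd.read_csv(file, sep='\t', chunksize=10000, index_col='titleId', dtype=data_types):
            title_akas = title_akas.append(c)
        return title_akas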
import pandas as pd
import json
import time
import numpy as np
from datetime import datetime
import re
import string
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
#nltk.download()
stop_words= set(stopwords.words('english'))
## Cleaning the dataframe
def clean_df(df):
df['created_at'] = pd.to_datetime(df['created_at'])
pub_metz = pd.Series(df["public_metrics"])
pm_df = pd.DataFrame(columns=['retweet_count', 'reply_count', 'like_count', 'quote_count'])
k = []
for d in pub_metz:
k.append(d)
    pub_metz_clean = pd.DataFrame(k)
#!/usr/bin/env python
# coding: utf-8
# # <<<<<<<<<<<<<<<<<<<< Assignment 4 >>>>>>>>>>>>>>>>>>>>>>>>
# ## Student: <NAME>
# ## Exercise 1
# In[1]:
import os
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
import numpy as np
from math import pi
# #### a) Load the SpotifyTop2018 40 V2 data table
# In[2]:
# Loading data
data = pd.read_csv("SpotifyTop2018_40_V2.csv", delimiter = ',', decimal = '.', index_col=0)
print(data)
print(data.head())
# In[3]:
# Normalizing and centering the table
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(data)
data.loc[:,:] = scaled_values
print(data)
datos = data
# #### b) Run the k-means method for k = 3. We will modify the attributes of the KMeans(...) class as follows: max_iter : int, default: 300: maximum number of iterations of the k-means algorithm for a single run. For this exercise use max_iter = 1000. n_init : int, default: 10 (strong forms): number of times the k-means algorithm will be run with different centroid seeds. The final result is the best output of the n_init consecutive runs in terms of within-cluster inertia. For this exercise use n_init = 100.
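# (In code, the configuration described above corresponds to
#  KMeans(n_clusters=3, max_iter=1000, n_init=100); see cell In[12] below.)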
# In[4]:
# Function to draw bar charts for interpreting the clusters
def bar_plot(centros, labels, cluster = None, var = None):
from math import ceil, floor
from seaborn import color_palette
colores = color_palette()
minimo = floor(centros.min()) if floor(centros.min()) < 0 else 0
def inside_plot(valores, labels, titulo):
plt.barh(range(len(valores)), valores, 1/1.5, color = colores)
plt.xlim(minimo, ceil(centros.max()))
plt.title(titulo)
if var is not None:
centros = np.array([n[[x in var for x in labels]] for n in centros])
colores = [colores[x % len(colores)] for x, i in enumerate(labels) if i in var]
labels = labels[[x in var for x in labels]]
if cluster is None:
for i in range(centros.shape[0]):
plt.subplot(1, centros.shape[0], i + 1)
inside_plot(centros[i].tolist(), labels, ('Cluster ' + str(i)))
plt.yticks(range(len(labels)), labels) if i == 0 else plt.yticks([])
else:
pos = 1
for i in cluster:
plt.subplot(1, len(cluster), pos)
inside_plot(centros[i].tolist(), labels, ('Cluster ' + str(i)))
plt.yticks(range(len(labels)), labels) if pos == 1 else plt.yticks([])
pos += 1
# In[5]:
# Function to draw radar charts for interpreting the clusters
def radar_plot(centros, labels):
from math import pi
centros = np.array([((n - min(n)) / (max(n) - min(n)) * 100) if
max(n) != min(n) else (n/n * 50) for n in centros.T])
angulos = [n / float(len(labels)) * 2 * pi for n in range(len(labels))]
angulos += angulos[:1]
ax = plt.subplot(111, polar = True)
ax.set_theta_offset(pi / 2)
ax.set_theta_direction(-1)
plt.xticks(angulos[:-1], labels)
ax.set_rlabel_position(0)
plt.yticks([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
["10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%"],
color = "grey", size = 8)
plt.ylim(-10, 100)
for i in range(centros.shape[1]):
valores = centros[:, i].tolist()
valores += valores[:1]
ax.plot(angulos, valores, linewidth = 1, linestyle = 'solid',
label = 'Cluster ' + str(i))
ax.fill(angulos, valores, alpha = 0.3)
plt.legend(loc='upper right', bbox_to_anchor = (0.1, 0.1))
# #### Strong forms (n_init) and number of iterations (max_iter) [Default]
# In[11]:
# k = 3 clusters with the default settings (max_iter = 300, n_init = 10)
kmedias = KMeans(n_clusters=3, max_iter=300, n_init=10) # Create the estimator instance
kmedias.fit(datos)
centros = np.array(kmedias.cluster_centers_)
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, datos.columns)
# #### Strong forms (n_init) and number of iterations (max_iter) [Modified]
# In[12]:
# Re-fit with the modified settings to see the final cluster assignment
kmedias = KMeans(n_clusters=3, max_iter=1000, n_init=100)
kmedias.fit(datos)
centros = np.array(kmedias.cluster_centers_)
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, datos.columns)
# #### c) Interpret the results of the previous exercise using bar charts and radar charts. Compare them with the results obtained in the previous assignment, where hierarchical clustering was used.
# In[14]:
# Run k-means with 3 clusters
kmedias = KMeans(n_clusters=3)
kmedias.fit(datos)
print(kmedias.predict(datos))
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[15]:
# Plot the bar chart
plt.figure(1, figsize = (12, 8))
bar_plot(centros, datos.columns)
# ### Interpretation
# In[15]:
# Regarding the interpretation, the following can be observed:
# After running k-means with the first set of parameters, n_init = 10 and max_iter = 300,
# we obtain:
# A first cluster (blue) in which speechiness, loudness, tempo and acousticness are the highest
# variables; that is, these songs have a high word count in their lyrics, are acoustic and have
# the highest tempos (beats per minute), and they also score high on time_signature and
# danceability, meaning the songs are danceable and carry many beats per bar; the remaining
# variables are low.
# A second cluster (orange) has high values for danceability, time_signature, energy, loudness,
# valence and instrumentalness: these songs are good for dancing, have many beats per bar per
# minute, good intensity, high loudness per track, and are quite positive as well as
# instrumental. They show some speechiness, but no more than 50%, i.e. a moderate amount of
# words, and their duration in milliseconds is very low — energetic, danceable songs that do not
# last long.
# Cluster 3 (green) holds the songs with the longest duration in milliseconds of all; they show
# some acousticness, as well as loudness and a certain, though low, intensity, and the remaining
# variables are low.
# Second interpretation, with n_init = 100 and max_iter = 1000:
# Here the extra runs stabilise the clusters, and some of the variable profiles change:
# Cluster 1 (blue): speechiness, time_signature, danceability and acousticness remain high, and
# liveness and valence are added. The songs in this cluster are characterised by many beats per
# bar or measure, a high amount of lyrics, being danceable and acoustic, detectable audience
# presence and high musical positivity — cheerful songs that people can dance to and sing along
# with. On the other hand they are short songs, with low duration_ms, and low intensity and
# instrumentalness.
# Cluster 2 (orange): the highest variables are time_signature, danceability, energy, loudness,
# valence and liveness; compared with the default run the change was small and only
# instrumentalness switched. This cluster is characterised by very high beats per bar, songs
# suited to dancing, high track loudness measured in decibels, strongly positive lyrics and high
# audience presence. It is actually very similar to cluster 1, except that it shows energy and
# loudness, which cluster 1 does not; on the other hand it contains the songs with low
# speechiness, acousticness and instrumentalness, and durations in milliseconds longer than those
# of cluster 1 — danceable, positive songs that are perhaps not meant for singing along, given
# their low speechiness.
# Cluster 3 (green): compared with the first default cluster, instrumentalness now appears, tempo
# and duration_ms remain, and there is now a moderate presence of energy and loudness. This
# cluster is represented by the songs with the highest duration in milliseconds, the highest
# instrumentalness and tempo, and a relatively high degree of positivity and audience presence,
# but low intensity and loudness. It has low levels of spoken words and the songs are not
# danceable at all.
# Comparison with hierarchical clustering:
# Cluster 1 (blue) is quite similar; the only slight change is in duration_ms, since in the
# hierarchical clustering more than 25% of the observations showed some duration_ms, although the
# difference is barely noticeable.
# For cluster 2 (orange) there are many changes: in the hierarchical solution only duration_ms,
# tempo and, to some extent, acousticness were high, whereas in k-means those same variables are
# quite low, and the ones that were low in the hierarchical solution are high here, such as
# danceability, energy, etc.
# For cluster 3 (green): the variables that persist are instrumentalness, tempo and a bit of
# loudness, although instrumentalness was high in the hierarchical solution and is below 50% in
# k-means; even so, this cluster is still characterised by fairly instrumental songs with very
# high beats per minute.
# #### d) Plot, using colours, the clusters obtained with k-means (k = 3) on the first two components of the principal plane from Principal Component Analysis.
# In[22]:
pca = PCA(n_components=2)
componentes = pca.fit_transform(datos)
componentes
print(datos.shape)
print(componentes.shape)
plt.scatter(componentes[:, 0], componentes[:, 1],c=kmedias.predict(datos))
plt.xlabel('Component 1')
plt.ylabel('Component 2')
plt.title('K-means with 3 clusters')
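# In[ ]:
# Small addition (not in the original notebook): check how much variance the first two principal
# components actually capture, to judge how faithful the 2-D picture above is.
print(pca.explained_variance_ratio_)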
# #### e) Using 50 runs of the k-means method, plot the "Jambu elbow" (elbow curve) for this example. Does the inter-class inertia stabilise at some point?
# In[7]:
# k = 3 clusters, 50 runs (n_init = 50) with the default max_iter = 300
kmedias = KMeans(n_clusters=3, max_iter=300, n_init=50) # Create the estimator instance
kmedias.fit(datos)
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[10]:
Nc = range(1, 20)
kmediasList = [KMeans(n_clusters=i) for i in Nc]
varianza = [kmediasList[i].fit(datos).inertia_ for i in range(len(kmediasList))]
plt.plot(Nc,varianza,'o-')
plt.xlabel('Number of clusters')
plt.ylabel('Within-cluster variance (intra-class inertia)')
plt.title('Jambu elbow')
# #### Interpretation
# In[11]:
# Here the picture is not very clear: the inertia never really stabilises into a straight line,
# although K = 5, K = 7 or K = 13 could arguably be viable choices.
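# In[ ]:
# A small numerical aid, not part of the original assignment: approximate the elbow from the
# `varianza` values computed above by locating the largest second difference (the sharpest change
# in slope). This is only a heuristic, not a definitive choice of k; it reuses `Nc`, `varianza`
# and `np` from the cells above.
segundas_diferencias = np.diff(varianza, n=2)
k_candidato = Nc[np.argmax(segundas_diferencias) + 1]
print('Candidate elbow (heuristic): k =', k_candidato)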
# ## Exercise #2
# #### a) Repeat exercise 1 with k = 3 on this data table, using only the numeric variables. We will modify the attributes of the KMeans(...) class as follows: max_iter : int, default: 300: maximum number of iterations of the k-means algorithm for a single run — for this exercise use max_iter = 2000. n_init : int, default: 10 (strong forms): number of times the k-means algorithm is run with different centroid seeds; the final result is the best output of the n_init consecutive runs in terms of within-class inertia — for this exercise use n_init = 150.
# #### Load the SAheart data table
# In[43]:
corazon = pd.read_csv('SAheart.csv', delimiter = ';', decimal = '.')
print(corazon)
# In[44]:
# Keep only the numeric variables
corazon2 = pd.DataFrame(data = corazon, columns = (['sbp', 'tobacco', 'ldl', 'adiposity', 'typea', 'obesity',
                                                    'alcohol', 'age']))
print(corazon2)
corazon2.head()
# In[45]:
# Standardize (center and scale) the table
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(corazon2)
corazon2.loc[:,:] = scaled_values
print(corazon2)
# In[25]:
# k = 3 clusters with the modified settings max_iter = 2000 and n_init = 150
kmedias = KMeans(n_clusters=3, max_iter=2000, n_init=150) # Create the estimator instance
kmedias.fit(corazon2)
centros = np.array(kmedias.cluster_centers_)
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, corazon2.columns)
# In[46]:
# Run k-means with 3 clusters
kmedias = KMeans(n_clusters=3)
kmedias.fit(corazon2)
print(kmedias.predict(corazon2))
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[27]:
# Plot the bar chart
plt.figure(1, figsize = (12, 8))
bar_plot(centros, corazon2.columns)
# #### Interpret the results of the previous exercise using bar charts and radar charts. Compare them with the results obtained in the previous assignment, where hierarchical clustering was used.
# In[41]:
# Compared with the previous assignment using hierarchical clustering, the radar plot obtained
# with k-means is practically identical to the previous one: the clusters keep almost all of
# their variables. The biggest change is in the cluster numbering, since in the hierarchical
# solution cluster 1 contained the individuals with a high type-A score and fairly low values on
# the other variables, and with k-means this became cluster 2.
# Cluster 2 in the hierarchical solution — individuals with high sbp, the highest ages, high
# cholesterol, adiposity and marked overweight — became cluster 3 in k-means, and these
# individuals now show higher SBP and adiposity measurements (reaching the maximum) than before.
# Finally, cluster 3 in the hierarchical solution becomes cluster 1 in k-means and keeps the same
# original variables — high cholesterol, adiposity, obesity, relatively high SBP and age — but
# k-means now also includes a high type-A score, rather than the medium level seen in the
# hierarchical clustering. The individuals in this cluster are therefore those with higher ages
# and conditions such as obesity, high cholesterol and adiposity, now combined to a greater
# extent with a type-A factor associated with more competitive, results-oriented people who are
# more stressed and anxious.
# #### Plot, using colours, the clusters obtained with k-means (k = 3) on the first two components of the principal plane from Principal Component Analysis.
# In[47]:
pca = PCA(n_components=2)
componentes = pca.fit_transform(corazon2)
componentes
print(corazon2.shape)
print(componentes.shape)
plt.scatter(componentes[:, 0], componentes[:, 1],c=kmedias.predict(corazon2))
plt.xlabel('Component 1')
plt.ylabel('Component 2')
plt.title('K-means with 3 clusters')
# #### Using 50 runs of the k-means method, plot the "Jambu elbow" for this example. Does the inter-class inertia stabilise at some point?
# In[48]:
# k = 3 clusters, 50 runs (n_init = 50) with the default max_iter = 300
kmedias = KMeans(n_clusters=3, max_iter=300, n_init=50) # Create the estimator instance
kmedias.fit(corazon2)
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[49]:
Nc = range(1, 20)
kmediasList = [KMeans(n_clusters=i) for i in Nc]
varianza = [kmediasList[i].fit(corazon2).inertia_ for i in range(len(kmediasList))]
plt.plot(Nc,varianza,'o-')
plt.xlabel('Number of clusters')
plt.ylabel('Within-cluster variance (intra-class inertia)')
plt.title('Jambu elbow')
# ### Interpretation
# In[ ]:
# Here the picture is not very clear either, but K = 2 or K = 6 could be viable choices.
# #### b) Repeat the previous exercises, this time including the categorical variables using complete disjunctive (one-hot) coding. Are the results better?
# In[28]:
# Recoding helper
def recodificar(col, nuevo_codigo):
col_cod = pd.Series(col, copy=True)
for llave, valor in nuevo_codigo.items():
col_cod.replace(llave, valor, inplace=True)
return col_cod
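# In[ ]:
# Illustrative usage of recodificar (not in the original notebook), assuming famhist is coded as
# 'Present'/'Absent'; the cells below use pd.get_dummies instead, so this is shown on a
# throwaway variable rather than mutating corazon.
famhist_recodificada = recodificar(corazon['famhist'], {'Present': 1, 'Absent': 0})
print(famhist_recodificada.value_counts())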
# #### Load the numeric and categorical variables and convert them to complete disjunctive (one-hot) code
# In[54]:
# Convert the categorical variables into dummies
datos_dummies = | pd.get_dummies(corazon) | pandas.get_dummies |
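# In[ ]:
# A minimal sketch (not part of the original notebook) of how the comparison in b) could
# continue: standardise the dummy-coded table and re-run k-means with the same settings as in a).
# The variable names below are illustrative only.
scaler = StandardScaler()
dummies_escalado = pd.DataFrame(scaler.fit_transform(datos_dummies), columns = datos_dummies.columns)
kmedias_dummies = KMeans(n_clusters=3, max_iter=2000, n_init=150)
kmedias_dummies.fit(dummies_escalado)
centros_dummies = np.array(kmedias_dummies.cluster_centers_)
plt.figure(1, figsize = (10, 10))
radar_plot(centros_dummies, dummies_escalado.columns)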
__all__ = [
"make_hull",
"hull_data",
"hull_rotate",
"get_avs",
"get_mass_properties",
"get_moment_curve",
"get_buoyant_properties",
"get_equ_waterline",
"RHO_WATER",
"fun_avs",
"fun_moment",
"fun_performance",
"var_correct",
]
from numpy import array, average, argmin, concatenate, linspace, meshgrid, sum, max, min, isfinite, trapz
from numpy import abs, sin, cos, pi, NaN, nanmax, nanmin
from pandas import concat, DataFrame
from scipy.optimize import bisect
from warnings import catch_warnings, simplefilter
# Global constants
RHO_WATER = 0.03613 # Density of water (lb / in^3)
RHO_0 = 0.04516 # Filament density (lb / in^3)
G = 386 # Gravitational acceleration (in / s^2)
## Reference stuff
# --------------------------------------------------
var_correct = ["H", "W", "n", "d", "f_com"]
## Boat generator
# --------------------------------------------------
def make_hull(X):
r"""
Args:
X (iterable): [H, W, n, d, f_com] = X
H = height of boat [in]
W = width of boat [in]
n = shape parameter [-]
d = displacement ratio [-]
= weight of boat / weight of max displaced water
f_com = height of COM from bottom, as frac of H [-]
Returns:
DataFrame: Hull points
DataFrame: Mass properties
"""
H, W, n, d, f_com = X
y_com = H * f_com
f_hull = lambda x: H * abs(2 * x / W)**n
g_top = lambda x, y: y <= H
rho_hull = lambda x, y: d * RHO_WATER
df_hull, dx, dy = hull_data(
f_hull,
g_top,
rho_hull,
n_marg=100,
)
df_mass = get_mass_properties(df_hull, dx, dy)
# Override COM
df_mass.x = 0
df_mass.y = y_com
return df_hull, df_mass
## Hull manipulation
# --------------------------------------------------
def hull_data(f_hull, g_top, rho_hull, n_marg=50, x_wid=3, y_lo=+0, y_hi=+4):
r"""
Args:
f_hull (lambda): Function of signature y = f(x);
defines lower surface of boat
g_top (lambda): Function of signature g (bool) = g(x, y);
True indicates within boat
rho_hull (lambda): Function of signature rho = rho(x, y);
returns local hull density
Returns:
DataFrame: x, y, dm boat hull points and element masses
float: dx
float: dy
"""
Xv = linspace(-x_wid, +x_wid, num=n_marg)
Yv = linspace(y_lo, y_hi, num=n_marg)
dx = Xv[1] - Xv[0]
dy = Yv[1] - Yv[0]
Xm, Ym = meshgrid(Xv, Yv)
n_tot = Xm.shape[0] * Xm.shape[1]
Z = concatenate(
(Xm.reshape(n_tot, -1), Ym.reshape(n_tot, -1)),
axis=1,
)
M = array([rho_hull(x, y) * dx * dy for x, y in Z])
I_hull = [
(f_hull(x) <= y) & g_top(x, y)
for x, y in Z
]
Z_hull = Z[I_hull]
M_hull = M[I_hull]
df_hull = DataFrame(dict(
x=Z_hull[:, 0],
y=Z_hull[:, 1],
dm=M_hull,
))
return df_hull, dx, dy
def hull_rotate(df_hull, df_mass, angle):
r"""
Args:
df_hull (DataFrame): Hull points
df_mass (DataFrame): Mass properties, gives COM
angle (float, radians): Heel angle
Returns:
DataFrame: Hull points rotated about COM
"""
R = array([
[cos(angle), -sin(angle)],
[sin(angle), cos(angle)]
])
Z_hull_r = (
df_hull[["x", "y"]].values - df_mass[["x", "y"]].values
).dot(R.T) + df_mass[["x", "y"]].values
return DataFrame(dict(
x=Z_hull_r[:, 0],
y=Z_hull_r[:, 1],
dm=df_hull.dm,
))
## Evaluate hull
# --------------------------------------------------
def get_width(X, y_w):
H, W, n, d, f_com = X
x_w = 0.5 * W * (y_w / H) ** (1 / n)
return x_w
def get_mass_properties(df_hull, dx, dy):
x_com = average(df_hull.x, weights=df_hull.dm)
y_com = average(df_hull.y, weights=df_hull.dm)
mass = df_hull.dm.sum()
return DataFrame(dict(
x=[x_com],
y=[y_com],
dx=[dx],
dy=[dy],
mass=[mass]
))
def get_buoyant_properties(df_hull_rot, df_mass, y_water):
r"""
Args:
df_hull_rot (DataFrame): Rotated hull points
df_mass (DataFrame): Mass properties
y_water (float): Location of waterline (in absolute coordinate system)
"""
dx = df_mass.dx[0]
dy = df_mass.dy[0]
I_under = df_hull_rot.y <= y_water
x_cob = average(df_hull_rot[I_under].x)
y_cob = average(df_hull_rot[I_under].y)
m_water = RHO_WATER * sum(I_under) * dx * dy
F_net = (m_water - df_mass.mass[0]) * G
M_net = G * m_water * (x_cob - df_mass.x[0])
# Righting moment == opposite true moment?
## Could just use moment arm
return DataFrame(dict(
x_cob=[x_cob],
y_cob=[y_cob],
F_net=[F_net],
M_net=[M_net],
))
def get_equ_waterline(df_hull, df_mass, angle, y_l=-4, y_h=4):
r"""
Args:
df_hull (DataFrame): Unrotated hull points
df_mass (DataFrame): Mass properties
angle (float): Angle of rotation
y_l (float): Low-bound for waterline
y_h (float): High-bound for waterline
Returns:
float: Waterline of zero net vertical force (heave-steady)
"""
dx = df_mass.dx[0]
dy = df_mass.dy[0]
df_hull_r = hull_rotate(df_hull, df_mass, angle)
def fun(y_g):
df_buoy = get_buoyant_properties(
df_hull_r,
df_mass,
y_g,
)
return df_buoy.F_net[0]
try:
with catch_warnings():
simplefilter("ignore")
y_star = bisect(fun, y_l, y_h, maxiter=1000)
df_res = get_buoyant_properties(
df_hull_r,
df_mass,
y_star,
)
df_res["y_w"] = y_star
except ValueError:
df_res = DataFrame(dict(y_cob=[NaN], M_net=[NaN], y_w=[NaN]))
return df_res
def get_moment_curve(df_hull, df_mass, a_l=0, a_h=pi, num=50):
r"""Generate a righting moment curve
Args:
df_hull (DataFrame): Unrotated hull points
df_mass (DataFrame): Mass properties
a_l (float): Low-bound for angle
a_h (float): High-bound for angle
num (int): Number of points to sample (linearly) between a_l, a_h
Returns:
DataFrame: Data from angle sweep
"""
df_res = DataFrame()
a_all = linspace(a_l, a_h, num=num)
for angle in a_all:
df_tmp = get_equ_waterline(df_hull, df_mass, angle)
df_tmp["angle"] = angle
df_res = | concat((df_res, df_tmp), axis=0) | pandas.concat |
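    # Assumed continuation (the excerpt above ends mid-function): return the completed angle sweep
    return df_res


# A minimal usage sketch, not part of the original module: build a hull with make_hull and find
# the heave-steady waterline and net righting moment at a 20-degree heel. The parameter values in
# X_demo are illustrative only.
if __name__ == "__main__":
    X_demo = [4.0, 6.0, 2.0, 0.5, 0.4]  # [H, W, n, d, f_com]
    df_hull_demo, df_mass_demo = make_hull(X_demo)
    df_equ_demo = get_equ_waterline(df_hull_demo, df_mass_demo, angle=20 * pi / 180)
    print(df_equ_demo[["y_w", "M_net"]])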